divbuf-0.4.1/.cargo_vcs_info.json0000644000000001360000000000100123150ustar { "git": { "sha1": "106814081c4ba52175235a8ecaff1a5b48f3d141" }, "path_in_vcs": "" }divbuf-0.4.1/.cirrus.yml000064400000000000000000000027441046102023000132240ustar 00000000000000test_task: matrix: - container: image: rust:1.40.0 - container: image: rust:latest - container: image: rustlang/rust:nightly env: # rustc 1.31.0-nightly 2018-10-20 on Travis reports ODR violations within # asan itself. # https://travis-ci.org/asomers/divbuf/jobs/447109902 ASAN_OPTIONS: "detect_odr_violation=0" cargo_cache: folder: $CARGO_HOME/registry test_script: - cargo test --all-features clippy_script: - if rustc --version | grep -q nightly; then - rustup component add clippy - cargo clippy --all-features --all-targets -- -D warnings - fi audit_script: - if rustc --version | grep -q nightly; then - cargo install cargo-audit - cargo audit - fi bench_script: - if rustc --version | grep -q nightly; then - cargo test --all-features --bench '*' - fi fmt_script: - if rustc --version | grep -q nightly; then - rustup component add rustfmt - cargo fmt -- --check - fi asan_script: - if rustc --version | grep -q nightly; then - env RUSTFLAGS="-Z sanitizer=address" cargo test --all-features --tests - fi before_cache_script: rm -rf $CARGO_HOME/registry/index minver_task: depends_on: - test matrix: - container: image: rustlang/rust:nightly cargo_cache: folder: $CARGO_HOME/registry test_script: - cargo update -Zminimal-versions - cargo test before_cache_script: rm -rf $CARGO_HOME/registry/index divbuf-0.4.1/CHANGELOG.md000064400000000000000000000043421046102023000127210ustar 00000000000000## [0.4.1] - 2025-02-20 ### Fixed - Fixed the documentation on docs.rs. No code change. ([#27](https://github.com/asomers/divbuf/pull/27)) ## [0.4.0] - 2025-01-18 ### Added - `DivBufInaccessible` has neither read nor write access, but it is `Clone`, and can be upgraded to an accessible buffer. 
It's useful for recreating a `DivBufMut` that must be thrown away. ([#15](https://github.com/asomers/divbuf/pull/15)) - `DivBufShared::uninitialized` creates a DivBufShared with an uninitialized buffer. It is gated by the `experimental` feature, and won't likely remain in its current form indefinitely. ([#6](https://github.com/asomers/divbuf/pull/6)) - `impl TryFrom for Vec` to extract the backing Vec from a `DivBufShared` if there are no other DivBufs for the same `DivBufShared`. ([#17](https://github.com/asomers/divbuf/pull/17)) ### Changed - MSRV has been raised to 1.40.0 ([#8](https://github.com/asomers/divbuf/pull/8)) ([#10](https://github.com/asomers/divbuf/pull/10)) ([#17](https://github.com/asomers/divbuf/pull/17)) ### Fixed - Eliminated usage of `compare_and_swap`, deprecated in Rust 1.50.0. ([#8](https://github.com/asomers/divbuf/pull/8)) - All public methods now return error tyeps that implement `std::error::Error`. ([#12](https://github.com/asomers/divbuf/pull/12)) ## [0.3.1] - 2018-12-08 ### Changed - `DivBufShared::try` has been replaced with `try_const` since `try` is a reserved word in Rust 2018. https://github.com/asomers/divbuf/pull/5 ## [0.3.0] - 2018-10-27 ### Added - `DivBuf`s and `DivBufMut`s now share ownership of the data, so they can live even after the original `DivBufShared` has been dropped. https://github.com/asomers/divbuf/pull/1 ### Changed - Better Debug formatting for `DivBufShared` https://github.com/asomers/divbuf/pull/2 ## [0.2.0] - 2018-07-01 ### Added - Implemented `Borrow` and `BorrowMut` for `DivBuf` and `DivBufMut` - Added {DivBuf,DivBufMut}::into_chunks - Implemented Eq, Ord, PartialEq, and PartialOrd on `DivBuf` and `DivBufMut`. - Implemented `std::io::Write` for `DivBufMut` - Added `DivBufMut::try_resize` - Implemented `Send` and `Sync` for all `DivBuf` variants. 
### Changed ### Fixed - Don't double-panic during Drop ### Removed divbuf-0.4.1/Cargo.lock0000644000000005740000000000100102760ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 4 [[package]] name = "divbuf" version = "0.4.1" dependencies = [ "lazy_static", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" divbuf-0.4.1/Cargo.toml0000644000000026460000000000100103230ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "divbuf" version = "0.4.1" authors = ["Alan Somers "] build = false exclude = [ ".gitignore", ".travis.yml", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = """ Buffer object that can be recursively divided into smaller buffers """ documentation = "https://docs.rs/divbuf" readme = "README.md" keywords = [ "buffers", "io", "zero-copy", ] categories = ["data-structures"] license = "MIT" repository = "https://github.com/asomers/divbuf" [package.metadata.docs.rs] features = ["experimental"] rustdoc-args = [ "--cfg", "docsrs", ] [features] experimental = [] [lib] name = "divbuf" path = "src/lib.rs" [[test]] name = "functional" path = "tests/functional.rs" [[test]] name = "thread_race" path = "tests/thread_race.rs" [[bench]] name = "hash" path = "benches/hash.rs" [dependencies] [dev-dependencies.lazy_static] version = "1.1" divbuf-0.4.1/Cargo.toml.orig000064400000000000000000000011671046102023000140010ustar 00000000000000[package] name = "divbuf" version = "0.4.1" authors = ["Alan Somers "] edition = "2018" license = "MIT" readme = "README.md" repository = "https://github.com/asomers/divbuf" description = """ Buffer object that can be recursively divided into smaller buffers """ documentation = "https://docs.rs/divbuf" categories = ["data-structures"] keywords = ["buffers", "io", "zero-copy"] exclude = [ ".gitignore", ".travis.yml" ] [package.metadata.docs.rs] features = ["experimental"] rustdoc-args = ["--cfg", "docsrs"] [features] experimental = [] [dependencies] [dev-dependencies] lazy_static = "1.1" divbuf-0.4.1/LICENSE000064400000000000000000000021111046102023000121050ustar 00000000000000The MIT License (MIT) Copyright (c) 2015 Carl Lerche + nix-rust Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without 
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. divbuf-0.4.1/README.md000064400000000000000000000032761046102023000123740ustar 00000000000000# DivBuf A library providing recursively divisible buffer objects. [![Build Status](https://travis-ci.org/asomers/divbuf.svg?branch=master)](https://travis-ci.org/asomers/divbuf) [![Crates.io](https://img.shields.io/crates/v/divbuf.svg?maxAge=2592000)](https://crates.io/crates/divbuf) [Documentation](https://docs.rs/divbuf) The `divbuf` crate provides a buffer structure `DivBufShared` that can be efficiently and safely divided into multiple smaller buffers. Each child buffer can be further divided, recursively. A primitive form of range-locking is available: there is no way to create overlapping mutable child buffers. This crate is similar to [`bytes`](https://crates.io/crates/bytes), but with a few key differences: - `bytes` is a COW crate. Data will be shared between multiple objects as much as possible, but sometimes the data will be copied to new storage. `divbuf`, onthe other hand, will _never_ copy data unless explicitly requested. - A `BytesMut` object always has the sole ability to access its own data. 
Once a `BytesMut` object is created, there is no other way to modify or even read its data that doesn't involve that object. A `DivBufMut`, on the other hand, can share its data with a `DivBufShared`. After that `DivBufMut` has been dropped, another can be created from the `DivBufShared`. - `bytes` contains numerous optimizations for dealing with small arrays, such as inline storage. However, some of those optimizations result in data copying, which is anathema to `divbuf`. `divbuf` therefore does not include them, and is optimized for working with large arrays. # License `divbuf` is distributed under the MIT license. See [LICENSE](LICENSE) for details. divbuf-0.4.1/benches/hash.rs000064400000000000000000000006431046102023000140100ustar 00000000000000#![feature(test)] extern crate test; use std::{collections::hash_map::DefaultHasher, hash::Hash}; use divbuf::*; use test::Bencher; #[bench] fn bench_divbuf_hash(bench: &mut Bencher) { let dbs = DivBufShared::from(vec![0u8; 8]); let db = dbs.try_const().unwrap(); let mut hasher = DefaultHasher::new(); bench.bytes = db.len() as u64; bench.iter(move || { db.hash(&mut hasher); }) } divbuf-0.4.1/release.toml000064400000000000000000000002501046102023000134170ustar 00000000000000pre-release-replacements = [ { file="CHANGELOG.md", search="Unreleased", replace="{{version}}" }, { file="CHANGELOG.md", search="ReleaseDate", replace="{{date}}" } ] divbuf-0.4.1/rustfmt.toml000064400000000000000000000005211046102023000135040ustar 00000000000000brace_style = "SameLineWhere" edition = "2021" format_strings = true group_imports = "StdExternalCrate" imports_granularity = "Crate" imports_layout = "HorizontalVertical" match_block_trailing_comma = false max_width = 80 reorder_impl_items = true reorder_imports = true struct_field_align_threshold = 16 use_field_init_shorthand = true divbuf-0.4.1/src/divbuf.rs000064400000000000000000001154371046102023000135340ustar 00000000000000// vim: tw=80 use std::{ borrow::{Borrow, BorrowMut}, cmp, 
convert::TryFrom, error, fmt::{self, Debug, Formatter}, hash, io, mem, ops, sync::atomic::{ self, AtomicUsize, Ordering::{AcqRel, Acquire, Relaxed, Release}, }, }; #[cfg(target_pointer_width = "64")] const WRITER_SHIFT: usize = 32; #[cfg(target_pointer_width = "64")] const READER_MASK: usize = 0xFFFF_FFFF; #[cfg(target_pointer_width = "32")] const WRITER_SHIFT: usize = 16; #[cfg(target_pointer_width = "32")] const READER_MASK: usize = 0xFFFF; const ONE_WRITER: usize = 1 << WRITER_SHIFT; /// DivBuf's error type #[derive(Clone, Copy, Debug)] pub struct Error(&'static str); impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } impl error::Error for Error {} /// The return type of /// [`DivBuf::into_chunks`](struct.DivBuf.html#method.into_chunks) // LCOV_EXCL_START #[derive(Debug)] pub struct Chunks { db: DivBuf, chunksize: usize, } // LCOV_EXCL_STOP impl Chunks { fn new(db: DivBuf, chunksize: usize) -> Self { Chunks { db, chunksize } } } impl Iterator for Chunks { type Item = DivBuf; fn next(&mut self) -> Option { if self.db.is_empty() { None } else { let size = cmp::min(self.chunksize, self.db.len()); Some(self.db.split_to(size)) } } fn size_hint(&self) -> (usize, Option) { let mut c = self.db.len() / self.chunksize; if self.db.len() % self.chunksize != 0 { c += 1; } (c, Some(c)) } } /// The return type of /// [`DivBufMut::into_chunks`](struct.DivBufMut.html#method.into_chunks) // LCOV_EXCL_START #[derive(Debug)] pub struct ChunksMut { db: DivBufMut, chunksize: usize, } // LCOV_EXCL_STOP impl ChunksMut { fn new(db: DivBufMut, chunksize: usize) -> Self { ChunksMut { db, chunksize } } } impl Iterator for ChunksMut { type Item = DivBufMut; fn next(&mut self) -> Option { if self.db.is_empty() { None } else { let size = cmp::min(self.chunksize, self.db.len()); Some(self.db.split_to(size)) } } fn size_hint(&self) -> (usize, Option) { let mut c = self.db.len() / self.chunksize; if self.db.len() % self.chunksize 
!= 0 { c += 1; } (c, Some(c)) } } // LCOV_EXCL_START #[derive(Debug)] struct Inner { vec: Vec, /// Stores the number of readers in the low half, and writers in the high /// half. accessors: AtomicUsize, /// Stores the total number of DivBufShareds owning this Inner sharers: AtomicUsize, } // LCOV_EXCL_STOP /// The "entry point" to the `divbuf` crate. /// /// A `DivBufShared` owns storage, but cannot directly access it. An /// application will typically create an instance of this class for every /// independent buffer it wants to manage, and then create child `DivBuf`s or /// `DivBufMut`s to access the storage. pub struct DivBufShared { inner: *mut Inner, } /// Provides read-only access to a buffer. /// /// This struct provides a window into a region of a `DivBufShared`, allowing /// read-only access. It can be divided into smaller `DivBuf` using the /// [`split_to`], [`split_off`], [`slice`], [`slice_from`], and [`slice_to`] /// methods. Adjacent `DivBuf`s can be combined using the [`unsplit`] method. /// Finally, a `DivBuf` can be upgraded to a writable [`DivBufMut`] using the /// [`try_mut`] method, but only if there are no other `DivBuf`s that reference /// the same `DivBufShared`. /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut db0 : DivBuf = dbs.try_const().unwrap(); /// assert_eq!(db0, [1, 2, 3, 4, 5, 6][..]); /// ``` /// /// Unlike [`DivBufMut`], a `DivBuf` cannot be used to modify the buffer. The /// following example will fail. 
/// /// ```compile_fail /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3]); /// let mut db = dbs.try_const().unwrap(); /// db[0] = 9; /// ``` /// /// [`DivBufMut`]: struct.DivBufMut.html /// [`slice_from`]: #method.slice_from /// [`slice_to`]: #method.slice_to /// [`slice`]: #method.slice /// [`split_off`]: #method.split_off /// [`split_to`]: #method.split_to /// [`try_mut`]: #method.try_mut /// [`unsplit`]: #method.unsplit // LCOV_EXCL_START #[derive(Debug)] pub struct DivBuf { // inner must be *mut just to support the try_mut method inner: *mut Inner, // In the future, consider optimizing by replacing begin with a pointer begin: usize, len: usize, } // LCOV_EXCL_STOP /// Provides read-write access to a buffer /// /// This structure provides a window into a region of a `DivBufShared`, allowing /// read-write access. It can be divided into smaller `DivBufMut` using the /// [`split_to`], and [`split_off`] methods. Adjacent `DivBufMut`s can be /// combined using the [`unsplit`] method. `DivBufMut` dereferences to a /// `&[u8]`, which is usually the easiest way to access its contents. However, /// it can also be modified using the `Vec`-like methods [`extend`], /// [`try_extend`], [`reserve`], and [`try_truncate`]. Crucially, those methods /// will only work for terminal `DivBufMut`s. That is, a `DivBufMut` whose /// range includes the end of the `DivBufShared`'s buffer. /// /// `divbuf` includes a primitive form of range-locking. It's possible to have /// multiple `DivBufMut`s simultaneously referencing a single `DivBufShared`, /// but there's no way to create overlapping `DivBufMut`s. 
/// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![0; 64]); /// let mut dbm = dbs.try_mut().unwrap(); /// dbm[0..4].copy_from_slice(&b"Blue"[..]); /// ``` /// /// [`split_off`]: #method.split_off /// [`split_to`]: #method.split_to /// [`unsplit`]: #method.unsplit /// [`extend`]: #method.extend /// [`try_extend`]: #method.try_extend /// [`reserve`]: #method.reserve /// [`try_truncate`]: #method.try_truncate // LCOV_EXCL_START #[derive(Debug)] pub struct DivBufMut { inner: *mut Inner, // In the future, consider optimizing by replacing begin with a pointer begin: usize, len: usize, } // LCOV_EXCL_STOP /// Does not offer either read or write access to the data, but can be upgraded /// to a buffer that does. /// /// Useful because it implements `Clone`, and does not block other [`DivBufMut`] /// structures from existing. #[derive(Debug)] pub struct DivBufInaccessible { inner: *mut Inner, // In the future, consider optimizing by replacing begin with a pointer begin: usize, len: usize, } impl DivBufShared { /// Returns the number of bytes the buffer can hold without reallocating. pub fn capacity(&self) -> usize { let inner = unsafe { &*self.inner }; inner.vec.capacity() } /// Returns true if the `DivBufShared` has length 0 pub fn is_empty(&self) -> bool { let inner = unsafe { &*self.inner }; inner.vec.is_empty() } /// Returns the number of bytes contained in this buffer. pub fn len(&self) -> usize { let inner = unsafe { &*self.inner }; inner.vec.len() } #[deprecated(since = "0.3.1", note = "use try_const instead")] #[doc(hidden)] pub fn r#try(&self) -> Result { self.try_const() } /// Try to create a read-only [`DivBuf`] that refers to the entirety of this /// buffer. Will fail if there are any [`DivBufMut`] objects referring to /// this buffer. 
/// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let db = dbs.try_const().unwrap(); /// ``` /// /// [`DivBuf`]: struct.DivBuf.html /// [`DivBufMut`]: struct.DivBufMut.html pub fn try_const(&self) -> Result { let inner = unsafe { &*self.inner }; if inner.accessors.fetch_add(1, Acquire) >> WRITER_SHIFT != 0 { inner.accessors.fetch_sub(1, Relaxed); Err(Error("Cannot create a DivBuf when DivBufMuts are active")) } else { let l = inner.vec.len(); Ok(DivBuf { inner: self.inner, begin: 0, len: l, }) } } /// Try to create a mutable `DivBufMut` that refers to the entirety of this /// buffer. Will fail if there are any [`DivBufMut`] or [`DivBuf`] objects /// referring to this buffer. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let dbm = dbs.try_mut().unwrap(); /// ``` /// /// [`DivBuf`]: struct.DivBuf.html /// [`DivBufMut`]: struct.DivBufMut.html pub fn try_mut(&self) -> Result { let inner = unsafe { &*self.inner }; if inner .accessors .compare_exchange(0, ONE_WRITER, AcqRel, Acquire) .is_ok() { let l = inner.vec.len(); Ok(DivBufMut { inner: self.inner, begin: 0, len: l, }) } else { Err(Error( "Cannot create a new DivBufMut when other DivBufs or \ DivBufMuts are active", )) } } /// Create a new DivBufShared with an uninitialized buffer of specified /// length. /// /// # Safety /// /// This method technically causes undefined behavior, but it works with /// current compilers. A good replacement is not possible until the /// read-buf feature stabilizes. 
/// /// #[cfg(any(feature = "experimental", docsrs))] #[cfg_attr(docsrs, doc(cfg(feature = "experimental")))] #[allow(clippy::uninit_vec)] // Needs the read-buf feature to fix pub fn uninitialized(capacity: usize) -> Self { let mut v = Vec::::with_capacity(capacity); // safe because all possible byte patterns for u8 are valid unsafe { v.set_len(capacity) }; Self::from(v) } /// Creates a new, empty, `DivBufShared` with a specified capacity. /// /// After constructing a `DivBufShared` this way, it can only be populated /// via a child `DivBufMut`. pub fn with_capacity(capacity: usize) -> Self { Self::from(Vec::with_capacity(capacity)) } } impl Debug for DivBufShared { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { let inner = unsafe { &*self.inner }; write!(f, "DivBufShared {{ inner: {:?} }}", inner) } } impl Drop for DivBufShared { fn drop(&mut self) { let inner = unsafe { &*self.inner }; if inner.sharers.fetch_sub(1, Release) == 1 && inner.accessors.load(Relaxed) == 0 { // See the comments in std::sync::Arc::drop for why the fence is // required. atomic::fence(Acquire); unsafe { drop(Box::from_raw(self.inner)); } } } } impl<'a> From<&'a [u8]> for DivBufShared { fn from(src: &'a [u8]) -> DivBufShared { DivBufShared::from(src.to_vec()) } } impl From> for DivBufShared { fn from(src: Vec) -> DivBufShared { let rc = AtomicUsize::new(0); let sharers = AtomicUsize::new(1); let inner = Box::new(Inner { vec: src, accessors: rc, sharers, }); DivBufShared { inner: Box::into_raw(inner), } } } impl TryFrom for Vec { type Error = DivBufShared; /// Attempt to extract the owned storage from a DivBufShared. /// /// This will fail if there are any other living references to this same /// `DivBufShared` (`DivBuf`s, `DivBufMut`s, etc), in which case the /// `DivBufShared` will be returned unmodified. 
/// /// # Examples /// /// ``` /// # use divbuf::*; /// use std::convert::TryInto; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let vec: Vec = dbs.try_into().unwrap(); /// assert_eq!(vec, vec![1, 2, 3, 4, 5, 6]); /// ``` fn try_from(buf: DivBufShared) -> Result { let inner = unsafe { &*buf.inner }; if inner.sharers.load(Acquire) == 1 && inner.accessors.load(Acquire) == 0 { // See the comments in std::sync::Arc::drop for why the fence is // required. atomic::fence(Acquire); let mut inner_box = unsafe { Box::from_raw(buf.inner) }; mem::forget(buf); Ok(mem::take(&mut inner_box.vec)) } else { Err(buf) } } } // DivBufShared owns the target of the `inner` pointer, and no method allows // that pointer to be mutated. Atomic refcounts guarantee that no more than one // writer at a time can modify `inner`'s contents (as long as DivBufMut is Sync, // which it is). Therefore, DivBufShared is both Send and Sync. unsafe impl Send for DivBufShared {} unsafe impl Sync for DivBufShared {} impl DivBuf { /// Create a [`DivBufInaccessible`]. /// /// It may later be upgraded to one of the accessible forms. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let db = dbs.try_const().unwrap(); /// let _dbi = db.clone_inaccessible(); /// ``` pub fn clone_inaccessible(&self) -> DivBufInaccessible { let inner = unsafe { &*self.inner }; let old = inner.sharers.fetch_add(1, Acquire); debug_assert!(old > 0); DivBufInaccessible { inner: self.inner, begin: self.begin, len: self.len, } } /// Break the buffer up into equal sized chunks /// /// Returns an interator which will yield equal sized chunks as smaller /// `DivBuf`s. If the `DivBuf` is not evenly divisible by `size`, then the /// last chunk will be smaller. 
This method is based on /// `slice::chunks`, but with a few key differences: /// /// - It consumes `self` /// - Yields smaller `DivBuf`s, not slices /// - Yields owned objects, not references /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![0, 1, 2, 3, 4, 5, 6, 7]); /// let db = dbs.try_const().unwrap(); /// let mut iter = db.into_chunks(3); /// assert_eq!(&iter.next().unwrap()[..], &[0, 1, 2][..]); /// assert_eq!(&iter.next().unwrap()[..], &[3, 4, 5][..]); /// assert_eq!(&iter.next().unwrap()[..], &[6, 7][..]); /// assert!(&iter.next().is_none()) /// ``` pub fn into_chunks(self, size: usize) -> Chunks { assert!(size != 0); Chunks::new(self, size) } /// Returns true if the `DivBuf` has length 0 pub fn is_empty(&self) -> bool { self.len == 0 } /// Get the length of this `DivBuf`, _not_ the underlying storage pub fn len(&self) -> usize { self.len } /// Create a new DivBuf that spans a subset of this one. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let db0 = dbs.try_const().unwrap(); /// let db1 = db0.slice(1, 4); /// assert_eq!(db1, [2, 3, 4][..]); /// ``` pub fn slice(&self, begin: usize, end: usize) -> DivBuf { assert!(begin <= end); assert!(end <= self.len); let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(1, Relaxed); debug_assert!(old_accessors & READER_MASK > 0); DivBuf { inner: self.inner, begin: self.begin + begin, len: end - begin, } } /// Creates a new DivBuf that spans a subset of this one, including the end /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let db0 = dbs.try_const().unwrap(); /// let db1 = db0.slice_from(3); /// assert_eq!(db1, [4, 5, 6][..]); /// ``` pub fn slice_from(&self, begin: usize) -> DivBuf { self.slice(begin, self.len()) } /// Creates a new DivBuf that spans a subset of self, including the /// beginning /// /// # Examples /// ``` /// # 
use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let db0 = dbs.try_const().unwrap(); /// let db1 = db0.slice_to(3); /// assert_eq!(db1, [1, 2, 3][..]); /// ``` pub fn slice_to(&self, end: usize) -> DivBuf { self.slice(0, end) } /// Splits the DivBuf into two at the given index. /// /// Afterwards self contains elements `[0, at)`, and the returned DivBuf /// contains elements `[at, self.len)`. /// /// This is an O(1) operation /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut db0 = dbs.try_const().unwrap(); /// let db1 = db0.split_off(4); /// assert_eq!(db0, [1, 2, 3, 4][..]); /// assert_eq!(db1, [5, 6][..]); /// ``` pub fn split_off(&mut self, at: usize) -> DivBuf { assert!(at <= self.len, "Can't split past the end"); let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(1, Relaxed); debug_assert!(old_accessors & READER_MASK > 0); let right_half = DivBuf { inner: self.inner, begin: self.begin + at, len: self.len - at, }; self.len = at; right_half } /// Splits the DivBuf into two at the given index. /// /// Afterwards self contains elements `[at, self.len)`, and the returned /// DivBuf contains elements `[0, at)`. /// This is an O(1) operation. 
/// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut db0 = dbs.try_const().unwrap(); /// let db1 = db0.split_to(4); /// assert_eq!(db0, [5, 6][..]); /// assert_eq!(db1, [1, 2, 3, 4][..]); /// ``` pub fn split_to(&mut self, at: usize) -> DivBuf { assert!(at <= self.len, "Can't split past the end"); let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(1, Relaxed); debug_assert!(old_accessors & READER_MASK > 0); let left_half = DivBuf { inner: self.inner, begin: self.begin, len: at, }; self.begin += at; self.len -= at; left_half } /// Attempt to upgrade Self to a writable DivBufMut /// /// This will fail if there are any other living DivBufs for this same /// DivBufShared /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let db = dbs.try_const().unwrap(); /// db.try_mut().unwrap(); /// ``` pub fn try_mut(self) -> Result { let inner = unsafe { &*self.inner }; if inner .accessors .compare_exchange(1, ONE_WRITER, AcqRel, Acquire) .is_ok() { let mutable_self = Ok(DivBufMut { inner: self.inner, begin: self.begin, len: self.len, }); mem::forget(self); mutable_self } else { // LCOV_EXCL_LINE kcov false negative Err(self) } } /// Combine splitted DivBuf objects back into a contiguous single /// /// If `DivBuf` objects were not contiguous originally, the operation will /// fail and return `other` unmodified /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut db0 = dbs.try_const().unwrap(); /// let db1 = db0.split_off(4); /// db0.unsplit(db1); /// assert_eq!(db0, [1, 2, 3, 4, 5, 6][..]); /// ``` pub fn unsplit(&mut self, other: DivBuf) -> Result<(), DivBuf> { if self.inner != other.inner || (self.begin + self.len) != other.begin { Err(other) } else { self.len += other.len; Ok(()) } } } impl AsRef<[u8]> for DivBuf { fn as_ref(&self) -> &[u8] { unsafe { let inner 
= &*self.inner; &inner.vec[self.begin..(self.begin + self.len)][..] } } } impl Borrow<[u8]> for DivBuf { fn borrow(&self) -> &[u8] { let inner = unsafe { &*self.inner }; &inner.vec[..] } } impl hash::Hash for DivBuf { fn hash(&self, state: &mut H) where H: hash::Hasher, { let s: &[u8] = self.as_ref(); s.hash(state); } } impl ops::Deref for DivBuf { type Target = [u8]; fn deref(&self) -> &[u8] { unsafe { let inner = &*self.inner; &inner.vec[self.begin..(self.begin + self.len)][..] } } } impl Clone for DivBuf { fn clone(&self) -> DivBuf { self.slice_from(0) } } impl Drop for DivBuf { fn drop(&mut self) { let inner = unsafe { &*self.inner }; if inner.accessors.fetch_sub(1, Release) == 1 && inner.sharers.load(Relaxed) == 0 { atomic::fence(Acquire); unsafe { drop(Box::from_raw(self.inner)); } } } } impl Eq for DivBuf {} impl From for DivBuf { fn from(src: DivBufMut) -> DivBuf { src.freeze() } } impl Ord for DivBuf { fn cmp(&self, other: &DivBuf) -> cmp::Ordering { self.as_ref().cmp(other.as_ref()) } } impl PartialEq for DivBuf { fn eq(&self, other: &DivBuf) -> bool { self.as_ref() == other.as_ref() } } impl PartialEq<[u8]> for DivBuf { fn eq(&self, other: &[u8]) -> bool { self.as_ref() == other } } impl PartialOrd for DivBuf { fn partial_cmp(&self, other: &DivBuf) -> Option { Some(self.cmp(other)) } } // Atomic refcounts provide shared ownership over the `inner` pointer, // guaranteeing that it won't be freed as long as a `DivBuf` exists. No method // allows that pointer to be mutated. Atomic refcounts also guarantee that no // more than one writer at a time can modify `inner`'s contents (as long as // DivBufMut is Sync, which it is). Therefore, DivBuf is both Send and Sync. unsafe impl Send for DivBuf {} unsafe impl Sync for DivBuf {} impl DivBufMut { /// Create a [`DivBufInaccessible`]. /// /// It may later be upgraded to one of the accessible forms. 
/// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let dbm = dbs.try_mut().unwrap(); /// let _dbi = dbm.clone_inaccessible(); /// ``` pub fn clone_inaccessible(&self) -> DivBufInaccessible { let inner = unsafe { &*self.inner }; let old = inner.sharers.fetch_add(1, Acquire); debug_assert!(old > 0); DivBufInaccessible { inner: self.inner, begin: self.begin, len: self.len, } } /// Extend self from iterator, without checking for validity fn extend_unchecked<'a, T>(&mut self, iter: T) where T: IntoIterator, { let inner = unsafe { &mut *self.inner }; let oldlen = inner.vec.len(); inner.vec.extend(iter); self.len += inner.vec.len() - oldlen; } /// Downgrade this `DivBufMut` into a read-only `DivBuf` /// /// Note that this method will always succeed, but subsequently calling /// [`try_mut`] on the returned `DivBuf` may not. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let dbm0 = dbs.try_mut().unwrap(); /// let db : DivBuf = dbm0.freeze(); /// ``` /// /// [`try_mut`]: struct.DivBuf.html#method.try_mut pub fn freeze(self) -> DivBuf { // Construct a new DivBuf, then drop self. We know that there are no // other DivButMuts that overlap with this one, so it's safe to create a // DivBuf whose range is restricted to what self covers let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(1, Relaxed); debug_assert!(old_accessors >> WRITER_SHIFT > 0); DivBuf { inner: self.inner, begin: self.begin, len: self.len, } } /// Break the buffer up into equal sized chunks /// /// Returns an interator which will yield equal sized chunks as smaller /// `DivBufMut`s. If the `DivBufMut` is not evenly divisible by `size`, /// then the last chunk will be smaller. 
This method is based on /// `slice::chunk_muts`, but with a few key differences: /// /// - It consumes `self` /// - Yields smaller `DivBufMut`s, not slices /// - Yields owned objects, not references /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![0, 1, 2, 3, 4, 5, 6, 7]); /// let dbm = dbs.try_mut().unwrap(); /// let mut iter = dbm.into_chunks(3); /// assert_eq!(&iter.next().unwrap()[..], &[0, 1, 2][..]); /// assert_eq!(&iter.next().unwrap()[..], &[3, 4, 5][..]); /// assert_eq!(&iter.next().unwrap()[..], &[6, 7][..]); /// assert!(&iter.next().is_none()) /// ``` pub fn into_chunks(self, size: usize) -> ChunksMut { assert!(size != 0); ChunksMut::new(self, size) } /// Returns true if the `DivBufMut` has length 0 pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns true if the `DivBufMut` extends to the end of the `DivBufShared` fn is_terminal(&self) -> bool { let inner = unsafe { &*self.inner }; let oldlen = inner.vec.len(); self.begin + self.len == oldlen } /// Get the length of this `DivBufMut`, _not_ the underlying storage pub fn len(&self) -> usize { self.len } /// Reserves capacity for at least `additional` more bytes to be inserted /// into the buffer. /// /// Like [`extend`], this method will panic if the `DivBufMut` is /// non-terminal. /// /// [`extend`]: #method.extend pub fn reserve(&mut self, additional: usize) { // panic if this DivBufMut does not extend to the end of the // DivBufShared assert!( self.is_terminal(), "Can't reserve from the middle of a buffer" ); let inner = unsafe { &mut *self.inner }; inner.vec.reserve(additional) } /// Splits the DivBufMut into two at the given index. /// /// Afterwards self contains elements `[0, at)`, and the returned DivBufMut /// contains elements `[at, self.len)`. 
/// /// This is an O(1) operation /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut dbm0 = dbs.try_mut().unwrap(); /// let dbm1 = dbm0.split_off(4); /// assert_eq!(dbm0, [1, 2, 3, 4][..]); /// assert_eq!(dbm1, [5, 6][..]); /// ``` pub fn split_off(&mut self, at: usize) -> DivBufMut { assert!(at <= self.len, "Can't split past the end"); let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(ONE_WRITER, Relaxed); debug_assert!(old_accessors >> WRITER_SHIFT > 0); let right_half = DivBufMut { inner: self.inner, begin: self.begin + at, len: self.len - at, }; self.len = at; right_half } /// Splits the DivBufMut into two at the given index. /// /// Afterwards self contains elements `[at, self.len)`, and the returned /// DivBufMut contains elements `[0, at)`. /// This is an O(1) operation. /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut dbm0 = dbs.try_mut().unwrap(); /// let dbm1 = dbm0.split_to(4); /// assert_eq!(dbm0, [5, 6][..]); /// assert_eq!(dbm1, [1, 2, 3, 4][..]); /// ``` pub fn split_to(&mut self, at: usize) -> DivBufMut { assert!(at <= self.len, "Can't split past the end"); let inner = unsafe { &*self.inner }; let old_accessors = inner.accessors.fetch_add(ONE_WRITER, Relaxed); debug_assert!(old_accessors >> WRITER_SHIFT > 0); let left_half = DivBufMut { inner: self.inner, begin: self.begin, len: at, }; self.begin += at; self.len -= at; left_half } /// Attempt to extend this `DivBufMut` with bytes from the provided /// iterator. /// /// If this `DivBufMut` is not terminal, that is if it does not extend to /// the end of the `DivBufShared`, then this operation will return an error /// and the buffer will not be modified. The [`extend`] method from the /// `Extend` Trait, by contrast, will panic under the same condition. 
/// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(64); /// let mut dbm0 = dbs.try_mut().unwrap(); /// assert!(dbm0.try_extend([1, 2, 3].iter()).is_ok()); /// ``` /// /// [`extend`]: #method.extend pub fn try_extend<'a, T>(&mut self, iter: T) -> Result<(), Error> where T: IntoIterator, { if self.is_terminal() { self.extend_unchecked(iter); Ok(()) } else { Err(Error("Can't extend into the middle of a buffer")) } } /// Attempt to resize this `DivBufMut` in-place. /// /// If `new_len` is greater than the existing length, then the buffer will /// be extended by the difference, with each element filled by `value`. If /// `new_len` is less than the existing length, then the buffer is simply /// truncated. /// /// If this `DivBufMut` is not terminal, that is if it does not extend to /// the end of the `DivBufShared`, then this operation will return an error /// and the buffer will not be modified. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(64); /// let mut dbm0 = dbs.try_mut().unwrap(); /// assert!(dbm0.try_resize(4, 0).is_ok()); /// assert_eq!(&dbm0[..], &[0, 0, 0, 0][..]); /// ``` pub fn try_resize( &mut self, new_len: usize, value: u8, ) -> Result<(), Error> { if self.is_terminal() { let inner = unsafe { &mut *self.inner }; inner.vec.resize(new_len + self.begin, value); self.len = new_len; Ok(()) } else { Err(Error("Can't resize from a non-terminal buffer")) } } /// Shortens the buffer, keeping the first `len` bytes and dropping the /// rest. /// /// If `len` is greater than the buffer's current length, this has no /// effect. /// /// Like [`try_extend`], will fail if this DivButMut is non-terminal. 
/// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut dbm0 = dbs.try_mut().unwrap(); /// assert!(dbm0.try_truncate(3).is_ok()); /// assert_eq!(dbm0, [1, 2, 3][..]); /// ``` /// /// [`try_extend`]: #method.try_extend pub fn try_truncate(&mut self, len: usize) -> Result<(), Error> { if self.is_terminal() { let inner = unsafe { &mut *self.inner }; inner.vec.truncate(self.begin + len); self.len = cmp::min(self.len, len); Ok(()) } else { Err(Error("Can't truncate a non-terminal DivBufMut")) } } /// Combine splitted DivBufMut objects back into a contiguous single /// /// If `DivBufMut` objects were not contiguous originally, the operation /// will fail and return `other` unmodified /// /// # Examples /// /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); /// let mut dbm0 = dbs.try_mut().unwrap(); /// let dbm1 = dbm0.split_off(4); /// dbm0.unsplit(dbm1); /// assert_eq!(dbm0, [1, 2, 3, 4, 5, 6][..]); /// ``` pub fn unsplit(&mut self, other: DivBufMut) -> Result<(), DivBufMut> { if self.inner != other.inner || (self.begin + self.len) != other.begin { Err(other) } else { self.len += other.len; Ok(()) } } } impl AsRef<[u8]> for DivBufMut { fn as_ref(&self) -> &[u8] { unsafe { let inner = &*self.inner; &inner.vec[self.begin..(self.begin + self.len)][..] } } } impl Borrow<[u8]> for DivBufMut { fn borrow(&self) -> &[u8] { let inner = unsafe { &*self.inner }; &inner.vec[..] } } impl BorrowMut<[u8]> for DivBufMut { fn borrow_mut(&mut self) -> &mut [u8] { let inner = unsafe { &mut *self.inner }; &mut inner.vec[..] } } impl ops::Deref for DivBufMut { type Target = [u8]; fn deref(&self) -> &[u8] { unsafe { let inner = &*self.inner; &inner.vec[self.begin..(self.begin + self.len)][..] } } } impl ops::DerefMut for DivBufMut { fn deref_mut(&mut self) -> &mut [u8] { unsafe { let inner = &mut *self.inner; &mut inner.vec[self.begin..(self.begin + self.len)][..] 
} } } impl Drop for DivBufMut { fn drop(&mut self) { let inner = unsafe { &*self.inner }; if inner.accessors.fetch_sub(ONE_WRITER, Release) == ONE_WRITER && inner.sharers.load(Relaxed) == 0 { atomic::fence(Acquire); unsafe { drop(Box::from_raw(self.inner)); } } } } impl<'a> Extend<&'a u8> for DivBufMut { fn extend(&mut self, iter: T) where T: IntoIterator, { // panic if this DivBufMut does not extend to the end of the // DivBufShared assert!( self.is_terminal(), "Can't extend into the middle of a buffer" ); self.extend_unchecked(iter); } } impl hash::Hash for DivBufMut { fn hash(&self, state: &mut H) where H: hash::Hasher, { let s: &[u8] = self.as_ref(); s.hash(state); } } impl Eq for DivBufMut {} impl Ord for DivBufMut { fn cmp(&self, other: &DivBufMut) -> cmp::Ordering { self.as_ref().cmp(other.as_ref()) } } impl PartialEq for DivBufMut { fn eq(&self, other: &DivBufMut) -> bool { self.as_ref() == other.as_ref() } } impl PartialEq<[u8]> for DivBufMut { fn eq(&self, other: &[u8]) -> bool { self.as_ref() == other } } impl PartialOrd for DivBufMut { fn partial_cmp(&self, other: &DivBufMut) -> Option { Some(self.cmp(other)) } } // Atomic refcounts provide shared ownership over the `inner` pointer, // guaranteeing that it won't be freed as long as a `DivBufMut` exists. No method // allows that pointer to be mutated. Atomic refcounts also guarantee that no // more than one writer at a time can modify `inner`'s contents (as long as // DivBufMut is Sync, which it is). Thereforce, DivBufMut is Send. And while // DivBufMut allows `inner`'s contents to be mutated, it does not provide // interior mutability; a &mut DivButMut is required. Therefore, DivBufMut is // Sync as well. 
unsafe impl Send for DivBufMut {} unsafe impl Sync for DivBufMut {} impl io::Write for DivBufMut { fn write(&mut self, buf: &[u8]) -> io::Result { self.try_extend(buf) .map(|_| buf.len()) .map_err(|s| io::Error::new(io::ErrorKind::Other, s)) } fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.try_extend(buf) .map_err(|s| io::Error::new(io::ErrorKind::Other, s)) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl DivBufInaccessible { /// Try to upgrade to a [`DivBuf`]. /// /// Will fail if there are any [`DivBufMut`]s referring to this same buffer. /// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let dbm = dbs.try_mut().unwrap(); /// let dbi = dbm.clone_inaccessible(); /// drop(dbm); /// let _db: DivBuf = dbi.try_const().unwrap(); /// ``` pub fn try_const(&self) -> Result { let inner = unsafe { &*self.inner }; if inner.accessors.fetch_add(1, Acquire) >> WRITER_SHIFT != 0 { inner.accessors.fetch_sub(1, Relaxed); Err(Error("Cannot create a DivBuf when DivBufMuts are active")) } else { Ok(DivBuf { inner: self.inner, begin: self.begin, len: self.len, }) } } /// Try to upgrade to a [`DivBufMut`]. /// /// Will fail if there are any [`DivBufMut`]s referring to this same buffer. 
/// /// # Examples /// ``` /// # use divbuf::*; /// let dbs = DivBufShared::with_capacity(4096); /// let dbm = dbs.try_mut().unwrap(); /// let dbi = dbm.clone_inaccessible(); /// drop(dbm); /// let _dbm: DivBufMut = dbi.try_mut().unwrap(); /// ``` pub fn try_mut(&self) -> Result { let inner = unsafe { &*self.inner }; if inner .accessors .compare_exchange(0, ONE_WRITER, AcqRel, Acquire) .is_ok() { Ok(DivBufMut { inner: self.inner, begin: self.begin, len: self.len, }) } else { Err(Error("Cannot upgrade when DivBufMuts are active")) } } } impl Clone for DivBufInaccessible { fn clone(&self) -> Self { let inner = unsafe { &*self.inner }; let old = inner.sharers.fetch_add(1, Acquire); debug_assert!(old > 0); DivBufInaccessible { inner: self.inner, begin: self.begin, len: self.len, } } } impl Drop for DivBufInaccessible { fn drop(&mut self) { let inner = unsafe { &*self.inner }; if inner.sharers.fetch_sub(1, Release) == 1 && inner.accessors.load(Relaxed) == 0 { // See the comments in std::sync::Arc::drop for why the fence is // required. atomic::fence(Acquire); unsafe { drop(Box::from_raw(self.inner)); } } } } // DivBufInaccessible owns the target of the `inner` pointer, and no method // allows that pointer to be mutated. Atomic refcounts guarantee that no more // than one writer at a time can modify `inner`'s contents (as long as DivBufMut // is Sync, which it is). Therefore, DivBufInaccessible is both Send and Sync. unsafe impl Send for DivBufInaccessible {} unsafe impl Sync for DivBufInaccessible {} divbuf-0.4.1/src/lib.rs000064400000000000000000000042321046102023000130110ustar 00000000000000// vim: tw=80 //! Recursively divisible buffer class //! //! The `divbuf` crate provides a buffer structure //! ([`DivBufShared`](struct.DivBufShared.html)) that can be efficiently and //! safely divided into multiple smaller buffers. Each child buffer can be //! further divided, recursively. A primitive form of range-locking is //! 
available: there is no way to create overlapping mutable child buffers. //! //! This crate is similar to [`bytes`], but with a few key differences: //! - `bytes` is a COW crate. Data will be shared between multiple objects as //! much as possible, but sometimes the data will be copied to new storage. //! `divbuf`, on the other hand, will _never_ copy data unless explicitly //! requested. //! - A `BytesMut` object always has the sole ability to access its own data. //! Once a `BytesMut` object is created, there is no other way to modify or //! even read its data that doesn't involve that object. A `DivBufMut`, on //! the other hand, shares its data with its parent `DivBufShared`. After //! that `DivBufMut` has been dropped, another can be created from the //! parent. //! - `bytes` contains numerous optimizations for dealing with small arrays, //! such as inline storage. However, some of those optimizations result in //! data copying, which is anathema to `divbuf`. `divbuf` therefore does not //! include them, and is optimized for working with large arrays. //! //! # Examples //! ``` //! use divbuf::*; //! //! let v = String::from("Some Green Stuff").into_bytes(); //! let dbs = DivBufShared::from(v); //! { //! let mut dbm = dbs.try_mut().unwrap(); //! let mut right_half = dbm.split_off(5); //! let mut color_buffer = right_half.split_to(5); //! color_buffer[..].copy_from_slice(&b"Black"[..]); //! } //! let db = dbs.try_const().unwrap(); //! assert_eq!(db, b"Some Black Stuff"[..]); //! ``` //! //!
[`bytes`]: https://carllerche.github.io/bytes/bytes/index.html #![cfg_attr(docsrs, feature(doc_cfg))] #![deny(warnings, missing_docs, missing_debug_implementations)] mod divbuf; pub use self::divbuf::{ Chunks, ChunksMut, DivBuf, DivBufInaccessible, DivBufMut, DivBufShared, }; divbuf-0.4.1/tests/functional.rs000064400000000000000000001020101046102023000147510ustar 00000000000000// vim: tw=80 use std::{ borrow::{Borrow, BorrowMut}, cmp::Ordering, collections::hash_map::DefaultHasher, convert::TryInto, hash::{Hash, Hasher}, io::Write, thread, }; use divbuf::*; use lazy_static::lazy_static; fn simple_hash(t: &T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } // // Chunks methods // mod chunks { use super::*; #[test] pub fn iter() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db = dbs.try_const().unwrap(); let mut chunks = db.into_chunks(3); assert_eq!(&chunks.next().unwrap()[..], &[1, 2, 3][..]); assert_eq!(&chunks.next().unwrap()[..], &[4, 5, 6][..]); assert!(chunks.next().is_none()); } #[test] #[should_panic] pub fn zero() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db = dbs.try_const().unwrap(); db.into_chunks(0); } #[test] pub fn size_hint() { let dbs = DivBufShared::from(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); assert_eq!( dbs.try_const().unwrap().into_chunks(1).size_hint(), (12, Some(12)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(2).size_hint(), (6, Some(6)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(3).size_hint(), (4, Some(4)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(4).size_hint(), (3, Some(3)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(5).size_hint(), (3, Some(3)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(6).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(7).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(8).size_hint(), (2, Some(2)) ); assert_eq!( 
dbs.try_const().unwrap().into_chunks(9).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(10).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(11).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_const().unwrap().into_chunks(12).size_hint(), (1, Some(1)) ); } } // // ChunksMut methods // mod chunks_mut { use super::*; #[test] pub fn iter() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let dbm = dbs.try_mut().unwrap(); let mut chunks = dbm.into_chunks(3); assert_eq!(&chunks.next().unwrap()[..], &[1, 2, 3][..]); assert_eq!(&chunks.next().unwrap()[..], &[4, 5, 6][..]); assert!(chunks.next().is_none()); } #[test] #[should_panic] pub fn zero() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let dbm = dbs.try_mut().unwrap(); dbm.into_chunks(0); } #[test] pub fn size_hint() { let dbs = DivBufShared::from(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); assert_eq!( dbs.try_mut().unwrap().into_chunks(1).size_hint(), (12, Some(12)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(2).size_hint(), (6, Some(6)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(3).size_hint(), (4, Some(4)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(4).size_hint(), (3, Some(3)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(5).size_hint(), (3, Some(3)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(6).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(7).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(8).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(9).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(10).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(11).size_hint(), (2, Some(2)) ); assert_eq!( dbs.try_mut().unwrap().into_chunks(12).size_hint(), (1, Some(1)) ); } } // // DivBufShared methods // mod divbufshared { use super::*; #[test] pub fn cap_and_len() { let mut v = 
Vec::::with_capacity(64); v.push(0); let dbs = DivBufShared::from(v); assert_eq!(dbs.capacity(), 64); assert_eq!(dbs.len(), 1); } #[test] pub fn fmt() { let v = Vec::::with_capacity(64); let dbs = DivBufShared::from(v); let output = format!("{:?}", &dbs); let expected = "DivBufShared { inner: Inner { vec: [], accessors: 0, \ sharers: 1 } }"; assert_eq!(output, expected); } #[test] pub fn from_slice() { let s = b"abcdefg"; let dbs = DivBufShared::from(&s[..]); let mut dbm = dbs.try_mut().unwrap(); assert_eq!(dbm, s[..]); // dbs should've been copy constructed, so we can mutate it without changing // the original slice dbm[0] = b'A'; assert_ne!(dbm, s[..]); } #[test] pub fn is_empty() { assert!(DivBufShared::with_capacity(4096).is_empty()); assert!(!DivBufShared::from(vec![1, 2, 3]).is_empty()); } #[test] pub fn send() { let dbs = DivBufShared::with_capacity(4096); thread::spawn(move || { let _ = dbs; }) .join() .unwrap(); } #[test] pub fn sync() { lazy_static! { pub static ref DBS: DivBufShared = DivBufShared::from(vec![0; 4096]); } let r = &DBS; thread::spawn(move || { let _ = r; }) .join() .unwrap(); } #[test] pub fn try_const() { let dbs = DivBufShared::with_capacity(4096); // Create an initial DivBuf let _db0 = dbs.try_const().unwrap(); // Creating a second is allowed, too let _db1 = dbs.try_const().unwrap(); } #[test] pub fn try_const_after_try_mut() { let dbs = DivBufShared::with_capacity(4096); // Create an initial DivBufMut let _dbm = dbs.try_mut().unwrap(); // Creating a DivBuf should fail, because there are writers assert!(dbs.try_const().is_err()); } #[test] pub fn try_mut() { let dbs = DivBufShared::with_capacity(4096); // Create an initial DivBufMut let _dbm0 = dbs.try_mut().unwrap(); // Creating a second is not allowed assert!(dbs.try_mut().is_err()); } #[test] pub fn try_mut_after_try_const() { let dbs = DivBufShared::with_capacity(4096); // Create an initial DivBuf let _db0 = dbs.try_const().unwrap(); // Now creating a mutable buffer is not allowed 
assert!(dbs.try_mut().is_err()); } #[cfg(feature = "experimental")] #[test] pub fn uninitialized() { let cap = 4096; let dbs = DivBufShared::uninitialized(cap); assert_eq!(dbs.capacity(), cap); assert_eq!(dbs.len(), cap); } #[test] pub fn to_vec() { let v = vec![1, 2, 3, 4]; let dbs = DivBufShared::from(v); { let mut dbm = dbs.try_mut().unwrap(); assert_eq!(dbm, [1, 2, 3, 4][..]); dbm[0] = 5; assert_eq!(dbm, [5, 2, 3, 4][..]); } let v2: Vec = dbs.try_into().unwrap(); assert_eq!(v2, vec![5, 2, 3, 4]); } #[test] pub fn to_vec_after_try_mut() { let v = vec![1, 2, 3, 4]; let dbs = DivBufShared::from(v); let _dbm = dbs.try_mut().unwrap(); let maybe_v: Result, _> = dbs.try_into(); assert!(maybe_v.is_err()); } #[test] pub fn to_vec_after_try_const() { let v = vec![1, 2, 3, 4]; let dbs = DivBufShared::from(v); let _db = dbs.try_const().unwrap(); let maybe_v: Result, _> = dbs.try_into(); assert!(maybe_v.is_err()); } #[test] pub fn to_vec_after_clone_inaccessible() { let v = vec![1, 2, 3, 4]; let dbs = DivBufShared::from(v); let dbm = dbs.try_mut().unwrap(); let _dbi = dbm.clone_inaccessible(); drop(dbm); let maybe_v: Result, _> = dbs.try_into(); assert!(maybe_v.is_err()); } } // // DivBuf methods // mod divbuf_ { use super::*; #[test] pub fn as_ref() { let dbs = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs.try_const().unwrap(); let s: &[u8] = db0.as_ref(); assert_eq!(s, &[1, 2, 3]); } #[test] pub fn as_ref_empty() { let dbs = DivBufShared::from(vec![]); let db0 = dbs.try_const().unwrap(); let s: &[u8] = db0.as_ref(); assert_eq!(s, &[]); } #[test] pub fn borrow() { let dbs = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs.try_const().unwrap(); let s: &[u8] = db0.borrow(); assert_eq!(s, &[1, 2, 3]); } #[test] pub fn clone() { let dbs = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs.try_const().unwrap(); let mut db1 = db0.clone(); assert_eq!(db0, db1); // We should be able to modify one DivBuf without affecting the other db1.split_off(1); assert_ne!(db0, db1); } #[test] 
pub fn clone_inaccessible() { let dbs = DivBufShared::from(vec![1, 2, 3]); let dbm = dbs.try_mut().unwrap(); let _dbi: DivBufInaccessible = dbm.clone_inaccessible(); } #[test] pub fn deref() { let dbs = DivBufShared::from(vec![1, 2, 3]); let db = dbs.try_const().unwrap(); let slice: &[u8] = &db; assert_eq!(slice, &[1, 2, 3]); } #[test] pub fn deref_empty() { let dbs = DivBufShared::from(vec![]); let db = dbs.try_const().unwrap(); let slice: &[u8] = &db; assert_eq!(slice, &[]); } // A DivBuf should be able to own its storage, and will free it on last drop #[test] pub fn drop_last() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let _db0 = dbs0.try_const().unwrap(); drop(dbs0); } #[test] pub fn eq() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let dbs1 = DivBufShared::from(vec![1, 2, 3]); let dbs2 = DivBufShared::from(vec![1, 2]); let db0 = dbs0.try_const().unwrap(); let db1 = dbs1.try_const().unwrap(); let db2 = dbs2.try_const().unwrap(); assert_eq!(db0, db1); assert_ne!(db0, db2); } #[test] pub fn from_divbufmut() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let dbm = dbs.try_mut().unwrap(); let _db = DivBuf::from(dbm); } #[test] pub fn is_empty() { let dbs0 = DivBufShared::with_capacity(64); let db0 = dbs0.try_const().unwrap(); assert!(db0.is_empty()); let dbs1 = DivBufShared::from(vec![1]); let db1 = dbs1.try_const().unwrap(); assert!(!db1.is_empty()); } #[test] pub fn hash() { let v = vec![1, 2, 3, 4, 5, 6]; let expected = simple_hash(&v); let dbs = DivBufShared::from(v); let db0 = dbs.try_const().unwrap(); assert_eq!(simple_hash(&db0), expected); } #[test] pub fn ord() { let dbs = DivBufShared::from(vec![0, 1, 0, 2]); let db0 = dbs.try_const().unwrap().slice_to(2); let db1 = dbs.try_const().unwrap().slice_from(2); assert_eq!(db0.cmp(&db1), Ordering::Less); } #[test] pub fn partial_ord() { let dbs = DivBufShared::from(vec![0, 1, 0, 2]); let db0 = dbs.try_const().unwrap().slice_to(2); let db1 = dbs.try_const().unwrap().slice_from(2); assert!(db0 < 
db1); } #[test] pub fn send() { let dbs = DivBufShared::with_capacity(4096); let db = dbs.try_const().unwrap(); thread::spawn(move || { let _ = db; }) .join() .unwrap(); } #[test] pub fn sync() { lazy_static! { pub static ref DBS: DivBufShared = DivBufShared::from(vec![0; 4096]); pub static ref DB: DivBuf = DBS.try_const().unwrap(); } let r = &DB; thread::spawn(move || { let _ = r; }) .join() .unwrap(); } #[test] pub fn slice() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db0 = dbs.try_const().unwrap(); assert_eq!(db0.slice(0, 0), [][..]); assert_eq!(db0.slice(1, 5), [2, 3, 4, 5][..]); assert_eq!(db0.slice(1, 1), [][..]); assert_eq!(db0.slice(0, 6), db0); assert_eq!(db0, [1, 2, 3, 4, 5, 6][..]); } #[test] #[should_panic(expected = "begin <= end")] pub fn slice_backwards() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db0 = dbs.try_const().unwrap(); db0.slice(1, 0); } #[test] #[should_panic(expected = "end <= self.len")] pub fn slice_after_end() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db0 = dbs.try_const().unwrap(); db0.slice(3, 7); } #[test] pub fn slice_from() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db0 = dbs.try_const().unwrap(); assert_eq!(db0.slice_from(0), db0); assert_eq!(db0.slice_from(3), [4, 5, 6][..]); } #[test] pub fn slice_to() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let db0 = dbs.try_const().unwrap(); assert_eq!(db0.slice_to(6), db0); assert_eq!(db0.slice_to(3), [1, 2, 3][..]); } #[test] pub fn split_off() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut db0 = dbs.try_const().unwrap(); // split in the middle let db_mid = db0.split_off(4); assert_eq!(db0, [1, 2, 3, 4][..]); assert_eq!(db0.len(), 4); assert_eq!(db_mid, [5, 6][..]); assert_eq!(db_mid.len(), 2); // split at the beginning let mut db_begin = db0.split_off(0); assert_eq!(db0, [][..]); assert_eq!(db_begin, [1, 2, 3, 4][..]); // split at the end let db_end = db_begin.split_off(4); 
assert_eq!(db_begin, [1, 2, 3, 4][..]); assert_eq!(db_end, [][..]); } #[test] #[should_panic(expected = "Can't split past the end")] pub fn split_off_past_the_end() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut db0 = dbs.try_const().unwrap(); db0.split_off(7); } #[test] pub fn split_to() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut db0 = dbs.try_const().unwrap(); // split in the middle let mut db_left = db0.split_to(4); assert_eq!(db_left, [1, 2, 3, 4][..]); assert_eq!(db_left.len(), 4); assert_eq!(db0, [5, 6][..]); assert_eq!(db0.len(), 2); // split at the beginning let db_begin = db_left.split_to(0); assert_eq!(db_begin, [][..]); assert_eq!(db_left, [1, 2, 3, 4][..]); // split at the end let db_mid = db_left.split_to(4); assert_eq!(db_mid, [1, 2, 3, 4][..]); assert_eq!(db_left, [][..]); } #[test] #[should_panic(expected = "Can't split past the end")] pub fn split_to_past_the_end() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut db0 = dbs.try_const().unwrap(); db0.split_to(7); } #[test] pub fn try_mut() { let dbs = DivBufShared::with_capacity(64); let mut db0 = dbs.try_const().unwrap(); db0 = { let db1 = dbs.try_const().unwrap(); // When multiple DivBufs are active, none can be upgraded let db2 = db0.try_mut(); assert!(db2.is_err()); let db3 = db1.try_mut(); assert!(db3.is_err()); db2.unwrap_err() }; // A single DivBuf alone can be upgraded assert!(db0.try_mut().is_ok()); } #[test] pub fn unsplit() { let dbs0 = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut db0 = dbs0.try_const().unwrap(); { // split in the middle let db_mid = db0.split_off(4); // put it back together assert!(db0.unsplit(db_mid).is_ok()); assert_eq!(db0, [1, 2, 3, 4, 5, 6][..]); } { // unsplit should fail for noncontiguous DivBufs let mut db_begin = db0.slice_to(2); let db_end = db0.slice_from(4); assert!(db_begin.unsplit(db_end).is_err()); } { // unsplit should fail for overlapping DivBufs let mut db_begin = db0.slice_to(4); let db_end = 
db0.slice_from(2); assert!(db_begin.unsplit(db_end).is_err()); } { // unsplit should fail for unrelated DivBufs let dbs1 = DivBufShared::from(vec![7, 8, 9]); let mut db_end = db0.slice_from(4); let db_unrelated = dbs1.try_const().unwrap(); assert!(db_end.unsplit(db_unrelated).is_err()); } } } mod divbuf_inaccessible { use super::*; /// DivBufInaccessible's superpower is clone. #[test] #[allow(clippy::redundant_clone)] pub fn clone() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs0.try_const().unwrap(); let dbi0 = db0.clone_inaccessible(); let _dbi1 = dbi0.clone(); } // A DivBufInaccessible should be able to own its storage, and will free it // on last drop. #[test] pub fn drop_last() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs0.try_const().unwrap(); let _dbi = db0.clone_inaccessible(); drop(db0); drop(dbs0); } #[test] pub fn send() { let dbs = DivBufShared::with_capacity(4096); let db = dbs.try_const().unwrap(); let dbi = db.clone_inaccessible(); thread::spawn(move || { let _ = dbi; }) .join() .unwrap(); } #[test] pub fn sync() { lazy_static! 
{ pub static ref DBS: DivBufShared = DivBufShared::from(vec![0; 4096]); pub static ref DB: DivBuf = DBS.try_const().unwrap(); pub static ref DBI: DivBufInaccessible = DB.clone_inaccessible(); } let r = &DBI; thread::spawn(move || { let _ = r; }) .join() .unwrap(); } #[test] pub fn try_const_failure() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let dbm = dbs0.try_mut().unwrap(); let dbi = dbm.clone_inaccessible(); dbi.try_const().unwrap_err(); } #[test] pub fn try_const_success() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs0.try_const().unwrap(); let dbi = db0.clone_inaccessible(); dbi.try_const().unwrap(); } #[test] pub fn try_mut_failure() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let db = dbs0.try_const().unwrap(); let dbi = db.clone_inaccessible(); dbi.try_mut().unwrap_err(); } #[test] pub fn try_mut_success() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let db0 = dbs0.try_const().unwrap(); let dbi = db0.clone_inaccessible(); drop(db0); dbi.try_mut().unwrap(); } } // // DivBufMut methods // mod divbuf_mut { use super::*; #[test] pub fn as_ref() { let dbs = DivBufShared::from(vec![1, 2, 3]); let dbm0 = dbs.try_mut().unwrap(); let s: &[u8] = dbm0.as_ref(); assert_eq!(s, &[1, 2, 3]); } #[test] pub fn as_ref_empty() { let dbs = DivBufShared::from(vec![]); let dbm0 = dbs.try_mut().unwrap(); let s: &[u8] = dbm0.as_ref(); assert_eq!(s, &[]); } #[test] pub fn borrow() { let dbs = DivBufShared::from(vec![1, 2, 3]); let dbm0 = dbs.try_mut().unwrap(); let s: &[u8] = dbm0.borrow(); assert_eq!(s, &[1, 2, 3]); } #[test] pub fn borrowmut() { let dbs = DivBufShared::from(vec![1, 2, 3]); { let mut dbm0 = dbs.try_mut().unwrap(); let s: &mut [u8] = dbm0.borrow_mut(); s[0] = 9; } let db0 = dbs.try_const().unwrap(); let slice: &[u8] = &db0; assert_eq!(slice, &[9, 2, 3]); } #[test] pub fn clone_inaccessible() { let dbs = DivBufShared::from(vec![1, 2, 3]); let db = dbs.try_const().unwrap(); let _dbi: DivBufInaccessible = db.clone_inaccessible(); } #[test] 
pub fn deref() { let dbs = DivBufShared::from(vec![1, 2, 3]); let dbm = dbs.try_mut().unwrap(); let slice: &[u8] = &dbm; assert_eq!(slice, &[1, 2, 3]); } #[test] pub fn deref_empty() { let dbs = DivBufShared::from(vec![]); let dbm = dbs.try_mut().unwrap(); let slice: &[u8] = &dbm; assert_eq!(slice, &[]); } #[test] pub fn derefmut() { let dbs = DivBufShared::from(vec![1, 2, 3]); let mut dbm = dbs.try_mut().unwrap(); // Unlike DivBuf, we _can_ update DivBufMuts randomly dbm[0] = 9; let slice: &mut [u8] = &mut dbm; assert_eq!(slice, &[9, 2, 3]); } #[test] pub fn derefmut_empty() { let dbs = DivBufShared::from(vec![]); let mut dbm = dbs.try_mut().unwrap(); let slice: &mut [u8] = &mut dbm; assert_eq!(slice, &[]); } // A DivBufMut should be able to own its storage, and will free it on last drop #[test] pub fn drop_last() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let _dbm0 = dbs0.try_mut().unwrap(); drop(dbs0); } #[test] pub fn eq() { let dbs0 = DivBufShared::from(vec![1, 2, 3]); let dbs1 = DivBufShared::from(vec![1, 2, 3]); let dbs2 = DivBufShared::from(vec![1, 2]); let dbm0 = dbs0.try_mut().unwrap(); let dbm1 = dbs1.try_mut().unwrap(); let dbm2 = dbs2.try_mut().unwrap(); assert_eq!(dbm0, dbm1); assert_ne!(dbm0, dbm2); } #[test] pub fn extend() { let dbs = DivBufShared::from(vec![1, 2, 3]); { let mut dbm = dbs.try_mut().unwrap(); dbm.extend([4, 5, 6].iter()); } // verify that dbs.inner.vec was extended let db = dbs.try_const().unwrap(); let slice: &[u8] = &db; assert_eq!(slice, &[1, 2, 3, 4, 5, 6]); } #[test] #[should_panic(expected = "extend into the middle of a buffer")] pub fn extend_from_the_middle() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut dbm = dbs.try_mut().unwrap(); let mut dbm_begin = dbm.split_to(3); dbm_begin.extend([7, 8, 9].iter()); } #[test] pub fn freeze() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6, 7, 8]); { // Simplest case: freeze the entire buffer let dbm = dbs.try_mut().unwrap(); let _: DivBuf = dbm.freeze(); } { 
// Freeze a buffer in the presence of other readers && writers let mut dbm = dbs.try_mut().unwrap(); let right_half = dbm.split_off(4); let _db_right_half = right_half.freeze(); let left_quarter = dbm.split_to(2); let _db_left_quarter = left_quarter.freeze(); // We should still be able to mutate from the remaining DivBufMut dbm[0] = 33; } } #[test] pub fn hash() { let v = vec![1, 2, 3, 4, 5, 6]; let expected = simple_hash(&v); let dbs = DivBufShared::from(v); let dbm0 = dbs.try_mut().unwrap(); assert_eq!(simple_hash(&dbm0), expected); } #[test] pub fn is_empty() { let dbs0 = DivBufShared::with_capacity(64); let mut dbm0 = dbs0.try_mut().unwrap(); assert!(dbm0.is_empty()); dbm0.extend([4, 5, 6].iter()); assert!(!dbm0.is_empty()); } #[test] pub fn ord() { let dbs = DivBufShared::from(vec![0, 1, 0, 2]); let mut dbm0 = dbs.try_mut().unwrap(); let dbm1 = dbm0.split_off(2); assert_eq!(dbm0.cmp(&dbm1), Ordering::Less); } #[test] pub fn partial_ord() { let dbs = DivBufShared::from(vec![0, 1, 0, 2]); let mut dbm0 = dbs.try_mut().unwrap(); let dbm1 = dbm0.split_off(2); assert!(dbm0 < dbm1); } #[test] pub fn reserve() { let v = Vec::::with_capacity(64); let dbs = DivBufShared::from(v); let mut dbm = dbs.try_mut().unwrap(); dbm.reserve(128); assert_eq!(dbs.capacity(), 128); } #[test] #[should_panic(expected = "reserve from the middle of a buffer")] pub fn reserve_from_the_middle() { let v = vec![1, 2, 3, 4, 5, 6]; let dbs = DivBufShared::from(v); let mut dbm = dbs.try_mut().unwrap(); let mut left_half = dbm.split_to(3); left_half.reserve(128); } #[test] pub fn send() { let dbs = DivBufShared::with_capacity(4096); let dbm = dbs.try_mut().unwrap(); thread::spawn(move || { let _ = dbm; }) .join() .unwrap(); } #[test] pub fn sync() { lazy_static! 
{ pub static ref DBS: DivBufShared = DivBufShared::from(vec![0; 4096]); pub static ref DBM: DivBufMut = DBS.try_mut().unwrap(); } let r = &DBM; thread::spawn(move || { let _ = r; }) .join() .unwrap(); } #[test] pub fn split_off() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut dbm0 = dbs.try_mut().unwrap(); // split in the middle let dbm_mid = dbm0.split_off(4); assert_eq!(dbm0, [1, 2, 3, 4][..]); assert_eq!(dbm0.len(), 4); assert_eq!(dbm_mid, [5, 6][..]); assert_eq!(dbm_mid.len(), 2); // split at the beginning let mut dbm_begin = dbm0.split_off(0); assert_eq!(dbm0, [][..]); assert_eq!(dbm_begin, [1, 2, 3, 4][..]); // split at the end let dbm_end = dbm_begin.split_off(4); assert_eq!(dbm_begin, [1, 2, 3, 4][..]); assert_eq!(dbm_end, [][..]); } #[test] #[should_panic(expected = "Can't split past the end")] pub fn split_off_past_the_end() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut dbm0 = dbs.try_mut().unwrap(); dbm0.split_off(7); } #[test] pub fn split_to() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut dbm0 = dbs.try_mut().unwrap(); // split in the middle let mut dbm_left = dbm0.split_to(4); assert_eq!(dbm_left, [1, 2, 3, 4][..]); assert_eq!(dbm_left.len(), 4); assert_eq!(dbm0, [5, 6][..]); assert_eq!(dbm0.len(), 2); // split at the beginning let dbm_begin = dbm_left.split_to(0); assert_eq!(dbm_begin, [][..]); assert_eq!(dbm_left, [1, 2, 3, 4][..]); // split at the end let dbm_mid = dbm_left.split_to(4); assert_eq!(dbm_mid, [1, 2, 3, 4][..]); assert_eq!(dbm_left, [][..]); } #[test] #[should_panic(expected = "Can't split past the end")] pub fn split_to_past_the_end() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); let mut dbm0 = dbs.try_mut().unwrap(); dbm0.split_to(7); } #[test] pub fn try_extend() { let dbs = DivBufShared::from(vec![1, 2, 3]); { let mut dbm0 = dbs.try_mut().unwrap(); assert!(dbm0.try_extend([4, 5, 6].iter()).is_ok()); // Extending from the middle of the vec should fail let mut dbm1 = 
dbm0.split_to(2); assert!(dbm1.try_extend([7, 8, 9].iter()).is_err()); } // verify that dbs.inner.vec was extended the first time, but not the // second. let db = dbs.try_const().unwrap(); assert_eq!(db, [1, 2, 3, 4, 5, 6][..]); } #[test] pub fn try_resize() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); { let mut dbm0 = dbs.try_mut().unwrap(); // First, resize past the end of the vector assert!(dbm0.try_resize(7, 42).is_ok()); assert_eq!(dbm0.len(), 7); assert_eq!(&dbm0[..], &[1, 2, 3, 4, 5, 6, 42][..]); // Then, do a truncation assert!(dbm0.try_resize(4, 42).is_ok()); assert_eq!(dbm0, [1, 2, 3, 4][..]); // Check that the shared vector was truncated, too assert_eq!(dbs.len(), 4); // A resize of a non-terminal DivBufMut should fail let mut dbm1 = dbm0.split_to(2); assert!(dbm1.try_resize(3, 42).is_err()); assert!(dbm1.try_resize(10, 42).is_err()); assert_eq!(dbs.len(), 4); // Resizing a terminal DivBufMut should work, even if it doesn't start // at the vector's beginning assert!(dbm0.try_resize(5, 0).is_ok()); assert_eq!(&dbm0[..], &[3, 4, 0, 0, 0][..]); } } #[test] pub fn try_truncate() { let dbs = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); { let mut dbm0 = dbs.try_mut().unwrap(); // First, truncate past the end of the vector assert!(dbm0.try_truncate(7).is_ok()); assert_eq!(dbm0.len(), 6); // Then, do a normal truncation assert!(dbm0.try_truncate(4).is_ok()); assert_eq!(dbm0, [1, 2, 3, 4][..]); // Check that the shared vector was truncated, too assert_eq!(dbs.len(), 4); // A truncation of a non-terminal DivBufMut should fail let mut dbm1 = dbm0.split_to(2); assert!(dbm1.try_truncate(3).is_err()); assert_eq!(dbs.len(), 4); // Truncating a terminal DivBufMut should work, even if it doesn't start // at the vector's beginning assert!(dbm0.try_truncate(1).is_ok()); assert_eq!(dbs.len(), 3); } } #[test] pub fn unsplit() { let dbs0 = DivBufShared::from(vec![1, 2, 3, 4, 5, 6]); { let mut dbm0 = dbs0.try_mut().unwrap(); // split in the middle let dbm_mid = 
dbm0.split_off(4); // put it back together assert!(dbm0.unsplit(dbm_mid).is_ok()); assert_eq!(dbm0, [1, 2, 3, 4, 5, 6][..]); } { // unsplit should fail for noncontiguous DivBufMuts let mut dbm0 = dbs0.try_mut().unwrap(); let mut dbm_begin = dbm0.split_to(2); let dbm_end = dbm0.split_off(2); assert!(dbm_begin.unsplit(dbm_end).is_err()); } { // unsplit should fail for unrelated DivBufMuts let mut dbm0 = dbs0.try_mut().unwrap(); let dbs1 = DivBufShared::from(vec![7, 8, 9]); let mut dbm_end = dbm0.split_off(4); let dbm_unrelated = dbs1.try_mut().unwrap(); assert!(dbm_end.unsplit(dbm_unrelated).is_err()); } } #[test] pub fn write() { const MSG: &[u8] = b"ABCD"; let dbs0 = DivBufShared::with_capacity(0); let mut dbm0 = dbs0.try_mut().unwrap(); assert_eq!(MSG.len(), dbm0.write(MSG).unwrap()); assert_eq!(&dbm0[..], &[65u8, 66u8, 67u8, 68u8][..]) } #[test] pub fn write_nonterminal() { let dbs0 = DivBufShared::from(vec![0, 1, 2, 3]); let mut dbm0 = dbs0.try_mut().unwrap(); let _ = dbm0.split_off(2); assert!(dbm0.write("ABCD".as_bytes()).is_err()); } #[test] pub fn write_all() { let dbs0 = DivBufShared::with_capacity(0); let mut dbm0 = dbs0.try_mut().unwrap(); dbm0.write_all("ABCD".as_bytes()).unwrap(); assert_eq!(&dbm0[..], &[65u8, 66u8, 67u8, 68u8][..]) } #[test] pub fn write_all_nonterminal() { let dbs0 = DivBufShared::from(vec![0, 1, 2, 3]); let mut dbm0 = dbs0.try_mut().unwrap(); let _ = dbm0.split_off(2); assert!(dbm0.write_all("ABCD".as_bytes()).is_err()); } #[test] pub fn flush() { let dbs0 = DivBufShared::with_capacity(0); let mut dbm0 = dbs0.try_mut().unwrap(); dbm0.write_all("ABCD".as_bytes()).unwrap(); dbm0.flush().unwrap(); assert_eq!(&dbm0[..], &[65u8, 66u8, 67u8, 68u8][..]) } } divbuf-0.4.1/tests/thread_race.rs000064400000000000000000000032161046102023000150600ustar 00000000000000// vim: tw=80 use std::{ sync::atomic::{AtomicBool, Ordering::Relaxed}, thread, time, }; use divbuf::*; use lazy_static::lazy_static; lazy_static! 
{ pub static ref DBS: DivBufShared = DivBufShared::from(vec![0; 4096]); pub static ref SHUTDOWN: AtomicBool = AtomicBool::new(false); } fn readfunc() { let mut losses: u64 = 0; let mut wins: u64 = 0; while !SHUTDOWN.load(Relaxed) { if let Ok(db) = (DBS).try_const() { let mut db0 = db.slice(0, 1024); let db1 = db.slice(1024, 2048); db0.unsplit(db1).unwrap(); wins += 1; } else { losses += 1; } } println!("reader won {} races and lost {}", wins, losses); } fn writefunc() { let mut losses: u64 = 0; let mut wins: u64 = 0; while !SHUTDOWN.load(Relaxed) { if let Ok(mut dbm) = (DBS).try_mut() { let dbm1 = dbm.split_off(2048); dbm.unsplit(dbm1).unwrap(); wins += 1; } else { losses += 1; } } println!("writer won {} races and lost {}", wins, losses); } /// Create a multitude of threads that each try to divide a common static /// buffer. They run for a fixed time. Success happens if nobody panics. #[test] fn test_thread_race() { let reader0 = thread::spawn(readfunc); let reader1 = thread::spawn(readfunc); let writer0 = thread::spawn(writefunc); let writer1 = thread::spawn(writefunc); thread::sleep(time::Duration::from_secs(1)); SHUTDOWN.store(true, Relaxed); reader0.join().unwrap(); reader1.join().unwrap(); writer0.join().unwrap(); writer1.join().unwrap(); }