ringbuf-0.3.3/.cargo_vcs_info.json0000644000000001360000000000100124730ustar { "git": { "sha1": "3822b2e22d9de3185c2012ff9c937b8bcdd21fd7" }, "path_in_vcs": "" }ringbuf-0.3.3/.github/workflows/test.yml000064400000000000000000000005431046102023000163640ustar 00000000000000on: [push, pull_request] jobs: build_and_test: name: ringbuf runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: toolchain: stable - run: cargo check --no-default-features - run: cargo check --no-default-features --features alloc - run: cargo test ringbuf-0.3.3/.gitignore000064400000000000000000000000701046102023000132500ustar 00000000000000/target **/*.rs.bk Cargo.lock .vscode *.code-workspace ringbuf-0.3.3/Cargo.lock0000644000000011420000000000100104440ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crossbeam-utils" version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] [[package]] name = "ringbuf" version = "0.3.3" dependencies = [ "crossbeam-utils", ] ringbuf-0.3.3/Cargo.toml0000644000000025160000000000100104750ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" name = "ringbuf" version = "0.3.3" authors = ["Alexey Gerasev "] description = "Lock-free SPSC FIFO ring buffer with direct access to inner data" homepage = "https://github.com/agerasev/ringbuf" documentation = "https://docs.rs/ringbuf" readme = "README.md" keywords = [ "lock-free", "spsc", "ring-buffer", "rb", "fifo", ] categories = [ "concurrency", "data-structures", "no-std", ] license = "MIT/Apache-2.0" repository = "https://github.com/agerasev/ringbuf.git" [[example]] name = "simple" required-features = ["alloc"] [[example]] name = "overwrite" required-features = ["alloc"] [[example]] name = "message" required-features = ["std"] [dependencies.crossbeam-utils] version = "0.8" default-features = false [features] alloc = [] bench = [] default = [ "alloc", "std", ] std = ["alloc"] ringbuf-0.3.3/Cargo.toml.orig000064400000000000000000000015221046102023000141520ustar 00000000000000[package] name = "ringbuf" version = "0.3.3" authors = ["Alexey Gerasev "] edition = "2021" description = "Lock-free SPSC FIFO ring buffer with direct access to inner data" documentation = "https://docs.rs/ringbuf" homepage = "https://github.com/agerasev/ringbuf" repository = "https://github.com/agerasev/ringbuf.git" readme = "README.md" keywords = ["lock-free", "spsc", "ring-buffer", "rb", "fifo"] categories = ["concurrency", "data-structures", "no-std"] license = "MIT/Apache-2.0" [features] default = ["alloc", "std"] alloc = [] std = ["alloc"] bench = [] [dependencies] crossbeam-utils = { version = "0.8", default-features = false } [[example]] name = "simple" required-features = ["alloc"] [[example]] name = "overwrite" required-features = ["alloc"] [[example]] name = "message" required-features = ["std"] ringbuf-0.3.3/LICENSE-APACHE000064400000000000000000000251371046102023000132170ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ringbuf-0.3.3/LICENSE-MIT000064400000000000000000000020421046102023000127150ustar 00000000000000Copyright (c) 2019 Alexey Gerasev Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ringbuf-0.3.3/README.md000064400000000000000000000110231046102023000125370ustar 00000000000000# ringbuf

[![Crates.io][crates_badge]][crates]
[![Docs.rs][docs_badge]][docs]
[![Github Actions][github_badge]][github]
[![License][license_badge]][license]

[crates_badge]: https://img.shields.io/crates/v/ringbuf.svg
[docs_badge]: https://docs.rs/ringbuf/badge.svg
[github_badge]: https://github.com/agerasev/ringbuf/actions/workflows/test.yml/badge.svg
[license_badge]: https://img.shields.io/crates/l/ringbuf.svg

[crates]: https://crates.io/crates/ringbuf
[docs]: https://docs.rs/ringbuf
[github]: https://github.com/agerasev/ringbuf/actions/workflows/test.yml
[license]: #license

Lock-free SPSC FIFO ring buffer with direct access to inner data.

## Features

+ Lock-free operations - they succeed or fail immediately without blocking or waiting.
+ Arbitrary item type (not only `Copy`).
+ Items can be inserted and removed one by one or many at once.
+ Thread-safe direct access to the internal ring buffer memory.
+ `Read` and `Write` implementations.
+ Overwriting mode support.
+ Can be used without `std` and even without `alloc` (using only statically-allocated memory).
+ [Experimental `async`/`.await` support](https://github.com/agerasev/async-ringbuf).

## Usage

First you need to create the ring buffer itself. `HeapRb` is recommended but you may [choose another one](#types).

Once the ring buffer is created, it may be split into a pair of `Producer` and `Consumer`.
`Producer` is used to insert items into the ring buffer, `Consumer` - to remove items from it.
For `SharedRb` and its derivatives, they can be used in different threads.

## Types

There are several types of ring buffers provided:

+ `LocalRb`. Only for single-threaded use.
+ `SharedRb`. Can be shared between threads. Its derivatives:
  + `HeapRb`. Contents are stored in dynamic memory. *Recommended for use in most cases.*
  + `StaticRb`. Contents can be stored in statically-allocated memory.

## Performance

`SharedRb` needs to synchronize CPU cache between CPU cores. This synchronization has some overhead.
To avoid multiple unnecessary synchronizations you may use the postponed mode of operation (see the description for `Producer` and `Consumer`, and the sketch at the end of this section)
or methods that operate on many items at once (`Producer::push_slice`/`Producer::push_iter`, `Consumer::pop_slice`, etc.).

For single-threaded usage `LocalRb` is recommended because it is faster than `SharedRb` due to the absence of CPU cache synchronization.

### Benchmarks

You may see typical performance of different methods in benchmarks:

```bash
cargo +nightly bench --features bench
```

Nightly toolchain is required.
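
For example, a postponed producer commits a whole batch of pushes with a single synchronization. A minimal sketch (the postponed changes are committed when the postponed handle is dropped; `sync` can be used to commit explicitly):

```rust
use ringbuf::HeapRb;

# fn main() {
let rb = HeapRb::<u8>::new(8);
let (mut prod, mut cons) = rb.split();

{
    // These pushes touch only the local cache of the postponed producer.
    let mut postponed = prod.postponed();
    for i in 0..4 {
        postponed.push(i).unwrap();
    }
} // Dropped here; the pushed items become visible to the consumer.

assert_eq!(cons.len(), 4);
# }
```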
## Examples

### Simple

```rust
use ringbuf::HeapRb;

# fn main() {
let rb = HeapRb::<i32>::new(2);
let (mut prod, mut cons) = rb.split();

prod.push(0).unwrap();
prod.push(1).unwrap();
assert_eq!(prod.push(2), Err(2));

assert_eq!(cons.pop(), Some(0));

prod.push(2).unwrap();

assert_eq!(cons.pop(), Some(1));
assert_eq!(cons.pop(), Some(2));
assert_eq!(cons.pop(), None);
# }
```

### No heap

```rust
use ringbuf::StaticRb;

# fn main() {
const RB_SIZE: usize = 1;
let mut rb = StaticRb::<i32, RB_SIZE>::default();
let (mut prod, mut cons) = rb.split_ref();

assert_eq!(prod.push(123), Ok(()));
assert_eq!(prod.push(321), Err(321));

assert_eq!(cons.pop(), Some(123));
assert_eq!(cons.pop(), None);
# }
```

## Overwrite

Ring buffer can be used in overwriting mode when insertion overwrites the oldest element if the buffer is full.

```rust
use ringbuf::{HeapRb, Rb};

# fn main() {
let mut rb = HeapRb::<i32>::new(2);

assert_eq!(rb.push_overwrite(0), None);
assert_eq!(rb.push_overwrite(1), None);
assert_eq!(rb.push_overwrite(2), Some(0));

assert_eq!(rb.pop(), Some(1));
assert_eq!(rb.pop(), Some(2));
assert_eq!(rb.pop(), None);
# }
```

Note that [`push_overwrite`](`Rb::push_overwrite`) requires exclusive access to the ring buffer
so to perform it concurrently you need to guard the ring buffer with [`Mutex`](`std::sync::Mutex`) or some other lock.

## `async`/`.await`

There is an experimental crate [`async-ringbuf`](https://github.com/agerasev/async-ringbuf)
which is built on top of `ringbuf` and implements asynchronous ring buffer operations.

## License

Licensed under either of

* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

at your option.

### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
ringbuf-0.3.3/examples/message.rs000064400000000000000000000032141046102023000150730ustar 00000000000000use std::{io::Read, thread, time::Duration};

use ringbuf::HeapRb;

fn main() {
    let buf = HeapRb::<u8>::new(10);
    let (mut prod, mut cons) = buf.split();

    let smsg = "The quick brown fox jumps over the lazy dog";

    let pjh = thread::spawn(move || {
        println!("-> sending message: '{}'", smsg);

        let zero = [0];
        let mut bytes = smsg.as_bytes().chain(&zero[..]);
        loop {
            if prod.is_full() {
                println!("-> buffer is full, waiting");
                thread::sleep(Duration::from_millis(1));
            } else {
                let n = prod.read_from(&mut bytes, None).unwrap();
                if n == 0 {
                    break;
                }
                println!("-> {} bytes sent", n);
            }
        }

        println!("-> message sent");
    });

    let cjh = thread::spawn(move || {
        println!("<- receiving message");

        let mut bytes = Vec::<u8>::new();
        loop {
            if cons.is_empty() {
                if bytes.ends_with(&[0]) {
                    break;
                } else {
                    println!("<- buffer is empty, waiting");
                    thread::sleep(Duration::from_millis(1));
                }
            } else {
                let n = cons.write_into(&mut bytes, None).unwrap();
                println!("<- {} bytes received", n);
            }
        }

        assert_eq!(bytes.pop().unwrap(), 0);
        let msg = String::from_utf8(bytes).unwrap();
        println!("<- message received: '{}'", msg);

        msg
    });

    pjh.join().unwrap();
    let rmsg = cjh.join().unwrap();

    assert_eq!(smsg, rmsg);
}
ringbuf-0.3.3/examples/overwrite.rs000064400000000000000000000005011046102023000154710ustar 00000000000000use ringbuf::{HeapRb, Rb};

fn main() {
    let mut rb = HeapRb::<i32>::new(2);

    assert_eq!(rb.push_overwrite(0), None);
    assert_eq!(rb.push_overwrite(1), None);
    assert_eq!(rb.push_overwrite(2), Some(0));

    assert_eq!(rb.pop(), Some(1));
    assert_eq!(rb.pop(), Some(2));
    assert_eq!(rb.pop(), None);
}
ringbuf-0.3.3/examples/simple.rs000064400000000000000000000006101046102023000147350ustar 00000000000000use ringbuf::HeapRb;

fn main() {
    let rb = HeapRb::<i32>::new(2);
    let (mut prod, mut cons) = rb.split();

    prod.push(0).unwrap();
    prod.push(1).unwrap();
    assert_eq!(prod.push(2), Err(2));

    assert_eq!(cons.pop().unwrap(), 0);

    prod.push(2).unwrap();

    assert_eq!(cons.pop().unwrap(), 1);
    assert_eq!(cons.pop().unwrap(), 2);
    assert_eq!(cons.pop(), None);
}
ringbuf-0.3.3/examples/static.rs000064400000000000000000000005221046102023000147350ustar 00000000000000#![no_std]

use ringbuf::StaticRb;

fn main() {
    const RB_SIZE: usize = 1;
    let mut rb = StaticRb::<i32, RB_SIZE>::default();
    let (mut prod, mut cons) = rb.split_ref();

    assert_eq!(prod.push(123), Ok(()));
    assert_eq!(prod.push(321), Err(321));

    assert_eq!(cons.pop(), Some(123));
    assert_eq!(cons.pop(), None);
}
ringbuf-0.3.3/src/alias.rs000064400000000000000000000016221046102023000135120ustar 00000000000000use crate::{Consumer, Producer, SharedRb};
use core::mem::MaybeUninit;

#[cfg(feature = "alloc")]
use alloc::{sync::Arc, vec::Vec};

/// Stack-allocated ring buffer with static capacity.
///
/// *Capacity (`N`) must be greater than zero.*
pub type StaticRb<T, const N: usize> = SharedRb<T, [MaybeUninit<T>; N]>;

/// Alias for [`StaticRb`] [`Producer`].
pub type StaticProducer<'a, T, const N: usize> = Producer<T, &'a StaticRb<T, N>>;

/// Alias for [`StaticRb`] [`Consumer`].
pub type StaticConsumer<'a, T, const N: usize> = Consumer<T, &'a StaticRb<T, N>>;

/// Heap-allocated ring buffer.
#[cfg(feature = "alloc")]
pub type HeapRb<T> = SharedRb<T, Vec<MaybeUninit<T>>>;

/// Alias for [`HeapRb`] [`Producer`].
#[cfg(feature = "alloc")]
pub type HeapProducer<T> = Producer<T, Arc<HeapRb<T>>>;

/// Alias for [`HeapRb`] [`Consumer`].
#[cfg(feature = "alloc")]
pub type HeapConsumer<T> = Consumer<T, Arc<HeapRb<T>>>;
ringbuf-0.3.3/src/benchmarks/base.rs000064400000000000000000000031551046102023000154530ustar 00000000000000use crate::{LocalRb, SharedRb};
use test::{black_box, Bencher};

const RB_SIZE: usize = 256;
const BATCH_SIZE: usize = 100;

#[bench]
fn push_pop_shared(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        prod.push(1).unwrap();
        black_box(cons.pop().unwrap());
    });
}

#[bench]
fn push_pop_local(b: &mut Bencher) {
    let buf = LocalRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        prod.push(1).unwrap();
        black_box(cons.pop().unwrap());
    });
}

#[bench]
fn push_pop_x100_immediate(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        for _ in 0..BATCH_SIZE {
            prod.push(1).unwrap();
        }
        for _ in 0..BATCH_SIZE {
            black_box(cons.pop().unwrap());
        }
    });
}

#[bench]
fn push_pop_x100_postponed(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        {
            let mut prod_cache = prod.postponed();
            for _ in 0..BATCH_SIZE {
                prod_cache.push(1).unwrap();
            }
        }
        {
            let mut cons_cache = cons.postponed();
            for _ in 0..BATCH_SIZE {
                black_box(cons_cache.pop().unwrap());
            }
        }
    });
}
ringbuf-0.3.3/src/benchmarks/iter.rs000064400000000000000000000014561046102023000155060ustar 00000000000000use crate::HeapRb;
use test::{black_box, Bencher};

const RB_SIZE: usize = 1024;

#[bench]
fn push_iter_x1000(b: &mut Bencher) {
    let buf = HeapRb::<u64>::new(RB_SIZE);
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[0; RB_SIZE / 2]);
    cons.skip(RB_SIZE / 2);
    b.iter(|| {
        prod.push_iter(&mut (0..1000).into_iter());
        black_box(cons.as_slices());
        unsafe { cons.advance(1000) };
    });
}

#[bench]
fn pop_iter_x1000(b: &mut Bencher) {
    let buf = HeapRb::<u64>::new(RB_SIZE);
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[0; RB_SIZE / 2]);
    cons.skip(RB_SIZE / 2);
    prod.push_slice(&[1; 1000]);
    b.iter(|| {
        for x in cons.pop_iter() {
            black_box(x);
        }
        unsafe { prod.advance(1000) };
    });
}
ringbuf-0.3.3/src/benchmarks/mod.rs000064400000000000000000000000521046102023000153110ustar 00000000000000mod base;
mod iter;
mod parts;
mod slice;
ringbuf-0.3.3/src/benchmarks/parts.rs000064400000000000000000000032541046102023000156720ustar 00000000000000use crate::SharedRb;
use test::{black_box, Bencher};

const RB_SIZE: usize = 256;

#[bench]
fn make_postponed(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        black_box(prod.postponed());
        black_box(cons.postponed());
    });
}

#[bench]
fn advance(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        unsafe { prod.advance(1) };
        unsafe { cons.advance(1) };
    });
}

#[bench]
fn advance_postponed(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        unsafe { prod.postponed().advance(1) };
        unsafe { cons.postponed().advance(1) };
    });
}

#[bench]
fn get_occupied_slices(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[0; 3 * RB_SIZE / 4]);
    cons.skip(RB_SIZE);
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        black_box(unsafe { cons.as_mut_uninit_slices() });
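        // Black-box the consumer handle too so the compiler cannot cache the slice computation across iterations.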
        black_box(&mut cons);
    });
}

#[bench]
fn get_vacant_slices(b: &mut Bencher) {
    let buf = SharedRb::<u64, [_; RB_SIZE]>::default();
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[0; 1 * RB_SIZE / 4]);
    cons.skip(RB_SIZE);
    prod.push_slice(&[1; RB_SIZE / 2]);
    b.iter(|| {
        black_box(unsafe { prod.free_space_as_slices() });
        black_box(&mut prod);
    });
}
ringbuf-0.3.3/src/benchmarks/slice.rs000064400000000000000000000017731046102023000156440ustar 00000000000000use crate::HeapRb;
use test::{black_box, Bencher};

const RB_SIZE: usize = 1024;

#[bench]
fn slice_x10(b: &mut Bencher) {
    let buf = HeapRb::<u64>::new(RB_SIZE);
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    let mut data = [1; 10];
    b.iter(|| {
        prod.push_slice(&data);
        cons.pop_slice(&mut data);
        black_box(data);
    });
}

#[bench]
fn slice_x100(b: &mut Bencher) {
    let buf = HeapRb::<u64>::new(RB_SIZE);
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; RB_SIZE / 2]);
    let mut data = [1; 100];
    b.iter(|| {
        prod.push_slice(&data);
        cons.pop_slice(&mut data);
        black_box(data);
    });
}

#[bench]
fn slice_x1000(b: &mut Bencher) {
    let buf = HeapRb::<u64>::new(RB_SIZE);
    let (mut prod, mut cons) = buf.split();
    prod.push_slice(&[1; 12]);
    let mut data = [1; 1000];
    b.iter(|| {
        prod.push_slice(&data);
        cons.pop_slice(&mut data);
    });
    black_box(data);
}
ringbuf-0.3.3/src/consumer.rs000064400000000000000000000322321046102023000142550ustar 00000000000000use crate::{
    ring_buffer::{RbBase, RbRead, RbReadCache, RbRef, RbWrap},
    utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice},
};
use core::{cmp, iter::ExactSizeIterator, marker::PhantomData, mem::MaybeUninit};

#[cfg(feature = "std")]
use std::io::{self, Read, Write};

/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode could be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub struct Consumer<T, R: RbRef>
where
    R::Rb: RbRead<T>,
{
    target: R,
    _phantom: PhantomData<T>,
}

impl<T, R: RbRef> Consumer<T, R>
where
    R::Rb: RbRead<T>,
{
    /// Creates consumer from the ring buffer reference.
    ///
    /// # Safety
    ///
    /// There must be only one consumer containing the same ring buffer reference.
    pub unsafe fn new(target: R) -> Self {
        Self {
            target,
            _phantom: PhantomData,
        }
    }

    /// Returns reference to the underlying ring buffer.
    #[inline]
    pub fn rb(&self) -> &R::Rb {
        &self.target
    }

    /// Consumes `self` and returns underlying ring buffer reference.
    pub fn into_rb_ref(self) -> R {
        self.target
    }

    /// Returns postponed consumer that borrows [`Self`].
    pub fn postponed(&mut self) -> Consumer<T, RbWrap<RbReadCache<T, &R::Rb>>> {
        unsafe { Consumer::new(RbWrap(RbReadCache::new(&self.target))) }
    }

    /// Transforms [`Self`] into postponed consumer.
    pub fn into_postponed(self) -> Consumer<T, RbWrap<RbReadCache<T, R>>> {
        unsafe { Consumer::new(RbWrap(RbReadCache::new(self.target))) }
    }

    /// Returns capacity of the ring buffer.
    ///
    /// The capacity of the buffer is constant.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.target.capacity_nonzero().get()
    }

    /// Checks if the ring buffer is empty.
    ///
    /// *The result may become irrelevant at any time because of concurrent producer activity.*
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.target.is_empty()
    }

    /// Checks if the ring buffer is full.
    #[inline]
    pub fn is_full(&self) -> bool {
        self.target.is_full()
    }

    /// The number of items stored in the buffer.
    ///
    /// *The actual number may be greater than the returned value because of concurrent producer activity.*
    #[inline]
    pub fn len(&self) -> usize {
        self.target.occupied_len()
    }

    /// The number of remaining free places in the buffer.
    ///
    /// *The actual number may be less than the returned value because of concurrent producer activity.*
    #[inline]
    pub fn free_len(&self) -> usize {
        self.target.vacant_len()
    }

    /// Provides a direct access to the ring buffer occupied memory.
    /// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
    ///
    /// Returns a pair of slices of stored items, the second one may be empty.
    /// Elements with lower indices in slice are older. The first slice contains older items than the second one.
    ///
    /// # Safety
    ///
    /// All items are initialized. Elements must be removed starting from the beginning of first slice.
    /// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
    ///
    /// *This method must be followed by [`Self::advance`] call with the number of items being removed previously as argument.*
    /// *No other mutating calls allowed before that.*
    #[inline]
    pub unsafe fn as_uninit_slices(&self) -> (&[MaybeUninit<T>], &[MaybeUninit<T>]) {
        let (left, right) = self.target.occupied_slices();
        (left as &[_], right as &[_])
    }

    /// Provides a direct mutable access to the ring buffer occupied memory.
    ///
    /// Same as [`Self::as_uninit_slices`].
    ///
    /// # Safety
    ///
    /// See [`Self::as_uninit_slices`].
    #[inline]
    pub unsafe fn as_mut_uninit_slices(&self) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.target.occupied_slices()
    }

    /// Moves the `head` counter by `count` places.
    ///
    /// # Safety
    ///
    /// First `count` items in occupied memory must be moved out or dropped.
    #[inline]
    pub unsafe fn advance(&mut self, count: usize) {
        self.target.advance_head(count);
    }

    /// Returns a pair of slices which contain, in order, the contents of the ring buffer.
    #[inline]
    pub fn as_slices(&self) -> (&[T], &[T]) {
        unsafe {
            let (left, right) = self.as_uninit_slices();
            (slice_assume_init_ref(left), slice_assume_init_ref(right))
        }
    }

    /// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
    #[inline]
    pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
        unsafe {
            let (left, right) = self.as_mut_uninit_slices();
            (slice_assume_init_mut(left), slice_assume_init_mut(right))
        }
    }

    /// Removes the eldest item from the ring buffer and returns it.
    ///
    /// Returns `None` if the ring buffer is empty.
    pub fn pop(&mut self) -> Option<T> {
        if !self.is_empty() {
            let elem = unsafe {
                self.as_uninit_slices()
                    .0
                    .get_unchecked(0)
                    .assume_init_read()
            };
            unsafe { self.advance(1) };
            Some(elem)
        } else {
            None
        }
    }

    /// Returns an iterator that removes items one by one from the ring buffer.
    ///
    /// Iterator provides only items that are available for consumer at the moment of `pop_iter` call, it will not contain new items added after it was created.
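    ///
    /// A minimal usage sketch (assuming the default `alloc` feature, as in the other examples):
    ///
    /// ```
    /// # use ringbuf::HeapRb;
    /// let (mut prod, mut cons) = HeapRb::<i32>::new(4).split();
    /// prod.push(10).unwrap();
    /// prod.push(20).unwrap();
    /// assert_eq!(cons.pop_iter().collect::<Vec<_>>(), vec![10, 20]);
    /// ```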
    ///
    /// *Information about removed items is committed to the buffer only when the iterator is destroyed.*
    pub fn pop_iter(&mut self) -> PopIterator<'_, T, R> {
        PopIterator::new(&self.target)
    }

    /// Returns a front-to-back iterator containing references to items in the ring buffer.
    ///
    /// This iterator does not remove items out of the ring buffer.
    pub fn iter(&self) -> impl Iterator<Item = &T> + '_ {
        let (left, right) = self.as_slices();
        left.iter().chain(right.iter())
    }

    /// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
    ///
    /// This iterator does not remove items out of the ring buffer.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
        let (left, right) = self.as_mut_slices();
        left.iter_mut().chain(right.iter_mut())
    }

    /// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
    ///
    /// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
    ///
    /// Returns the number of deleted items.
    ///
    #[cfg_attr(
        feature = "alloc",
        doc = r##"
```rust
# extern crate ringbuf;
# use ringbuf::HeapRb;
# fn main() {
let target = HeapRb::<i32>::new(8);
let (mut prod, mut cons) = target.split();

assert_eq!(prod.push_iter(&mut (0..8)), 8);

assert_eq!(cons.skip(4), 4);
assert_eq!(cons.skip(8), 4);
assert_eq!(cons.skip(8), 0);
# }
```
"##
    )]
    pub fn skip(&mut self, count: usize) -> usize {
        let count = cmp::min(count, self.len());
        assert_eq!(unsafe { self.target.skip_internal(Some(count)) }, count);
        count
    }

    /// Removes all items from the buffer and safely drops them.
    ///
    /// Returns the number of deleted items.
    pub fn clear(&mut self) -> usize {
        unsafe { self.target.skip_internal(None) }
    }
}

/// An iterator that removes items from the ring buffer.
pub struct PopIterator<'a, T, R: RbRef + ?Sized>
where
    R::Rb: RbRead<T>,
{
    target: &'a R,
    slices: (&'a [MaybeUninit<T>], &'a [MaybeUninit<T>]),
    initial_len: usize,
}

impl<'a, T, R: RbRef + ?Sized> PopIterator<'a, T, R>
where
    R::Rb: RbRead<T>,
{
    pub(crate) fn new(target: &'a R) -> Self {
        let slices = unsafe { target.occupied_slices() };
        Self {
            target,
            initial_len: slices.0.len() + slices.1.len(),
            slices: (slices.0, slices.1),
        }
    }
}

impl<'a, T, R: RbRef + ?Sized> Iterator for PopIterator<'a, T, R>
where
    R::Rb: RbRead<T>,
{
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        match self.slices.0.len() {
            0 => None,
            n => {
                let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
                if n == 1 {
                    (self.slices.0, self.slices.1) = (self.slices.1, &[]);
                } else {
                    self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
                }
                Some(item)
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
}

impl<'a, T, R: RbRef + ?Sized> ExactSizeIterator for PopIterator<'a, T, R>
where
    R::Rb: RbRead<T>,
{
    fn len(&self) -> usize {
        self.slices.0.len() + self.slices.1.len()
    }
}

impl<'a, T, R: RbRef + ?Sized> Drop for PopIterator<'a, T, R>
where
    R::Rb: RbRead<T>,
{
    fn drop(&mut self) {
        unsafe { self.target.advance_head(self.initial_len - self.len()) };
    }
}

impl<T: Copy, R: RbRef> Consumer<T, R>
where
    R::Rb: RbRead<T>,
{
    /// Removes first items from the ring buffer and writes them into a slice.
    /// Elements must be [`Copy`].
    ///
    /// Returns the number of items that have been removed from the ring buffer.
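    ///
    /// A minimal sketch of batch removal through a slice (assuming the default `alloc` feature):
    ///
    /// ```
    /// # use ringbuf::HeapRb;
    /// let (mut prod, mut cons) = HeapRb::<u8>::new(4).split();
    /// assert_eq!(prod.push_slice(&[1, 2, 3]), 3);
    /// let mut buf = [0; 8];
    /// assert_eq!(cons.pop_slice(&mut buf), 3);
    /// assert_eq!(&buf[..3], &[1, 2, 3]);
    /// ```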
    pub fn pop_slice(&mut self, elems: &mut [T]) -> usize {
        let (left, right) = unsafe { self.as_uninit_slices() };
        let count = if elems.len() < left.len() {
            unsafe { write_uninit_slice(elems, &left[..elems.len()]) };
            elems.len()
        } else {
            let (left_elems, elems) = elems.split_at_mut(left.len());
            unsafe { write_uninit_slice(left_elems, left) };
            left.len()
                + if elems.len() < right.len() {
                    unsafe { write_uninit_slice(elems, &right[..elems.len()]) };
                    elems.len()
                } else {
                    unsafe { write_uninit_slice(&mut elems[..right.len()], right) };
                    right.len()
                }
        };
        unsafe { self.advance(count) };
        count
    }
}

/// Postponed consumer.
pub type PostponedConsumer<T, R> = Consumer<T, RbWrap<RbReadCache<T, R>>>;

impl<T, R: RbRef> PostponedConsumer<T, R>
where
    R::Rb: RbRead<T>,
{
    /// Create new postponed consumer.
    ///
    /// # Safety
    ///
    /// There must be only one consumer containing the same ring buffer reference.
    pub unsafe fn new_postponed(target: R) -> Self {
        Consumer::new(RbWrap(RbReadCache::new(target)))
    }

    /// Synchronize changes with the ring buffer.
    ///
    /// Postponed consumer requires manual synchronization to make freed space visible for the producer.
    pub fn sync(&mut self) {
        self.target.0.sync();
    }

    /// Synchronize and transform back to immediate consumer.
    pub fn into_immediate(self) -> Consumer<T, R> {
        unsafe { Consumer::new(self.target.0.release()) }
    }
}

#[cfg(feature = "std")]
impl<R: RbRef> Consumer<u8, R>
where
    R::Rb: RbRead<u8>,
{
    /// Removes at most first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
    /// If `count` is `None` then as many bytes as possible will be written.
    ///
    /// Returns `Ok(n)` if `write` succeeded. `n` is the number of bytes that have been written.
    /// `n == 0` means that either `write` returned zero or the ring buffer is empty.
    ///
    /// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
    /// To achieve this we write only one contiguous slice at once. So this call may write fewer than `count` items even if the writer is ready to get more.
    pub fn write_into<P: Write>(
        &mut self,
        writer: &mut P,
        count: Option<usize>,
    ) -> io::Result<usize> {
        let (left, _) = unsafe { self.as_uninit_slices() };
        let count = cmp::min(count.unwrap_or(left.len()), left.len());
        let left_init = unsafe { slice_assume_init_ref(&left[..count]) };

        let write_count = writer.write(left_init)?;
        assert!(write_count <= count);
        unsafe { self.advance(write_count) };
        Ok(write_count)
    }
}

#[cfg(feature = "std")]
impl<R: RbRef> Read for Consumer<u8, R>
where
    R::Rb: RbRead<u8>,
{
    fn read(&mut self, buffer: &mut [u8]) -> io::Result<usize> {
        let n = self.pop_slice(buffer);
        if n == 0 && !buffer.is_empty() {
            Err(io::ErrorKind::WouldBlock.into())
        } else {
            Ok(n)
        }
    }
}
ringbuf-0.3.3/src/lib.rs000064400000000000000000000112421046102023000131660ustar 00000000000000//! Lock-free SPSC FIFO ring buffer with direct access to inner data.
//!
//! # Features
//!
//! + Lock-free operations - they succeed or fail immediately without blocking or waiting.
//! + Arbitrary item type (not only [`Copy`]).
//! + Items can be inserted and removed one by one or many at once.
//! + Thread-safe direct access to the internal ring buffer memory.
//! + [`Read`](`std::io::Read`) and [`Write`](`std::io::Write`) implementations.
//! + Can be used without `std` and even without `alloc` (using only statically-allocated memory).
//! + [Experimental `async`/`.await` support](https://github.com/agerasev/async-ringbuf).
//!
//! # Usage
//!
//! First you need to create the ring buffer itself. [`HeapRb`] is recommended but you may [choose another one](#types).
//!
//! Once the ring buffer is created, it may be split into a pair of [`Producer`] and [`Consumer`].
//! [`Producer`] is used to insert items into the ring buffer, [`Consumer`] - to remove items from it.
//! For [`SharedRb`] and its derivatives, they can be used in different threads.
//!
//! Also you can use the ring buffer without splitting at all via methods provided by the [`Rb`] trait.
//!
//! # Types
//!
//! There are several types of ring buffers provided:
//!
//! + [`LocalRb`]. Only for single-threaded use.
//! + [`SharedRb`]. Can be shared between threads. Its derivatives:
//!   + [`HeapRb`]. Contents are stored in dynamic memory. *Recommended for use in most cases.*
//!   + [`StaticRb`]. Contents can be stored in statically-allocated memory.
//!
//! # Performance
//!
//! [`SharedRb`] needs to synchronize CPU cache between CPU cores. This synchronization has some overhead.
//! To avoid multiple unnecessary synchronizations you may use the postponed mode of operation (see description for [`Producer#mode`] and [`Consumer#mode`])
//! or methods that operate on many items at once ([`Producer::push_slice`]/[`Producer::push_iter`], [`Consumer::pop_slice`], etc.).
//!
//! For single-threaded usage [`LocalRb`] is recommended because it is faster than [`SharedRb`] due to the absence of CPU cache synchronization.
//!
//! ## Benchmarks
//!
//! You may see typical performance of different methods in benchmarks:
//!
//! ```bash
//! cargo +nightly bench --features bench
//! ```
//!
//! Nightly toolchain is required.
//!
//! # Examples
//!
#![cfg_attr(
    feature = "alloc",
    doc = r##"
## Simple

```rust
use ringbuf::HeapRb;

# fn main() {
let rb = HeapRb::<i32>::new(2);
let (mut prod, mut cons) = rb.split();

prod.push(0).unwrap();
prod.push(1).unwrap();
assert_eq!(prod.push(2), Err(2));

assert_eq!(cons.pop(), Some(0));

prod.push(2).unwrap();

assert_eq!(cons.pop(), Some(1));
assert_eq!(cons.pop(), Some(2));
assert_eq!(cons.pop(), None);
# }
```
"##
)]
#![doc = r##"
## No heap

```rust
use ringbuf::StaticRb;

# fn main() {
const RB_SIZE: usize = 1;
let mut rb = StaticRb::<i32, RB_SIZE>::default();
let (mut prod, mut cons) = rb.split_ref();

assert_eq!(prod.push(123), Ok(()));
assert_eq!(prod.push(321), Err(321));

assert_eq!(cons.pop(), Some(123));
assert_eq!(cons.pop(), None);
# }
```
"##]
#![cfg_attr(
    feature = "std",
    doc = r##"
## Overwrite

Ring buffer can be used in overwriting mode when insertion overwrites the oldest element if the buffer is full.

```rust
use ringbuf::{HeapRb, Rb};

# fn main() {
let mut rb = HeapRb::<i32>::new(2);

assert_eq!(rb.push_overwrite(0), None);
assert_eq!(rb.push_overwrite(1), None);
assert_eq!(rb.push_overwrite(2), Some(0));

assert_eq!(rb.pop(), Some(1));
assert_eq!(rb.pop(), Some(2));
assert_eq!(rb.pop(), None);
# }
```

Note that [`push_overwrite`](`Rb::push_overwrite`) requires exclusive access to the ring buffer
so to perform it concurrently you need to guard the ring buffer with [`Mutex`](`std::sync::Mutex`) or some other lock.
"##
)]
//! ## `async`/`.await`
//!
//! There is an experimental crate [`async-ringbuf`](https://github.com/agerasev/async-ringbuf)
//! which is built on top of `ringbuf` and implements asynchronous ring buffer operations.
//!
#![no_std]
#![cfg_attr(feature = "bench", feature(test))]

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;

mod alias;
mod utils;

/// [`Consumer`] and additional types.
pub mod consumer;
/// [`Producer`] and additional types.
pub mod producer;
/// Ring buffer traits and implementations.
pub mod ring_buffer;
mod transfer;

#[cfg(feature = "alloc")]
pub use alias::{HeapConsumer, HeapProducer, HeapRb};
pub use alias::{StaticConsumer, StaticProducer, StaticRb};
pub use consumer::Consumer;
pub use producer::Producer;
pub use ring_buffer::{LocalRb, Rb, SharedRb};
pub use transfer::transfer;

#[cfg(test)]
mod tests;

#[cfg(feature = "bench")]
extern crate test;
#[cfg(feature = "bench")]
mod benchmarks;
ringbuf-0.3.3/src/producer.rs000064400000000000000000000224641046102023000142510ustar 00000000000000use crate::{
    ring_buffer::{RbBase, RbRef, RbWrap, RbWrite, RbWriteCache},
    utils::write_slice,
};
use core::{marker::PhantomData, mem::MaybeUninit};

#[cfg(feature = "std")]
use crate::utils::slice_assume_init_mut;
#[cfg(feature = "std")]
use core::cmp;
#[cfg(feature = "std")]
use std::io::{self, Read, Write};

/// Producer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode could be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub struct Producer<T, R: RbRef>
where
    R::Rb: RbWrite<T>,
{
    target: R,
    _phantom: PhantomData<T>,
}

impl<T, R: RbRef> Producer<T, R>
where
    R::Rb: RbWrite<T>,
{
    /// Creates producer from the ring buffer reference.
    ///
    /// # Safety
    ///
    /// There must be only one producer containing the same ring buffer reference.
    pub unsafe fn new(target: R) -> Self {
        Self {
            target,
            _phantom: PhantomData,
        }
    }

    /// Returns reference to the underlying ring buffer.
    #[inline]
    pub fn rb(&self) -> &R::Rb {
        &self.target
    }

    /// Consumes `self` and returns underlying ring buffer reference.
    pub fn into_rb_ref(self) -> R {
        self.target
    }

    /// Returns postponed producer that borrows [`Self`].
    pub fn postponed(&mut self) -> PostponedProducer<T, &R::Rb> {
        unsafe { Producer::new(RbWrap(RbWriteCache::new(&self.target))) }
    }

    /// Transforms [`Self`] into postponed producer.
    pub fn into_postponed(self) -> PostponedProducer<T, R> {
        unsafe { Producer::new(RbWrap(RbWriteCache::new(self.target))) }
    }

    /// Returns capacity of the ring buffer.
    ///
    /// The capacity of the buffer is constant.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.target.capacity_nonzero().get()
    }

    /// Checks if the ring buffer is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.target.is_empty()
    }

    /// Checks if the ring buffer is full.
    ///
    /// *The result may become irrelevant at any time because of concurrent consumer activity.*
    #[inline]
    pub fn is_full(&self) -> bool {
        self.target.is_full()
    }

    /// The number of items stored in the buffer.
    ///
    /// *The actual number may be less than the returned value because of concurrent consumer activity.*
    #[inline]
    pub fn len(&self) -> usize {
        self.target.occupied_len()
    }

    /// The number of remaining free places in the buffer.
    ///
    /// *The actual number may be greater than the returned value because of concurrent consumer activity.*
    #[inline]
    pub fn free_len(&self) -> usize {
        self.target.vacant_len()
    }

    /// Provides a direct access to the ring buffer vacant memory.
    /// Returns a pair of slices of uninitialized memory, the second one may be empty.
    ///
    /// # Safety
    ///
    /// Vacant memory is uninitialized.
    /// Initialized items must be put starting from the beginning of first slice.
    /// When first slice is fully filled then items must be put to the beginning of the second slice.
    ///
    /// *This method must be followed by `Self::advance` call with the number of items being put previously as argument.*
    /// *No other mutating calls allowed before that.*
    #[inline]
    pub unsafe fn free_space_as_slices(
        &mut self,
    ) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.target.vacant_slices()
    }

    /// Moves `tail` counter by `count` places.
    ///
    /// # Safety
    ///
    /// First `count` items in free space must be initialized.
    #[inline]
    pub unsafe fn advance(&mut self, count: usize) {
        self.target.advance_tail(count)
    }

    /// Appends an item to the ring buffer.
    ///
    /// On failure returns an `Err` containing the item that hasn't been appended.
    pub fn push(&mut self, elem: T) -> Result<(), T> {
        if !self.is_full() {
            unsafe {
                self.free_space_as_slices()
                    .0
                    .get_unchecked_mut(0)
                    .write(elem)
            };
            unsafe { self.advance(1) };
            Ok(())
        } else {
            Err(elem)
        }
    }

    /// Appends items from an iterator to the ring buffer.
    /// Elements that haven't been added to the ring buffer remain in the iterator.
    ///
    /// Returns the number of items that have been appended to the ring buffer.
    ///
    /// *Inserted items are committed to the ring buffer all at once in the end,*
    /// *e.g. when buffer is full or iterator has ended.*
    pub fn push_iter<I: Iterator<Item = T>>(&mut self, iter: &mut I) -> usize {
        let (left, right) = unsafe { self.free_space_as_slices() };
        let mut count = 0;
        for place in left.iter_mut().chain(right.iter_mut()) {
            match iter.next() {
                Some(elem) => unsafe { place.as_mut_ptr().write(elem) },
                None => break,
            }
            count += 1;
        }
        unsafe { self.advance(count) };
        count
    }
}

impl<T: Copy, R: RbRef> Producer<T, R>
where
    R::Rb: RbWrite<T>,
{
    /// Appends items from slice to the ring buffer.
    /// Elements must be [`Copy`].
    ///
    /// Returns the number of items that have been appended to the ring buffer.
    pub fn push_slice(&mut self, elems: &[T]) -> usize {
        let (left, right) = unsafe { self.free_space_as_slices() };
        let count = if elems.len() < left.len() {
            write_slice(&mut left[..elems.len()], elems);
            elems.len()
        } else {
            let (left_elems, elems) = elems.split_at(left.len());
            write_slice(left, left_elems);
            left.len()
                + if elems.len() < right.len() {
                    write_slice(&mut right[..elems.len()], elems);
                    elems.len()
                } else {
                    write_slice(right, &elems[..right.len()]);
                    right.len()
                }
        };
        unsafe { self.advance(count) };
        count
    }
}

/// Postponed producer.
pub type PostponedProducer<T, R> = Producer<T, RbWrap<RbWriteCache<T, R>>>;

impl<T, R: RbRef> PostponedProducer<T, R>
where
    R::Rb: RbWrite<T>,
{
    /// Create new postponed producer.
    ///
    /// # Safety
    ///
    /// There must be only one producer containing the same ring buffer reference.
    pub unsafe fn new_postponed(target: R) -> Self {
        Producer::new(RbWrap(RbWriteCache::new(target)))
    }

    /// Synchronize changes with the ring buffer.
    ///
    /// Postponed producer requires manual synchronization to make pushed items visible for the consumer.
    pub fn sync(&mut self) {
        self.target.0.sync();
    }

    /// Don't publish and drop items inserted since last synchronization.
    pub fn discard(&mut self) {
        self.target.0.discard();
    }

    /// Synchronize and transform back to immediate producer.
    pub fn into_immediate(self) -> Producer<T, R> {
        unsafe { Producer::new(self.target.0.release()) }
    }
}

#[cfg(feature = "std")]
impl<R: RbRef> Producer<u8, R>
where
    R::Rb: RbWrite<u8>,
{
    /// Reads at most `count` bytes from `Read` instance and appends them to the ring buffer.
    /// If `count` is `None` then as many bytes as possible will be read.
    ///
    /// Returns `Ok(n)` if `read` succeeded. `n` is the number of bytes that have been read.
    /// `n == 0` means that either `read` returned zero or the ring buffer is full.
    ///
    /// If `read` fails then the original error is returned. In this case it is guaranteed that no items were read from the reader.
    /// To achieve this we read only one contiguous slice at once. So this call may read fewer than `count` bytes even if the reader is ready to provide more.
    pub fn read_from<P: Read>(
        &mut self,
        reader: &mut P,
        count: Option<usize>,
    ) -> io::Result<usize> {
        let (left, _) = unsafe { self.free_space_as_slices() };
        let count = cmp::min(count.unwrap_or(left.len()), left.len());
        let left_init = unsafe { slice_assume_init_mut(&mut left[..count]) };

        let read_count = reader.read(left_init)?;
        assert!(read_count <= count);
        unsafe { self.advance(read_count) };
        Ok(read_count)
    }
}

#[cfg(feature = "std")]
impl<R: RbRef> Write for Producer<u8, R>
where
    R::Rb: RbWrite<u8>,
{
    fn write(&mut self, buffer: &[u8]) -> io::Result<usize> {
        let n = self.push_slice(buffer);
        if n == 0 && !buffer.is_empty() {
            Err(io::ErrorKind::WouldBlock.into())
        } else {
            Ok(n)
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl<R: RbRef> core::fmt::Write for Producer<u8, R>
where
    R::Rb: RbWrite<u8>,
{
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        let n = self.push_slice(s.as_bytes());
        if n != s.len() {
            Err(core::fmt::Error::default())
        } else {
            Ok(())
        }
    }
}
ringbuf-0.3.3/src/ring_buffer/base.rs000064400000000000000000000204671046102023000156300ustar 00000000000000use crate::utils::ring_buffer_ranges;
use core::{mem::MaybeUninit, num::NonZeroUsize, ops::Range, ptr};

/// Basic ring buffer functionality.
///
/// Provides an access to raw underlying memory and `head`/`tail` counters.
///
/// *It is recommended not to use this trait directly. Use [`Producer`](`crate::Producer`) and [`Consumer`](`crate::Consumer`) instead.*
///
/// # Details
///
/// The ring buffer consists of an array (of `capacity` size) and two counters: `head` and `tail`.
/// When an item is extracted from the ring buffer it is taken from the `head` position and after that `head` is incremented.
/// New item is appended to the `tail` position and `tail` is incremented after that.
///
/// The `head` and `tail` counters are modulo `2 * capacity` (not just `capacity`).
/// It allows us to distinguish situations when the buffer is empty (`head == tail`) and when the buffer is full (`tail - head` modulo `2 * capacity` equals `capacity`)
/// without using the space for an extra element in container.
/// And obviously we cannot store more than `capacity` items in the buffer, so `tail - head` modulo `2 * capacity` is not allowed to be greater than `capacity`.
pub trait RbBase<T> {
    /// Returns part of underlying raw ring buffer memory as slices.
    ///
    /// For more information see [`SharedStorage::as_mut_slices`](`crate::ring_buffer::storage::SharedStorage::as_mut_slices`).
    ///
    /// # Safety
    ///
    /// Only non-overlapping slices allowed to exist at the same time.
    ///
    /// Modifications of this data must properly update `head` and `tail` positions.
    ///
    /// *Accessing raw data is extremely unsafe.*
    /// It is recommended to use [`Consumer::as_slices`](`crate::Consumer::as_slices`) and [`Producer::free_space_as_slices`](`crate::Producer::free_space_as_slices`) instead.
    unsafe fn slices(
        &self,
        head: usize,
        tail: usize,
    ) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]);

    /// Capacity of the ring buffer.
    ///
    /// It is constant during the whole ring buffer lifetime.
    fn capacity_nonzero(&self) -> NonZeroUsize;

    /// Head position.
    fn head(&self) -> usize;

    /// Tail position.
    fn tail(&self) -> usize;

    /// Modulus for `head` and `tail` values.
    ///
    /// Equals `2 * capacity`.
    #[inline]
    fn modulus(&self) -> NonZeroUsize {
        unsafe { NonZeroUsize::new_unchecked(2 * self.capacity_nonzero().get()) }
    }

    /// The number of items stored in the buffer at the moment.
    fn occupied_len(&self) -> usize {
        let modulus = self.modulus();
        (modulus.get() + self.tail() - self.head()) % modulus
    }

    /// The number of vacant places in the buffer at the moment.
    fn vacant_len(&self) -> usize {
        let modulus = self.modulus();
        (self.capacity_nonzero().get() + self.head() - self.tail()) % modulus
    }

    /// Checks if the occupied range is empty.
    fn is_empty(&self) -> bool {
        self.head() == self.tail()
    }

    /// Checks if the vacant range is empty.
    fn is_full(&self) -> bool {
        self.vacant_len() == 0
    }
}

/// Ring buffer read end.
///
/// Provides access to occupied memory and mechanism of item extraction.
///
/// *It is recommended not to use this trait directly. Use [`Producer`](`crate::Producer`) and [`Consumer`](`crate::Consumer`) instead.*
pub trait RbRead<T>: RbBase<T> {
    /// Sets the new **head** position.
    ///
    /// # Safety
    ///
    /// This call must cohere with ring buffer data modification.
    ///
    /// It is recommended to use `Self::advance_head` instead.
    unsafe fn set_head(&self, value: usize);

    /// Move **head** position by `count` items forward.
    ///
    /// # Safety
    ///
    /// First `count` items in occupied area must be **moved out or dropped** before this call.
    ///
    /// *In debug mode panics if `count` is greater than the number of items in the ring buffer.*
    unsafe fn advance_head(&self, count: usize) {
        debug_assert!(count <= self.occupied_len());
        self.set_head((self.head() + count) % self.modulus());
    }

    /// Returns a pair of ranges of [`Self::occupied_slices`] location in underlying container.
    #[inline]
    fn occupied_ranges(&self) -> (Range<usize>, Range<usize>) {
        ring_buffer_ranges(self.capacity_nonzero(), self.head(), self.tail())
    }

    /// Provides a direct mutable access to the ring buffer occupied memory.
    ///
    /// Returns a pair of slices of stored items, the second one may be empty.
    /// Elements with lower indices in slice are older. The first slice contains older items than the second one.
    ///
    /// # Safety
    ///
    /// All items are initialized. Elements must be removed starting from the beginning of first slice.
    /// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
    ///
    /// *This method must be followed by [`Self::advance_head`] call with the number of items being removed previously as argument.*
    /// *No other mutating calls allowed before that.*
    #[inline]
    unsafe fn occupied_slices(&self) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.slices(self.head(), self.tail())
    }

    /// Removes items from the head of ring buffer and drops them.
    ///
    /// + If `count_or_all` is `Some(count)` then exactly `count` items will be removed.
    /// *In debug mode panics if `count` is greater than the number of items stored in the buffer.*
    /// + If `count_or_all` is `None` then all items in ring buffer will be removed.
    /// *If there is concurrent producer activity then the buffer may be not empty after this call.*
    ///
    /// Returns the number of removed items.
    ///
    /// # Safety
    ///
    /// Must not be called concurrently.
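    ///
    /// A minimal sketch of calling it directly on a single thread (normally you would use [`Consumer::skip`](`crate::Consumer::skip`) instead):
    ///
    /// ```
    /// # use ringbuf::{ring_buffer::RbRead, HeapRb};
    /// let (mut prod, cons) = HeapRb::<i32>::new(2).split();
    /// prod.push(1).unwrap();
    /// // SAFETY: there is no concurrent access to the ring buffer here.
    /// assert_eq!(unsafe { cons.rb().skip_internal(None) }, 1);
    /// ```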
    unsafe fn skip_internal(&self, count_or_all: Option<usize>) -> usize {
        let (left, right) = self.occupied_slices();
        let count = match count_or_all {
            Some(count) => {
                debug_assert!(count <= left.len() + right.len());
                count
            }
            None => left.len() + right.len(),
        };
        for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
            ptr::drop_in_place(elem.as_mut_ptr());
        }
        self.advance_head(count);
        count
    }
}

/// Ring buffer write end.
///
/// Provides access to vacant memory and a mechanism for item insertion.
///
/// *It is recommended not to use this trait directly. Use [`Producer`](`crate::Producer`) and [`Consumer`](`crate::Consumer`) instead.*
pub trait RbWrite<T>: RbBase<T> {
    /// Sets the new **tail** position.
    ///
    /// # Safety
    ///
    /// This call must cohere with ring buffer data modification.
    ///
    /// It is recommended to use `Self::advance_tail` instead.
    unsafe fn set_tail(&self, value: usize);

    /// Moves the **tail** position forward by `count` items.
    ///
    /// # Safety
    ///
    /// The first `count` items in the vacant area must be **initialized** before this call.
    ///
    /// *In debug mode panics if `count` is greater than the number of vacant places in the ring buffer.*
    unsafe fn advance_tail(&self, count: usize) {
        debug_assert!(count <= self.vacant_len());
        self.set_tail((self.tail() + count) % self.modulus());
    }

    /// Returns a pair of ranges of the [`Self::vacant_slices`] location in the underlying container.
    #[inline]
    fn vacant_ranges(&self) -> (Range<usize>, Range<usize>) {
        ring_buffer_ranges(
            self.capacity_nonzero(),
            self.tail(),
            self.head() + self.capacity_nonzero().get(),
        )
    }

    /// Provides direct access to the ring buffer's vacant memory.
    /// Returns a pair of slices of uninitialized memory; the second one may be empty.
    ///
    /// # Safety
    ///
    /// Vacant memory is uninitialized. Initialized items must be put starting from the beginning of the first slice.
    /// When the first slice is fully filled, items must be put at the beginning of the second slice.
    ///
    /// *This method must be followed by a [`Self::advance_tail`] call with the number of inserted items as its argument.*
    /// *No other mutating calls are allowed before that.*
    #[inline]
    unsafe fn vacant_slices(&self) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.slices(self.tail(), self.head() + self.capacity_nonzero().get())
    }
}
ringbuf-0.3.3/src/ring_buffer/cache.rs000064400000000000000000000123301046102023000157520ustar 00000000000000use super::{RbBase, RbRead, RbRef, RbWrite};
use core::{cell::Cell, marker::PhantomData, mem::MaybeUninit, num::NonZeroUsize, ptr};

/// Caching read end of some ring buffer.
///
/// The free space of removed items is not visible to the opposite write end until [`Self::commit`]/[`Self::sync`] is called or `Self` is dropped.
/// Items inserted by the opposite write end are not visible to `Self` until [`Self::sync`] is called.
///
/// Used to implement [`PostponedConsumer`](`crate::consumer::PostponedConsumer`).
pub struct RbReadCache<T, R: RbRef>
where
    R::Rb: RbRead<T>,
{
    target: R,
    head: Cell<usize>,
    tail: usize,
    _phantom: PhantomData<T>,
}

/// Caching write end of some ring buffer.
///
/// Inserted items are not visible to the opposite read end until [`Self::commit`]/[`Self::sync`] is called or `Self` is dropped.
/// The free space of items removed by the opposite read end is not visible to `Self` until [`Self::sync`] is called.
///
/// Used to implement [`PostponedProducer`](`crate::producer::PostponedProducer`).
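///
/// A sketch of typical usage through a postponed producer (mirroring the crate's `cached` tests; `HeapRb` requires the `alloc` feature):
///
/// ```
/// use ringbuf::HeapRb;
///
/// let (mut prod, mut cons) = HeapRb::<i32>::new(2).split();
/// let mut post_prod = prod.postponed();
/// post_prod.push(0).unwrap();
/// // The item is only staged in the cache, so the consumer does not see it yet.
/// assert_eq!(cons.pop(), None);
/// post_prod.sync();
/// // After `sync` (or `commit`, or dropping the postponed producer) it becomes visible.
/// assert_eq!(cons.pop(), Some(0));
/// ```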
pub struct RbWriteCache where R::Rb: RbWrite, { target: R, head: usize, tail: Cell, _phantom: PhantomData, } impl RbBase for RbReadCache where R::Rb: RbRead, { #[inline] unsafe fn slices( &self, head: usize, tail: usize, ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { self.target.slices(head, tail) } #[inline] fn capacity_nonzero(&self) -> NonZeroUsize { self.target.capacity_nonzero() } #[inline] fn head(&self) -> usize { self.head.get() } #[inline] fn tail(&self) -> usize { self.tail } } impl RbBase for RbWriteCache where R::Rb: RbWrite, { #[inline] unsafe fn slices( &self, head: usize, tail: usize, ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { self.target.slices(head, tail) } #[inline] fn capacity_nonzero(&self) -> NonZeroUsize { self.target.capacity_nonzero() } #[inline] fn head(&self) -> usize { self.head } #[inline] fn tail(&self) -> usize { self.tail.get() } } impl RbRead for RbReadCache where R::Rb: RbRead, { #[inline] unsafe fn set_head(&self, value: usize) { self.head.set(value); } } impl RbWrite for RbWriteCache where R::Rb: RbWrite, { #[inline] unsafe fn set_tail(&self, value: usize) { self.tail.set(value); } } impl Drop for RbReadCache where R::Rb: RbRead, { fn drop(&mut self) { self.commit(); } } impl Drop for RbWriteCache where R::Rb: RbWrite, { fn drop(&mut self) { self.commit(); } } impl RbReadCache where R::Rb: RbRead, { /// Create new ring buffer cache. /// /// # Safety /// /// There must be only one instance containing the same ring buffer reference. pub unsafe fn new(rb_ref: R) -> Self { Self { head: Cell::new(rb_ref.head()), tail: rb_ref.tail(), target: rb_ref, _phantom: PhantomData, } } /// Commit changes to the ring buffer. pub fn commit(&mut self) { unsafe { self.target.set_head(self.head.get()) } } /// Commit changes and fetch updates from the ring buffer. pub fn sync(&mut self) { self.commit(); self.tail = self.target.tail(); } /// Commit and destroy `Self` returning underlying ring buffer. pub fn release(mut self) -> R { self.commit(); let self_uninit = MaybeUninit::new(self); unsafe { ptr::read(&self_uninit.assume_init_ref().target) } // Self will not be dropped. } } impl RbWriteCache where R::Rb: RbWrite, { /// Create new ring buffer cache. /// /// # Safety /// /// There must be only one instance containing the same ring buffer reference. pub unsafe fn new(rb_ref: R) -> Self { Self { head: rb_ref.head(), tail: Cell::new(rb_ref.tail()), target: rb_ref, _phantom: PhantomData, } } /// Commit changes to the ring buffer. pub fn commit(&mut self) { unsafe { self.target.set_tail(self.tail.get()) } } /// Discard new items pushed since last sync. pub fn discard(&mut self) { let last_tail = self.target.tail(); let (first, second) = unsafe { self.target.slices(last_tail, self.tail.get()) }; for item_mut in first.iter_mut().chain(second.iter_mut()) { unsafe { item_mut.assume_init_drop() }; } self.tail.set(last_tail); } /// Commit changes and fetch updates from the ring buffer. pub fn sync(&mut self) { self.commit(); self.head = self.target.head(); } /// Commit and destroy `Self` returning underlying ring buffer. pub fn release(mut self) -> R { self.commit(); let self_uninit = MaybeUninit::new(self); unsafe { ptr::read(&self_uninit.assume_init_ref().target) } // Self will not be dropped. 
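        // Wrapping `self` in `MaybeUninit` suppresses its destructor, so the `commit`
        // above is the only cleanup that runs while `target` is moved out by value.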
} } ringbuf-0.3.3/src/ring_buffer/init.rs000064400000000000000000000030201046102023000156460ustar 00000000000000use super::{LocalRb, SharedRb}; use crate::utils::uninit_array; use core::mem::MaybeUninit; #[cfg(feature = "alloc")] use alloc::{collections::TryReserveError, vec::Vec}; impl Default for LocalRb; N]> { fn default() -> Self { unsafe { Self::from_raw_parts(uninit_array(), 0, 0) } } } impl Default for SharedRb; N]> { fn default() -> Self { unsafe { Self::from_raw_parts(uninit_array(), 0, 0) } } } #[cfg(feature = "alloc")] impl LocalRb>> { /// Creates a new instance of a ring buffer. /// /// *Panics if `capacity` is zero.* pub fn new(capacity: usize) -> Self { let mut data = Vec::new(); data.resize_with(capacity, MaybeUninit::uninit); unsafe { Self::from_raw_parts(data, 0, 0) } } } #[cfg(feature = "alloc")] impl SharedRb>> { /// Creates a new instance of a ring buffer. /// /// *Panics if allocation failed or `capacity` is zero.* pub fn new(capacity: usize) -> Self { Self::try_new(capacity).unwrap() } /// Creates a new instance of a ring buffer returning an error if allocation failed. /// /// *Panics if `capacity` is zero.* pub fn try_new(capacity: usize) -> Result { let mut data = Vec::new(); data.try_reserve_exact(capacity)?; data.resize_with(capacity, MaybeUninit::uninit); Ok(unsafe { Self::from_raw_parts(data, 0, 0) }) } } ringbuf-0.3.3/src/ring_buffer/local.rs000064400000000000000000000072541046102023000160120ustar 00000000000000use super::{Container, Rb, RbBase, RbRead, RbWrite, SharedStorage}; use crate::{consumer::Consumer, producer::Producer}; use core::{ cell::Cell, mem::{ManuallyDrop, MaybeUninit}, num::NonZeroUsize, ptr, }; #[cfg(feature = "alloc")] use alloc::rc::Rc; /// Ring buffer for using in single thread. /// /// Does *not* implement [`Sync`]. And its [`Producer`] and [`Consumer`] do *not* implement [`Send`]. /// #[cfg_attr( feature = "std", doc = r##" This code must fail to compile: ```compile_fail use std::{thread, vec::Vec}; use ringbuf::LocalRb; let (mut prod, mut cons) = LocalRb::>::new(256).split(); thread::spawn(move || { prod.push(123).unwrap(); }) .join(); thread::spawn(move || { assert_eq!(cons.pop().unwrap(), 123); }) .join(); ``` "## )] pub struct LocalRb> { storage: SharedStorage, head: Cell, tail: Cell, } impl> RbBase for LocalRb { #[inline] unsafe fn slices( &self, head: usize, tail: usize, ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { self.storage.as_mut_slices(head, tail) } #[inline] fn capacity_nonzero(&self) -> NonZeroUsize { self.storage.len() } #[inline] fn head(&self) -> usize { self.head.get() } #[inline] fn tail(&self) -> usize { self.tail.get() } } impl> RbRead for LocalRb { #[inline] unsafe fn set_head(&self, value: usize) { self.head.set(value); } } impl> RbWrite for LocalRb { #[inline] unsafe fn set_tail(&self, value: usize) { self.tail.set(value); } } impl> Rb for LocalRb {} impl> Drop for LocalRb { fn drop(&mut self) { self.clear(); } } impl> LocalRb { /// Constructs ring buffer from container and counters. /// /// # Safety /// /// The items in container inside `head..tail` range must be initialized, items outside this range must be uninitialized. /// `head` and `tail` values must be valid (see [`RbBase`](`crate::ring_buffer::RbBase`)). pub unsafe fn from_raw_parts(container: C, head: usize, tail: usize) -> Self { Self { storage: SharedStorage::new(container), head: Cell::new(head), tail: Cell::new(tail), } } /// Destructures ring buffer into underlying container and `head` and `tail` counters. 
    ///
    /// # Safety
    ///
    /// Initialized contents of the container must be properly dropped.
    pub unsafe fn into_raw_parts(self) -> (C, usize, usize) {
        let (head, tail) = (self.head(), self.tail());
        let self_ = ManuallyDrop::new(self);
        (ptr::read(&self_.storage).into_inner(), head, tail)
    }

    /// Splits ring buffer into producer and consumer.
    ///
    /// This method consumes the ring buffer and puts it on the heap in [`Rc`]. If you don't want to use the heap then see [`Self::split_ref`].
    #[cfg(feature = "alloc")]
    pub fn split(self) -> (Producer<T, Rc<Self>>, Consumer<T, Rc<Self>>)
    where
        Self: Sized,
    {
        let rc = Rc::new(self);
        unsafe { (Producer::new(rc.clone()), Consumer::new(rc)) }
    }

    /// Splits ring buffer into producer and consumer without using the heap.
    ///
    /// In this case the producer and consumer store a reference to the ring buffer, so you also need to store the buffer itself somewhere.
    pub fn split_ref(&mut self) -> (Producer<T, &Self>, Consumer<T, &Self>)
    where
        Self: Sized,
    {
        unsafe { (Producer::new(self), Consumer::new(self)) }
    }
}
ringbuf-0.3.3/src/ring_buffer/mod.rs000064400000000000000000000003101046102023000154610ustar 00000000000000mod base;
mod cache;
mod init;
mod local;
mod rb;
mod shared;
mod storage;

pub use base::*;
pub use cache::*;
pub use init::*;
pub use local::*;
pub use rb::*;
pub use shared::*;
pub use storage::*;
ringbuf-0.3.3/src/ring_buffer/rb.rs000064400000000000000000000157501046102023000153210ustar 00000000000000use super::{RbBase, RbRead, RbWrite};
use crate::{
    consumer::PopIterator,
    utils::{slice_assume_init_mut, slice_assume_init_ref},
    Consumer, Producer,
};
use core::{
    iter::Chain,
    ops::{Deref, DerefMut},
    slice,
};

#[cfg(feature = "alloc")]
use alloc::{rc::Rc, sync::Arc};

/// An abstract ring buffer.
///
/// See [`RbBase`] for details of the internal implementation of the ring buffer.
///
/// This trait contains methods that take `&mut self`, allowing you to use the ring buffer without splitting it into [`Producer`] and [`Consumer`].
///
/// There are also `push*_overwrite` methods that cannot be used from [`Producer`].
///
/// The ring buffer can be guarded with a mutex or another synchronization primitive and be used from different threads without splitting (but then only in blocking mode, obviously).
pub trait Rb<T>: RbRead<T> + RbWrite<T> {
    /// Returns capacity of the ring buffer.
    ///
    /// The capacity of the buffer is constant.
    #[inline]
    fn capacity(&self) -> usize {
        <Self as RbBase<T>>::capacity_nonzero(self).get()
    }

    /// The number of items stored in the ring buffer.
    fn len(&self) -> usize {
        self.occupied_len()
    }

    /// The number of remaining free places in the buffer.
    #[inline]
    fn free_len(&self) -> usize {
        self.vacant_len()
    }

    /// Returns a pair of slices which contain, in order, the contents of the ring buffer.
    #[inline]
    fn as_slices(&self) -> (&[T], &[T]) {
        unsafe {
            let (left, right) = self.occupied_slices();
            (slice_assume_init_ref(left), slice_assume_init_ref(right))
        }
    }

    /// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
    #[inline]
    fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
        unsafe {
            let (left, right) = self.occupied_slices();
            (slice_assume_init_mut(left), slice_assume_init_mut(right))
        }
    }

    /// Removes the oldest item from the ring buffer and returns it.
    ///
    /// Returns `None` if the ring buffer is empty.
    #[inline]
    fn pop(&mut self) -> Option<T> {
        unsafe { Consumer::new(self as &Self) }.pop()
    }

    /// Returns an iterator that removes items one by one from the ring buffer.
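    ///
    /// A minimal sketch (`HeapRb` requires the `alloc` feature):
    ///
    /// ```
    /// use ringbuf::{HeapRb, Rb};
    ///
    /// let mut rb = HeapRb::<i32>::new(2);
    /// rb.push(1).unwrap();
    /// rb.push(2).unwrap();
    /// // Each yielded item is removed from the ring buffer.
    /// assert!(rb.pop_iter().eq(1..3));
    /// assert_eq!(rb.len(), 0);
    /// ```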
fn pop_iter(&mut self) -> PopIterator<'_, T, RbWrap> { PopIterator::new(unsafe { &*(self as *const Self as *const RbWrap) }) } /// Returns a front-to-back iterator containing references to items in the ring buffer. /// /// This iterator does not remove items out of the ring buffer. fn iter(&self) -> Chain, slice::Iter> { let (left, right) = self.as_slices(); left.iter().chain(right.iter()) } /// Returns a front-to-back iterator that returns mutable references to items in the ring buffer. /// /// This iterator does not remove items out of the ring buffer. fn iter_mut(&mut self) -> Chain, slice::IterMut> { let (left, right) = self.as_mut_slices(); left.iter_mut().chain(right.iter_mut()) } /// Removes exactly `n` items from the buffer and safely drops them. /// /// *Panics if `n` is greater than number of items in the ring buffer.* fn skip(&mut self, count: usize) -> usize { assert!(count <= self.len()); unsafe { self.skip_internal(Some(count)) }; count } /// Removes all items from the buffer and safely drops them. /// /// Returns the number of deleted items. #[inline] fn clear(&mut self) -> usize { unsafe { self.skip_internal(None) } } /// Appends an item to the ring buffer. /// /// On failure returns an `Err` containing the item that hasn't been appended. #[inline] fn push(&mut self, elem: T) -> Result<(), T> { unsafe { Producer::new(self as &Self) }.push(elem) } /// Pushes an item to the ring buffer overwriting the latest item if the buffer is full. /// /// Returns overwritten item if overwriting took place. fn push_overwrite(&mut self, elem: T) -> Option { let ret = if self.is_full() { self.pop() } else { None }; let _ = self.push(elem); ret } /// Appends items from an iterator to the ring buffer. /// Elements that haven't been added to the ring buffer remain in the iterator. #[inline] fn push_iter>(&mut self, iter: &mut I) { unsafe { Producer::new(self as &Self) }.push_iter(iter); } /// Appends items from an iterator to the ring buffer. /// /// *This method consumes iterator until its end.* /// Exactly last `min(iter.len(), capacity)` items from the iterator will be stored in the ring buffer. fn push_iter_overwrite>(&mut self, iter: I) { for elem in iter { self.push_overwrite(elem); } } /// Removes first items from the ring buffer and writes them into a slice. /// Elements must be [`Copy`]. /// /// *Panics if slice length is greater than number of items in the ring buffer.* fn pop_slice(&mut self, elems: &mut [T]) where T: Copy, { assert!(elems.len() <= self.len()); let _ = unsafe { Consumer::new(self as &Self) }.pop_slice(elems); } /// Appends items from slice to the ring buffer. /// Elements must be [`Copy`]. /// /// *Panics if slice length is greater than number of free places in the ring buffer.* fn push_slice(&mut self, elems: &[T]) where T: Copy, { assert!(elems.len() <= self.free_len()); let _ = unsafe { Producer::new(self as &Self) }.push_slice(elems); } /// Appends items from slice to the ring buffer overwriting existing items in the ring buffer. /// /// If the slice length is greater than ring buffer capacity then only last `capacity` items from slice will be stored in the buffer. fn push_slice_overwrite(&mut self, elems: &[T]) where T: Copy, { if elems.len() > self.free_len() { self.skip(usize::min(elems.len() - self.free_len(), self.len())); } self.push_slice(if elems.len() > self.free_len() { &elems[(elems.len() - self.free_len())..] } else { elems }); } } /// An abstract reference to the ring buffer. 
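///
/// Implemented below for plain references, [`RbWrap`], and (with the `alloc` feature) `Rc` and `Arc`.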
pub trait RbRef: Deref { type Rb: ?Sized; } impl RbRef for RbWrap { type Rb = B; } impl<'a, B: ?Sized> RbRef for &'a B { type Rb = B; } #[cfg(feature = "alloc")] impl RbRef for Rc { type Rb = B; } #[cfg(feature = "alloc")] impl RbRef for Arc { type Rb = B; } /// Just a wrapper for a ring buffer. /// /// Used to make an owning implementation of [`RbRef`]. #[repr(transparent)] pub struct RbWrap(pub B); impl Deref for RbWrap { type Target = B; fn deref(&self) -> &B { &self.0 } } impl DerefMut for RbWrap { fn deref_mut(&mut self) -> &mut B { &mut self.0 } } ringbuf-0.3.3/src/ring_buffer/shared.rs000064400000000000000000000101171046102023000161560ustar 00000000000000use super::{Container, Rb, RbBase, RbRead, RbWrite, SharedStorage}; use crate::{consumer::Consumer, producer::Producer}; use core::{ mem::{ManuallyDrop, MaybeUninit}, num::NonZeroUsize, ptr, sync::atomic::{AtomicUsize, Ordering}, }; use crossbeam_utils::CachePadded; #[cfg(feature = "alloc")] use alloc::sync::Arc; /// Ring buffer that could be shared between threads. /// /// Implements [`Sync`] *if `T` implements [`Send`]*. And therefore its [`Producer`] and [`Consumer`] implement [`Send`]. /// /// Note that there is no explicit requirement of `T: Send`. Instead [`SharedRb`] will work just fine even with `T: !Send` /// until you try to send its [`Producer`] or [`Consumer`] to another thread. #[cfg_attr( feature = "std", doc = r##" ``` use std::{thread, vec::Vec}; use ringbuf::SharedRb; let (mut prod, mut cons) = SharedRb::>::new(256).split(); thread::spawn(move || { prod.push(123).unwrap(); }) .join(); thread::spawn(move || { assert_eq!(cons.pop().unwrap(), 123); }) .join(); ``` "## )] pub struct SharedRb> { storage: SharedStorage, head: CachePadded, tail: CachePadded, } impl> RbBase for SharedRb { #[inline] unsafe fn slices( &self, head: usize, tail: usize, ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { self.storage.as_mut_slices(head, tail) } #[inline] fn capacity_nonzero(&self) -> NonZeroUsize { self.storage.len() } #[inline] fn head(&self) -> usize { self.head.load(Ordering::Acquire) } #[inline] fn tail(&self) -> usize { self.tail.load(Ordering::Acquire) } } impl> RbRead for SharedRb { #[inline] unsafe fn set_head(&self, value: usize) { self.head.store(value, Ordering::Release) } } impl> RbWrite for SharedRb { #[inline] unsafe fn set_tail(&self, value: usize) { self.tail.store(value, Ordering::Release) } } impl> Rb for SharedRb {} impl> Drop for SharedRb { fn drop(&mut self) { self.clear(); } } impl> SharedRb { /// Constructs ring buffer from container and counters. /// /// # Safety /// /// The items in container inside `head..tail` range must be initialized, items outside this range must be uninitialized. /// `head` and `tail` values must be valid (see [`RbBase`](`crate::ring_buffer::RbBase`)). pub unsafe fn from_raw_parts(container: C, head: usize, tail: usize) -> Self { Self { storage: SharedStorage::new(container), head: CachePadded::new(AtomicUsize::new(head)), tail: CachePadded::new(AtomicUsize::new(tail)), } } /// Destructures ring buffer into underlying container and `head` and `tail` counters. /// /// # Safety /// /// Initialized contents of the container must be properly dropped. pub unsafe fn into_raw_parts(self) -> (C, usize, usize) { let (head, tail) = (self.head(), self.tail()); let self_ = ManuallyDrop::new(self); (ptr::read(&self_.storage).into_inner(), head, tail) } /// Splits ring buffer into producer and consumer. /// /// This method consumes the ring buffer and puts it on heap in [`Arc`]. 
If you don't want to use heap the see [`Self::split_ref`]. #[cfg(feature = "alloc")] pub fn split(self) -> (Producer>, Consumer>) where Self: Sized, { let arc = Arc::new(self); unsafe { (Producer::new(arc.clone()), Consumer::new(arc)) } } /// Splits ring buffer into producer and consumer without using the heap. /// /// In this case producer and consumer stores a reference to the ring buffer, so you also need to store the buffer somewhere. pub fn split_ref(&mut self) -> (Producer, Consumer) where Self: Sized, { unsafe { (Producer::new(self), Consumer::new(self)) } } } ringbuf-0.3.3/src/ring_buffer/storage.rs000064400000000000000000000101711046102023000163540ustar 00000000000000use crate::utils::ring_buffer_ranges; #[cfg(feature = "alloc")] use alloc::vec::Vec; use core::{cell::UnsafeCell, marker::PhantomData, mem::MaybeUninit, num::NonZeroUsize, slice}; /// Abstract container for the ring buffer. /// /// Container items must be stored as a contiguous array. /// /// # Safety /// /// *[`Self::len`]/[`Self::is_empty`] must always return the same value.* /// /// *Container must not cause data race on concurrent [`Self::as_mut_slice`]/[`Self::as_mut_ptr`] calls.* pub unsafe trait Container { /// Internal representation of the container. /// /// *Must not be aliased with its content.* type Internal; /// Transform container to internal representation. fn into_internal(self) -> Self::Internal; /// Restore container from internal representation. /// /// # Safety /// /// `this` must be valid. unsafe fn from_internal(this: Self::Internal) -> Self; /// Return pointer to the beginning of the container items. fn as_mut_ptr(this: &Self::Internal) -> *mut MaybeUninit; /// Length of the container. fn len(this: &Self::Internal) -> usize; } unsafe impl<'a, T> Container for &'a mut [MaybeUninit] { type Internal = (*mut MaybeUninit, usize); fn into_internal(self) -> Self::Internal { (self.as_mut_ptr(), self.len()) } unsafe fn from_internal(this: Self::Internal) -> Self { slice::from_raw_parts_mut(this.0, this.1) } #[inline] fn as_mut_ptr(this: &Self::Internal) -> *mut MaybeUninit { this.0 } #[inline] fn len(this: &Self::Internal) -> usize { this.1 } } unsafe impl Container for [MaybeUninit; N] { type Internal = UnsafeCell<[MaybeUninit; N]>; fn into_internal(self) -> Self::Internal { UnsafeCell::new(self) } unsafe fn from_internal(this: Self::Internal) -> Self { this.into_inner() } #[inline] fn as_mut_ptr(this: &Self::Internal) -> *mut MaybeUninit { this.get() as *mut _ } #[inline] fn len(_: &Self::Internal) -> usize { N } } #[cfg(feature = "alloc")] unsafe impl Container for Vec> { type Internal = Self; fn into_internal(self) -> Self::Internal { self } unsafe fn from_internal(this: Self::Internal) -> Self { this } #[inline] fn as_mut_ptr(this: &Self::Internal) -> *mut MaybeUninit { this.as_ptr() as *mut _ } #[inline] fn len(this: &Self::Internal) -> usize { this.len() } } /// Wrapper for container that provides multiple write access to it. pub(crate) struct SharedStorage> { container: C::Internal, _p: PhantomData, } unsafe impl> Sync for SharedStorage where T: Send {} impl> SharedStorage { /// Create new storage. /// /// *Panics if container is empty.* pub fn new(container: C) -> Self { let internal = container.into_internal(); assert!(C::len(&internal) > 0); Self { container: internal, _p: PhantomData, } } /// Get the length of the container. 
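    ///
    /// Guaranteed to be non-zero because [`Self::new`] panics on an empty container.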
#[inline] pub fn len(&self) -> NonZeroUsize { unsafe { NonZeroUsize::new_unchecked(C::len(&self.container)) } } /// Returns a pair of slices between `head` and `tail` positions in the storage. /// /// For more information see [`ring_buffer_ranges`]. /// /// # Safety /// /// There only single reference to any item allowed to exist at the time. pub unsafe fn as_mut_slices( &self, head: usize, tail: usize, ) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { let ranges = ring_buffer_ranges(self.len(), head, tail); let ptr = C::as_mut_ptr(&self.container); ( slice::from_raw_parts_mut(ptr.add(ranges.0.start), ranges.0.len()), slice::from_raw_parts_mut(ptr.add(ranges.1.start), ranges.1.len()), ) } /// Returns underlying container. pub fn into_inner(self) -> C { unsafe { C::from_internal(self.container) } } } ringbuf-0.3.3/src/tests/access.rs000064400000000000000000000160711046102023000150300ustar 00000000000000use crate::HeapRb; use core::mem::MaybeUninit; #[test] fn push() { let cap = 3; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); let vs_20 = (123, 456); { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 3); assert_eq!(right.len(), 0); left[0] = MaybeUninit::new(vs_20.0); left[1] = MaybeUninit::new(vs_20.1); unsafe { prod.advance(2) }; } { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); } assert_eq!(cons.pop().unwrap(), vs_20.0); assert_eq!(cons.pop().unwrap(), vs_20.1); assert_eq!(cons.pop(), None); let vs_11 = (123, 456); { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 2); left[0] = MaybeUninit::new(vs_11.0); right[0] = MaybeUninit::new(vs_11.1); unsafe { prod.advance(2) }; } { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); } assert_eq!(cons.pop().unwrap(), vs_11.0); assert_eq!(cons.pop().unwrap(), vs_11.1); assert_eq!(cons.pop(), None); } #[test] fn pop_full() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); for i in 0..cap { prod.push(i as i32).unwrap(); } assert_eq!(prod.push(0), Err(0)); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), cap); assert_eq!(right.len(), 0); for (i, x) in left.iter().enumerate() { assert_eq!(unsafe { x.assume_init() }, i as i32); } unsafe { cons.advance(cap) }; } assert_eq!(cons.len(), 0); assert_eq!(cons.pop(), None); } #[test] fn pop_empty() { let cap = 2; let buf = HeapRb::::new(cap); let (_, mut cons) = buf.split(); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 0); assert_eq!(right.len(), 0); unsafe { cons.advance(0) }; } } #[test] fn pop() { let cap = 3; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); let vs_20 = (123, 456, 789); assert_eq!(prod.push(vs_20.0), Ok(())); assert_eq!(prod.push(vs_20.1), Ok(())); assert_eq!(prod.push(vs_20.2), Ok(())); assert_eq!(prod.push(0), Err(0)); assert_eq!(prod.len(), 3); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 3); assert_eq!(right.len(), 0); assert_eq!(unsafe { left[0].assume_init() }, vs_20.0); assert_eq!(unsafe { left[1].assume_init() }, vs_20.1); assert_eq!(unsafe { left[2].assume_init() }, vs_20.2); unsafe { cons.advance(2) }; } { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); } assert_eq!(prod.len(), 1); let vs_11 = (654, 321); 
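    // Push two more items so that the occupied region wraps around the end of the
    // buffer; `as_uninit_slices` must then return two non-empty slices.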
assert_eq!(prod.push(vs_11.0), Ok(())); assert_eq!(prod.push(vs_11.1), Ok(())); assert_eq!(prod.push(0), Err(0)); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 2); assert_eq!(unsafe { left[0].assume_init() }, vs_20.2); assert_eq!(unsafe { right[0].assume_init() }, vs_11.0); assert_eq!(unsafe { right[1].assume_init() }, vs_11.1); unsafe { cons.advance(2) }; } { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); } assert_eq!(prod.len(), 1); assert_eq!(cons.pop(), Some(vs_11.1)); } #[test] fn push_return() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 2); assert_eq!(right.len(), 0); unsafe { prod.advance(0) }; } { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 2); assert_eq!(right.len(), 0); left[0] = MaybeUninit::new(12); unsafe { prod.advance(1) }; } { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); left[0] = MaybeUninit::new(34); unsafe { prod.advance(1) }; } assert_eq!(cons.pop().unwrap(), 12); assert_eq!(cons.pop().unwrap(), 34); assert_eq!(cons.pop(), None); } #[test] fn pop_return() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); assert_eq!(prod.push(12), Ok(())); assert_eq!(prod.push(34), Ok(())); assert_eq!(prod.push(0), Err(0)); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 2); assert_eq!(right.len(), 0); unsafe { cons.advance(0) }; } { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 2); assert_eq!(right.len(), 0); assert_eq!(unsafe { left[0].assume_init() }, 12); unsafe { cons.advance(1) }; } { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 0); assert_eq!(unsafe { left[0].assume_init() }, 34); unsafe { cons.advance(1) }; } assert_eq!(prod.len(), 0); } #[test] fn push_pop() { let cap = 3; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); let vs_20 = (123, 456); { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 3); assert_eq!(right.len(), 0); left[0] = MaybeUninit::new(vs_20.0); left[1] = MaybeUninit::new(vs_20.1); unsafe { prod.advance(2) }; } assert_eq!(prod.len(), 2); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 2); assert_eq!(right.len(), 0); assert_eq!(unsafe { left[0].assume_init() }, vs_20.0); assert_eq!(unsafe { left[1].assume_init() }, vs_20.1); unsafe { cons.advance(2) }; } assert_eq!(prod.len(), 0); let vs_11 = (123, 456); { let (left, right) = unsafe { prod.free_space_as_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 2); left[0] = MaybeUninit::new(vs_11.0); right[0] = MaybeUninit::new(vs_11.1); unsafe { prod.advance(2) }; } assert_eq!(prod.len(), 2); { let (left, right) = unsafe { cons.as_uninit_slices() }; assert_eq!(left.len(), 1); assert_eq!(right.len(), 1); assert_eq!(unsafe { left[0].assume_init() }, vs_11.0); assert_eq!(unsafe { right[0].assume_init() }, vs_11.1); unsafe { cons.advance(2) }; } assert_eq!(prod.len(), 0); } ringbuf-0.3.3/src/tests/basic.rs000064400000000000000000000114531046102023000146470ustar 00000000000000use crate::HeapRb; #[cfg(feature = "std")] use std::thread; fn head_tail(ring_buffer: &HeapRb) -> (usize, usize) { use 
crate::ring_buffer::RbBase; (ring_buffer.head(), ring_buffer.tail()) } #[test] fn capacity() { use crate::Rb; let cap = 13; let buf = HeapRb::::new(cap); assert_eq!(buf.capacity(), cap); } #[test] fn split_capacity() { let cap = 13; let buf = HeapRb::::new(cap); let (prod, cons) = buf.split(); assert_eq!(prod.capacity(), cap); assert_eq!(cons.capacity(), cap); } #[cfg(feature = "std")] #[test] fn split_threads() { let buf = HeapRb::::new(10); let (prod, cons) = buf.split(); let pjh = thread::spawn(move || { let _ = prod; }); let cjh = thread::spawn(move || { let _ = cons; }); pjh.join().unwrap(); cjh.join().unwrap(); } #[test] fn push() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, _) = buf.split(); assert_eq!(head_tail(prod.rb()), (0, 0)); assert_eq!(prod.push(123), Ok(())); assert_eq!(head_tail(prod.rb()), (0, 1)); assert_eq!(prod.push(234), Ok(())); assert_eq!(head_tail(prod.rb()), (0, 2)); assert_eq!(prod.push(345), Err(345)); assert_eq!(head_tail(prod.rb()), (0, 2)); } #[test] fn pop_empty() { let cap = 2; let buf = HeapRb::::new(cap); let (_, mut cons) = buf.split(); assert_eq!(head_tail(cons.rb()), (0, 0)); assert_eq!(cons.pop(), None); assert_eq!(head_tail(cons.rb()), (0, 0)); } #[test] fn push_pop_one() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); let vcap = 2 * cap; let values = [12, 34, 56, 78, 90]; assert_eq!(head_tail(cons.rb()), (0, 0)); for (i, v) in values.iter().enumerate() { assert_eq!(prod.push(*v), Ok(())); assert_eq!(head_tail(cons.rb()), (i % vcap, (i + 1) % vcap)); assert_eq!(cons.pop().unwrap(), *v); assert_eq!(head_tail(cons.rb()), ((i + 1) % vcap, (i + 1) % vcap)); assert_eq!(cons.pop(), None); assert_eq!(head_tail(cons.rb()), ((i + 1) % vcap, (i + 1) % vcap)); } } #[test] fn push_pop_all() { let cap = 2; let buf = HeapRb::::new(cap); let (mut prod, mut cons) = buf.split(); let vcap = 2 * cap; let values = [(12, 34, 13), (56, 78, 57), (90, 10, 91)]; assert_eq!(head_tail(cons.rb()), (0, 0)); for (i, v) in values.iter().enumerate() { assert_eq!(prod.push(v.0), Ok(())); assert_eq!(head_tail(cons.rb()), (cap * i % vcap, (cap * i + 1) % vcap)); assert_eq!(prod.push(v.1), Ok(())); assert_eq!(head_tail(cons.rb()), (cap * i % vcap, (cap * i + 2) % vcap)); assert_eq!(prod.push(v.2).unwrap_err(), v.2); assert_eq!(head_tail(cons.rb()), (cap * i % vcap, (cap * i + 2) % vcap)); assert_eq!(cons.pop().unwrap(), v.0); assert_eq!( head_tail(cons.rb()), ((cap * i + 1) % vcap, (cap * i + 2) % vcap) ); assert_eq!(cons.pop().unwrap(), v.1); assert_eq!( head_tail(cons.rb()), ((cap * i + 2) % vcap, (cap * i + 2) % vcap) ); assert_eq!(cons.pop(), None); assert_eq!( head_tail(cons.rb()), ((cap * i + 2) % vcap, (cap * i + 2) % vcap) ); } } #[test] fn empty_full() { let buf = HeapRb::::new(1); let (mut prod, cons) = buf.split(); assert!(prod.is_empty()); assert!(cons.is_empty()); assert!(!prod.is_full()); assert!(!cons.is_full()); assert_eq!(prod.push(123), Ok(())); assert!(!prod.is_empty()); assert!(!cons.is_empty()); assert!(prod.is_full()); assert!(cons.is_full()); } #[test] fn len_remaining() { let buf = HeapRb::::new(2); let (mut prod, mut cons) = buf.split(); assert_eq!(prod.len(), 0); assert_eq!(cons.len(), 0); assert_eq!(prod.free_len(), 2); assert_eq!(cons.free_len(), 2); assert_eq!(prod.push(123), Ok(())); assert_eq!(prod.len(), 1); assert_eq!(cons.len(), 1); assert_eq!(prod.free_len(), 1); assert_eq!(cons.free_len(), 1); assert_eq!(prod.push(456), Ok(())); assert_eq!(prod.len(), 2); assert_eq!(cons.len(), 2); 
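    // The buffer (capacity 2) is now full.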
assert_eq!(prod.free_len(), 0); assert_eq!(cons.free_len(), 0); assert_eq!(cons.pop(), Some(123)); assert_eq!(prod.len(), 1); assert_eq!(cons.len(), 1); assert_eq!(prod.free_len(), 1); assert_eq!(cons.free_len(), 1); assert_eq!(cons.pop(), Some(456)); assert_eq!(prod.len(), 0); assert_eq!(cons.len(), 0); assert_eq!(prod.free_len(), 2); assert_eq!(cons.free_len(), 2); assert_eq!(prod.push(789), Ok(())); assert_eq!(prod.len(), 1); assert_eq!(cons.len(), 1); assert_eq!(prod.free_len(), 1); assert_eq!(cons.free_len(), 1); } ringbuf-0.3.3/src/tests/cached.rs000064400000000000000000000055401046102023000147750ustar 00000000000000use crate::HeapRb; #[test] fn producer() { let (mut prod, mut cons) = HeapRb::::new(2).split(); prod.push(0).unwrap(); assert!(cons.iter().cloned().eq(0..1)); { let mut post_prod = prod.postponed(); post_prod.push(1).unwrap(); assert!(cons.iter().cloned().eq(0..1)); assert_eq!(cons.len(), 1); assert_eq!(post_prod.len(), 2); assert_eq!(cons.pop().unwrap(), 0); assert!(cons.pop().is_none()); assert_eq!(cons.len(), 0); assert_eq!(post_prod.len(), 2); post_prod.sync(); assert!(cons.iter().cloned().eq(1..2)); assert_eq!(cons.len(), 1); assert_eq!(post_prod.len(), 1); post_prod.push(2).unwrap(); assert!(cons.iter().cloned().eq(1..2)); assert_eq!(cons.len(), 1); assert_eq!(post_prod.len(), 2); } assert!(cons.iter().cloned().eq(1..3)); assert_eq!(cons.len(), 2); assert_eq!(prod.len(), 2); } #[test] fn discard() { let (mut prod, cons) = HeapRb::::new(10).split(); prod.push(0).unwrap(); assert!(cons.iter().cloned().eq(0..1)); { let mut post_prod = prod.postponed(); post_prod.push(1).unwrap(); assert_eq!(cons.len(), 1); assert_eq!(post_prod.len(), 2); post_prod.sync(); assert!(cons.iter().cloned().eq(0..2)); assert_eq!(cons.len(), 2); assert_eq!(post_prod.len(), 2); post_prod.push(3).unwrap(); assert_eq!(cons.len(), 2); assert_eq!(post_prod.len(), 3); post_prod.discard(); assert_eq!(cons.len(), 2); assert_eq!(post_prod.len(), 2); post_prod.push(2).unwrap(); assert_eq!(cons.len(), 2); assert_eq!(post_prod.len(), 3); } assert!(cons.iter().cloned().eq(0..3)); assert_eq!(cons.len(), 3); assert_eq!(prod.len(), 3); } #[test] fn consumer() { let (mut prod, mut cons) = HeapRb::::new(10).split(); prod.push(0).unwrap(); prod.push(1).unwrap(); assert!(cons.iter().cloned().eq(0..2)); { let mut post_cons = cons.postponed(); assert_eq!(post_cons.pop().unwrap(), 0); assert!(post_cons.iter().cloned().eq(1..2)); assert_eq!(post_cons.len(), 1); assert_eq!(prod.len(), 2); prod.push(2).unwrap(); assert!(post_cons.iter().cloned().eq(1..2)); assert_eq!(post_cons.len(), 1); assert_eq!(prod.len(), 3); post_cons.sync(); assert!(post_cons.iter().cloned().eq(1..3)); assert_eq!(post_cons.len(), 2); assert_eq!(prod.len(), 2); assert_eq!(post_cons.pop().unwrap(), 1); assert!(post_cons.iter().cloned().eq(2..3)); assert_eq!(post_cons.len(), 1); assert_eq!(prod.len(), 2); } assert!(cons.iter().cloned().eq(2..3)); assert_eq!(cons.len(), 1); assert_eq!(prod.len(), 1); } ringbuf-0.3.3/src/tests/drop.rs000064400000000000000000000052161046102023000145320ustar 00000000000000use crate::{Consumer, HeapRb}; use alloc::collections::BTreeSet; use core::cell::RefCell; #[derive(Debug)] struct Dropper<'a> { id: i32, set: &'a RefCell>, } impl<'a> Dropper<'a> { fn new(set: &'a RefCell>, id: i32) -> Self { if !set.borrow_mut().insert(id) { panic!("value {} already exists", id); } Self { set, id } } } impl<'a> Drop for Dropper<'a> { fn drop(&mut self) { if !self.set.borrow_mut().remove(&self.id) { panic!("value {} already removed", 
self.id); } } } #[test] fn single() { let set = RefCell::new(BTreeSet::new()); let cap = 3; let buf = HeapRb::new(cap); assert_eq!(set.borrow().len(), 0); { let (mut prod, mut cons) = buf.split(); prod.push(Dropper::new(&set, 1)).unwrap(); assert_eq!(set.borrow().len(), 1); prod.push(Dropper::new(&set, 2)).unwrap(); assert_eq!(set.borrow().len(), 2); prod.push(Dropper::new(&set, 3)).unwrap(); assert_eq!(set.borrow().len(), 3); cons.pop().unwrap(); assert_eq!(set.borrow().len(), 2); cons.pop().unwrap(); assert_eq!(set.borrow().len(), 1); prod.push(Dropper::new(&set, 4)).unwrap(); assert_eq!(set.borrow().len(), 2); } assert_eq!(set.borrow().len(), 0); } // TODO: Use transactions. #[test] fn transaction() { let set = RefCell::new(BTreeSet::new()); let cap = 5; let buf = HeapRb::new(cap); assert_eq!(set.borrow().len(), 0); { let (mut prod, mut cons) = buf.split(); let mut id = 0; let mut cnt = 0; let assert_cnt = |cnt, n, cons: &Consumer<_, _>, set: &RefCell>| { assert_eq!(cnt, n); assert_eq!(cnt, cons.len()); assert_eq!(cnt, set.borrow().len()); }; for _ in 0..4 { id += 1; cnt += 1; prod.push(Dropper::new(&set, id)).unwrap(); } assert_cnt(cnt, 4, &cons, &set); for _ in cons.pop_iter().take(2) { cnt -= 1; } assert_cnt(cnt, 2, &cons, &set); while !prod.is_full() { id += 1; cnt += 1; prod.push(Dropper::new(&set, id)).unwrap(); } assert_cnt(cnt, 5, &cons, &set); for _ in cons.pop_iter() { cnt -= 1; } assert_cnt(cnt, 0, &cons, &set); while !prod.is_full() { id += 1; cnt += 1; prod.push(Dropper::new(&set, id)).unwrap(); } assert_cnt(cnt, 5, &cons, &set); } assert_eq!(set.borrow().len(), 0); } ringbuf-0.3.3/src/tests/fmt_write.rs000064400000000000000000000021601046102023000155610ustar 00000000000000use crate::StaticRb; use core::fmt::Write; #[test] fn write() { let mut buf = StaticRb::::default(); let (mut prod, mut cons) = buf.split_ref(); assert_eq!(write!(prod, "Hello world!\n"), Ok(())); assert_eq!(write!(prod, "The answer is {}\n", 42), Ok(())); assert_eq!(cons.len(), 30); assert!(cons .pop_iter() .eq(b"Hello world!\nThe answer is 42\n".iter().cloned())); } #[test] fn write_overflow() { let mut buf = StaticRb::::default(); let (mut prod, mut cons) = buf.split_ref(); assert_eq!( write!( prod, "This is a very long string that will overflow the small buffer\n" ), Err(core::fmt::Error::default()) ); assert_eq!(cons.len(), 10); assert!(cons.pop_iter().eq(b"This is a ".iter().cloned())); assert_eq!( write!( prod, "{} {} {} {} {}\n", "This", "string", "will", "also", "overflow" ), Err(core::fmt::Error::default()) ); assert_eq!(cons.len(), 10); assert!(cons.pop_iter().eq(b"This strin".iter().cloned())); } ringbuf-0.3.3/src/tests/iter.rs000064400000000000000000000033451046102023000145320ustar 00000000000000use crate::HeapRb; #[test] fn iter() { let buf = HeapRb::::new(2); let (mut prod, mut cons) = buf.split(); prod.push(10).unwrap(); prod.push(20).unwrap(); let sum: i32 = cons.iter().sum(); let first = cons.pop().expect("First item is not available"); let second = cons.pop().expect("Second item is not available"); assert_eq!(sum, first + second); } #[test] fn iter_mut() { let buf = HeapRb::::new(2); let (mut prod, mut cons) = buf.split(); prod.push(10).unwrap(); prod.push(20).unwrap(); for v in cons.iter_mut() { *v *= 2; } let sum: i32 = cons.iter().sum(); let first = cons.pop().expect("First item is not available"); let second = cons.pop().expect("Second item is not available"); assert_eq!(sum, first + second); } #[test] fn pop_iter() { let buf = HeapRb::::new(3); let (mut prod, mut cons) = 
buf.split(); prod.push(0).unwrap(); prod.push(1).unwrap(); for (i, v) in cons.pop_iter().enumerate() { assert_eq!(i as i32, v); } prod.push(2).unwrap(); prod.push(3).unwrap(); for (i, v) in cons.pop_iter().enumerate() { assert_eq!(i as i32 + 2, v); } assert!(prod.is_empty()); } #[test] fn push_pop_iter_partial() { let buf = HeapRb::::new(4); let (mut prod, mut cons) = buf.split(); prod.push(0).unwrap(); prod.push(1).unwrap(); prod.push(2).unwrap(); for (i, v) in (0..2).zip(cons.pop_iter()) { assert_eq!(i, v); } prod.push(3).unwrap(); prod.push(4).unwrap(); prod.push(5).unwrap(); for (i, v) in (2..5).zip(cons.pop_iter()) { assert_eq!(i, v); } assert_eq!(cons.pop().unwrap(), 5); assert!(prod.is_empty()); } ringbuf-0.3.3/src/tests/message.rs000064400000000000000000000166551046102023000152230ustar 00000000000000use crate::{HeapRb, Rb}; use alloc::{string::String, vec::Vec}; use std::{ io::{self, Read, Write}, sync::{Arc, Mutex}, thread, time::Duration, }; const THE_BOOK_FOREWORD: &str = r#" It wasn't always so clear, but the Rust programming language is fundamentally about empowerment: no matter what kind of code you are writing now, Rust empowers you to reach farther, to program with confidence in a wider variety of domains than you did before. Take, for example, "systems-level" work that deals with low-level details of memory management, data representation, and concurrency. Traditionally, this realm of programming is seen as arcane, accessible only to a select few who have devoted the necessary years learning to avoid its infamous pitfalls. And even those who practice it do so with caution, lest their code be open to exploits, crashes, or corruption. Rust breaks down these barriers by eliminating the old pitfalls and providing a friendly, polished set of tools to help you along the way. Programmers who need to "dip down" into lower-level control can do so with Rust, without taking on the customary risk of crashes or security holes, and without having to learn the fine points of a fickle toolchain. Better yet, the language is designed to guide you naturally towards reliable code that is efficient in terms of speed and memory usage. Programmers who are already working with low-level code can use Rust to raise their ambitions. For example, introducing parallelism in Rust is a relatively low-risk operation: the compiler will catch the classical mistakes for you. And you can tackle more aggressive optimizations in your code with the confidence that you won't accidentally introduce crashes or vulnerabilities. But Rust isn't limited to low-level systems programming. It's expressive and ergonomic enough to make CLI apps, web servers, and many other kinds of code quite pleasant to write — you'll find simple examples of both later in the book. Working with Rust allows you to build skills that transfer from one domain to another; you can learn Rust by writing a web app, then apply those same skills to target your Raspberry Pi. This book fully embraces the potential of Rust to empower its users. It's a friendly and approachable text intended to help you level up not just your knowledge of Rust, but also your reach and confidence as a programmer in general. So dive in, get ready to learn—and welcome to the Rust community! 
— Nicholas Matsakis and Aaron Turon "#; #[test] #[cfg_attr(miri, ignore)] fn push_pop_slice() { let buf = HeapRb::::new(7); let (mut prod, mut cons) = buf.split(); let smsg = THE_BOOK_FOREWORD; let pjh = thread::spawn(move || { let mut bytes = smsg.as_bytes(); while !bytes.is_empty() { let n = prod.push_slice(bytes); if n > 0 { bytes = &bytes[n..bytes.len()] } else { thread::sleep(Duration::from_millis(1)) } } loop { match prod.push(0) { Ok(()) => break, Err(_) => thread::sleep(Duration::from_millis(1)), } } }); let cjh = thread::spawn(move || { let mut bytes = Vec::::new(); let mut buffer = [0; 5]; loop { let n = cons.pop_slice(&mut buffer); if n > 0 { bytes.extend_from_slice(&buffer[0..n]) } else if bytes.ends_with(&[0]) { break; } else { thread::sleep(Duration::from_millis(1)); } } assert_eq!(bytes.pop().unwrap(), 0); String::from_utf8(bytes).unwrap() }); pjh.join().unwrap(); let rmsg = cjh.join().unwrap(); assert_eq!(smsg, rmsg); } #[test] #[cfg_attr(miri, ignore)] fn read_from_write_into() { let buf = HeapRb::::new(7); let (mut prod, mut cons) = buf.split(); let smsg = THE_BOOK_FOREWORD; let pjh = thread::spawn(move || { let zero = [0]; let mut bytes = smsg.as_bytes().chain(&zero[..]); loop { if prod.is_full() { thread::sleep(Duration::from_millis(1)); } else if prod.read_from(&mut bytes, None).unwrap() == 0 { break; } } }); let cjh = thread::spawn(move || { let mut bytes = Vec::::new(); loop { if cons.is_empty() { if bytes.ends_with(&[0]) { break; } else { thread::sleep(Duration::from_millis(1)); } } else { cons.write_into(&mut bytes, None).unwrap(); } } assert_eq!(bytes.pop().unwrap(), 0); String::from_utf8(bytes).unwrap() }); pjh.join().unwrap(); let rmsg = cjh.join().unwrap(); assert_eq!(smsg, rmsg); } #[test] #[cfg_attr(miri, ignore)] fn read_write() { let buf = HeapRb::::new(7); let (mut prod, mut cons) = buf.split(); let smsg = THE_BOOK_FOREWORD; let pjh = thread::spawn(move || { let mut bytes = smsg.as_bytes(); while !bytes.is_empty() { match prod.write(bytes) { Ok(n) => bytes = &bytes[n..bytes.len()], Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); thread::sleep(Duration::from_millis(1)); } } } loop { match prod.push(0) { Ok(()) => break, Err(_) => thread::sleep(Duration::from_millis(1)), } } }); let cjh = thread::spawn(move || { let mut bytes = Vec::::new(); let mut buffer = [0; 5]; loop { match cons.read(&mut buffer) { Ok(n) => bytes.extend_from_slice(&buffer[0..n]), Err(err) => { assert_eq!(err.kind(), io::ErrorKind::WouldBlock); if bytes.ends_with(&[0]) { break; } else { thread::sleep(Duration::from_millis(1)); } } } } assert_eq!(bytes.pop().unwrap(), 0); String::from_utf8(bytes).unwrap() }); pjh.join().unwrap(); let rmsg = cjh.join().unwrap(); assert_eq!(smsg, rmsg); } #[test] #[cfg_attr(miri, ignore)] fn blocking() { let buf = Arc::new(Mutex::new(HeapRb::::new(7))); let (prod, cons) = (buf.clone(), buf); let smsg = THE_BOOK_FOREWORD; let pjh = thread::spawn(move || { let mut bytes = smsg.as_bytes().iter().copied(); while bytes.len() > 0 { prod.lock().unwrap().push_iter(&mut bytes); thread::sleep(Duration::from_millis(1)) } loop { match prod.lock().unwrap().push(0) { Ok(()) => break, Err(_) => thread::sleep(Duration::from_millis(1)), } } }); let cjh = thread::spawn(move || { let mut bytes = Vec::::new(); loop { bytes.extend(cons.lock().unwrap().pop_iter()); if bytes.ends_with(&[0]) { break; } else { thread::sleep(Duration::from_millis(1)); } } assert_eq!(bytes.pop().unwrap(), 0); String::from_utf8(bytes).unwrap() }); pjh.join().unwrap(); let rmsg = 
cjh.join().unwrap(); assert_eq!(smsg, rmsg); } ringbuf-0.3.3/src/tests/mod.rs000064400000000000000000000006111046102023000143370ustar 00000000000000#[cfg(feature = "alloc")] mod access; #[cfg(feature = "alloc")] mod basic; #[cfg(feature = "alloc")] mod cached; #[cfg(feature = "alloc")] mod drop; #[cfg(feature = "alloc")] mod iter; #[cfg(feature = "alloc")] mod overwrite; #[cfg(feature = "alloc")] mod skip; #[cfg(feature = "alloc")] mod slice; #[cfg(feature = "std")] mod message; #[cfg(feature = "std")] mod read_write; mod fmt_write; ringbuf-0.3.3/src/tests/overwrite.rs000064400000000000000000000020751046102023000156140ustar 00000000000000use crate::{HeapRb, LocalRb, Rb}; use alloc::vec::Vec; use core::mem::MaybeUninit; #[test] fn push() { let mut rb = HeapRb::::new(2); assert_eq!(rb.push_overwrite(0), None); assert_eq!(rb.push_overwrite(1), None); assert_eq!(rb.push_overwrite(2), Some(0)); assert_eq!(rb.pop(), Some(1)); assert_eq!(rb.pop(), Some(2)); assert_eq!(rb.pop(), None); } #[test] fn push_iter() { let mut rb = HeapRb::::new(2); rb.push_iter_overwrite([0, 1, 2, 3, 4, 5].into_iter()); assert_eq!(rb.pop_iter().collect::>(), [4, 5]); } #[test] fn push_slice() { let mut rb = HeapRb::::new(2); rb.push_slice_overwrite(&[0, 1, 2, 3, 4, 5]); assert_eq!(rb.pop_iter().collect::>(), [4, 5]); } #[test] fn push_local() { let mut rb = LocalRb::; 2]>::default(); assert_eq!(rb.push_overwrite(0), None); assert_eq!(rb.push_overwrite(1), None); assert_eq!(rb.push_overwrite(2), Some(0)); assert_eq!(rb.pop(), Some(1)); assert_eq!(rb.pop(), Some(2)); assert_eq!(rb.pop(), None); } ringbuf-0.3.3/src/tests/read_write.rs000064400000000000000000000061461046102023000157160ustar 00000000000000use crate::HeapRb; use std::io; #[test] fn from() { let buf0 = HeapRb::::new(4); let buf1 = HeapRb::::new(4); let (mut prod0, mut cons0) = buf0.split(); let (mut prod1, mut cons1) = buf1.split(); let mut tmp = [0; 5]; assert_eq!(prod0.push_slice(&[0, 1, 2]), 3); assert_eq!(prod1.read_from(&mut cons0, None).unwrap(), 3); assert_eq!( prod1.read_from(&mut cons0, None).unwrap_err().kind(), io::ErrorKind::WouldBlock ); assert_eq!(cons1.pop_slice(&mut tmp), 3); assert_eq!(tmp[0..3], [0, 1, 2]); assert_eq!(prod0.push_slice(&[3, 4, 5]), 3); assert_eq!(prod1.read_from(&mut cons0, None).unwrap(), 1); assert_eq!(cons1.pop_slice(&mut tmp), 1); assert_eq!(tmp[0..1], [3]); assert_eq!(prod1.read_from(&mut cons0, None).unwrap(), 2); assert_eq!(cons1.pop_slice(&mut tmp), 2); assert_eq!(tmp[0..2], [4, 5]); assert_eq!(prod1.push_slice(&[6, 7, 8]), 3); assert_eq!(prod0.push_slice(&[9, 10]), 2); assert_eq!(prod1.read_from(&mut cons0, None).unwrap(), 1); assert_eq!(prod1.read_from(&mut cons0, None).unwrap(), 0); assert_eq!(cons1.pop_slice(&mut tmp), 4); assert_eq!(tmp[0..4], [6, 7, 8, 9]); } #[test] fn into() { let buf0 = HeapRb::::new(4); let buf1 = HeapRb::::new(4); let (mut prod0, mut cons0) = buf0.split(); let (mut prod1, mut cons1) = buf1.split(); let mut tmp = [0; 5]; assert_eq!(prod0.push_slice(&[0, 1, 2]), 3); assert_eq!(cons0.write_into(&mut prod1, None).unwrap(), 3); assert_eq!(cons0.write_into(&mut prod1, None).unwrap(), 0); assert_eq!(cons1.pop_slice(&mut tmp), 3); assert_eq!(tmp[0..3], [0, 1, 2]); assert_eq!(prod0.push_slice(&[3, 4, 5]), 3); assert_eq!(cons0.write_into(&mut prod1, None).unwrap(), 1); assert_eq!(cons1.pop_slice(&mut tmp), 1); assert_eq!(tmp[0..1], [3]); assert_eq!(cons0.write_into(&mut prod1, None).unwrap(), 2); assert_eq!(cons1.pop_slice(&mut tmp), 2); assert_eq!(tmp[0..2], [4, 5]); assert_eq!(prod1.push_slice(&[6, 
7, 8]), 3); assert_eq!(prod0.push_slice(&[9, 10]), 2); assert_eq!(cons0.write_into(&mut prod1, None).unwrap(), 1); assert_eq!( cons0.write_into(&mut prod1, None).unwrap_err().kind(), io::ErrorKind::WouldBlock ); assert_eq!(cons1.pop_slice(&mut tmp), 4); assert_eq!(tmp[0..4], [6, 7, 8, 9]); } #[test] fn count() { let buf0 = HeapRb::::new(4); let buf1 = HeapRb::::new(4); let (mut prod0, mut cons0) = buf0.split(); let (mut prod1, mut cons1) = buf1.split(); let mut tmp = [0; 5]; assert_eq!(prod0.push_slice(&[0, 1, 2, 3]), 4); assert_eq!(prod1.read_from(&mut cons0, Some(3)).unwrap(), 3); assert_eq!(cons1.pop_slice(&mut tmp), 3); assert_eq!(tmp[0..3], [0, 1, 2]); assert_eq!(prod0.push_slice(&[4, 5, 6]), 3); assert_eq!(cons0.write_into(&mut prod1, Some(3)).unwrap(), 1); assert_eq!(cons0.write_into(&mut prod1, Some(2)).unwrap(), 2); assert_eq!(cons0.write_into(&mut prod1, Some(2)).unwrap(), 1); assert_eq!(cons1.pop_slice(&mut tmp), 4); assert_eq!(tmp[0..4], [3, 4, 5, 6]); } ringbuf-0.3.3/src/tests/skip.rs000064400000000000000000000032141046102023000145300ustar 00000000000000use crate::{HeapRb, Rb}; use alloc::rc::Rc; #[test] fn skip() { // Initialize ringbuffer, prod and cons let rb = HeapRb::::new(10); let (mut prod, mut cons) = rb.split(); let mut i = 0; // Fill the buffer for _ in 0..10 { prod.push(i).unwrap(); i += 1; } // Pop in the middle of the buffer assert_eq!(cons.skip(5), 5); // Make sure changes are taken into account assert_eq!(cons.pop().unwrap(), 5); // Fill the buffer again for _ in 0..5 { prod.push(i).unwrap(); i += 1; } assert_eq!(cons.skip(6), 6); assert_eq!(cons.pop().unwrap(), 12); // Fill the buffer again for _ in 0..7 { prod.push(i).unwrap(); i += 1; } // Ask too much, delete the max number of items assert_eq!(cons.skip(10), 9); // Try to remove more than possible assert_eq!(cons.skip(1), 0); // Make sure it is still usable assert_eq!(cons.pop(), None); assert_eq!(prod.push(0), Ok(())); assert_eq!(cons.pop(), Some(0)); } #[test] fn skip_drop() { let rc = Rc::<()>::new(()); static N: usize = 10; let rb = HeapRb::>::new(N); let (mut prod, mut cons) = rb.split(); for _ in 0..N { prod.push(rc.clone()).unwrap(); } assert_eq!(cons.len(), N); assert_eq!(Rc::strong_count(&rc), N + 1); assert_eq!(cons.skip(N), N); // Check ring buffer is empty assert_eq!(cons.len(), 0); // Check that items are dropped assert_eq!(Rc::strong_count(&rc), 1); } #[test] #[should_panic] fn skip_panic() { let mut rb = HeapRb::::new(2); rb.push(1).unwrap(); rb.skip(2); } ringbuf-0.3.3/src/tests/slice.rs000064400000000000000000000055421046102023000146670ustar 00000000000000use crate::{transfer, HeapRb, Rb}; #[test] fn push_pop_slice() { let buf = HeapRb::::new(4); let (mut prod, mut cons) = buf.split(); let mut tmp = [0; 5]; assert_eq!(prod.push_slice(&[]), 0); assert_eq!(prod.push_slice(&[0, 1, 2]), 3); assert_eq!(cons.pop_slice(&mut tmp[0..2]), 2); assert_eq!(tmp[0..2], [0, 1]); assert_eq!(prod.push_slice(&[3, 4]), 2); assert_eq!(prod.push_slice(&[5, 6]), 1); assert_eq!(cons.pop_slice(&mut tmp[0..3]), 3); assert_eq!(tmp[0..3], [2, 3, 4]); assert_eq!(prod.push_slice(&[6, 7, 8, 9]), 3); assert_eq!(cons.pop_slice(&mut tmp), 4); assert_eq!(tmp[0..4], [5, 6, 7, 8]); } #[test] fn move_slice() { let buf0 = HeapRb::::new(4); let buf1 = HeapRb::::new(4); let (mut prod0, mut cons0) = buf0.split(); let (mut prod1, mut cons1) = buf1.split(); let mut tmp = [0; 5]; assert_eq!(prod0.push_slice(&[0, 1, 2]), 3); assert_eq!(transfer(&mut cons0, &mut prod1, None), 3); assert_eq!(transfer(&mut cons0, &mut prod1, None), 0); 
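    // The destination now holds the three transferred items; drain and check them.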
    assert_eq!(cons1.pop_slice(&mut tmp), 3);
    assert_eq!(tmp[0..3], [0, 1, 2]);

    assert_eq!(prod0.push_slice(&[3, 4, 5]), 3);
    assert_eq!(transfer(&mut cons0, &mut prod1, None), 3);
    assert_eq!(cons1.pop_slice(&mut tmp), 3);
    assert_eq!(tmp[0..3], [3, 4, 5]);

    assert_eq!(prod1.push_slice(&[6, 7, 8]), 3);
    assert_eq!(prod0.push_slice(&[9, 10]), 2);
    assert_eq!(transfer(&mut cons0, &mut prod1, None), 1);
    assert_eq!(transfer(&mut cons0, &mut prod1, None), 0);
    assert_eq!(cons1.pop_slice(&mut tmp), 4);
    assert_eq!(tmp[0..4], [6, 7, 8, 9]);
}

#[test]
fn move_slice_count() {
    let buf0 = HeapRb::<i32>::new(4);
    let buf1 = HeapRb::<i32>::new(4);
    let (mut prod0, mut cons0) = buf0.split();
    let (mut prod1, mut cons1) = buf1.split();

    let mut tmp = [0; 5];

    assert_eq!(prod0.push_slice(&[0, 1, 2]), 3);
    assert_eq!(transfer(&mut cons0, &mut prod1, Some(2)), 2);
    assert_eq!(cons1.pop_slice(&mut tmp), 2);
    assert_eq!(tmp[0..2], [0, 1]);

    assert_eq!(transfer(&mut cons0, &mut prod1, Some(2)), 1);
    assert_eq!(cons1.pop_slice(&mut tmp), 1);
    assert_eq!(tmp[0..1], [2]);

    assert_eq!(prod0.push_slice(&[3, 4, 5, 6]), 4);
    assert_eq!(transfer(&mut cons0, &mut prod1, Some(3)), 3);
    assert_eq!(cons1.pop_slice(&mut tmp), 3);
    assert_eq!(tmp[0..3], [3, 4, 5]);

    assert_eq!(prod0.push_slice(&[7, 8, 9]), 3);
    assert_eq!(transfer(&mut cons0, &mut prod1, Some(5)), 4);
    assert_eq!(cons1.pop_slice(&mut tmp), 4);
    assert_eq!(tmp[0..4], [6, 7, 8, 9]);
}

#[test]
#[should_panic]
fn push_slice_panic() {
    let mut rb = HeapRb::<i32>::new(2);
    rb.push_slice(&[1, 2, 3]);
}

#[test]
#[should_panic]
fn pop_slice_panic() {
    let mut rb = HeapRb::<i32>::new(2);
    rb.push(1).unwrap();
    let mut tmp = [0; 2];
    rb.pop_slice(&mut tmp);
}
ringbuf-0.3.3/src/transfer.rs000064400000000000000000000024121046102023000142430ustar 00000000000000use crate::{
    consumer::Consumer,
    producer::Producer,
    ring_buffer::{RbRead, RbRef, RbWrite},
};

/// Moves at most `count` items from the `src` consumer to the `dst` producer.
///
/// The consumer and producer may belong to different ring buffers as well as to the same one.
/// `count` is the number of items to move; if it is `None`, as many items as possible are moved.
///
/// Returns the number of items actually moved.
pub fn transfer<T, Rs: RbRef, Rd: RbRef>(
    src: &mut Consumer<T, Rs>,
    dst: &mut Producer<T, Rd>,
    count: Option<usize>,
) -> usize
where
    Rs::Rb: RbRead<T>,
    Rd::Rb: RbWrite<T>,
{
    let (src_left, src_right) = unsafe { src.as_uninit_slices() };
    let (dst_left, dst_right) = unsafe { dst.free_space_as_slices() };
    let src_iter = src_left.iter().chain(src_right.iter());
    let dst_iter = dst_left.iter_mut().chain(dst_right.iter_mut());

    let mut actual_count = 0;
    for (src_elem, dst_place) in src_iter.zip(dst_iter) {
        if let Some(count) = count {
            if actual_count >= count {
                break;
            }
        }
        unsafe { dst_place.write(src_elem.as_ptr().read()) };
        actual_count += 1;
    }
    unsafe { src.advance(actual_count) };
    unsafe { dst.advance(actual_count) };
    actual_count
}
ringbuf-0.3.3/src/utils.rs000064400000000000000000000036611046102023000135660ustar 00000000000000use core::{
    mem::{self, MaybeUninit},
    num::NonZeroUsize,
    ops::Range,
};

/// Returns a pair of ranges between `head` and `tail` positions in a ring buffer with the given `capacity`.
///
/// `head` and `tail` may be arbitrarily large, but must satisfy the following condition: `0 <= (tail - head) % (2 * capacity) <= capacity`.
/// Actual positions are taken modulo `capacity`.
///
/// The first range starts from `head`. If the first range is empty then the second one is empty too.
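///
/// A worked example (sketch): with `capacity = 4`, `head = 2` and `tail = 6` (a full buffer),
/// the occupied region wraps around the end of the storage and the result is `(2..4, 0..2)`.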
pub fn ring_buffer_ranges(
    capacity: NonZeroUsize,
    head: usize,
    tail: usize,
) -> (Range<usize>, Range<usize>) {
    let (head_quo, head_rem) = (head / capacity, head % capacity);
    let (tail_quo, tail_rem) = (tail / capacity, tail % capacity);

    if (head_quo + tail_quo) % 2 == 0 {
        (head_rem..tail_rem, 0..0)
    } else {
        (head_rem..capacity.get(), 0..tail_rem)
    }
}

// TODO: Remove on `maybe_uninit_uninit_array` stabilization.
pub fn uninit_array<T, const N: usize>() -> [MaybeUninit<T>; N] {
    unsafe { MaybeUninit::<[MaybeUninit<T>; N]>::uninit().assume_init() }
}

// TODO: Remove on `maybe_uninit_slice` stabilization.
/// # Safety
///
/// All items of `slice` must be initialized.
pub unsafe fn slice_assume_init_ref<T>(slice: &[MaybeUninit<T>]) -> &[T] {
    &*(slice as *const [MaybeUninit<T>] as *const [T])
}

// TODO: Remove on `maybe_uninit_slice` stabilization.
/// # Safety
///
/// All items of `slice` must be initialized.
pub unsafe fn slice_assume_init_mut<T>(slice: &mut [MaybeUninit<T>]) -> &mut [T] {
    &mut *(slice as *mut [MaybeUninit<T>] as *mut [T])
}

// TODO: Remove on `maybe_uninit_write_slice` stabilization.
pub fn write_slice<'a, T: Copy>(dst: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T] {
    let uninit_src: &[MaybeUninit<T>] = unsafe { mem::transmute(src) };
    dst.copy_from_slice(uninit_src);
    unsafe { slice_assume_init_mut(dst) }
}

/// # Safety
///
/// All items of `src` must be initialized.
pub unsafe fn write_uninit_slice<'a, T: Copy>(
    dst: &'a mut [T],
    src: &[MaybeUninit<T>],
) -> &'a mut [T] {
    dst.copy_from_slice(slice_assume_init_ref(src));
    dst
}