bytes-1.5.0/.cargo_vcs_info.json0000644000000001360000000000100121650ustar { "git": { "sha1": "74e6e200fd671340d4d4a874f83776def04f6c7b" }, "path_in_vcs": "" }bytes-1.5.0/.github/workflows/ci.yml000064400000000000000000000105751046102023000155000ustar 00000000000000name: CI on: pull_request: branches: - master push: branches: - master env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 nightly: nightly-2022-11-12 defaults: run: shell: bash jobs: # Check formatting rustfmt: name: rustfmt runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable && rustup default stable - name: Check formatting run: cargo fmt --all -- --check # TODO # # Apply clippy lints # clippy: # name: clippy # runs-on: ubuntu-latest # steps: # - uses: actions/checkout@v3 # - name: Apply clippy lints # run: cargo clippy --all-features # This represents the minimum Rust version supported by # Bytes. Updating this should be done in a dedicated PR. # # Tests are not run as tests may require newer versions of # rust. minrust: name: minrust runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update 1.39.0 && rustup default 1.39.0 - name: Check run: . ci/test-stable.sh check # Stable stable: name: stable strategy: matrix: os: - ubuntu-latest - macos-latest - windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v3 - name: Install Rust # --no-self-update is necessary because the windows environment cannot self-update rustup.exe. run: rustup update stable --no-self-update && rustup default stable - name: Test run: . ci/test-stable.sh test # Nightly nightly: name: nightly runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Test run: . ci/test-stable.sh test # Run tests on some extra platforms cross: name: cross strategy: matrix: target: - i686-unknown-linux-gnu - armv7-unknown-linux-gnueabihf - powerpc-unknown-linux-gnu - powerpc64-unknown-linux-gnu - wasm32-unknown-unknown runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update stable && rustup default stable - name: cross build --target ${{ matrix.target }} run: | cargo install cross cross build --target ${{ matrix.target }} if: matrix.target != 'wasm32-unknown-unknown' # WASM support - name: cargo build --target ${{ matrix.target }} run: | rustup target add ${{ matrix.target }} cargo build --target ${{ matrix.target }} if: matrix.target == 'wasm32-unknown-unknown' # Sanitizers tsan: name: tsan runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Install rust-src run: rustup component add rust-src - name: ASAN / TSAN run: . 
ci/tsan.sh miri: name: miri runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Miri run: ci/miri.sh # Loom loom: name: loom runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Loom tests run: RUSTFLAGS="--cfg loom -Dwarnings" cargo test --lib publish_docs: name: Publish Documentation needs: - rustfmt # - clippy - stable - nightly - minrust - cross - tsan - loom runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Build documentation run: cargo doc --no-deps --all-features env: RUSTDOCFLAGS: --cfg docsrs - name: Publish documentation run: | cd target/doc git init git add . git -c user.name='ci' -c user.email='ci' commit -m 'Deploy Bytes API documentation' git push -f -q https://git:${{ secrets.github_token }}@github.com/${{ github.repository }} HEAD:gh-pages if: github.event_name == 'push' && github.event.ref == 'refs/heads/master' && github.repository == 'tokio-rs/bytes' bytes-1.5.0/.gitignore000064400000000000000000000000241046102023000127410ustar 00000000000000/target /Cargo.lock bytes-1.5.0/CHANGELOG.md000064400000000000000000000175001046102023000125710ustar 00000000000000# 1.5.0 (September 7, 2023) ### Added - Add `UninitSlice::{new,init}` (#598, #599) - Implement `BufMut` for `&mut [MaybeUninit]` (#597) ### Changed - Mark `BytesMut::extend_from_slice` as inline (#595) # 1.4.0 (January 31, 2023) ### Added - Make `IntoIter` constructor public (#581) ### Fixed - Avoid large reallocations when freezing `BytesMut` (#592) ### Documented - Document which functions require `std` (#591) - Fix duplicate "the the" typos (#585) # 1.3.0 (November 20, 2022) ### Added - Rename and expose `BytesMut::spare_capacity_mut` (#572) - Implement native-endian get and put functions for `Buf` and `BufMut` (#576) ### Fixed - Don't have important data in unused capacity when calling reserve (#563) ### Documented - `Bytes::new` etc should return `Self` not `Bytes` (#568) # 1.2.1 (July 30, 2022) ### Fixed - Fix unbounded memory growth when using `reserve` (#560) # 1.2.0 (July 19, 2022) ### Added - Add `BytesMut::zeroed` (#517) - Implement `Extend` for `BytesMut` (#527) - Add conversion from `BytesMut` to `Vec` (#543, #554) - Add conversion from `Bytes` to `Vec` (#547) - Add `UninitSlice::as_uninit_slice_mut()` (#548) - Add const to `Bytes::{len,is_empty}` (#514) ### Changed - Reuse vector in `BytesMut::reserve` (#539, #544) ### Fixed - Make miri happy (#515, #523, #542, #545, #553) - Make tsan happy (#541) - Fix `remaining_mut()` on chain (#488) - Fix amortized asymptotics of `BytesMut` (#555) ### Documented - Redraw layout diagram with box drawing characters (#539) - Clarify `BytesMut::unsplit` docs (#535) # 1.1.0 (August 25, 2021) ### Added - `BufMut::put_bytes(self, val, cnt)` (#487) - Implement `From>` for `Bytes` (#504) ### Changed - Override `put_slice` for `&mut [u8]` (#483) - Panic on integer overflow in `Chain::remaining` (#482) - Add inline tags to `UninitSlice` methods (#443) - Override `copy_to_bytes` for Chain and Take (#481) - Keep capacity when unsplit on empty other buf (#502) ### Documented - Clarify `BufMut` allocation guarantees (#501) - Clarify `BufMut::put_int` behavior (#486) - Clarify actions of `clear` and `truncate`. 
(#508)

# 1.0.1 (January 11, 2021)

### Changed
- mark `Vec::put_slice` with `#[inline]` (#459)

### Fixed
- Fix deprecation warning (#457)
- use `Box::into_raw` instead of `mem::forget`-in-disguise (#458)

# 1.0.0 (December 22, 2020)

### Changed
- Rename `Buf`/`BufMut` methods `bytes()` and `bytes_mut()` to `chunk()` and `chunk_mut()` (#450)

### Removed
- remove unused Buf implementation. (#449)

# 0.6.0 (October 21, 2020)

API polish in preparation for a 1.0 release.

### Changed
- `BufMut` is now an `unsafe` trait (#432).
- `BufMut::bytes_mut()` returns `&mut UninitSlice`, a type owned by `bytes` to avoid undefined behavior (#433).
- `Buf::copy_to_bytes(len)` replaces `Buf::into_bytes()` (#439).
- `Buf`/`BufMut` utility methods are moved onto the trait and `*Ext` traits are removed (#431).

### Removed
- `BufMut::bytes_vectored_mut()` (#430).
- `new` methods on combinator types (#434).

# 0.5.6 (July 13, 2020)

- Improve `BytesMut` to reuse buffer when fully `advance`d.
- Mark `BytesMut::{as_mut, set_len}` with `#[inline]`.
- Relax synchronization when cloning in shared vtable of `Bytes`.
- Move `loom` to `dev-dependencies`.

# 0.5.5 (June 18, 2020)

### Added
- Allow using the `serde` feature in `no_std` environments (#385).

### Fix
- Fix `BufMut::advance_mut` to panic if advanced past the capacity (#354).
- Fix `BytesMut::freeze` ignoring amount previously `advance`d (#352).

# 0.5.4 (January 23, 2020)

### Added
- Make `Bytes::new` a `const fn`.
- Add `From` for `Bytes`.

### Fix
- Fix reversed arguments in `PartialOrd` for `Bytes`.
- Fix `Bytes::truncate` losing original capacity when repr is an unshared `Vec`.
- Fix `Bytes::from(Vec)` when allocator gave `Vec` a pointer with LSB set.
- Fix panic in `Bytes::slice_ref` if argument is an empty slice.

# 0.5.3 (December 12, 2019)

### Added
- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337).

### Fix
- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec` (#341, #342).
- Calling `Bytes::truncate` with a size larger than the length will no longer clear the `Bytes` (#333).

# 0.5.2 (November 27, 2019)

### Added
- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325).

# 0.5.1 (November 25, 2019)

### Fix
- Growth documentation for `BytesMut` (#321)

# 0.5.0 (November 25, 2019)

### Fix
- Potential overflow in `copy_to_slice`

### Changed
- Increased minimum supported Rust version to 1.39.
- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298)
- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns `usize::MAX` (#316).
- `BufMut::bytes_mut` returns `&mut [MaybeUninit<u8>]` to reflect the unknown initialization state (#305).
- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]` respectively (#261).
- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306).
- `BufMutExt::limit` (#309).
- `Bytes::slice` takes a `RangeBounds` argument (#265).
- `Bytes::from_static` is now a `const fn` (#311).
- A multitude of smaller performance optimizations.

### Added
- `no_std` support (#281).
- `get_*`, `put_*`, `get_*_le`, and `put_*_le` accessors for handling byte order.
- `BorrowMut` implementation for `BytesMut` (#185).

### Removed
- `IntoBuf` (#288).
- `Buf` implementation for `&str` (#301).
- `byteorder` dependency (#280).
- `iovec` dependency, use `std::IoSlice` instead (#263).
- optional `either` dependency (#315).
- optional `i128` feature -- now available on stable. (#276).
# 0.4.12 (March 6, 2019)

### Added
- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244).
- Implement `Buf` for `VecDeque<u8>` (#249).

# 0.4.11 (November 17, 2018)

* Use raw pointers for potentially racy loads (#233).
* Implement `BufRead` for `buf::Reader` (#232).
* Documentation tweaks (#234).

# 0.4.10 (September 4, 2018)

* impl `Buf` and `BufMut` for `Either` (#225).
* Add `Bytes::slice_ref` (#208).

# 0.4.9 (July 12, 2018)

* Add 128 bit number support behind a feature flag (#209).
* Implement `IntoBuf` for `&mut [u8]`

# 0.4.8 (May 25, 2018)

* Fix panic in `BytesMut` `FromIterator` implementation.
* Bytes: Recycle space when reserving space in vec mode (#197).
* Bytes: Add resize fn (#203).

# 0.4.7 (April 27, 2018)

* Make `Buf` and `BufMut` usable as trait objects (#186).
* impl BorrowMut for BytesMut (#185).
* Improve accessor performance (#195).

# 0.4.6 (January 8, 2018)

* Implement FromIterator for Bytes/BytesMut (#148).
* Add `advance` fn to Bytes/BytesMut (#166).
* Add `unsplit` fn to `BytesMut` (#162, #173).
* Improvements to Bytes split fns (#92).

# 0.4.5 (August 12, 2017)

* Fix range bug in `Take::bytes`
* Misc performance improvements
* Add extra `PartialEq` implementations.
* Add `Bytes::with_capacity`
* Implement `AsMut<[u8]>` for `BytesMut`

# 0.4.4 (May 26, 2017)

* Add serde support behind feature flag
* Add `extend_from_slice` on `Bytes` and `BytesMut`
* Add `truncate` and `clear` on `Bytes`
* Misc additional std trait implementations
* Misc performance improvements

# 0.4.3 (April 30, 2017)

* Fix Vec::advance_mut bug
* Bump minimum Rust version to 1.15
* Misc performance tweaks

# 0.4.2 (April 5, 2017)

* Misc performance tweaks
* Improved `Debug` implementation for `Bytes`
* Avoid some incorrect assert panics

# 0.4.1 (March 15, 2017)

* Expose `buf` module and have most types available from there vs. root.
* Implement `IntoBuf` for `T: Buf`.
* Add `FromBuf` and `Buf::collect`.
* Add iterator adapter for `Buf`.
* Add scatter/gather support to `Buf` and `BufMut`.
* Add `Buf::chain`.
* Reduce allocations on repeated calls to `BytesMut::reserve`.
* Implement `Debug` for more types.
* Remove `Source` in favor of `IntoBuf`.
* Implement `Extend` for `BytesMut`.

# 0.4.0 (February 24, 2017)

* Initial release

bytes-1.5.0/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package] edition = "2018" name = "bytes" version = "1.5.0" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "Types and traits for working with bytes" readme = "README.md" keywords = [ "buffers", "zero-copy", "io", ] categories = [ "network-programming", "data-structures", ] license = "MIT" repository = "https://github.com/tokio-rs/bytes" [package.metadata.docs.rs] rustdoc-args = [ "--cfg", "docsrs", ] [dependencies.serde] version = "1.0.60" features = ["alloc"] optional = true default-features = false [dev-dependencies.serde_test] version = "1.0" [features] default = ["std"] std = [] [target."cfg(loom)".dev-dependencies.loom] version = "0.5" bytes-1.5.0/Cargo.toml.orig000064400000000000000000000014241046102023000136450ustar 00000000000000[package] name = "bytes" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. version = "1.5.0" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "Types and traits for working with bytes" repository = "https://github.com/tokio-rs/bytes" readme = "README.md" keywords = ["buffers", "zero-copy", "io"] categories = ["network-programming", "data-structures"] edition = "2018" [features] default = ["std"] std = [] [dependencies] serde = { version = "1.0.60", optional = true, default-features = false, features = ["alloc"] } [dev-dependencies] serde_test = "1.0" [target.'cfg(loom)'.dev-dependencies] loom = "0.5" [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] bytes-1.5.0/LICENSE000064400000000000000000000020371046102023000117640ustar 00000000000000Copyright (c) 2018 Carl Lerche Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. bytes-1.5.0/README.md000064400000000000000000000023761046102023000122440ustar 00000000000000# Bytes A utility library for working with bytes. [![Crates.io][crates-badge]][crates-url] [![Build Status][ci-badge]][ci-url] [crates-badge]: https://img.shields.io/crates/v/bytes.svg [crates-url]: https://crates.io/crates/bytes [ci-badge]: https://github.com/tokio-rs/bytes/workflows/CI/badge.svg [ci-url]: https://github.com/tokio-rs/bytes/actions [Documentation](https://docs.rs/bytes) ## Usage To use `bytes`, first add this to your `Cargo.toml`: ```toml [dependencies] bytes = "1" ``` Next, add this to your crate: ```rust use bytes::{Bytes, BytesMut, Buf, BufMut}; ``` ## Serde support Serde support is optional and disabled by default. To enable use the feature `serde`. 
```toml [dependencies] bytes = { version = "1", features = ["serde"] } ``` ## Building documentation When building the `bytes` documentation the `docsrs` option should be used, otherwise feature gates will not be shown. This requires a nightly toolchain: ``` RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc ``` ## License This project is licensed under the [MIT license](LICENSE). ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in `bytes` by you, shall be licensed as MIT, without any additional terms or conditions. bytes-1.5.0/benches/buf.rs000064400000000000000000000121271046102023000135110ustar 00000000000000#![feature(test)] #![warn(rust_2018_idioms)] extern crate test; use bytes::Buf; use test::Bencher; /// Dummy Buf implementation struct TestBuf { buf: &'static [u8], readlens: &'static [usize], init_pos: usize, pos: usize, readlen_pos: usize, readlen: usize, } impl TestBuf { fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf { let mut buf = TestBuf { buf, readlens, init_pos, pos: 0, readlen_pos: 0, readlen: 0, }; buf.reset(); buf } fn reset(&mut self) { self.pos = self.init_pos; self.readlen_pos = 0; self.next_readlen(); } /// Compute the length of the next read : /// - use the next value specified in readlens (capped by remaining) if any /// - else the remaining fn next_readlen(&mut self) { self.readlen = self.buf.len() - self.pos; if let Some(readlen) = self.readlens.get(self.readlen_pos) { self.readlen = std::cmp::min(self.readlen, *readlen); self.readlen_pos += 1; } } } impl Buf for TestBuf { fn remaining(&self) -> usize { self.buf.len() - self.pos } fn advance(&mut self, cnt: usize) { self.pos += cnt; assert!(self.pos <= self.buf.len()); self.next_readlen(); } fn chunk(&self) -> &[u8] { if self.readlen == 0 { Default::default() } else { &self.buf[self.pos..self.pos + self.readlen] } } } /// Dummy Buf implementation /// version with methods forced to not be inlined (to simulate costly calls) struct TestBufC { inner: TestBuf, } impl TestBufC { fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC { TestBufC { inner: TestBuf::new(buf, readlens, init_pos), } } fn reset(&mut self) { self.inner.reset() } } impl Buf for TestBufC { #[inline(never)] fn remaining(&self) -> usize { self.inner.remaining() } #[inline(never)] fn advance(&mut self, cnt: usize) { self.inner.advance(cnt) } #[inline(never)] fn chunk(&self) -> &[u8] { self.inner.chunk() } } macro_rules! 
bench { ($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => ( #[bench] fn $fname(b: &mut Bencher) { let mut bufs = [ $testbuf::new(&[1u8; 8+0], $readlens, 0), $testbuf::new(&[1u8; 8+1], $readlens, 1), $testbuf::new(&[1u8; 8+2], $readlens, 2), $testbuf::new(&[1u8; 8+3], $readlens, 3), $testbuf::new(&[1u8; 8+4], $readlens, 4), $testbuf::new(&[1u8; 8+5], $readlens, 5), $testbuf::new(&[1u8; 8+6], $readlens, 6), $testbuf::new(&[1u8; 8+7], $readlens, 7), ]; b.iter(|| { for i in 0..8 { bufs[i].reset(); let buf: &mut dyn Buf = &mut bufs[i]; // type erasure test::black_box(buf.$method($($arg,)*)); } }) } ); ($fname:ident, slice, $method:ident $(,$arg:expr)*) => ( #[bench] fn $fname(b: &mut Bencher) { // buf must be long enough for one read of 8 bytes starting at pos 7 let arr = [1u8; 8+7]; b.iter(|| { for i in 0..8 { let mut buf = &arr[i..]; let buf = &mut buf as &mut dyn Buf; // type erasure test::black_box(buf.$method($($arg,)*)); } }) } ); ($fname:ident, option) => ( #[bench] fn $fname(b: &mut Bencher) { let data = [1u8; 1]; b.iter(|| { for _ in 0..8 { let mut buf = Some(data); let buf = &mut buf as &mut dyn Buf; // type erasure test::black_box(buf.get_u8()); } }) } ); } macro_rules! bench_group { ($method:ident $(,$arg:expr)*) => ( bench!(slice, slice, $method $(,$arg)*); bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*); bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*); bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*); bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*); // bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*); // bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*); ); } mod get_u8 { use super::*; bench_group!(get_u8); } mod get_u16 { use super::*; bench_group!(get_u16); } mod get_u32 { use super::*; bench_group!(get_u32); } mod get_u64 { use super::*; bench_group!(get_u64); } mod get_f32 { use super::*; bench_group!(get_f32); } mod get_f64 { use super::*; bench_group!(get_f64); } mod get_uint24 { use super::*; bench_group!(get_uint, 3); } bytes-1.5.0/benches/bytes.rs000064400000000000000000000046741046102023000140730ustar 00000000000000#![feature(test)] #![warn(rust_2018_idioms)] extern crate test; use bytes::Bytes; use test::Bencher; #[bench] fn deref_unique(b: &mut Bencher) { let buf = Bytes::from(vec![0; 1024]); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_shared(b: &mut Bencher) { let buf = Bytes::from(vec![0; 1024]); let _b2 = buf.clone(); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_static(b: &mut Bencher) { let buf = Bytes::from_static(b"hello world"); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn clone_static(b: &mut Bencher) { let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn clone_shared(b: &mut Bencher) { let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn clone_arc_vec(b: &mut Bencher) { use std::sync::Arc; let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn from_long_slice(b: &mut Bencher) { let data = [0u8; 128]; b.bytes = data.len() as u64; b.iter(|| { let 
buf = Bytes::copy_from_slice(&data[..]); test::black_box(buf); }) } #[bench] fn slice_empty(b: &mut Bencher) { b.iter(|| { // `clone` is to convert to ARC let b = Bytes::from(vec![17; 1024]).clone(); for i in 0..1000 { test::black_box(b.slice(i % 100..i % 100)); } }) } #[bench] fn slice_short_from_arc(b: &mut Bencher) { b.iter(|| { // `clone` is to convert to ARC let b = Bytes::from(vec![17; 1024]).clone(); for i in 0..1000 { test::black_box(b.slice(1..2 + i % 10)); } }) } #[bench] fn split_off_and_drop(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { let v = vec![10; 200]; let mut b = Bytes::from(v); test::black_box(b.split_off(100)); test::black_box(b); } }) } bytes-1.5.0/benches/bytes_mut.rs000064400000000000000000000124571046102023000147560ustar 00000000000000#![feature(test)] #![warn(rust_2018_idioms)] extern crate test; use bytes::{BufMut, BytesMut}; use test::Bencher; #[bench] fn alloc_small(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { test::black_box(BytesMut::with_capacity(12)); } }) } #[bench] fn alloc_mid(b: &mut Bencher) { b.iter(|| { test::black_box(BytesMut::with_capacity(128)); }) } #[bench] fn alloc_big(b: &mut Bencher) { b.iter(|| { test::black_box(BytesMut::with_capacity(4096)); }) } #[bench] fn deref_unique(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_unique_unroll(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..128 { test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); } }) } #[bench] fn deref_shared(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); let _b2 = buf.split_off(1024); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_two(b: &mut Bencher) { let mut buf1 = BytesMut::with_capacity(8); buf1.put(&[0u8; 8][..]); let mut buf2 = BytesMut::with_capacity(4096); buf2.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..512 { test::black_box(&buf1[..]); test::black_box(&buf2[..]); } }) } #[bench] fn clone_frozen(b: &mut Bencher) { let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]) .split() .freeze(); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn alloc_write_split_to_mid(b: &mut Bencher) { b.iter(|| { let mut buf = BytesMut::with_capacity(128); buf.put_slice(&[0u8; 64]); test::black_box(buf.split_to(64)); }) } #[bench] fn drain_write_drain(b: &mut Bencher) { let data = [0u8; 128]; b.iter(|| { let mut buf = BytesMut::with_capacity(1024); let mut parts = Vec::with_capacity(8); for _ in 0..8 { buf.put(&data[..]); parts.push(buf.split_to(128)); } test::black_box(parts); }) } #[bench] fn fmt_write(b: &mut Bencher) { use std::fmt::Write; let mut buf = BytesMut::with_capacity(128); let s = "foo bar baz quux lorem ipsum dolor et"; b.bytes = s.len() as u64; b.iter(|| { let _ = write!(buf, "{}", s); test::black_box(&buf); unsafe { buf.set_len(0); } }) } #[bench] fn bytes_mut_extend(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(256); let data = [33u8; 32]; b.bytes = data.len() as u64 * 4; b.iter(|| { for _ in 0..4 { buf.extend(&data); } test::black_box(&buf); unsafe { buf.set_len(0); } }); } // BufMut for BytesMut vs Vec #[bench] fn put_slice_bytes_mut(b: &mut Bencher) { 
    let mut buf = BytesMut::with_capacity(256);
    let data = [33u8; 32];
    b.bytes = data.len() as u64 * 4;
    b.iter(|| {
        for _ in 0..4 {
            buf.put_slice(&data);
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

#[bench]
fn put_u8_bytes_mut(b: &mut Bencher) {
    let mut buf = BytesMut::with_capacity(256);
    let cnt = 128;
    b.bytes = cnt as u64;
    b.iter(|| {
        for _ in 0..cnt {
            buf.put_u8(b'x');
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

#[bench]
fn put_slice_vec(b: &mut Bencher) {
    let mut buf = Vec::<u8>::with_capacity(256);
    let data = [33u8; 32];
    b.bytes = data.len() as u64 * 4;
    b.iter(|| {
        for _ in 0..4 {
            buf.put_slice(&data);
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

#[bench]
fn put_u8_vec(b: &mut Bencher) {
    let mut buf = Vec::<u8>::with_capacity(256);
    let cnt = 128;
    b.bytes = cnt as u64;
    b.iter(|| {
        for _ in 0..cnt {
            buf.put_u8(b'x');
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

#[bench]
fn put_slice_vec_extend(b: &mut Bencher) {
    let mut buf = Vec::<u8>::with_capacity(256);
    let data = [33u8; 32];
    b.bytes = data.len() as u64 * 4;
    b.iter(|| {
        for _ in 0..4 {
            buf.extend_from_slice(&data);
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

#[bench]
fn put_u8_vec_push(b: &mut Bencher) {
    let mut buf = Vec::<u8>::with_capacity(256);
    let cnt = 128;
    b.bytes = cnt as u64;
    b.iter(|| {
        for _ in 0..cnt {
            buf.push(b'x');
        }
        test::black_box(&buf);
        unsafe {
            buf.set_len(0);
        }
    });
}

bytes-1.5.0/ci/miri.sh

#!/bin/bash
set -e

rustup toolchain install nightly --component miri
rustup override set nightly
cargo miri setup

export MIRIFLAGS="-Zmiri-strict-provenance"

cargo miri test
cargo miri test --target mips64-unknown-linux-gnuabi64

bytes-1.5.0/ci/test-stable.sh

#!/bin/bash
set -ex

cmd="${1:-test}"

# Install cargo-hack for feature flag test
host=$(rustc -Vv | grep host | sed 's/host: //')
curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-$host.tar.gz | tar xzf - -C ~/.cargo/bin

# Run with each feature
# * --each-feature includes both default/no-default features
# * --optional-deps is needed for serde feature
cargo hack "${cmd}" --each-feature --optional-deps
# Run with all features
cargo "${cmd}" --all-features

cargo doc --no-deps --all-features

if [[ "${RUST_VERSION}" == "nightly"* ]]; then
    # Check benchmarks
    cargo check --benches

    # Check minimal versions
    cargo clean
    cargo update -Zminimal-versions
    cargo check --all-features
fi

bytes-1.5.0/ci/tsan.sh

#!/bin/bash
set -ex

export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0"

# Run address sanitizer
RUSTFLAGS="-Z sanitizer=address" \
cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut

# Run thread sanitizer
RUSTFLAGS="-Z sanitizer=thread" \
cargo -Zbuild-std test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut

bytes-1.5.0/clippy.toml

msrv = "1.39"

bytes-1.5.0/src/buf/buf_impl.rs

#[cfg(feature = "std")]
use crate::buf::{reader, Reader};
use crate::buf::{take, Chain, Take};
use core::{cmp, mem, ptr};
#[cfg(feature = "std")]
use std::io::IoSlice;

use alloc::boxed::Box;

macro_rules!
buf_get_impl { ($this:ident, $typ:tt::$conv:tt) => {{ const SIZE: usize = mem::size_of::<$typ>(); // try to convert directly from the bytes // this Option trick is to avoid keeping a borrow on self // when advance() is called (mut borrow) and to call bytes() only once let ret = $this .chunk() .get(..SIZE) .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) }); if let Some(ret) = ret { // if the direct conversion was possible, advance and return $this.advance(SIZE); return ret; } else { // if not we copy the bytes in a temp buffer then convert let mut buf = [0; SIZE]; $this.copy_to_slice(&mut buf); // (do the advance) return $typ::$conv(buf); } }}; (le => $this:ident, $typ:tt, $len_to_read:expr) => {{ debug_assert!(mem::size_of::<$typ>() >= $len_to_read); // The same trick as above does not improve the best case speed. // It seems to be linked to the way the method is optimised by the compiler let mut buf = [0; (mem::size_of::<$typ>())]; $this.copy_to_slice(&mut buf[..($len_to_read)]); return $typ::from_le_bytes(buf); }}; (be => $this:ident, $typ:tt, $len_to_read:expr) => {{ debug_assert!(mem::size_of::<$typ>() >= $len_to_read); let mut buf = [0; (mem::size_of::<$typ>())]; $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]); return $typ::from_be_bytes(buf); }}; } /// Read bytes from a buffer. /// /// A buffer stores bytes in memory such that read operations are infallible. /// The underlying storage may or may not be in contiguous memory. A `Buf` value /// is a cursor into the buffer. Reading from `Buf` advances the cursor /// position. It can be thought of as an efficient `Iterator` for collections of /// bytes. /// /// The simplest `Buf` is a `&[u8]`. /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"hello world"[..]; /// /// assert_eq!(b'h', buf.get_u8()); /// assert_eq!(b'e', buf.get_u8()); /// assert_eq!(b'l', buf.get_u8()); /// /// let mut rest = [0; 8]; /// buf.copy_to_slice(&mut rest); /// /// assert_eq!(&rest[..], &b"lo world"[..]); /// ``` pub trait Buf { /// Returns the number of bytes between the current position and the end of /// the buffer. /// /// This value is greater than or equal to the length of the slice returned /// by `chunk()`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"hello world"[..]; /// /// assert_eq!(buf.remaining(), 11); /// /// buf.get_u8(); /// /// assert_eq!(buf.remaining(), 10); /// ``` /// /// # Implementer notes /// /// Implementations of `remaining` should ensure that the return value does /// not change unless a call is made to `advance` or any other function that /// is documented to change the `Buf`'s current position. fn remaining(&self) -> usize; /// Returns a slice starting at the current position and of length between 0 /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows /// non-continuous internal representation). /// /// This is a lower level function. Most operations are done with other /// functions. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"hello world"[..]; /// /// assert_eq!(buf.chunk(), &b"hello world"[..]); /// /// buf.advance(6); /// /// assert_eq!(buf.chunk(), &b"world"[..]); /// ``` /// /// # Implementer notes /// /// This function should never panic. Once the end of the buffer is reached, /// i.e., `Buf::remaining` returns 0, calls to `chunk()` should return an /// empty slice. // The `chunk` method was previously called `bytes`. This alias makes the rename // more easily discoverable. 
#[cfg_attr(docsrs, doc(alias = "bytes"))] fn chunk(&self) -> &[u8]; /// Fills `dst` with potentially multiple slices starting at `self`'s /// current position. /// /// If the `Buf` is backed by disjoint slices of bytes, `chunk_vectored` enables /// fetching more than one slice at once. `dst` is a slice of `IoSlice` /// references, enabling the slice to be directly used with [`writev`] /// without any further conversion. The sum of the lengths of all the /// buffers in `dst` will be less than or equal to `Buf::remaining()`. /// /// The entries in `dst` will be overwritten, but the data **contained** by /// the slices **will not** be modified. If `chunk_vectored` does not fill every /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices /// in `self. /// /// This is a lower level function. Most operations are done with other /// functions. /// /// # Implementer notes /// /// This function should never panic. Once the end of the buffer is reached, /// i.e., `Buf::remaining` returns 0, calls to `chunk_vectored` must return 0 /// without mutating `dst`. /// /// Implementations should also take care to properly handle being called /// with `dst` being a zero length slice. /// /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { if dst.is_empty() { return 0; } if self.has_remaining() { dst[0] = IoSlice::new(self.chunk()); 1 } else { 0 } } /// Advance the internal cursor of the Buf /// /// The next call to `chunk()` will return a slice starting `cnt` bytes /// further into the underlying buffer. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"hello world"[..]; /// /// assert_eq!(buf.chunk(), &b"hello world"[..]); /// /// buf.advance(6); /// /// assert_eq!(buf.chunk(), &b"world"[..]); /// ``` /// /// # Panics /// /// This function **may** panic if `cnt > self.remaining()`. /// /// # Implementer notes /// /// It is recommended for implementations of `advance` to panic if `cnt > /// self.remaining()`. If the implementation does not panic, the call must /// behave as if `cnt == self.remaining()`. /// /// A call with `cnt == 0` should never panic and be a no-op. fn advance(&mut self, cnt: usize); /// Returns true if there are any more bytes to consume /// /// This is equivalent to `self.remaining() != 0`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"a"[..]; /// /// assert!(buf.has_remaining()); /// /// buf.get_u8(); /// /// assert!(!buf.has_remaining()); /// ``` fn has_remaining(&self) -> bool { self.remaining() > 0 } /// Copies bytes from `self` into `dst`. /// /// The cursor is advanced by the number of bytes copied. `self` must have /// enough remaining bytes to fill `dst`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"hello world"[..]; /// let mut dst = [0; 5]; /// /// buf.copy_to_slice(&mut dst); /// assert_eq!(&b"hello"[..], &dst); /// assert_eq!(6, buf.remaining()); /// ``` /// /// # Panics /// /// This function panics if `self.remaining() < dst.len()` fn copy_to_slice(&mut self, dst: &mut [u8]) { let mut off = 0; assert!(self.remaining() >= dst.len()); while off < dst.len() { let cnt; unsafe { let src = self.chunk(); cnt = cmp::min(src.len(), dst.len() - off); ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt); off += cnt; } self.advance(cnt); } } /// Gets an unsigned 8 bit integer from `self`. 
/// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08 hello"[..]; /// assert_eq!(8, buf.get_u8()); /// ``` /// /// # Panics /// /// This function panics if there is no more remaining data in `self`. fn get_u8(&mut self) -> u8 { assert!(self.remaining() >= 1); let ret = self.chunk()[0]; self.advance(1); ret } /// Gets a signed 8 bit integer from `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08 hello"[..]; /// assert_eq!(8, buf.get_i8()); /// ``` /// /// # Panics /// /// This function panics if there is no more remaining data in `self`. fn get_i8(&mut self) -> i8 { assert!(self.remaining() >= 1); let ret = self.chunk()[0] as i8; self.advance(1); ret } /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x09 hello"[..]; /// assert_eq!(0x0809, buf.get_u16()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u16(&mut self) -> u16 { buf_get_impl!(self, u16::from_be_bytes); } /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x09\x08 hello"[..]; /// assert_eq!(0x0809, buf.get_u16_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u16_le(&mut self) -> u16 { buf_get_impl!(self, u16::from_le_bytes); } /// Gets an unsigned 16 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x08\x09 hello", /// false => b"\x09\x08 hello", /// }; /// assert_eq!(0x0809, buf.get_u16_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u16_ne(&mut self) -> u16 { buf_get_impl!(self, u16::from_ne_bytes); } /// Gets a signed 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x09 hello"[..]; /// assert_eq!(0x0809, buf.get_i16()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i16(&mut self) -> i16 { buf_get_impl!(self, i16::from_be_bytes); } /// Gets a signed 16 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x09\x08 hello"[..]; /// assert_eq!(0x0809, buf.get_i16_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i16_le(&mut self) -> i16 { buf_get_impl!(self, i16::from_le_bytes); } /// Gets a signed 16 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x08\x09 hello", /// false => b"\x09\x08 hello", /// }; /// assert_eq!(0x0809, buf.get_i16_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. 
fn get_i16_ne(&mut self) -> i16 { buf_get_impl!(self, i16::from_ne_bytes); } /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; /// assert_eq!(0x0809A0A1, buf.get_u32()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u32(&mut self) -> u32 { buf_get_impl!(self, u32::from_be_bytes); } /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; /// assert_eq!(0x0809A0A1, buf.get_u32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u32_le(&mut self) -> u32 { buf_get_impl!(self, u32::from_le_bytes); } /// Gets an unsigned 32 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x08\x09\xA0\xA1 hello", /// false => b"\xA1\xA0\x09\x08 hello", /// }; /// assert_eq!(0x0809A0A1, buf.get_u32_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u32_ne(&mut self) -> u32 { buf_get_impl!(self, u32::from_ne_bytes); } /// Gets a signed 32 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; /// assert_eq!(0x0809A0A1, buf.get_i32()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i32(&mut self) -> i32 { buf_get_impl!(self, i32::from_be_bytes); } /// Gets a signed 32 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; /// assert_eq!(0x0809A0A1, buf.get_i32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i32_le(&mut self) -> i32 { buf_get_impl!(self, i32::from_le_bytes); } /// Gets a signed 32 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x08\x09\xA0\xA1 hello", /// false => b"\xA1\xA0\x09\x08 hello", /// }; /// assert_eq!(0x0809A0A1, buf.get_i32_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i32_ne(&mut self) -> i32 { buf_get_impl!(self, i32::from_ne_bytes); } /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; /// assert_eq!(0x0102030405060708, buf.get_u64()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u64(&mut self) -> u64 { buf_get_impl!(self, u64::from_be_bytes); } /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. 
/// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; /// assert_eq!(0x0102030405060708, buf.get_u64_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u64_le(&mut self) -> u64 { buf_get_impl!(self, u64::from_le_bytes); } /// Gets an unsigned 64 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", /// }; /// assert_eq!(0x0102030405060708, buf.get_u64_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u64_ne(&mut self) -> u64 { buf_get_impl!(self, u64::from_ne_bytes); } /// Gets a signed 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; /// assert_eq!(0x0102030405060708, buf.get_i64()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i64(&mut self) -> i64 { buf_get_impl!(self, i64::from_be_bytes); } /// Gets a signed 64 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; /// assert_eq!(0x0102030405060708, buf.get_i64_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i64_le(&mut self) -> i64 { buf_get_impl!(self, i64::from_le_bytes); } /// Gets a signed 64 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", /// }; /// assert_eq!(0x0102030405060708, buf.get_i64_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i64_ne(&mut self) -> i64 { buf_get_impl!(self, i64::from_ne_bytes); } /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u128(&mut self) -> u128 { buf_get_impl!(self, u128::from_be_bytes); } /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. 
fn get_u128_le(&mut self) -> u128 { buf_get_impl!(self, u128::from_le_bytes); } /// Gets an unsigned 128 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", /// }; /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u128_ne(&mut self) -> u128 { buf_get_impl!(self, u128::from_ne_bytes); } /// Gets a signed 128 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i128(&mut self) -> i128 { buf_get_impl!(self, i128::from_be_bytes); } /// Gets a signed 128 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i128_le(&mut self) -> i128 { buf_get_impl!(self, i128::from_le_bytes); } /// Gets a signed 128 bit integer from `self` in native-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", /// }; /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i128_ne(&mut self) -> i128 { buf_get_impl!(self, i128::from_ne_bytes); } /// Gets an unsigned n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03 hello"[..]; /// assert_eq!(0x010203, buf.get_uint(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_uint(&mut self, nbytes: usize) -> u64 { buf_get_impl!(be => self, u64, nbytes); } /// Gets an unsigned n-byte integer from `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x03\x02\x01 hello"[..]; /// assert_eq!(0x010203, buf.get_uint_le(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_uint_le(&mut self, nbytes: usize) -> u64 { buf_get_impl!(le => self, u64, nbytes); } /// Gets an unsigned n-byte integer from `self` in native-endian byte order. /// /// The current position is advanced by `nbytes`. 
/// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03 hello", /// false => b"\x03\x02\x01 hello", /// }; /// assert_eq!(0x010203, buf.get_uint_ne(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_uint_ne(&mut self, nbytes: usize) -> u64 { if cfg!(target_endian = "big") { self.get_uint(nbytes) } else { self.get_uint_le(nbytes) } } /// Gets a signed n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x01\x02\x03 hello"[..]; /// assert_eq!(0x010203, buf.get_int(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_int(&mut self, nbytes: usize) -> i64 { buf_get_impl!(be => self, i64, nbytes); } /// Gets a signed n-byte integer from `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x03\x02\x01 hello"[..]; /// assert_eq!(0x010203, buf.get_int_le(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_int_le(&mut self, nbytes: usize) -> i64 { buf_get_impl!(le => self, i64, nbytes); } /// Gets a signed n-byte integer from `self` in native-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x01\x02\x03 hello", /// false => b"\x03\x02\x01 hello", /// }; /// assert_eq!(0x010203, buf.get_int_ne(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_int_ne(&mut self, nbytes: usize) -> i64 { if cfg!(target_endian = "big") { self.get_int(nbytes) } else { self.get_int_le(nbytes) } } /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..]; /// assert_eq!(1.2f32, buf.get_f32()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f32(&mut self) -> f32 { f32::from_bits(Self::get_u32(self)) } /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..]; /// assert_eq!(1.2f32, buf.get_f32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f32_le(&mut self) -> f32 { f32::from_bits(Self::get_u32_le(self)) } /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x3F\x99\x99\x9A hello", /// false => b"\x9A\x99\x99\x3F hello", /// }; /// assert_eq!(1.2f32, buf.get_f32_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. 
fn get_f32_ne(&mut self) -> f32 { f32::from_bits(Self::get_u32_ne(self)) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from /// `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..]; /// assert_eq!(1.2f64, buf.get_f64()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f64(&mut self) -> f64 { f64::from_bits(Self::get_u64(self)) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from /// `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..]; /// assert_eq!(1.2f64, buf.get_f64_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f64_le(&mut self) -> f64 { f64::from_bits(Self::get_u64_le(self)) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from /// `self` in native-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf: &[u8] = match cfg!(target_endian = "big") { /// true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello", /// false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello", /// }; /// assert_eq!(1.2f64, buf.get_f64_ne()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f64_ne(&mut self) -> f64 { f64::from_bits(Self::get_u64_ne(self)) } /// Consumes `len` bytes inside self and returns new instance of `Bytes` /// with this data. /// /// This function may be optimized by the underlying type to avoid actual /// copies. For example, `Bytes` implementation will do a shallow copy /// (ref-count increment). /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let bytes = (&b"hello world"[..]).copy_to_bytes(5); /// assert_eq!(&bytes[..], &b"hello"[..]); /// ``` fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { use super::BufMut; assert!(len <= self.remaining(), "`len` greater than remaining"); let mut ret = crate::BytesMut::with_capacity(len); ret.put(self.take(len)); ret.freeze() } /// Creates an adaptor which will read at most `limit` bytes from `self`. /// /// This function returns a new instance of `Buf` which will read at most /// `limit` bytes. /// /// # Examples /// /// ``` /// use bytes::{Buf, BufMut}; /// /// let mut buf = b"hello world"[..].take(5); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(dst, b"hello"); /// /// let mut buf = buf.into_inner(); /// dst.clear(); /// dst.put(&mut buf); /// assert_eq!(dst, b" world"); /// ``` fn take(self, limit: usize) -> Take where Self: Sized, { take::new(self, limit) } /// Creates an adaptor which will chain this buffer with another. /// /// The returned `Buf` instance will first consume all bytes from `self`. /// Afterwards the output is equivalent to the output of next. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut chain = b"hello "[..].chain(&b"world"[..]); /// /// let full = chain.copy_to_bytes(11); /// assert_eq!(full.chunk(), b"hello world"); /// ``` fn chain(self, next: U) -> Chain where Self: Sized, { Chain::new(self, next) } /// Creates an adaptor which implements the `Read` trait for `self`. 
/// /// This function returns a new value which implements `Read` by adapting /// the `Read` trait functions to the `Buf` trait functions. Given that /// `Buf` operations are infallible, none of the `Read` functions will /// return with `Err`. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf}; /// use std::io::Read; /// /// let buf = Bytes::from("hello world"); /// /// let mut reader = buf.reader(); /// let mut dst = [0; 1024]; /// /// let num = reader.read(&mut dst).unwrap(); /// /// assert_eq!(11, num); /// assert_eq!(&dst[..11], &b"hello world"[..]); /// ``` #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn reader(self) -> Reader where Self: Sized, { reader::new(self) } } macro_rules! deref_forward_buf { () => { fn remaining(&self) -> usize { (**self).remaining() } fn chunk(&self) -> &[u8] { (**self).chunk() } #[cfg(feature = "std")] fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize { (**self).chunks_vectored(dst) } fn advance(&mut self, cnt: usize) { (**self).advance(cnt) } fn has_remaining(&self) -> bool { (**self).has_remaining() } fn copy_to_slice(&mut self, dst: &mut [u8]) { (**self).copy_to_slice(dst) } fn get_u8(&mut self) -> u8 { (**self).get_u8() } fn get_i8(&mut self) -> i8 { (**self).get_i8() } fn get_u16(&mut self) -> u16 { (**self).get_u16() } fn get_u16_le(&mut self) -> u16 { (**self).get_u16_le() } fn get_u16_ne(&mut self) -> u16 { (**self).get_u16_ne() } fn get_i16(&mut self) -> i16 { (**self).get_i16() } fn get_i16_le(&mut self) -> i16 { (**self).get_i16_le() } fn get_i16_ne(&mut self) -> i16 { (**self).get_i16_ne() } fn get_u32(&mut self) -> u32 { (**self).get_u32() } fn get_u32_le(&mut self) -> u32 { (**self).get_u32_le() } fn get_u32_ne(&mut self) -> u32 { (**self).get_u32_ne() } fn get_i32(&mut self) -> i32 { (**self).get_i32() } fn get_i32_le(&mut self) -> i32 { (**self).get_i32_le() } fn get_i32_ne(&mut self) -> i32 { (**self).get_i32_ne() } fn get_u64(&mut self) -> u64 { (**self).get_u64() } fn get_u64_le(&mut self) -> u64 { (**self).get_u64_le() } fn get_u64_ne(&mut self) -> u64 { (**self).get_u64_ne() } fn get_i64(&mut self) -> i64 { (**self).get_i64() } fn get_i64_le(&mut self) -> i64 { (**self).get_i64_le() } fn get_i64_ne(&mut self) -> i64 { (**self).get_i64_ne() } fn get_uint(&mut self, nbytes: usize) -> u64 { (**self).get_uint(nbytes) } fn get_uint_le(&mut self, nbytes: usize) -> u64 { (**self).get_uint_le(nbytes) } fn get_uint_ne(&mut self, nbytes: usize) -> u64 { (**self).get_uint_ne(nbytes) } fn get_int(&mut self, nbytes: usize) -> i64 { (**self).get_int(nbytes) } fn get_int_le(&mut self, nbytes: usize) -> i64 { (**self).get_int_le(nbytes) } fn get_int_ne(&mut self, nbytes: usize) -> i64 { (**self).get_int_ne(nbytes) } fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { (**self).copy_to_bytes(len) } }; } impl Buf for &mut T { deref_forward_buf!(); } impl Buf for Box { deref_forward_buf!(); } impl Buf for &[u8] { #[inline] fn remaining(&self) -> usize { self.len() } #[inline] fn chunk(&self) -> &[u8] { self } #[inline] fn advance(&mut self, cnt: usize) { *self = &self[cnt..]; } } #[cfg(feature = "std")] impl> Buf for std::io::Cursor { fn remaining(&self) -> usize { let len = self.get_ref().as_ref().len(); let pos = self.position(); if pos >= len as u64 { return 0; } len - pos as usize } fn chunk(&self) -> &[u8] { let len = self.get_ref().as_ref().len(); let pos = self.position(); if pos >= len as u64 { return &[]; } &self.get_ref().as_ref()[pos as usize..] 
} fn advance(&mut self, cnt: usize) { let pos = (self.position() as usize) .checked_add(cnt) .expect("overflow"); assert!(pos <= self.get_ref().as_ref().len()); self.set_position(pos as u64); } } // The existence of this function makes the compiler catch if the Buf // trait is "object-safe" or not. fn _assert_trait_object(_b: &dyn Buf) {} bytes-1.5.0/src/buf/buf_mut.rs000064400000000000000000001167541046102023000143450ustar 00000000000000use crate::buf::{limit, Chain, Limit, UninitSlice}; #[cfg(feature = "std")] use crate::buf::{writer, Writer}; use core::{cmp, mem, ptr, usize}; use alloc::{boxed::Box, vec::Vec}; /// A trait for values that provide sequential write access to bytes. /// /// Write bytes to a buffer /// /// A buffer stores bytes in memory such that write operations are infallible. /// The underlying storage may or may not be in contiguous memory. A `BufMut` /// value is a cursor into the buffer. Writing to `BufMut` advances the cursor /// position. /// /// The simplest `BufMut` is a `Vec`. /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// /// buf.put(&b"hello world"[..]); /// /// assert_eq!(buf, b"hello world"); /// ``` pub unsafe trait BufMut { /// Returns the number of bytes that can be written from the current /// position until the end of the buffer is reached. /// /// This value is greater than or equal to the length of the slice returned /// by `chunk_mut()`. /// /// Writing to a `BufMut` may involve allocating more memory on the fly. /// Implementations may fail before reaching the number of bytes indicated /// by this method if they encounter an allocation failure. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut dst = [0; 10]; /// let mut buf = &mut dst[..]; /// /// let original_remaining = buf.remaining_mut(); /// buf.put(&b"hello"[..]); /// /// assert_eq!(original_remaining - 5, buf.remaining_mut()); /// ``` /// /// # Implementer notes /// /// Implementations of `remaining_mut` should ensure that the return value /// does not change unless a call is made to `advance_mut` or any other /// function that is documented to change the `BufMut`'s current position. /// /// # Note /// /// `remaining_mut` may return value smaller than actual available space. fn remaining_mut(&self) -> usize; /// Advance the internal cursor of the BufMut /// /// The next call to `chunk_mut` will return a slice starting `cnt` bytes /// further into the underlying buffer. /// /// This function is unsafe because there is no guarantee that the bytes /// being advanced past have been initialized. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = Vec::with_capacity(16); /// /// // Write some data /// buf.chunk_mut()[0..2].copy_from_slice(b"he"); /// unsafe { buf.advance_mut(2) }; /// /// // write more bytes /// buf.chunk_mut()[0..3].copy_from_slice(b"llo"); /// /// unsafe { buf.advance_mut(3); } /// /// assert_eq!(5, buf.len()); /// assert_eq!(buf, b"hello"); /// ``` /// /// # Panics /// /// This function **may** panic if `cnt > self.remaining_mut()`. /// /// # Implementer notes /// /// It is recommended for implementations of `advance_mut` to panic if /// `cnt > self.remaining_mut()`. If the implementation does not panic, /// the call must behave as if `cnt == self.remaining_mut()`. /// /// A call with `cnt == 0` should never panic and be a no-op. unsafe fn advance_mut(&mut self, cnt: usize); /// Returns true if there is space in `self` for more bytes. /// /// This is equivalent to `self.remaining_mut() != 0`. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut dst = [0; 5]; /// let mut buf = &mut dst[..]; /// /// assert!(buf.has_remaining_mut()); /// /// buf.put(&b"hello"[..]); /// /// assert!(!buf.has_remaining_mut()); /// ``` fn has_remaining_mut(&self) -> bool { self.remaining_mut() > 0 } /// Returns a mutable slice starting at the current BufMut position and of /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the /// whole remainder of the buffer (this allows non-continuous implementation). /// /// This is a lower level function. Most operations are done with other /// functions. /// /// The returned byte slice may represent uninitialized memory. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = Vec::with_capacity(16); /// /// unsafe { /// // MaybeUninit::as_mut_ptr /// buf.chunk_mut()[0..].as_mut_ptr().write(b'h'); /// buf.chunk_mut()[1..].as_mut_ptr().write(b'e'); /// /// buf.advance_mut(2); /// /// buf.chunk_mut()[0..].as_mut_ptr().write(b'l'); /// buf.chunk_mut()[1..].as_mut_ptr().write(b'l'); /// buf.chunk_mut()[2..].as_mut_ptr().write(b'o'); /// /// buf.advance_mut(3); /// } /// /// assert_eq!(5, buf.len()); /// assert_eq!(buf, b"hello"); /// ``` /// /// # Implementer notes /// /// This function should never panic. `chunk_mut` should return an empty /// slice **if and only if** `remaining_mut()` returns 0. In other words, /// `chunk_mut()` returning an empty slice implies that `remaining_mut()` will /// return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will /// return an empty slice. /// /// This function may trigger an out-of-memory abort if it tries to allocate /// memory and fails to do so. // The `chunk_mut` method was previously called `bytes_mut`. This alias makes the // rename more easily discoverable. #[cfg_attr(docsrs, doc(alias = "bytes_mut"))] fn chunk_mut(&mut self) -> &mut UninitSlice; /// Transfer bytes into `self` from `src` and advance the cursor by the /// number of bytes written. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// /// buf.put_u8(b'h'); /// buf.put(&b"ello"[..]); /// buf.put(&b" world"[..]); /// /// assert_eq!(buf, b"hello world"); /// ``` /// /// # Panics /// /// Panics if `self` does not have enough capacity to contain `src`. fn put(&mut self, mut src: T) where Self: Sized, { assert!(self.remaining_mut() >= src.remaining()); while src.has_remaining() { let l; unsafe { let s = src.chunk(); let d = self.chunk_mut(); l = cmp::min(s.len(), d.len()); ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l); } src.advance(l); unsafe { self.advance_mut(l); } } } /// Transfer bytes into `self` from `src` and advance the cursor by the /// number of bytes written. /// /// `self` must have enough remaining capacity to contain all of `src`. 
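    ///
    /// # Examples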
/// /// ``` /// use bytes::BufMut; /// /// let mut dst = [0; 6]; /// /// { /// let mut buf = &mut dst[..]; /// buf.put_slice(b"hello"); /// /// assert_eq!(1, buf.remaining_mut()); /// } /// /// assert_eq!(b"hello\0", &dst); /// ``` fn put_slice(&mut self, src: &[u8]) { let mut off = 0; assert!( self.remaining_mut() >= src.len(), "buffer overflow; remaining = {}; src = {}", self.remaining_mut(), src.len() ); while off < src.len() { let cnt; unsafe { let dst = self.chunk_mut(); cnt = cmp::min(dst.len(), src.len() - off); ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt); off += cnt; } unsafe { self.advance_mut(cnt); } } } /// Put `cnt` bytes `val` into `self`. /// /// Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster. /// /// `self` must have at least `cnt` remaining capacity. /// /// ``` /// use bytes::BufMut; /// /// let mut dst = [0; 6]; /// /// { /// let mut buf = &mut dst[..]; /// buf.put_bytes(b'a', 4); /// /// assert_eq!(2, buf.remaining_mut()); /// } /// /// assert_eq!(b"aaaa\0\0", &dst); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_bytes(&mut self, val: u8, cnt: usize) { for _ in 0..cnt { self.put_u8(val); } } /// Writes an unsigned 8 bit integer to `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u8(0x01); /// assert_eq!(buf, b"\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u8(&mut self, n: u8) { let src = [n]; self.put_slice(&src); } /// Writes a signed 8 bit integer to `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i8(0x01); /// assert_eq!(buf, b"\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i8(&mut self, n: i8) { let src = [n as u8]; self.put_slice(&src) } /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u16(0x0809); /// assert_eq!(buf, b"\x08\x09"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u16(&mut self, n: u16) { self.put_slice(&n.to_be_bytes()) } /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u16_le(0x0809); /// assert_eq!(buf, b"\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u16_le(&mut self, n: u16) { self.put_slice(&n.to_le_bytes()) } /// Writes an unsigned 16 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u16_ne(0x0809); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x08\x09"); /// } else { /// assert_eq!(buf, b"\x09\x08"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. 
fn put_u16_ne(&mut self, n: u16) { self.put_slice(&n.to_ne_bytes()) } /// Writes a signed 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i16(0x0809); /// assert_eq!(buf, b"\x08\x09"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i16(&mut self, n: i16) { self.put_slice(&n.to_be_bytes()) } /// Writes a signed 16 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i16_le(0x0809); /// assert_eq!(buf, b"\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i16_le(&mut self, n: i16) { self.put_slice(&n.to_le_bytes()) } /// Writes a signed 16 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i16_ne(0x0809); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x08\x09"); /// } else { /// assert_eq!(buf, b"\x09\x08"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i16_ne(&mut self, n: i16) { self.put_slice(&n.to_ne_bytes()) } /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u32(0x0809A0A1); /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u32(&mut self, n: u32) { self.put_slice(&n.to_be_bytes()) } /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u32_le(0x0809A0A1); /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u32_le(&mut self, n: u32) { self.put_slice(&n.to_le_bytes()) } /// Writes an unsigned 32 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u32_ne(0x0809A0A1); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// } else { /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u32_ne(&mut self, n: u32) { self.put_slice(&n.to_ne_bytes()) } /// Writes a signed 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i32(0x0809A0A1); /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i32(&mut self, n: i32) { self.put_slice(&n.to_be_bytes()) } /// Writes a signed 32 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 4. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i32_le(0x0809A0A1); /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i32_le(&mut self, n: i32) { self.put_slice(&n.to_le_bytes()) } /// Writes a signed 32 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i32_ne(0x0809A0A1); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// } else { /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i32_ne(&mut self, n: i32) { self.put_slice(&n.to_ne_bytes()) } /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u64(0x0102030405060708); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u64(&mut self, n: u64) { self.put_slice(&n.to_be_bytes()) } /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u64_le(0x0102030405060708); /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u64_le(&mut self, n: u64) { self.put_slice(&n.to_le_bytes()) } /// Writes an unsigned 64 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u64_ne(0x0102030405060708); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// } else { /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u64_ne(&mut self, n: u64) { self.put_slice(&n.to_ne_bytes()) } /// Writes a signed 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i64(0x0102030405060708); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i64(&mut self, n: i64) { self.put_slice(&n.to_be_bytes()) } /// Writes a signed 64 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i64_le(0x0102030405060708); /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i64_le(&mut self, n: i64) { self.put_slice(&n.to_le_bytes()) } /// Writes a signed 64 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 8. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i64_ne(0x0102030405060708); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// } else { /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i64_ne(&mut self, n: i64) { self.put_slice(&n.to_ne_bytes()) } /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u128(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u128(&mut self, n: u128) { self.put_slice(&n.to_be_bytes()) } /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u128_le(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u128_le(&mut self, n: u128) { self.put_slice(&n.to_le_bytes()) } /// Writes an unsigned 128 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u128_ne(0x01020304050607080910111213141516); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// } else { /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u128_ne(&mut self, n: u128) { self.put_slice(&n.to_ne_bytes()) } /// Writes a signed 128 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i128(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i128(&mut self, n: i128) { self.put_slice(&n.to_be_bytes()) } /// Writes a signed 128 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i128_le(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i128_le(&mut self, n: i128) { self.put_slice(&n.to_le_bytes()) } /// Writes a signed 128 bit integer to `self` in native-endian byte order. /// /// The current position is advanced by 16. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i128_ne(0x01020304050607080910111213141516); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// } else { /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i128_ne(&mut self, n: i128) { self.put_slice(&n.to_ne_bytes()) } /// Writes an unsigned n-byte integer to `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_uint(0x010203, 3); /// assert_eq!(buf, b"\x01\x02\x03"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_uint(&mut self, n: u64, nbytes: usize) { self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); } /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_uint_le(0x010203, 3); /// assert_eq!(buf, b"\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_uint_le(&mut self, n: u64, nbytes: usize) { self.put_slice(&n.to_le_bytes()[0..nbytes]); } /// Writes an unsigned n-byte integer to `self` in the native-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_uint_ne(0x010203, 3); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03"); /// } else { /// assert_eq!(buf, b"\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_uint_ne(&mut self, n: u64, nbytes: usize) { if cfg!(target_endian = "big") { self.put_uint(n, nbytes) } else { self.put_uint_le(n, nbytes) } } /// Writes low `nbytes` of a signed integer to `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_int(0x0504010203, 3); /// assert_eq!(buf, b"\x01\x02\x03"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. fn put_int(&mut self, n: i64, nbytes: usize) { self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); } /// Writes low `nbytes` of a signed integer to `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_int_le(0x0504010203, 3); /// assert_eq!(buf, b"\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. fn put_int_le(&mut self, n: i64, nbytes: usize) { self.put_slice(&n.to_le_bytes()[0..nbytes]); } /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order. /// /// The current position is advanced by `nbytes`. 
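    ///
    /// Only the low-order `nbytes` bytes of `n` are written, so the value
    /// round-trips through `Buf::get_int_ne` only if it fits in an
    /// `nbytes`-byte signed (two's complement) integer.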
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_int_ne(0x010203, 3); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x01\x02\x03"); /// } else { /// assert_eq!(buf, b"\x03\x02\x01"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. fn put_int_ne(&mut self, n: i64, nbytes: usize) { if cfg!(target_endian = "big") { self.put_int(n, nbytes) } else { self.put_int_le(n, nbytes) } } /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f32(1.2f32); /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f32(&mut self, n: f32) { self.put_u32(n.to_bits()); } /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f32_le(1.2f32); /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f32_le(&mut self, n: f32) { self.put_u32_le(n.to_bits()); } /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in native-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f32_ne(1.2f32); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); /// } else { /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f32_ne(&mut self, n: f32) { self.put_u32_ne(n.to_bits()); } /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f64(1.2f64); /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f64(&mut self, n: f64) { self.put_u64(n.to_bits()); } /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f64_le(1.2f64); /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f64_le(&mut self, n: f64) { self.put_u64_le(n.to_bits()); } /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in native-endian byte order. /// /// The current position is advanced by 8. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f64_ne(1.2f64); /// if cfg!(target_endian = "big") { /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); /// } else { /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f64_ne(&mut self, n: f64) { self.put_u64_ne(n.to_bits()); } /// Creates an adaptor which can write at most `limit` bytes to `self`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let arr = &mut [0u8; 128][..]; /// assert_eq!(arr.remaining_mut(), 128); /// /// let dst = arr.limit(10); /// assert_eq!(dst.remaining_mut(), 10); /// ``` fn limit(self, limit: usize) -> Limit where Self: Sized, { limit::new(self, limit) } /// Creates an adaptor which implements the `Write` trait for `self`. /// /// This function returns a new value which implements `Write` by adapting /// the `Write` trait functions to the `BufMut` trait functions. Given that /// `BufMut` operations are infallible, none of the `Write` functions will /// return with `Err`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// use std::io::Write; /// /// let mut buf = vec![].writer(); /// /// let num = buf.write(&b"hello world"[..]).unwrap(); /// assert_eq!(11, num); /// /// let buf = buf.into_inner(); /// /// assert_eq!(*buf, b"hello world"[..]); /// ``` #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn writer(self) -> Writer where Self: Sized, { writer::new(self) } /// Creates an adapter which will chain this buffer with another. /// /// The returned `BufMut` instance will first write to all bytes from /// `self`. Afterwards, it will write to `next`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut a = [0u8; 5]; /// let mut b = [0u8; 6]; /// /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]); /// /// chain.put_slice(b"hello world"); /// /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b" world"); /// ``` fn chain_mut(self, next: U) -> Chain where Self: Sized, { Chain::new(self, next) } } macro_rules! 
deref_forward_bufmut { () => { fn remaining_mut(&self) -> usize { (**self).remaining_mut() } fn chunk_mut(&mut self) -> &mut UninitSlice { (**self).chunk_mut() } unsafe fn advance_mut(&mut self, cnt: usize) { (**self).advance_mut(cnt) } fn put_slice(&mut self, src: &[u8]) { (**self).put_slice(src) } fn put_u8(&mut self, n: u8) { (**self).put_u8(n) } fn put_i8(&mut self, n: i8) { (**self).put_i8(n) } fn put_u16(&mut self, n: u16) { (**self).put_u16(n) } fn put_u16_le(&mut self, n: u16) { (**self).put_u16_le(n) } fn put_u16_ne(&mut self, n: u16) { (**self).put_u16_ne(n) } fn put_i16(&mut self, n: i16) { (**self).put_i16(n) } fn put_i16_le(&mut self, n: i16) { (**self).put_i16_le(n) } fn put_i16_ne(&mut self, n: i16) { (**self).put_i16_ne(n) } fn put_u32(&mut self, n: u32) { (**self).put_u32(n) } fn put_u32_le(&mut self, n: u32) { (**self).put_u32_le(n) } fn put_u32_ne(&mut self, n: u32) { (**self).put_u32_ne(n) } fn put_i32(&mut self, n: i32) { (**self).put_i32(n) } fn put_i32_le(&mut self, n: i32) { (**self).put_i32_le(n) } fn put_i32_ne(&mut self, n: i32) { (**self).put_i32_ne(n) } fn put_u64(&mut self, n: u64) { (**self).put_u64(n) } fn put_u64_le(&mut self, n: u64) { (**self).put_u64_le(n) } fn put_u64_ne(&mut self, n: u64) { (**self).put_u64_ne(n) } fn put_i64(&mut self, n: i64) { (**self).put_i64(n) } fn put_i64_le(&mut self, n: i64) { (**self).put_i64_le(n) } fn put_i64_ne(&mut self, n: i64) { (**self).put_i64_ne(n) } }; } unsafe impl BufMut for &mut T { deref_forward_bufmut!(); } unsafe impl BufMut for Box { deref_forward_bufmut!(); } unsafe impl BufMut for &mut [u8] { #[inline] fn remaining_mut(&self) -> usize { self.len() } #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { // UninitSlice is repr(transparent), so safe to transmute unsafe { &mut *(*self as *mut [u8] as *mut _) } } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { // Lifetime dance taken from `impl Write for &mut [u8]`. let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); *self = b; } #[inline] fn put_slice(&mut self, src: &[u8]) { self[..src.len()].copy_from_slice(src); unsafe { self.advance_mut(src.len()); } } fn put_bytes(&mut self, val: u8, cnt: usize) { assert!(self.remaining_mut() >= cnt); unsafe { ptr::write_bytes(self.as_mut_ptr(), val, cnt); self.advance_mut(cnt); } } } unsafe impl BufMut for &mut [core::mem::MaybeUninit] { #[inline] fn remaining_mut(&self) -> usize { self.len() } #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { UninitSlice::uninit(self) } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { // Lifetime dance taken from `impl Write for &mut [u8]`. 
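        // `mem::replace` moves the original slice out of `*self` (leaving an
        // empty slice behind) so it can be split by value; only the tail past
        // `cnt` is written back, advancing the cursor without running afoul
        // of the borrow checker when reborrowing through `*self`.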
let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); *self = b; } #[inline] fn put_slice(&mut self, src: &[u8]) { self.chunk_mut()[..src.len()].copy_from_slice(src); unsafe { self.advance_mut(src.len()); } } fn put_bytes(&mut self, val: u8, cnt: usize) { assert!(self.remaining_mut() >= cnt); unsafe { ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt); self.advance_mut(cnt); } } } unsafe impl BufMut for Vec { #[inline] fn remaining_mut(&self) -> usize { // A vector can never have more than isize::MAX bytes core::isize::MAX as usize - self.len() } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { let len = self.len(); let remaining = self.capacity() - len; assert!( cnt <= remaining, "cannot advance past `remaining_mut`: {:?} <= {:?}", cnt, remaining ); self.set_len(len + cnt); } #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { if self.capacity() == self.len() { self.reserve(64); // Grow the vec } let cap = self.capacity(); let len = self.len(); let ptr = self.as_mut_ptr(); unsafe { &mut UninitSlice::from_raw_parts_mut(ptr, cap)[len..] } } // Specialize these methods so they can skip checking `remaining_mut` // and `advance_mut`. fn put(&mut self, mut src: T) where Self: Sized, { // In case the src isn't contiguous, reserve upfront self.reserve(src.remaining()); while src.has_remaining() { let l; // a block to contain the src.bytes() borrow { let s = src.chunk(); l = s.len(); self.extend_from_slice(s); } src.advance(l); } } #[inline] fn put_slice(&mut self, src: &[u8]) { self.extend_from_slice(src); } fn put_bytes(&mut self, val: u8, cnt: usize) { let new_len = self.len().checked_add(cnt).unwrap(); self.resize(new_len, val); } } // The existence of this function makes the compiler catch if the BufMut // trait is "object-safe" or not. fn _assert_trait_object(_b: &dyn BufMut) {} bytes-1.5.0/src/buf/chain.rs000064400000000000000000000127261046102023000137600ustar 00000000000000use crate::buf::{IntoIter, UninitSlice}; use crate::{Buf, BufMut, Bytes}; #[cfg(feature = "std")] use std::io::IoSlice; /// A `Chain` sequences two buffers. /// /// `Chain` is an adapter that links two underlying buffers and provides a /// continuous view across both buffers. It is able to sequence either immutable /// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values). /// /// This struct is generally created by calling [`Buf::chain`]. Please see that /// function's documentation for more detail. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf}; /// /// let mut buf = (&b"hello "[..]) /// .chain(&b"world"[..]); /// /// let full: Bytes = buf.copy_to_bytes(11); /// assert_eq!(full[..], b"hello world"[..]); /// ``` /// /// [`Buf::chain`]: trait.Buf.html#method.chain /// [`Buf`]: trait.Buf.html /// [`BufMut`]: trait.BufMut.html #[derive(Debug)] pub struct Chain { a: T, b: U, } impl Chain { /// Creates a new `Chain` sequencing the provided values. pub(crate) fn new(a: T, b: U) -> Chain { Chain { a, b } } /// Gets a reference to the first underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let buf = (&b"hello"[..]) /// .chain(&b"world"[..]); /// /// assert_eq!(buf.first_ref()[..], b"hello"[..]); /// ``` pub fn first_ref(&self) -> &T { &self.a } /// Gets a mutable reference to the first underlying `Buf`. 
/// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = (&b"hello"[..]) /// .chain(&b"world"[..]); /// /// buf.first_mut().advance(1); /// /// let full = buf.copy_to_bytes(9); /// assert_eq!(full, b"elloworld"[..]); /// ``` pub fn first_mut(&mut self) -> &mut T { &mut self.a } /// Gets a reference to the last underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let buf = (&b"hello"[..]) /// .chain(&b"world"[..]); /// /// assert_eq!(buf.last_ref()[..], b"world"[..]); /// ``` pub fn last_ref(&self) -> &U { &self.b } /// Gets a mutable reference to the last underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let mut buf = (&b"hello "[..]) /// .chain(&b"world"[..]); /// /// buf.last_mut().advance(1); /// /// let full = buf.copy_to_bytes(10); /// assert_eq!(full, b"hello orld"[..]); /// ``` pub fn last_mut(&mut self) -> &mut U { &mut self.b } /// Consumes this `Chain`, returning the underlying values. /// /// # Examples /// /// ``` /// use bytes::Buf; /// /// let chain = (&b"hello"[..]) /// .chain(&b"world"[..]); /// /// let (first, last) = chain.into_inner(); /// assert_eq!(first[..], b"hello"[..]); /// assert_eq!(last[..], b"world"[..]); /// ``` pub fn into_inner(self) -> (T, U) { (self.a, self.b) } } impl Buf for Chain where T: Buf, U: Buf, { fn remaining(&self) -> usize { self.a.remaining().checked_add(self.b.remaining()).unwrap() } fn chunk(&self) -> &[u8] { if self.a.has_remaining() { self.a.chunk() } else { self.b.chunk() } } fn advance(&mut self, mut cnt: usize) { let a_rem = self.a.remaining(); if a_rem != 0 { if a_rem >= cnt { self.a.advance(cnt); return; } // Consume what is left of a self.a.advance(a_rem); cnt -= a_rem; } self.b.advance(cnt); } #[cfg(feature = "std")] fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { let mut n = self.a.chunks_vectored(dst); n += self.b.chunks_vectored(&mut dst[n..]); n } fn copy_to_bytes(&mut self, len: usize) -> Bytes { let a_rem = self.a.remaining(); if a_rem >= len { self.a.copy_to_bytes(len) } else if a_rem == 0 { self.b.copy_to_bytes(len) } else { assert!( len - a_rem <= self.b.remaining(), "`len` greater than remaining" ); let mut ret = crate::BytesMut::with_capacity(len); ret.put(&mut self.a); ret.put((&mut self.b).take(len - a_rem)); ret.freeze() } } } unsafe impl BufMut for Chain where T: BufMut, U: BufMut, { fn remaining_mut(&self) -> usize { self.a .remaining_mut() .saturating_add(self.b.remaining_mut()) } fn chunk_mut(&mut self) -> &mut UninitSlice { if self.a.has_remaining_mut() { self.a.chunk_mut() } else { self.b.chunk_mut() } } unsafe fn advance_mut(&mut self, mut cnt: usize) { let a_rem = self.a.remaining_mut(); if a_rem != 0 { if a_rem >= cnt { self.a.advance_mut(cnt); return; } // Consume what is left of a self.a.advance_mut(a_rem); cnt -= a_rem; } self.b.advance_mut(cnt); } } impl IntoIterator for Chain where T: Buf, U: Buf, { type Item = u8; type IntoIter = IntoIter>; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } bytes-1.5.0/src/buf/iter.rs000064400000000000000000000056661046102023000136460ustar 00000000000000use crate::Buf; /// Iterator over the bytes contained by the buffer. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use bytes::Bytes; /// /// let buf = Bytes::from(&b"abc"[..]); /// let mut iter = buf.into_iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// assert_eq!(iter.next(), Some(b'b')); /// assert_eq!(iter.next(), Some(b'c')); /// assert_eq!(iter.next(), None); /// ``` /// /// [`iter`]: trait.Buf.html#method.iter /// [`Buf`]: trait.Buf.html #[derive(Debug)] pub struct IntoIter { inner: T, } impl IntoIter { /// Creates an iterator over the bytes contained by the buffer. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let buf = Bytes::from_static(b"abc"); /// let mut iter = buf.into_iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// assert_eq!(iter.next(), Some(b'b')); /// assert_eq!(iter.next(), Some(b'c')); /// assert_eq!(iter.next(), None); /// ``` pub fn new(inner: T) -> IntoIter { IntoIter { inner } } /// Consumes this `IntoIter`, returning the underlying value. /// /// # Examples /// /// ```rust /// use bytes::{Buf, Bytes}; /// /// let buf = Bytes::from(&b"abc"[..]); /// let mut iter = buf.into_iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// let buf = iter.into_inner(); /// assert_eq!(2, buf.remaining()); /// ``` pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, Bytes}; /// /// let buf = Bytes::from(&b"abc"[..]); /// let mut iter = buf.into_iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// assert_eq!(2, iter.get_ref().remaining()); /// ``` pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BytesMut}; /// /// let buf = BytesMut::from(&b"abc"[..]); /// let mut iter = buf.into_iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// iter.get_mut().advance(1); /// /// assert_eq!(iter.next(), Some(b'c')); /// ``` pub fn get_mut(&mut self) -> &mut T { &mut self.inner } } impl Iterator for IntoIter { type Item = u8; fn next(&mut self) -> Option { if !self.inner.has_remaining() { return None; } let b = self.inner.chunk()[0]; self.inner.advance(1); Some(b) } fn size_hint(&self) -> (usize, Option) { let rem = self.inner.remaining(); (rem, Some(rem)) } } impl ExactSizeIterator for IntoIter {} bytes-1.5.0/src/buf/limit.rs000064400000000000000000000036371046102023000140150ustar 00000000000000use crate::buf::UninitSlice; use crate::BufMut; use core::cmp; /// A `BufMut` adapter which limits the amount of bytes that can be written /// to an underlying buffer. #[derive(Debug)] pub struct Limit { inner: T, limit: usize, } pub(super) fn new(inner: T, limit: usize) -> Limit { Limit { inner, limit } } impl Limit { /// Consumes this `Limit`, returning the underlying value. pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the maximum number of bytes that can be written /// /// # Note /// /// If the inner `BufMut` has fewer bytes than indicated by this method then /// that is the actual number of available bytes. 
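    ///
    /// # Examples
    ///
    /// A minimal sketch of how the reported limit shrinks as data is
    /// written through the adapter:
    ///
    /// ```
    /// use bytes::BufMut;
    ///
    /// let arr = &mut [0u8; 64][..];
    /// let mut dst = arr.limit(10);
    ///
    /// assert_eq!(10, dst.limit());
    /// dst.put_u8(b'a');
    /// assert_eq!(9, dst.limit());
    /// ```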
pub fn limit(&self) -> usize { self.limit } /// Sets the maximum number of bytes that can be written. /// /// # Note /// /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual /// number of available bytes. pub fn set_limit(&mut self, lim: usize) { self.limit = lim } } unsafe impl BufMut for Limit { fn remaining_mut(&self) -> usize { cmp::min(self.inner.remaining_mut(), self.limit) } fn chunk_mut(&mut self) -> &mut UninitSlice { let bytes = self.inner.chunk_mut(); let end = cmp::min(bytes.len(), self.limit); &mut bytes[..end] } unsafe fn advance_mut(&mut self, cnt: usize) { assert!(cnt <= self.limit); self.inner.advance_mut(cnt); self.limit -= cnt; } } bytes-1.5.0/src/buf/mod.rs000064400000000000000000000022551046102023000134510ustar 00000000000000//! Utilities for working with buffers. //! //! A buffer is any structure that contains a sequence of bytes. The bytes may //! or may not be stored in contiguous memory. This module contains traits used //! to abstract over buffers as well as utilities for working with buffer types. //! //! # `Buf`, `BufMut` //! //! These are the two foundational traits for abstractly working with buffers. //! They can be thought as iterators for byte structures. They offer additional //! performance over `Iterator` by providing an API optimized for byte slices. //! //! See [`Buf`] and [`BufMut`] for more details. //! //! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) //! [`Buf`]: trait.Buf.html //! [`BufMut`]: trait.BufMut.html mod buf_impl; mod buf_mut; mod chain; mod iter; mod limit; #[cfg(feature = "std")] mod reader; mod take; mod uninit_slice; mod vec_deque; #[cfg(feature = "std")] mod writer; pub use self::buf_impl::Buf; pub use self::buf_mut::BufMut; pub use self::chain::Chain; pub use self::iter::IntoIter; pub use self::limit::Limit; pub use self::take::Take; pub use self::uninit_slice::UninitSlice; #[cfg(feature = "std")] pub use self::{reader::Reader, writer::Writer}; bytes-1.5.0/src/buf/reader.rs000064400000000000000000000036051046102023000141340ustar 00000000000000use crate::Buf; use std::{cmp, io}; /// A `Buf` adapter which implements `io::Read` for the inner value. /// /// This struct is generally created by calling `reader()` on `Buf`. See /// documentation of [`reader()`](trait.Buf.html#method.reader) for more /// details. #[derive(Debug)] pub struct Reader { buf: B, } pub fn new(buf: B) -> Reader { Reader { buf } } impl Reader { /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// /// let buf = b"hello world".reader(); /// /// assert_eq!(b"hello world", buf.get_ref()); /// ``` pub fn get_ref(&self) -> &B { &self.buf } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. pub fn get_mut(&mut self) -> &mut B { &mut self.buf } /// Consumes this `Reader`, returning the underlying value. 
/// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io; /// /// let mut buf = b"hello world".reader(); /// let mut dst = vec![]; /// /// io::copy(&mut buf, &mut dst).unwrap(); /// /// let buf = buf.into_inner(); /// assert_eq!(0, buf.remaining()); /// ``` pub fn into_inner(self) -> B { self.buf } } impl io::Read for Reader { fn read(&mut self, dst: &mut [u8]) -> io::Result { let len = cmp::min(self.buf.remaining(), dst.len()); Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); Ok(len) } } impl io::BufRead for Reader { fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(self.buf.chunk()) } fn consume(&mut self, amt: usize) { self.buf.advance(amt) } } bytes-1.5.0/src/buf/take.rs000064400000000000000000000071461046102023000136220ustar 00000000000000use crate::{Buf, Bytes}; use core::cmp; /// A `Buf` adapter which limits the bytes read from an underlying buffer. /// /// This struct is generally created by calling `take()` on `Buf`. See /// documentation of [`take()`](trait.Buf.html#method.take) for more details. #[derive(Debug)] pub struct Take { inner: T, limit: usize, } pub fn new(inner: T, limit: usize) -> Take { Take { inner, limit } } impl Take { /// Consumes this `Take`, returning the underlying value. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// /// let mut buf = b"hello world".take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// let mut buf = buf.into_inner(); /// /// dst.clear(); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo world"[..]); /// ``` pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// /// let buf = b"hello world".take(2); /// /// assert_eq!(11, buf.get_ref().remaining()); /// ``` pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// /// let mut buf = b"hello world".take(2); /// let mut dst = vec![]; /// /// buf.get_mut().advance(2); /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"ll"[..]); /// ``` pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than indicated by this method then /// that is the actual number of available bytes. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// /// let mut buf = b"hello world".take(2); /// /// assert_eq!(2, buf.limit()); /// assert_eq!(b'h', buf.get_u8()); /// assert_eq!(1, buf.limit()); /// ``` pub fn limit(&self) -> usize { self.limit } /// Sets the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than `lim` then that is the actual /// number of available bytes. 
/// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// /// let mut buf = b"hello world".take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// dst.clear(); /// /// buf.set_limit(3); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo"[..]); /// ``` pub fn set_limit(&mut self, lim: usize) { self.limit = lim } } impl Buf for Take { fn remaining(&self) -> usize { cmp::min(self.inner.remaining(), self.limit) } fn chunk(&self) -> &[u8] { let bytes = self.inner.chunk(); &bytes[..cmp::min(bytes.len(), self.limit)] } fn advance(&mut self, cnt: usize) { assert!(cnt <= self.limit); self.inner.advance(cnt); self.limit -= cnt; } fn copy_to_bytes(&mut self, len: usize) -> Bytes { assert!(len <= self.remaining(), "`len` greater than remaining"); let r = self.inner.copy_to_bytes(len); self.limit -= len; r } } bytes-1.5.0/src/buf/uninit_slice.rs000064400000000000000000000163431046102023000153620ustar 00000000000000use core::fmt; use core::mem::MaybeUninit; use core::ops::{ Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }; /// Uninitialized byte slice. /// /// Returned by `BufMut::chunk_mut()`, the referenced byte slice may be /// uninitialized. The wrapper provides safe access without introducing /// undefined behavior. /// /// The safety invariants of this wrapper are: /// /// 1. Reading from an `UninitSlice` is undefined behavior. /// 2. Writing uninitialized bytes to an `UninitSlice` is undefined behavior. /// /// The difference between `&mut UninitSlice` and `&mut [MaybeUninit]` is /// that it is possible in safe code to write uninitialized bytes to an /// `&mut [MaybeUninit]`, which this type prohibits. #[repr(transparent)] pub struct UninitSlice([MaybeUninit]); impl UninitSlice { /// Creates a `&mut UninitSlice` wrapping a slice of initialised memory. /// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; /// /// let mut buffer = [0u8; 64]; /// let slice = UninitSlice::new(&mut buffer[..]); /// ``` #[inline] pub fn new(slice: &mut [u8]) -> &mut UninitSlice { unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit] as *mut UninitSlice) } } /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory. /// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; /// use core::mem::MaybeUninit; /// /// let mut buffer = [MaybeUninit::uninit(); 64]; /// let slice = UninitSlice::uninit(&mut buffer[..]); /// /// let mut vec = Vec::with_capacity(1024); /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into(); /// ``` #[inline] pub fn uninit(slice: &mut [MaybeUninit]) -> &mut UninitSlice { unsafe { &mut *(slice as *mut [MaybeUninit] as *mut UninitSlice) } } fn uninit_ref(slice: &[MaybeUninit]) -> &UninitSlice { unsafe { &*(slice as *const [MaybeUninit] as *const UninitSlice) } } /// Create a `&mut UninitSlice` from a pointer and a length. /// /// # Safety /// /// The caller must ensure that `ptr` references a valid memory region owned /// by the caller representing a byte slice for the duration of `'a`. 
/// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; /// /// let bytes = b"hello world".to_vec(); /// let ptr = bytes.as_ptr() as *mut _; /// let len = bytes.len(); /// /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) }; /// ``` #[inline] pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice { let maybe_init: &mut [MaybeUninit] = core::slice::from_raw_parts_mut(ptr as *mut _, len); Self::uninit(maybe_init) } /// Write a single byte at the specified offset. /// /// # Panics /// /// The function panics if `index` is out of bounds. /// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; /// /// let mut data = [b'f', b'o', b'o']; /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; /// /// slice.write_byte(0, b'b'); /// /// assert_eq!(b"boo", &data[..]); /// ``` #[inline] pub fn write_byte(&mut self, index: usize, byte: u8) { assert!(index < self.len()); unsafe { self[index..].as_mut_ptr().write(byte) } } /// Copies bytes from `src` into `self`. /// /// The length of `src` must be the same as `self`. /// /// # Panics /// /// The function panics if `src` has a different length than `self`. /// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; /// /// let mut data = [b'f', b'o', b'o']; /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; /// /// slice.copy_from_slice(b"bar"); /// /// assert_eq!(b"bar", &data[..]); /// ``` #[inline] pub fn copy_from_slice(&mut self, src: &[u8]) { use core::ptr; assert_eq!(self.len(), src.len()); unsafe { ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len()); } } /// Return a raw pointer to the slice's buffer. /// /// # Safety /// /// The caller **must not** read from the referenced memory and **must not** /// write **uninitialized** bytes to the slice either. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut data = [0, 1, 2]; /// let mut slice = &mut data[..]; /// let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr(); /// ``` #[inline] pub fn as_mut_ptr(&mut self) -> *mut u8 { self.0.as_mut_ptr() as *mut _ } /// Return a `&mut [MaybeUninit]` to this slice's buffer. /// /// # Safety /// /// The caller **must not** read from the referenced memory and **must not** write /// **uninitialized** bytes to the slice either. This is because `BufMut` implementation /// that created the `UninitSlice` knows which parts are initialized. Writing uninitalized /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined /// behavior. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut data = [0, 1, 2]; /// let mut slice = &mut data[..]; /// unsafe { /// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut(); /// }; /// ``` #[inline] pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit] { &mut *(self as *mut _ as *mut [MaybeUninit]) } /// Returns the number of bytes in the slice. 
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut data = [0, 1, 2]; /// let mut slice = &mut data[..]; /// let len = BufMut::chunk_mut(&mut slice).len(); /// /// assert_eq!(len, 3); /// ``` #[inline] pub fn len(&self) -> usize { self.0.len() } } impl fmt::Debug for UninitSlice { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("UninitSlice[...]").finish() } } impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice { fn from(slice: &'a mut [u8]) -> Self { UninitSlice::new(slice) } } impl<'a> From<&'a mut [MaybeUninit]> for &'a mut UninitSlice { fn from(slice: &'a mut [MaybeUninit]) -> Self { UninitSlice::uninit(slice) } } macro_rules! impl_index { ($($t:ty),*) => { $( impl Index<$t> for UninitSlice { type Output = UninitSlice; #[inline] fn index(&self, index: $t) -> &UninitSlice { UninitSlice::uninit_ref(&self.0[index]) } } impl IndexMut<$t> for UninitSlice { #[inline] fn index_mut(&mut self, index: $t) -> &mut UninitSlice { UninitSlice::uninit(&mut self.0[index]) } } )* }; } impl_index!( Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive ); bytes-1.5.0/src/buf/vec_deque.rs000064400000000000000000000005731046102023000146330ustar 00000000000000use alloc::collections::VecDeque; use super::Buf; impl Buf for VecDeque { fn remaining(&self) -> usize { self.len() } fn chunk(&self) -> &[u8] { let (s1, s2) = self.as_slices(); if s1.is_empty() { s2 } else { s1 } } fn advance(&mut self, cnt: usize) { self.drain(..cnt); } } bytes-1.5.0/src/buf/writer.rs000064400000000000000000000040001046102023000141740ustar 00000000000000use crate::BufMut; use std::{cmp, io}; /// A `BufMut` adapter which implements `io::Write` for the inner value. /// /// This struct is generally created by calling `writer()` on `BufMut`. See /// documentation of [`writer()`](trait.BufMut.html#method.writer) for more /// details. #[derive(Debug)] pub struct Writer { buf: B, } pub fn new(buf: B) -> Writer { Writer { buf } } impl Writer { /// Gets a reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. /// /// # Examples /// /// ```rust /// use bytes::BufMut; /// /// let buf = Vec::with_capacity(1024).writer(); /// /// assert_eq!(1024, buf.get_ref().capacity()); /// ``` pub fn get_ref(&self) -> &B { &self.buf } /// Gets a mutable reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. /// /// # Examples /// /// ```rust /// use bytes::BufMut; /// /// let mut buf = vec![].writer(); /// /// buf.get_mut().reserve(1024); /// /// assert_eq!(1024, buf.get_ref().capacity()); /// ``` pub fn get_mut(&mut self) -> &mut B { &mut self.buf } /// Consumes this `Writer`, returning the underlying value. 
/// /// # Examples /// /// ```rust /// use bytes::BufMut; /// use std::io; /// /// let mut buf = vec![].writer(); /// let mut src = &b"hello world"[..]; /// /// io::copy(&mut src, &mut buf).unwrap(); /// /// let buf = buf.into_inner(); /// assert_eq!(*buf, b"hello world"[..]); /// ``` pub fn into_inner(self) -> B { self.buf } } impl io::Write for Writer { fn write(&mut self, src: &[u8]) -> io::Result { let n = cmp::min(self.buf.remaining_mut(), src.len()); self.buf.put(&src[0..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } bytes-1.5.0/src/bytes.rs000064400000000000000000001060731046102023000132470ustar 00000000000000use core::iter::FromIterator; use core::ops::{Deref, RangeBounds}; use core::{cmp, fmt, hash, mem, ptr, slice, usize}; use alloc::{ alloc::{dealloc, Layout}, borrow::Borrow, boxed::Box, string::String, vec::Vec, }; use crate::buf::IntoIter; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use crate::Buf; /// A cheaply cloneable and sliceable chunk of contiguous memory. /// /// `Bytes` is an efficient container for storing and operating on contiguous /// slices of memory. It is intended for use primarily in networking code, but /// could have applications elsewhere as well. /// /// `Bytes` values facilitate zero-copy network programming by allowing multiple /// `Bytes` objects to point to the same underlying memory. /// /// `Bytes` does not have a single implementation. It is an interface, whose /// exact behavior is implemented through dynamic dispatch in several underlying /// implementations of `Bytes`. /// /// All `Bytes` implementations must fulfill the following requirements: /// - They are cheaply cloneable and thereby shareable between an unlimited amount /// of components, for example by modifying a reference count. /// - Instances can be sliced to refer to a subset of the original buffer. /// /// ``` /// use bytes::Bytes; /// /// let mut mem = Bytes::from("Hello world"); /// let a = mem.slice(0..5); /// /// assert_eq!(a, "Hello"); /// /// let b = mem.split_to(6); /// /// assert_eq!(mem, "world"); /// assert_eq!(b, "Hello "); /// ``` /// /// # Memory layout /// /// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used /// to track information about which segment of the underlying memory the /// `Bytes` handle has access to. /// /// `Bytes` keeps both a pointer to the shared state containing the full memory /// slice and a pointer to the start of the region visible by the handle. /// `Bytes` also tracks the length of its view into the memory. /// /// # Sharing /// /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define /// how sharing/cloning is implemented in detail. /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for /// cloning the backing storage in order to share it behind between multiple /// `Bytes` instances. /// /// For `Bytes` implementations which refer to constant memory (e.g. created /// via `Bytes::from_static()`) the cloning implementation will be a no-op. /// /// For `Bytes` implementations which point to a reference counted shared storage /// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the /// reference count. /// /// Due to this mechanism, multiple `Bytes` instances may point to the same /// shared memory region. 
/// Each `Bytes` instance can point to different sections within that /// memory region, and `Bytes` instances may or may not have overlapping views /// into the memory. /// /// The following diagram visualizes a scenario where 2 `Bytes` instances make /// use of an `Arc`-based backing storage, and provide access to different views: /// /// ```text /// /// Arc ptrs ┌─────────┐ /// ________________________ / │ Bytes 2 │ /// / └─────────┘ /// / ┌───────────┐ | | /// |_________/ │ Bytes 1 │ | | /// | └───────────┘ | | /// | | | ___/ data | tail /// | data | tail |/ | /// v v v v /// ┌─────┬─────┬───────────┬───────────────┬─────┐ /// │ Arc │ │ │ │ │ /// └─────┴─────┴───────────┴───────────────┴─────┘ /// ``` pub struct Bytes { ptr: *const u8, len: usize, // inlined "trait object" data: AtomicPtr<()>, vtable: &'static Vtable, } pub(crate) struct Vtable { /// fn(data, ptr, len) pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) /// /// takes `Bytes` to value pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec, /// fn(data, ptr, len) pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), } impl Bytes { /// Creates a new empty `Bytes`. /// /// This will not allocate and the returned `Bytes` handle will be empty. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::new(); /// assert_eq!(&b[..], b""); /// ``` #[inline] #[cfg(not(all(loom, test)))] pub const fn new() -> Self { // Make it a named const to work around // "unsizing casts are not allowed in const fn" const EMPTY: &[u8] = &[]; Bytes::from_static(EMPTY) } #[cfg(all(loom, test))] pub fn new() -> Self { const EMPTY: &[u8] = &[]; Bytes::from_static(EMPTY) } /// Creates a new `Bytes` from a static slice. /// /// The returned `Bytes` will point directly to the static slice. There is /// no allocating or copying. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::from_static(b"hello"); /// assert_eq!(&b[..], b"hello"); /// ``` #[inline] #[cfg(not(all(loom, test)))] pub const fn from_static(bytes: &'static [u8]) -> Self { Bytes { ptr: bytes.as_ptr(), len: bytes.len(), data: AtomicPtr::new(ptr::null_mut()), vtable: &STATIC_VTABLE, } } #[cfg(all(loom, test))] pub fn from_static(bytes: &'static [u8]) -> Self { Bytes { ptr: bytes.as_ptr(), len: bytes.len(), data: AtomicPtr::new(ptr::null_mut()), vtable: &STATIC_VTABLE, } } /// Returns the number of bytes contained in this `Bytes`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::from(&b"hello"[..]); /// assert_eq!(b.len(), 5); /// ``` #[inline] pub const fn len(&self) -> usize { self.len } /// Returns true if the `Bytes` has a length of 0. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::new(); /// assert!(b.is_empty()); /// ``` #[inline] pub const fn is_empty(&self) -> bool { self.len == 0 } /// Creates `Bytes` instance from slice, by copying it. pub fn copy_from_slice(data: &[u8]) -> Self { data.to_vec().into() } /// Returns a slice of self for the provided range. /// /// This will increment the reference count for the underlying memory and /// return a new `Bytes` handle set to the slice. /// /// This operation is `O(1)`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let a = Bytes::from(&b"hello world"[..]); /// let b = a.slice(2..5); /// /// assert_eq!(&b[..], b"llo"); /// ``` /// /// # Panics /// /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing /// will panic. 
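///
/// For example, the following would panic (an illustrative sketch):
///
/// ```should_panic
/// use bytes::Bytes;
///
/// let a = Bytes::from(&b"hello"[..]);
/// // `end` (10) is greater than `a.len()` (5), so this panics.
/// let _ = a.slice(2..10);
/// ```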
pub fn slice(&self, range: impl RangeBounds) -> Self { use core::ops::Bound; let len = self.len(); let begin = match range.start_bound() { Bound::Included(&n) => n, Bound::Excluded(&n) => n + 1, Bound::Unbounded => 0, }; let end = match range.end_bound() { Bound::Included(&n) => n.checked_add(1).expect("out of range"), Bound::Excluded(&n) => n, Bound::Unbounded => len, }; assert!( begin <= end, "range start must not be greater than end: {:?} <= {:?}", begin, end, ); assert!( end <= len, "range end out of bounds: {:?} <= {:?}", end, len, ); if end == begin { return Bytes::new(); } let mut ret = self.clone(); ret.len = end - begin; ret.ptr = unsafe { ret.ptr.add(begin) }; ret } /// Returns a slice of self that is equivalent to the given `subset`. /// /// When processing a `Bytes` buffer with other tools, one often gets a /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. /// This function turns that `&[u8]` into another `Bytes`, as if one had /// called `self.slice()` with the offsets that correspond to `subset`. /// /// This operation is `O(1)`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let bytes = Bytes::from(&b"012345678"[..]); /// let as_slice = bytes.as_ref(); /// let subset = &as_slice[2..6]; /// let subslice = bytes.slice_ref(&subset); /// assert_eq!(&subslice[..], b"2345"); /// ``` /// /// # Panics /// /// Requires that the given `sub` slice is in fact contained within the /// `Bytes` buffer; otherwise this function will panic. pub fn slice_ref(&self, subset: &[u8]) -> Self { // Empty slice and empty Bytes may have their pointers reset // so explicitly allow empty slice to be a subslice of any slice. if subset.is_empty() { return Bytes::new(); } let bytes_p = self.as_ptr() as usize; let bytes_len = self.len(); let sub_p = subset.as_ptr() as usize; let sub_len = subset.len(); assert!( sub_p >= bytes_p, "subset pointer ({:p}) is smaller than self pointer ({:p})", subset.as_ptr(), self.as_ptr(), ); assert!( sub_p + sub_len <= bytes_p + bytes_len, "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", self.as_ptr(), bytes_len, subset.as_ptr(), sub_len, ); let sub_offset = sub_p - bytes_p; self.slice(sub_offset..(sub_offset + sub_len)) } /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` /// contains elements `[at, len)`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut a = Bytes::from(&b"hello world"[..]); /// let b = a.split_off(5); /// /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b" world"); /// ``` /// /// # Panics /// /// Panics if `at > len`. #[must_use = "consider Bytes::truncate if you don't need the other half"] pub fn split_off(&mut self, at: usize) -> Self { assert!( at <= self.len(), "split_off out of bounds: {:?} <= {:?}", at, self.len(), ); if at == self.len() { return Bytes::new(); } if at == 0 { return mem::replace(self, Bytes::new()); } let mut ret = self.clone(); self.len = at; unsafe { ret.inc_start(at) }; ret } /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[at, len)`, and the returned /// `Bytes` contains elements `[0, at)`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. 
/// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut a = Bytes::from(&b"hello world"[..]); /// let b = a.split_to(5); /// /// assert_eq!(&a[..], b" world"); /// assert_eq!(&b[..], b"hello"); /// ``` /// /// # Panics /// /// Panics if `at > len`. #[must_use = "consider Bytes::advance if you don't need the other half"] pub fn split_to(&mut self, at: usize) -> Self { assert!( at <= self.len(), "split_to out of bounds: {:?} <= {:?}", at, self.len(), ); if at == self.len() { return mem::replace(self, Bytes::new()); } if at == 0 { return Bytes::new(); } let mut ret = self.clone(); unsafe { self.inc_start(at) }; ret.len = at; ret } /// Shortens the buffer, keeping the first `len` bytes and dropping the /// rest. /// /// If `len` is greater than the buffer's current length, this has no /// effect. /// /// The [`split_off`] method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut buf = Bytes::from(&b"hello world"[..]); /// buf.truncate(5); /// assert_eq!(buf, b"hello"[..]); /// ``` /// /// [`split_off`]: #method.split_off #[inline] pub fn truncate(&mut self, len: usize) { if len < self.len { // The Vec "promotable" vtables do not store the capacity, // so we cannot truncate while using this repr. We *have* to // promote using `split_off` so the capacity can be stored. if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE { drop(self.split_off(len)); } else { self.len = len; } } } /// Clears the buffer, removing all data. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut buf = Bytes::from(&b"hello world"[..]); /// buf.clear(); /// assert!(buf.is_empty()); /// ``` #[inline] pub fn clear(&mut self) { self.truncate(0); } #[inline] pub(crate) unsafe fn with_vtable( ptr: *const u8, len: usize, data: AtomicPtr<()>, vtable: &'static Vtable, ) -> Bytes { Bytes { ptr, len, data, vtable, } } // private #[inline] fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.ptr, self.len) } } #[inline] unsafe fn inc_start(&mut self, by: usize) { // should already be asserted, but debug assert for tests debug_assert!(self.len >= by, "internal: inc_start out of bounds"); self.len -= by; self.ptr = self.ptr.add(by); } } // Vtable must enforce this behavior unsafe impl Send for Bytes {} unsafe impl Sync for Bytes {} impl Drop for Bytes { #[inline] fn drop(&mut self) { unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) } } } impl Clone for Bytes { #[inline] fn clone(&self) -> Bytes { unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } } } impl Buf for Bytes { #[inline] fn remaining(&self) -> usize { self.len() } #[inline] fn chunk(&self) -> &[u8] { self.as_slice() } #[inline] fn advance(&mut self, cnt: usize) { assert!( cnt <= self.len(), "cannot advance past `remaining`: {:?} <= {:?}", cnt, self.len(), ); unsafe { self.inc_start(cnt); } } fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { if len == self.remaining() { core::mem::replace(self, Bytes::new()) } else { let ret = self.slice(..len); self.advance(len); ret } } } impl Deref for Bytes { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.as_slice() } } impl AsRef<[u8]> for Bytes { #[inline] fn as_ref(&self) -> &[u8] { self.as_slice() } } impl hash::Hash for Bytes { fn hash(&self, state: &mut H) where H: hash::Hasher, { self.as_slice().hash(state); } } impl Borrow<[u8]> for Bytes { fn borrow(&self) -> 
&[u8] { self.as_slice() } } impl IntoIterator for Bytes { type Item = u8; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } impl<'a> IntoIterator for &'a Bytes { type Item = &'a u8; type IntoIter = core::slice::Iter<'a, u8>; fn into_iter(self) -> Self::IntoIter { self.as_slice().iter() } } impl FromIterator for Bytes { fn from_iter>(into_iter: T) -> Self { Vec::from_iter(into_iter).into() } } // impl Eq impl PartialEq for Bytes { fn eq(&self, other: &Bytes) -> bool { self.as_slice() == other.as_slice() } } impl PartialOrd for Bytes { fn partial_cmp(&self, other: &Bytes) -> Option { self.as_slice().partial_cmp(other.as_slice()) } } impl Ord for Bytes { fn cmp(&self, other: &Bytes) -> cmp::Ordering { self.as_slice().cmp(other.as_slice()) } } impl Eq for Bytes {} impl PartialEq<[u8]> for Bytes { fn eq(&self, other: &[u8]) -> bool { self.as_slice() == other } } impl PartialOrd<[u8]> for Bytes { fn partial_cmp(&self, other: &[u8]) -> Option { self.as_slice().partial_cmp(other) } } impl PartialEq for [u8] { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for [u8] { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) } } impl PartialEq for Bytes { fn eq(&self, other: &str) -> bool { self.as_slice() == other.as_bytes() } } impl PartialOrd for Bytes { fn partial_cmp(&self, other: &str) -> Option { self.as_slice().partial_cmp(other.as_bytes()) } } impl PartialEq for str { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for str { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) } } impl PartialEq> for Bytes { fn eq(&self, other: &Vec) -> bool { *self == other[..] } } impl PartialOrd> for Bytes { fn partial_cmp(&self, other: &Vec) -> Option { self.as_slice().partial_cmp(&other[..]) } } impl PartialEq for Vec { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for Vec { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) } } impl PartialEq for Bytes { fn eq(&self, other: &String) -> bool { *self == other[..] 
} } impl PartialOrd for Bytes { fn partial_cmp(&self, other: &String) -> Option { self.as_slice().partial_cmp(other.as_bytes()) } } impl PartialEq for String { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for String { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) } } impl PartialEq for &[u8] { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for &[u8] { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) } } impl PartialEq for &str { fn eq(&self, other: &Bytes) -> bool { *other == *self } } impl PartialOrd for &str { fn partial_cmp(&self, other: &Bytes) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) } } impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes where Bytes: PartialEq, { fn eq(&self, other: &&'a T) -> bool { *self == **other } } impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes where Bytes: PartialOrd, { fn partial_cmp(&self, other: &&'a T) -> Option { self.partial_cmp(&**other) } } // impl From impl Default for Bytes { #[inline] fn default() -> Bytes { Bytes::new() } } impl From<&'static [u8]> for Bytes { fn from(slice: &'static [u8]) -> Bytes { Bytes::from_static(slice) } } impl From<&'static str> for Bytes { fn from(slice: &'static str) -> Bytes { Bytes::from_static(slice.as_bytes()) } } impl From> for Bytes { fn from(vec: Vec) -> Bytes { let mut vec = vec; let ptr = vec.as_mut_ptr(); let len = vec.len(); let cap = vec.capacity(); // Avoid an extra allocation if possible. if len == cap { return Bytes::from(vec.into_boxed_slice()); } let shared = Box::new(Shared { buf: ptr, cap, ref_cnt: AtomicUsize::new(1), }); mem::forget(vec); let shared = Box::into_raw(shared); // The pointer should be aligned, so this assert should // always succeed. debug_assert!( 0 == (shared as usize & KIND_MASK), "internal: Box should have an aligned pointer", ); Bytes { ptr, len, data: AtomicPtr::new(shared as _), vtable: &SHARED_VTABLE, } } } impl From> for Bytes { fn from(slice: Box<[u8]>) -> Bytes { // Box<[u8]> doesn't contain a heap allocation for empty slices, // so the pointer isn't aligned enough for the KIND_VEC stashing to // work. 
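        // Illustrative note (added, not part of the original comment): the low
        // pointer bit doubles as the kind tag. An even `ptr` is stashed as
        // `ptr | KIND_VEC` and the "even" vtable later recovers it with
        // `addr & !KIND_MASK`; an odd `ptr` cannot carry the tag, so the "odd"
        // vtable stores it unmodified and relies on its oddness instead.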
if slice.is_empty() { return Bytes::new(); } let len = slice.len(); let ptr = Box::into_raw(slice) as *mut u8; if ptr as usize & 0x1 == 0 { let data = ptr_map(ptr, |addr| addr | KIND_VEC); Bytes { ptr, len, data: AtomicPtr::new(data.cast()), vtable: &PROMOTABLE_EVEN_VTABLE, } } else { Bytes { ptr, len, data: AtomicPtr::new(ptr.cast()), vtable: &PROMOTABLE_ODD_VTABLE, } } } } impl From for Bytes { fn from(s: String) -> Bytes { Bytes::from(s.into_bytes()) } } impl From for Vec { fn from(bytes: Bytes) -> Vec { let bytes = mem::ManuallyDrop::new(bytes); unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } } } // ===== impl Vtable ===== impl fmt::Debug for Vtable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vtable") .field("clone", &(self.clone as *const ())) .field("drop", &(self.drop as *const ())) .finish() } } // ===== impl StaticVtable ===== const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, to_vec: static_to_vec, drop: static_drop, }; unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let slice = slice::from_raw_parts(ptr, len); Bytes::from_static(slice) } unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { let slice = slice::from_raw_parts(ptr, len); slice.to_vec() } unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { // nothing to drop for &'static [u8] } // ===== impl PromotableVtable ===== static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, to_vec: promotable_even_to_vec, drop: promotable_even_drop, }; static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, to_vec: promotable_odd_to_vec, drop: promotable_odd_drop, }; unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { shallow_clone_arc(shared.cast(), ptr, len) } else { debug_assert_eq!(kind, KIND_VEC); let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); shallow_clone_vec(data, shared, buf, ptr, len) } } unsafe fn promotable_to_vec( data: &AtomicPtr<()>, ptr: *const u8, len: usize, f: fn(*mut ()) -> *mut u8, ) -> Vec { let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { shared_to_vec_impl(shared.cast(), ptr, len) } else { // If Bytes holds a Vec, then the offset must be 0. 
debug_assert_eq!(kind, KIND_VEC); let buf = f(shared); let cap = (ptr as usize - buf as usize) + len; // Copy back buffer ptr::copy(ptr, buf, len); Vec::from_raw_parts(buf, len, cap) } } unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { promotable_to_vec(data, ptr, len, |shared| { ptr_map(shared.cast(), |addr| addr & !KIND_MASK) }) } unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { release_shared(shared.cast()); } else { debug_assert_eq!(kind, KIND_VEC); let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); free_boxed_slice(buf, ptr, len); } }); } unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { shallow_clone_arc(shared as _, ptr, len) } else { debug_assert_eq!(kind, KIND_VEC); shallow_clone_vec(data, shared, shared.cast(), ptr, len) } } unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { promotable_to_vec(data, ptr, len, |shared| shared.cast()) } unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { release_shared(shared.cast()); } else { debug_assert_eq!(kind, KIND_VEC); free_boxed_slice(shared.cast(), ptr, len); } }); } unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { let cap = (offset as usize - buf as usize) + len; dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) } // ===== impl SharedVtable ===== struct Shared { // Holds arguments to dealloc upon Drop, but otherwise doesn't use them buf: *mut u8, cap: usize, ref_cnt: AtomicUsize, } impl Drop for Shared { fn drop(&mut self) { unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } } } // Assert that the alignment of `Shared` is divisible by 2. // This is a necessary invariant since we depend on allocating `Shared` a // shared object to implicitly carry the `KIND_ARC` flag in its pointer. // This flag is set when the LSB is 0. const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, to_vec: shared_to_vec, drop: shared_drop, }; const KIND_ARC: usize = 0b0; const KIND_VEC: usize = 0b1; const KIND_MASK: usize = 0b1; unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Relaxed); shallow_clone_arc(shared as _, ptr, len) } unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { // Check that the ref_cnt is 1 (unique). // // If it is unique, then it is set to 0 with AcqRel fence for the same // reason in release_shared. // // Otherwise, we take the other branch and call release_shared. 
if (*shared) .ref_cnt .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) .is_ok() { let buf = (*shared).buf; let cap = (*shared).cap; // Deallocate Shared drop(Box::from_raw(shared as *mut mem::ManuallyDrop)); // Copy back buffer ptr::copy(ptr, buf, len); Vec::from_raw_parts(buf, len, cap) } else { let v = slice::from_raw_parts(ptr, len).to_vec(); release_shared(shared); v } } unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) } unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { release_shared(shared.cast()); }); } unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); if old_size > usize::MAX >> 1 { crate::abort(); } Bytes { ptr, len, data: AtomicPtr::new(shared as _), vtable: &SHARED_VTABLE, } } #[cold] unsafe fn shallow_clone_vec( atom: &AtomicPtr<()>, ptr: *const (), buf: *mut u8, offset: *const u8, len: usize, ) -> Bytes { // If the buffer is still tracked in a `Vec`. It is time to // promote the vec to an `Arc`. This could potentially be called // concurrently, so some care must be taken. // First, allocate a new `Shared` instance containing the // `Vec` fields. It's important to note that `ptr`, `len`, // and `cap` cannot be mutated without having `&mut self`. // This means that these fields will not be concurrently // updated and since the buffer hasn't been promoted to an // `Arc`, those three fields still are the components of the // vector. let shared = Box::new(Shared { buf, cap: (offset as usize - buf as usize) + len, // Initialize refcount to 2. One for this reference, and one // for the new clone that will be returned from // `shallow_clone`. ref_cnt: AtomicUsize::new(2), }); let shared = Box::into_raw(shared); // The pointer should be aligned, so this assert should // always succeed. debug_assert!( 0 == (shared as usize & KIND_MASK), "internal: Box should have an aligned pointer", ); // Try compare & swapping the pointer into the `arc` field. // `Release` is used synchronize with other threads that // will load the `arc` field. // // If the `compare_exchange` fails, then the thread lost the // race to promote the buffer to shared. The `Acquire` // ordering will synchronize with the `compare_exchange` // that happened in the other thread and the `Shared` // pointed to by `actual` will be visible. match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { Ok(actual) => { debug_assert!(actual as usize == ptr as usize); // The upgrade was successful, the new handle can be // returned. Bytes { ptr: offset, len, data: AtomicPtr::new(shared as _), vtable: &SHARED_VTABLE, } } Err(actual) => { // The upgrade failed, a concurrent clone happened. Release // the allocation that was made in this thread, it will not // be needed. let shared = Box::from_raw(shared); mem::forget(*shared); // Buffer already promoted to shared storage, so increment ref // count. shallow_clone_arc(actual as _, offset, len) } } } unsafe fn release_shared(ptr: *mut Shared) { // `Shared` storage... follow the drop steps from Arc. if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { return; } // This fence is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` fence. 
This // means that use of the data happens before decreasing the reference // count, which happens before this fence, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // // Thread sanitizer does not support atomic fences. Use an atomic load // instead. (*ptr).ref_cnt.load(Ordering::Acquire); // Drop the data drop(Box::from_raw(ptr)); } // Ideally we would always use this version of `ptr_map` since it is strict // provenance compatible, but it results in worse codegen. We will however still // use it on miri because it gives better diagnostics for people who test bytes // code with miri. // // See https://github.com/tokio-rs/bytes/pull/545 for more info. #[cfg(miri)] fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 where F: FnOnce(usize) -> usize, { let old_addr = ptr as usize; let new_addr = f(old_addr); let diff = new_addr.wrapping_sub(old_addr); ptr.wrapping_add(diff) } #[cfg(not(miri))] fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 where F: FnOnce(usize) -> usize, { let old_addr = ptr as usize; let new_addr = f(old_addr); new_addr as *mut u8 } // compile-fails /// ```compile_fail /// use bytes::Bytes; /// #[deny(unused_must_use)] /// { /// let mut b1 = Bytes::from("hello world"); /// b1.split_to(6); /// } /// ``` fn _split_to_must_use() {} /// ```compile_fail /// use bytes::Bytes; /// #[deny(unused_must_use)] /// { /// let mut b1 = Bytes::from("hello world"); /// b1.split_off(6); /// } /// ``` fn _split_off_must_use() {} // fuzz tests #[cfg(all(test, loom))] mod fuzz { use loom::sync::Arc; use loom::thread; use super::Bytes; #[test] fn bytes_cloning_vec() { loom::model(|| { let a = Bytes::from(b"abcdefgh".to_vec()); let addr = a.as_ptr() as usize; // test the Bytes::clone is Sync by putting it in an Arc let a1 = Arc::new(a); let a2 = a1.clone(); let t1 = thread::spawn(move || { let b: Bytes = (*a1).clone(); assert_eq!(b.as_ptr() as usize, addr); }); let t2 = thread::spawn(move || { let b: Bytes = (*a2).clone(); assert_eq!(b.as_ptr() as usize, addr); }); t1.join().unwrap(); t2.join().unwrap(); }); } } bytes-1.5.0/src/bytes_mut.rs000064400000000000000000001472371046102023000141430ustar 00000000000000use core::iter::{FromIterator, Iterator}; use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::ptr::{self, NonNull}; use core::{cmp, fmt, hash, isize, slice, usize}; use alloc::{ borrow::{Borrow, BorrowMut}, boxed::Box, string::String, vec, vec::Vec, }; use crate::buf::{IntoIter, UninitSlice}; use crate::bytes::Vtable; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use crate::{Buf, BufMut, Bytes}; /// A unique reference to a contiguous slice of memory. /// /// `BytesMut` represents a unique view into a potentially shared memory region. /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to /// mutate the memory. 
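///
/// For instance (a small sketch), the bytes can be edited in place much like a
/// `&mut [u8]`:
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello"[..]);
/// buf[0] = b'H';
/// assert_eq!(&buf[..], b"Hello");
/// ```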
/// /// `BytesMut` can be thought of as containing a `buf: Arc>`, an offset /// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the /// same `buf` overlaps with its slice. That guarantee means that a write lock /// is not required. /// /// # Growth /// /// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as /// necessary. However, explicitly reserving the required space up-front before /// a series of inserts will be more efficient. /// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut buf = BytesMut::with_capacity(64); /// /// buf.put_u8(b'h'); /// buf.put_u8(b'e'); /// buf.put(&b"llo"[..]); /// /// assert_eq!(&buf[..], b"hello"); /// /// // Freeze the buffer so that it can be shared /// let a = buf.freeze(); /// /// // This does not allocate, instead `b` points to the same memory. /// let b = a.clone(); /// /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b"hello"); /// ``` pub struct BytesMut { ptr: NonNull, len: usize, cap: usize, data: *mut Shared, } // Thread-safe reference-counted container for the shared storage. This mostly // the same as `core::sync::Arc` but without the weak counter. The ref counting // fns are based on the ones found in `std`. // // The main reason to use `Shared` instead of `core::sync::Arc` is that it ends // up making the overall code simpler and easier to reason about. This is due to // some of the logic around setting `Inner::arc` and other ways the `arc` field // is used. Using `Arc` ended up requiring a number of funky transmutes and // other shenanigans to make it work. struct Shared { vec: Vec, original_capacity_repr: usize, ref_count: AtomicUsize, } // Buffer storage strategy flags. const KIND_ARC: usize = 0b0; const KIND_VEC: usize = 0b1; const KIND_MASK: usize = 0b1; // The max original capacity value. Any `Bytes` allocated with a greater initial // capacity will default to this. const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; // The original capacity algorithm will not take effect unless the originally // allocated capacity was at least 1kb in size. const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; // The original capacity is stored in powers of 2 starting at 1kb to a max of // 64kb. Representing it as such requires only 3 bits of storage. const ORIGINAL_CAPACITY_MASK: usize = 0b11100; const ORIGINAL_CAPACITY_OFFSET: usize = 2; // When the storage is in the `Vec` representation, the pointer can be advanced // at most this value. This is due to the amount of storage available to track // the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY // bits. const VEC_POS_OFFSET: usize = 5; const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; const NOT_VEC_POS_MASK: usize = 0b11111; #[cfg(target_pointer_width = "64")] const PTR_WIDTH: usize = 64; #[cfg(target_pointer_width = "32")] const PTR_WIDTH: usize = 32; /* * * ===== BytesMut ===== * */ impl BytesMut { /// Creates a new `BytesMut` with the specified capacity. /// /// The returned `BytesMut` will be able to hold at least `capacity` bytes /// without reallocating. /// /// It is important to note that this function does not specify the length /// of the returned `BytesMut`, but only the capacity. 
/// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut bytes = BytesMut::with_capacity(64); /// /// // `bytes` contains no data, even though there is capacity /// assert_eq!(bytes.len(), 0); /// /// bytes.put(&b"hello world"[..]); /// /// assert_eq!(&bytes[..], b"hello world"); /// ``` #[inline] pub fn with_capacity(capacity: usize) -> BytesMut { BytesMut::from_vec(Vec::with_capacity(capacity)) } /// Creates a new `BytesMut` with default capacity. /// /// Resulting object has length 0 and unspecified capacity. /// This function does not allocate. /// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut bytes = BytesMut::new(); /// /// assert_eq!(0, bytes.len()); /// /// bytes.reserve(2); /// bytes.put_slice(b"xy"); /// /// assert_eq!(&b"xy"[..], &bytes[..]); /// ``` #[inline] pub fn new() -> BytesMut { BytesMut::with_capacity(0) } /// Returns the number of bytes contained in this `BytesMut`. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let b = BytesMut::from(&b"hello"[..]); /// assert_eq!(b.len(), 5); /// ``` #[inline] pub fn len(&self) -> usize { self.len } /// Returns true if the `BytesMut` has a length of 0. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let b = BytesMut::with_capacity(64); /// assert!(b.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of bytes the `BytesMut` can hold without reallocating. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let b = BytesMut::with_capacity(64); /// assert_eq!(b.capacity(), 64); /// ``` #[inline] pub fn capacity(&self) -> usize { self.cap } /// Converts `self` into an immutable `Bytes`. /// /// The conversion is zero cost and is used to indicate that the slice /// referenced by the handle will no longer be mutated. Once the conversion /// is done, the handle can be cloned and shared across threads. /// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// use std::thread; /// /// let mut b = BytesMut::with_capacity(64); /// b.put(&b"hello world"[..]); /// let b1 = b.freeze(); /// let b2 = b1.clone(); /// /// let th = thread::spawn(move || { /// assert_eq!(&b1[..], b"hello world"); /// }); /// /// assert_eq!(&b2[..], b"hello world"); /// th.join().unwrap(); /// ``` #[inline] pub fn freeze(mut self) -> Bytes { if self.kind() == KIND_VEC { // Just re-use `Bytes` internal Vec vtable unsafe { let (off, _) = self.get_vec_pos(); let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); mem::forget(self); let mut b: Bytes = vec.into(); b.advance(off); b } } else { debug_assert_eq!(self.kind(), KIND_ARC); let ptr = self.ptr.as_ptr(); let len = self.len; let data = AtomicPtr::new(self.data.cast()); mem::forget(self); unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } } } /// Creates a new `BytesMut`, which is initialized with zero. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let zeros = BytesMut::zeroed(42); /// /// assert_eq!(zeros.len(), 42); /// zeros.into_iter().for_each(|x| assert_eq!(x, 0)); /// ``` pub fn zeroed(len: usize) -> BytesMut { BytesMut::from_vec(vec![0; len]) } /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[0, at)`, and the returned /// `BytesMut` contains elements `[at, capacity)`. /// /// This is an `O(1)` operation that just increases the reference count /// and sets a few indices. 
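///
/// Note that `at` may be anywhere up to the current *capacity*, not just up to
/// `len()`; a sketch:
///
/// ```
/// use bytes::BytesMut;
///
/// let mut a = BytesMut::with_capacity(64);
/// a.extend_from_slice(b"hi");
///
/// // Split past `len()` but within `capacity()`.
/// let b = a.split_off(32);
///
/// assert_eq!(&a[..], b"hi");
/// assert_eq!(a.capacity(), 32);
/// assert!(b.is_empty());
/// assert_eq!(b.capacity(), 32);
/// ```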
/// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut a = BytesMut::from(&b"hello world"[..]); /// let mut b = a.split_off(5); /// /// a[0] = b'j'; /// b[0] = b'!'; /// /// assert_eq!(&a[..], b"jello"); /// assert_eq!(&b[..], b"!world"); /// ``` /// /// # Panics /// /// Panics if `at > capacity`. #[must_use = "consider BytesMut::truncate if you don't need the other half"] pub fn split_off(&mut self, at: usize) -> BytesMut { assert!( at <= self.capacity(), "split_off out of bounds: {:?} <= {:?}", at, self.capacity(), ); unsafe { let mut other = self.shallow_clone(); other.set_start(at); self.set_end(at); other } } /// Removes the bytes from the current view, returning them in a new /// `BytesMut` handle. /// /// Afterwards, `self` will be empty, but will retain any additional /// capacity that it had before the operation. This is identical to /// `self.split_to(self.len())`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. /// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut buf = BytesMut::with_capacity(1024); /// buf.put(&b"hello world"[..]); /// /// let other = buf.split(); /// /// assert!(buf.is_empty()); /// assert_eq!(1013, buf.capacity()); /// /// assert_eq!(other, b"hello world"[..]); /// ``` #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"] pub fn split(&mut self) -> BytesMut { let len = self.len(); self.split_to(len) } /// Splits the buffer into two at the given index. /// /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` /// contains elements `[0, at)`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut a = BytesMut::from(&b"hello world"[..]); /// let mut b = a.split_to(5); /// /// a[0] = b'!'; /// b[0] = b'j'; /// /// assert_eq!(&a[..], b"!world"); /// assert_eq!(&b[..], b"jello"); /// ``` /// /// # Panics /// /// Panics if `at > len`. #[must_use = "consider BytesMut::advance if you don't need the other half"] pub fn split_to(&mut self, at: usize) -> BytesMut { assert!( at <= self.len(), "split_to out of bounds: {:?} <= {:?}", at, self.len(), ); unsafe { let mut other = self.shallow_clone(); other.set_end(at); self.set_start(at); other } } /// Shortens the buffer, keeping the first `len` bytes and dropping the /// rest. /// /// If `len` is greater than the buffer's current length, this has no /// effect. /// /// Existing underlying capacity is preserved. /// /// The [`split_off`] method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::from(&b"hello world"[..]); /// buf.truncate(5); /// assert_eq!(buf, b"hello"[..]); /// ``` /// /// [`split_off`]: #method.split_off pub fn truncate(&mut self, len: usize) { if len <= self.len() { unsafe { self.set_len(len); } } } /// Clears the buffer, removing all data. Existing capacity is preserved. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::from(&b"hello world"[..]); /// buf.clear(); /// assert!(buf.is_empty()); /// ``` pub fn clear(&mut self) { self.truncate(0); } /// Resizes the buffer so that `len` is equal to `new_len`. /// /// If `new_len` is greater than `len`, the buffer is extended by the /// difference with each additional byte set to `value`. 
If `new_len` is /// less than `len`, the buffer is simply truncated. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::new(); /// /// buf.resize(3, 0x1); /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); /// /// buf.resize(2, 0x2); /// assert_eq!(&buf[..], &[0x1, 0x1]); /// /// buf.resize(4, 0x3); /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); /// ``` pub fn resize(&mut self, new_len: usize, value: u8) { let len = self.len(); if new_len > len { let additional = new_len - len; self.reserve(additional); unsafe { let dst = self.chunk_mut().as_mut_ptr(); ptr::write_bytes(dst, value, additional); self.set_len(new_len); } } else { self.truncate(new_len); } } /// Sets the length of the buffer. /// /// This will explicitly set the size of the buffer without actually /// modifying the data, so it is up to the caller to ensure that the data /// has been initialized. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut b = BytesMut::from(&b"hello world"[..]); /// /// unsafe { /// b.set_len(5); /// } /// /// assert_eq!(&b[..], b"hello"); /// /// unsafe { /// b.set_len(11); /// } /// /// assert_eq!(&b[..], b"hello world"); /// ``` #[inline] pub unsafe fn set_len(&mut self, len: usize) { debug_assert!(len <= self.cap, "set_len out of bounds"); self.len = len; } /// Reserves capacity for at least `additional` more bytes to be inserted /// into the given `BytesMut`. /// /// More than `additional` bytes may be reserved in order to avoid frequent /// reallocations. A call to `reserve` may result in an allocation. /// /// Before allocating new buffer space, the function will attempt to reclaim /// space in the existing buffer. If the current handle references a view /// into a larger original buffer, and all other handles referencing part /// of the same original buffer have been dropped, then the current view /// can be copied/shifted to the front of the buffer and the handle can take /// ownership of the full buffer, provided that the full buffer is large /// enough to fit the requested additional capacity. /// /// This optimization will only happen if shifting the data from the current /// view to the front of the buffer is not too expensive in terms of the /// (amortized) time required. The precise condition is subject to change; /// as of now, the length of the data being shifted needs to be at least as /// large as the distance that it's shifted by. If the current view is empty /// and the original buffer is large enough to fit the requested additional /// capacity, then reallocations will never happen. /// /// # Examples /// /// In the following example, a new buffer is allocated. /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::from(&b"hello"[..]); /// buf.reserve(64); /// assert!(buf.capacity() >= 69); /// ``` /// /// In the following example, the existing buffer is reclaimed. /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut buf = BytesMut::with_capacity(128); /// buf.put(&[0; 64][..]); /// /// let ptr = buf.as_ptr(); /// let other = buf.split(); /// /// assert!(buf.is_empty()); /// assert_eq!(buf.capacity(), 64); /// /// drop(other); /// buf.reserve(128); /// /// assert_eq!(buf.capacity(), 128); /// assert_eq!(buf.as_ptr(), ptr); /// ``` /// /// # Panics /// /// Panics if the new capacity overflows `usize`. 
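///
/// For example (an illustrative sketch), requesting an impossible amount of
/// additional space panics rather than allocating:
///
/// ```should_panic
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello"[..]);
/// // `len() + usize::MAX` overflows, so this panics.
/// buf.reserve(usize::MAX);
/// ```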
#[inline] pub fn reserve(&mut self, additional: usize) { let len = self.len(); let rem = self.capacity() - len; if additional <= rem { // The handle can already store at least `additional` more bytes, so // there is no further work needed to be done. return; } self.reserve_inner(additional); } // In separate function to allow the short-circuits in `reserve` to // be inline-able. Significant helps performance. fn reserve_inner(&mut self, additional: usize) { let len = self.len(); let kind = self.kind(); if kind == KIND_VEC { // If there's enough free space before the start of the buffer, then // just copy the data backwards and reuse the already-allocated // space. // // Otherwise, since backed by a vector, use `Vec::reserve` // // We need to make sure that this optimization does not kill the // amortized runtimes of BytesMut's operations. unsafe { let (off, prev) = self.get_vec_pos(); // Only reuse space if we can satisfy the requested additional space. // // Also check if the value of `off` suggests that enough bytes // have been read to account for the overhead of shifting all // the data (in an amortized analysis). // Hence the condition `off >= self.len()`. // // This condition also already implies that the buffer is going // to be (at least) half-empty in the end; so we do not break // the (amortized) runtime with future resizes of the underlying // `Vec`. // // [For more details check issue #524, and PR #525.] if self.capacity() - self.len() + off >= additional && off >= self.len() { // There's enough space, and it's not too much overhead: // reuse the space! // // Just move the pointer back to the start after copying // data back. let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); // Since `off >= self.len()`, the two regions don't overlap. ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); self.ptr = vptr(base_ptr); self.set_vec_pos(0, prev); // Length stays constant, but since we moved backwards we // can gain capacity back. self.cap += off; } else { // Not enough space, or reusing might be too much overhead: // allocate more space! let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); v.reserve(additional); // Update the info self.ptr = vptr(v.as_mut_ptr().add(off)); self.len = v.len() - off; self.cap = v.capacity() - off; } return; } } debug_assert_eq!(kind, KIND_ARC); let shared: *mut Shared = self.data; // Reserving involves abandoning the currently shared buffer and // allocating a new vector with the requested capacity. // // Compute the new capacity let mut new_cap = len.checked_add(additional).expect("overflow"); let original_capacity; let original_capacity_repr; unsafe { original_capacity_repr = (*shared).original_capacity_repr; original_capacity = original_capacity_from_repr(original_capacity_repr); // First, try to reclaim the buffer. This is possible if the current // handle is the only outstanding handle pointing to the buffer. if (*shared).is_unique() { // This is the only handle to the buffer. It can be reclaimed. // However, before doing the work of copying data, check to make // sure that the vector has enough capacity. let v = &mut (*shared).vec; let v_capacity = v.capacity(); let ptr = v.as_mut_ptr(); let offset = offset_from(self.ptr.as_ptr(), ptr); // Compare the condition in the `kind == KIND_VEC` case above // for more details. 
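            // Illustrative summary (added note): three outcomes follow.
            //   1. The vector already has room past this view: just grow `cap`
            //      in place, no copy.
            //   2. It only has room if the view is shifted back to the front,
            //      and the shift is cheap enough (`offset >= len`): copy the
            //      data back and reclaim the full buffer.
            //   3. Otherwise: grow the underlying vector itself.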
if v_capacity >= new_cap + offset { self.cap = new_cap; // no copy is necessary } else if v_capacity >= new_cap && offset >= len { // The capacity is sufficient, and copying is not too much // overhead: reclaim the buffer! // `offset >= len` means: no overlap ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len); self.ptr = vptr(ptr); self.cap = v.capacity(); } else { // calculate offset let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize); // new_cap is calculated in terms of `BytesMut`, not the underlying // `Vec`, so it does not take the offset into account. // // Thus we have to manually add it here. new_cap = new_cap.checked_add(off).expect("overflow"); // The vector capacity is not sufficient. The reserve request is // asking for more than the initial buffer capacity. Allocate more // than requested if `new_cap` is not much bigger than the current // capacity. // // There are some situations, using `reserve_exact` that the // buffer capacity could be below `original_capacity`, so do a // check. let double = v.capacity().checked_shl(1).unwrap_or(new_cap); new_cap = cmp::max(double, new_cap); // No space - allocate more // // The length field of `Shared::vec` is not used by the `BytesMut`; // instead we use the `len` field in the `BytesMut` itself. However, // when calling `reserve`, it doesn't guarantee that data stored in // the unused capacity of the vector is copied over to the new // allocation, so we need to ensure that we don't have any data we // care about in the unused capacity before calling `reserve`. debug_assert!(off + len <= v.capacity()); v.set_len(off + len); v.reserve(new_cap - v.len()); // Update the info self.ptr = vptr(v.as_mut_ptr().add(off)); self.cap = v.capacity() - off; } return; } else { new_cap = cmp::max(new_cap, original_capacity); } } // Create a new vector to store the data let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap)); // Copy the bytes v.extend_from_slice(self.as_ref()); // Release the shared handle. This must be done *after* the bytes are // copied. unsafe { release_shared(shared) }; // Update self let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; self.data = invalid_ptr(data); self.ptr = vptr(v.as_mut_ptr()); self.len = v.len(); self.cap = v.capacity(); } /// Appends given bytes to this `BytesMut`. /// /// If this `BytesMut` object does not have enough capacity, it is resized /// first. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::with_capacity(0); /// buf.extend_from_slice(b"aaabbb"); /// buf.extend_from_slice(b"cccddd"); /// /// assert_eq!(b"aaabbbcccddd", &buf[..]); /// ``` #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { let cnt = extend.len(); self.reserve(cnt); unsafe { let dst = self.spare_capacity_mut(); // Reserved above debug_assert!(dst.len() >= cnt); ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt); } unsafe { self.advance_mut(cnt); } } /// Absorbs a `BytesMut` that was previously split off. /// /// If the two `BytesMut` objects were previously contiguous and not mutated /// in a way that causes re-allocation i.e., if `other` was created by /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation /// that just decreases a reference count and sets a few indices. /// Otherwise this method degenerates to /// `self.extend_from_slice(other.as_ref())`. 
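///
/// A sketch of that fallback: the two buffers below are unrelated, so the bytes
/// are copied rather than re-joined in place, but the result is the same either
/// way.
///
/// ```
/// use bytes::BytesMut;
///
/// let mut a = BytesMut::from(&b"aaa"[..]);
/// let b = BytesMut::from(&b"bbb"[..]);
///
/// a.unsplit(b);
/// assert_eq!(b"aaabbb", &a[..]);
/// ```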
/// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// let mut buf = BytesMut::with_capacity(64); /// buf.extend_from_slice(b"aaabbbcccddd"); /// /// let split = buf.split_off(6); /// assert_eq!(b"aaabbb", &buf[..]); /// assert_eq!(b"cccddd", &split[..]); /// /// buf.unsplit(split); /// assert_eq!(b"aaabbbcccddd", &buf[..]); /// ``` pub fn unsplit(&mut self, other: BytesMut) { if self.is_empty() { *self = other; return; } if let Err(other) = self.try_unsplit(other) { self.extend_from_slice(other.as_ref()); } } // private // For now, use a `Vec` to manage the memory for us, but we may want to // change that in the future to some alternate allocator strategy. // // Thus, we don't expose an easy way to construct from a `Vec` since an // internal change could make a simple pattern (`BytesMut::from(vec)`) // suddenly a lot more expensive. #[inline] pub(crate) fn from_vec(mut vec: Vec) -> BytesMut { let ptr = vptr(vec.as_mut_ptr()); let len = vec.len(); let cap = vec.capacity(); mem::forget(vec); let original_capacity_repr = original_capacity_to_repr(cap); let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; BytesMut { ptr, len, cap, data: invalid_ptr(data), } } #[inline] fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } #[inline] fn as_slice_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } } unsafe fn set_start(&mut self, start: usize) { // Setting the start to 0 is a no-op, so return early if this is the // case. if start == 0 { return; } debug_assert!(start <= self.cap, "internal: set_start out of bounds"); let kind = self.kind(); if kind == KIND_VEC { // Setting the start when in vec representation is a little more // complicated. First, we have to track how far ahead the // "start" of the byte buffer from the beginning of the vec. We // also have to ensure that we don't exceed the maximum shift. let (mut pos, prev) = self.get_vec_pos(); pos += start; if pos <= MAX_VEC_POS { self.set_vec_pos(pos, prev); } else { // The repr must be upgraded to ARC. This will never happen // on 64 bit systems and will only happen on 32 bit systems // when shifting past 134,217,727 bytes. As such, we don't // worry too much about performance here. self.promote_to_shared(/*ref_count = */ 1); } } // Updating the start of the view is setting `ptr` to point to the // new start and updating the `len` field to reflect the new length // of the view. 
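        // (Added note) `start` may lie beyond `self.len` when splitting inside
        // the uninitialized tail of the buffer, which is why the length
        // saturates at zero below instead of simply subtracting.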
self.ptr = vptr(self.ptr.as_ptr().add(start)); if self.len >= start { self.len -= start; } else { self.len = 0; } self.cap -= start; } unsafe fn set_end(&mut self, end: usize) { debug_assert_eq!(self.kind(), KIND_ARC); assert!(end <= self.cap, "set_end out of bounds"); self.cap = end; self.len = cmp::min(self.len, end); } fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> { if other.capacity() == 0 { return Ok(()); } let ptr = unsafe { self.ptr.as_ptr().add(self.len) }; if ptr == other.ptr.as_ptr() && self.kind() == KIND_ARC && other.kind() == KIND_ARC && self.data == other.data { // Contiguous blocks, just combine directly self.len += other.len; self.cap += other.cap; Ok(()) } else { Err(other) } } #[inline] fn kind(&self) -> usize { self.data as usize & KIND_MASK } unsafe fn promote_to_shared(&mut self, ref_cnt: usize) { debug_assert_eq!(self.kind(), KIND_VEC); debug_assert!(ref_cnt == 1 || ref_cnt == 2); let original_capacity_repr = (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET; // The vec offset cannot be concurrently mutated, so there // should be no danger reading it. let off = (self.data as usize) >> VEC_POS_OFFSET; // First, allocate a new `Shared` instance containing the // `Vec` fields. It's important to note that `ptr`, `len`, // and `cap` cannot be mutated without having `&mut self`. // This means that these fields will not be concurrently // updated and since the buffer hasn't been promoted to an // `Arc`, those three fields still are the components of the // vector. let shared = Box::new(Shared { vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off), original_capacity_repr, ref_count: AtomicUsize::new(ref_cnt), }); let shared = Box::into_raw(shared); // The pointer should be aligned, so this assert should // always succeed. debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC); self.data = shared; } /// Makes an exact shallow clone of `self`. /// /// The kind of `self` doesn't matter, but this is unsafe /// because the clone will have the same offsets. You must /// be sure the returned value to the user doesn't allow /// two views into the same range. #[inline] unsafe fn shallow_clone(&mut self) -> BytesMut { if self.kind() == KIND_ARC { increment_shared(self.data); ptr::read(self) } else { self.promote_to_shared(/*ref_count = */ 2); ptr::read(self) } } #[inline] unsafe fn get_vec_pos(&mut self) -> (usize, usize) { debug_assert_eq!(self.kind(), KIND_VEC); let prev = self.data as usize; (prev >> VEC_POS_OFFSET, prev) } #[inline] unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) { debug_assert_eq!(self.kind(), KIND_VEC); debug_assert!(pos <= MAX_VEC_POS); self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)); } /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit`. /// /// The returned slice can be used to fill the buffer with data (e.g. by /// reading from a file) before marking the data as initialized using the /// [`set_len`] method. /// /// [`set_len`]: BytesMut::set_len /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// /// // Allocate buffer big enough for 10 bytes. /// let mut buf = BytesMut::with_capacity(10); /// /// // Fill in the first 3 elements. /// let uninit = buf.spare_capacity_mut(); /// uninit[0].write(0); /// uninit[1].write(1); /// uninit[2].write(2); /// /// // Mark the first 3 bytes of the buffer as being initialized. 
/// unsafe { /// buf.set_len(3); /// } /// /// assert_eq!(&buf[..], &[0, 1, 2]); /// ``` #[inline] pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit] { unsafe { let ptr = self.ptr.as_ptr().add(self.len); let len = self.cap - self.len; slice::from_raw_parts_mut(ptr.cast(), len) } } } impl Drop for BytesMut { fn drop(&mut self) { let kind = self.kind(); if kind == KIND_VEC { unsafe { let (off, _) = self.get_vec_pos(); // Vector storage, free the vector let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); } } else if kind == KIND_ARC { unsafe { release_shared(self.data) }; } } } impl Buf for BytesMut { #[inline] fn remaining(&self) -> usize { self.len() } #[inline] fn chunk(&self) -> &[u8] { self.as_slice() } #[inline] fn advance(&mut self, cnt: usize) { assert!( cnt <= self.remaining(), "cannot advance past `remaining`: {:?} <= {:?}", cnt, self.remaining(), ); unsafe { self.set_start(cnt); } } fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { self.split_to(len).freeze() } } unsafe impl BufMut for BytesMut { #[inline] fn remaining_mut(&self) -> usize { usize::MAX - self.len() } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { let new_len = self.len() + cnt; assert!( new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap ); self.len = new_len; } #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { if self.capacity() == self.len() { self.reserve(64); } self.spare_capacity_mut().into() } // Specialize these methods so they can skip checking `remaining_mut` // and `advance_mut`. fn put(&mut self, mut src: T) where Self: Sized, { while src.has_remaining() { let s = src.chunk(); let l = s.len(); self.extend_from_slice(s); src.advance(l); } } fn put_slice(&mut self, src: &[u8]) { self.extend_from_slice(src); } fn put_bytes(&mut self, val: u8, cnt: usize) { self.reserve(cnt); unsafe { let dst = self.spare_capacity_mut(); // Reserved above debug_assert!(dst.len() >= cnt); ptr::write_bytes(dst.as_mut_ptr(), val, cnt); self.advance_mut(cnt); } } } impl AsRef<[u8]> for BytesMut { #[inline] fn as_ref(&self) -> &[u8] { self.as_slice() } } impl Deref for BytesMut { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.as_ref() } } impl AsMut<[u8]> for BytesMut { #[inline] fn as_mut(&mut self) -> &mut [u8] { self.as_slice_mut() } } impl DerefMut for BytesMut { #[inline] fn deref_mut(&mut self) -> &mut [u8] { self.as_mut() } } impl<'a> From<&'a [u8]> for BytesMut { fn from(src: &'a [u8]) -> BytesMut { BytesMut::from_vec(src.to_vec()) } } impl<'a> From<&'a str> for BytesMut { fn from(src: &'a str) -> BytesMut { BytesMut::from(src.as_bytes()) } } impl From for Bytes { fn from(src: BytesMut) -> Bytes { src.freeze() } } impl PartialEq for BytesMut { fn eq(&self, other: &BytesMut) -> bool { self.as_slice() == other.as_slice() } } impl PartialOrd for BytesMut { fn partial_cmp(&self, other: &BytesMut) -> Option { self.as_slice().partial_cmp(other.as_slice()) } } impl Ord for BytesMut { fn cmp(&self, other: &BytesMut) -> cmp::Ordering { self.as_slice().cmp(other.as_slice()) } } impl Eq for BytesMut {} impl Default for BytesMut { #[inline] fn default() -> BytesMut { BytesMut::new() } } impl hash::Hash for BytesMut { fn hash(&self, state: &mut H) where H: hash::Hasher, { let s: &[u8] = self.as_ref(); s.hash(state); } } impl Borrow<[u8]> for BytesMut { fn borrow(&self) -> &[u8] { self.as_ref() } } impl BorrowMut<[u8]> for BytesMut { fn borrow_mut(&mut self) -> &mut [u8] { self.as_mut() } } impl fmt::Write for BytesMut { #[inline] fn write_str(&mut self, s: 
&str) -> fmt::Result { if self.remaining_mut() >= s.len() { self.put_slice(s.as_bytes()); Ok(()) } else { Err(fmt::Error) } } #[inline] fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { fmt::write(self, args) } } impl Clone for BytesMut { fn clone(&self) -> BytesMut { BytesMut::from(&self[..]) } } impl IntoIterator for BytesMut { type Item = u8; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } impl<'a> IntoIterator for &'a BytesMut { type Item = &'a u8; type IntoIter = core::slice::Iter<'a, u8>; fn into_iter(self) -> Self::IntoIter { self.as_ref().iter() } } impl Extend for BytesMut { fn extend(&mut self, iter: T) where T: IntoIterator, { let iter = iter.into_iter(); let (lower, _) = iter.size_hint(); self.reserve(lower); // TODO: optimize // 1. If self.kind() == KIND_VEC, use Vec::extend // 2. Make `reserve` inline-able for b in iter { self.reserve(1); self.put_u8(b); } } } impl<'a> Extend<&'a u8> for BytesMut { fn extend(&mut self, iter: T) where T: IntoIterator, { self.extend(iter.into_iter().copied()) } } impl Extend for BytesMut { fn extend(&mut self, iter: T) where T: IntoIterator, { for bytes in iter { self.extend_from_slice(&bytes) } } } impl FromIterator for BytesMut { fn from_iter>(into_iter: T) -> Self { BytesMut::from_vec(Vec::from_iter(into_iter)) } } impl<'a> FromIterator<&'a u8> for BytesMut { fn from_iter>(into_iter: T) -> Self { BytesMut::from_iter(into_iter.into_iter().copied()) } } /* * * ===== Inner ===== * */ unsafe fn increment_shared(ptr: *mut Shared) { let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed); if old_size > isize::MAX as usize { crate::abort(); } } unsafe fn release_shared(ptr: *mut Shared) { // `Shared` storage... follow the drop steps from Arc. if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 { return; } // This fence is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` fence. This // means that use of the data happens before decreasing the reference // count, which happens before this fence, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // // Thread sanitizer does not support atomic fences. Use an atomic load // instead. (*ptr).ref_count.load(Ordering::Acquire); // Drop the data drop(Box::from_raw(ptr)); } impl Shared { fn is_unique(&self) -> bool { // The goal is to check if the current handle is the only handle // that currently has access to the buffer. This is done by // checking if the `ref_count` is currently 1. // // The `Acquire` ordering synchronizes with the `Release` as // part of the `fetch_sub` in `release_shared`. The `fetch_sub` // operation guarantees that any mutations done in other threads // are ordered before the `ref_count` is decremented. As such, // this `Acquire` will guarantee that those mutations are // visible to the current thread. 
self.ref_count.load(Ordering::Acquire) == 1 } } #[inline] fn original_capacity_to_repr(cap: usize) -> usize { let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); cmp::min( width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH, ) } fn original_capacity_from_repr(repr: usize) -> usize { if repr == 0 { return 0; } 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) } /* #[test] fn test_original_capacity_to_repr() { assert_eq!(original_capacity_to_repr(0), 0); let max_width = 32; for width in 1..(max_width + 1) { let cap = 1 << width - 1; let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { 0 } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { width - MIN_ORIGINAL_CAPACITY_WIDTH } else { MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH }; assert_eq!(original_capacity_to_repr(cap), expected); if width > 1 { assert_eq!(original_capacity_to_repr(cap + 1), expected); } // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); assert_eq!(original_capacity_to_repr(cap + 76), expected); } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); } } } #[test] fn test_original_capacity_from_repr() { assert_eq!(0, original_capacity_from_repr(0)); let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; assert_eq!(min_cap, original_capacity_from_repr(1)); assert_eq!(min_cap * 2, original_capacity_from_repr(2)); assert_eq!(min_cap * 4, original_capacity_from_repr(3)); assert_eq!(min_cap * 8, original_capacity_from_repr(4)); assert_eq!(min_cap * 16, original_capacity_from_repr(5)); assert_eq!(min_cap * 32, original_capacity_from_repr(6)); assert_eq!(min_cap * 64, original_capacity_from_repr(7)); } */ unsafe impl Send for BytesMut {} unsafe impl Sync for BytesMut {} /* * * ===== PartialEq / PartialOrd ===== * */ impl PartialEq<[u8]> for BytesMut { fn eq(&self, other: &[u8]) -> bool { &**self == other } } impl PartialOrd<[u8]> for BytesMut { fn partial_cmp(&self, other: &[u8]) -> Option { (**self).partial_cmp(other) } } impl PartialEq for [u8] { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for [u8] { fn partial_cmp(&self, other: &BytesMut) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) } } impl PartialEq for BytesMut { fn eq(&self, other: &str) -> bool { &**self == other.as_bytes() } } impl PartialOrd for BytesMut { fn partial_cmp(&self, other: &str) -> Option { (**self).partial_cmp(other.as_bytes()) } } impl PartialEq for str { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for str { fn partial_cmp(&self, other: &BytesMut) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) } } impl PartialEq> for BytesMut { fn eq(&self, other: &Vec) -> bool { *self == other[..] } } impl PartialOrd> for BytesMut { fn partial_cmp(&self, other: &Vec) -> Option { (**self).partial_cmp(&other[..]) } } impl PartialEq for Vec { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for Vec { fn partial_cmp(&self, other: &BytesMut) -> Option { other.partial_cmp(self) } } impl PartialEq for BytesMut { fn eq(&self, other: &String) -> bool { *self == other[..] 
} } impl PartialOrd for BytesMut { fn partial_cmp(&self, other: &String) -> Option { (**self).partial_cmp(other.as_bytes()) } } impl PartialEq for String { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for String { fn partial_cmp(&self, other: &BytesMut) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) } } impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut where BytesMut: PartialEq, { fn eq(&self, other: &&'a T) -> bool { *self == **other } } impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut where BytesMut: PartialOrd, { fn partial_cmp(&self, other: &&'a T) -> Option { self.partial_cmp(*other) } } impl PartialEq for &[u8] { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for &[u8] { fn partial_cmp(&self, other: &BytesMut) -> Option { <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) } } impl PartialEq for &str { fn eq(&self, other: &BytesMut) -> bool { *other == *self } } impl PartialOrd for &str { fn partial_cmp(&self, other: &BytesMut) -> Option { other.partial_cmp(self) } } impl PartialEq for Bytes { fn eq(&self, other: &BytesMut) -> bool { other[..] == self[..] } } impl PartialEq for BytesMut { fn eq(&self, other: &Bytes) -> bool { other[..] == self[..] } } impl From for Vec { fn from(mut bytes: BytesMut) -> Self { let kind = bytes.kind(); let mut vec = if kind == KIND_VEC { unsafe { let (off, _) = bytes.get_vec_pos(); rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off) } } else if kind == KIND_ARC { let shared = bytes.data as *mut Shared; if unsafe { (*shared).is_unique() } { let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new()); unsafe { release_shared(shared) }; vec } else { return bytes.deref().to_vec(); } } else { return bytes.deref().to_vec(); }; let len = bytes.len; unsafe { ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len); vec.set_len(len); } mem::forget(bytes); vec } } #[inline] fn vptr(ptr: *mut u8) -> NonNull { if cfg!(debug_assertions) { NonNull::new(ptr).expect("Vec pointer should be non-null") } else { unsafe { NonNull::new_unchecked(ptr) } } } /// Returns a dangling pointer with the given address. This is used to store /// integer data in pointer fields. /// /// It is equivalent to `addr as *mut T`, but this fails on miri when strict /// provenance checking is enabled. #[inline] fn invalid_ptr(addr: usize) -> *mut T { let ptr = core::ptr::null_mut::().wrapping_add(addr); debug_assert_eq!(ptr as usize, addr); ptr.cast::() } /// Precondition: dst >= original /// /// The following line is equivalent to: /// /// ```rust,ignore /// self.ptr.as_ptr().offset_from(ptr) as usize; /// ``` /// /// But due to min rust is 1.39 and it is only stablised /// in 1.47, we cannot use it. 
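///
/// For illustration (hypothetical addresses, not from the original comment):
/// if `original` points at address `0x1000` and `dst` at `0x1006`, the
/// function returns `6`.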
#[inline] fn offset_from(dst: *mut u8, original: *mut u8) -> usize { debug_assert!(dst >= original); dst as usize - original as usize } unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec { let ptr = ptr.offset(-(off as isize)); len += off; cap += off; Vec::from_raw_parts(ptr, len, cap) } // ===== impl SharedVtable ===== static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, to_vec: shared_v_to_vec, drop: shared_v_drop, }; unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Relaxed) as *mut Shared; increment_shared(shared); let data = AtomicPtr::new(shared as *mut ()); Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); if (*shared).is_unique() { let shared = &mut *shared; // Drop shared let mut vec = mem::replace(&mut shared.vec, Vec::new()); release_shared(shared); // Copy back buffer ptr::copy(ptr, vec.as_mut_ptr(), len); vec.set_len(len); vec } else { let v = slice::from_raw_parts(ptr, len).to_vec(); release_shared(shared); v } } unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { release_shared(*shared as *mut Shared); }); } // compile-fails /// ```compile_fail /// use bytes::BytesMut; /// #[deny(unused_must_use)] /// { /// let mut b1 = BytesMut::from("hello world"); /// b1.split_to(6); /// } /// ``` fn _split_to_must_use() {} /// ```compile_fail /// use bytes::BytesMut; /// #[deny(unused_must_use)] /// { /// let mut b1 = BytesMut::from("hello world"); /// b1.split_off(6); /// } /// ``` fn _split_off_must_use() {} /// ```compile_fail /// use bytes::BytesMut; /// #[deny(unused_must_use)] /// { /// let mut b1 = BytesMut::from("hello world"); /// b1.split(); /// } /// ``` fn _split_must_use() {} // fuzz tests #[cfg(all(test, loom))] mod fuzz { use loom::sync::Arc; use loom::thread; use super::BytesMut; use crate::Bytes; #[test] fn bytes_mut_cloning_frozen() { loom::model(|| { let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze(); let addr = a.as_ptr() as usize; // test the Bytes::clone is Sync by putting it in an Arc let a1 = Arc::new(a); let a2 = a1.clone(); let t1 = thread::spawn(move || { let b: Bytes = (*a1).clone(); assert_eq!(b.as_ptr() as usize, addr); }); let t2 = thread::spawn(move || { let b: Bytes = (*a2).clone(); assert_eq!(b.as_ptr() as usize, addr); }); t1.join().unwrap(); t2.join().unwrap(); }); } } bytes-1.5.0/src/fmt/debug.rs000064400000000000000000000030101046102023000137600ustar 00000000000000use core::fmt::{Debug, Formatter, Result}; use super::BytesRef; use crate::{Bytes, BytesMut}; /// Alternative implementation of `std::fmt::Debug` for byte slice. /// /// Standard `Debug` implementation for `[u8]` is comma separated /// list of numbers. Since large amount of byte strings are in fact /// ASCII strings or contain a lot of ASCII strings (e. g. HTTP), /// it is convenient to print strings as ASCII when possible. 
impl Debug for BytesRef<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "b\"")?; for &b in self.0 { // https://doc.rust-lang.org/reference/tokens.html#byte-escapes if b == b'\n' { write!(f, "\\n")?; } else if b == b'\r' { write!(f, "\\r")?; } else if b == b'\t' { write!(f, "\\t")?; } else if b == b'\\' || b == b'"' { write!(f, "\\{}", b as char)?; } else if b == b'\0' { write!(f, "\\0")?; // ASCII printable } else if (0x20..0x7f).contains(&b) { write!(f, "{}", b as char)?; } else { write!(f, "\\x{:02x}", b)?; } } write!(f, "\"")?; Ok(()) } } impl Debug for Bytes { fn fmt(&self, f: &mut Formatter<'_>) -> Result { Debug::fmt(&BytesRef(self.as_ref()), f) } } impl Debug for BytesMut { fn fmt(&self, f: &mut Formatter<'_>) -> Result { Debug::fmt(&BytesRef(self.as_ref()), f) } } bytes-1.5.0/src/fmt/hex.rs000064400000000000000000000014701046102023000134660ustar 00000000000000use core::fmt::{Formatter, LowerHex, Result, UpperHex}; use super::BytesRef; use crate::{Bytes, BytesMut}; impl LowerHex for BytesRef<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> Result { for &b in self.0 { write!(f, "{:02x}", b)?; } Ok(()) } } impl UpperHex for BytesRef<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> Result { for &b in self.0 { write!(f, "{:02X}", b)?; } Ok(()) } } macro_rules! hex_impl { ($tr:ident, $ty:ty) => { impl $tr for $ty { fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&BytesRef(self.as_ref()), f) } } }; } hex_impl!(LowerHex, Bytes); hex_impl!(LowerHex, BytesMut); hex_impl!(UpperHex, Bytes); hex_impl!(UpperHex, BytesMut); bytes-1.5.0/src/fmt/mod.rs000064400000000000000000000001571046102023000134620ustar 00000000000000mod debug; mod hex; /// `BytesRef` is not a part of public API of bytes crate. struct BytesRef<'a>(&'a [u8]); bytes-1.5.0/src/lib.rs000064400000000000000000000067001046102023000126630ustar 00000000000000#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] #![no_std] #![cfg_attr(docsrs, feature(doc_cfg))] //! Provides abstractions for working with bytes. //! //! The `bytes` crate provides an efficient byte buffer structure //! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer //! implementations ([`Buf`], [`BufMut`]). //! //! [`Buf`]: trait.Buf.html //! [`BufMut`]: trait.BufMut.html //! //! # `Bytes` //! //! `Bytes` is an efficient container for storing and operating on contiguous //! slices of memory. It is intended for use primarily in networking code, but //! could have applications elsewhere as well. //! //! `Bytes` values facilitate zero-copy network programming by allowing multiple //! `Bytes` objects to point to the same underlying memory. This is managed by //! using a reference count to track when the memory is no longer needed and can //! be freed. //! //! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` //! or `Vec`), but usually a `BytesMut` is used first and written to. For //! example: //! //! ```rust //! use bytes::{BytesMut, BufMut}; //! //! let mut buf = BytesMut::with_capacity(1024); //! buf.put(&b"hello world"[..]); //! buf.put_u16(1234); //! //! let a = buf.split(); //! assert_eq!(a, b"hello world\x04\xD2"[..]); //! //! buf.put(&b"goodbye world"[..]); //! //! let b = buf.split(); //! assert_eq!(b, b"goodbye world"[..]); //! //! assert_eq!(buf.capacity(), 998); //! ``` //! //! In the above example, only a single buffer of 1024 is allocated. The handles //! 
`a` and `b` will share the underlying buffer and maintain indices tracking //! the view into the buffer represented by the handle. //! //! See the [struct docs] for more details. //! //! [struct docs]: struct.Bytes.html //! //! # `Buf`, `BufMut` //! //! These two traits provide read and write access to buffers. The underlying //! storage may or may not be in contiguous memory. For example, `Bytes` is a //! buffer that guarantees contiguous memory, but a [rope] stores the bytes in //! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current //! position in the underlying byte storage. When bytes are read or written, the //! cursor is advanced. //! //! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) //! //! ## Relation with `Read` and `Write` //! //! At first glance, it may seem that `Buf` and `BufMut` overlap in //! functionality with `std::io::Read` and `std::io::Write`. However, they //! serve different purposes. A buffer is the value that is provided as an //! argument to `Read::read` and `Write::write`. `Read` and `Write` may then //! perform a syscall, which has the potential of failing. Operations on `Buf` //! and `BufMut` are infallible. extern crate alloc; #[cfg(feature = "std")] extern crate std; pub mod buf; pub use crate::buf::{Buf, BufMut}; mod bytes; mod bytes_mut; mod fmt; mod loom; pub use crate::bytes::Bytes; pub use crate::bytes_mut::BytesMut; // Optional Serde support #[cfg(feature = "serde")] mod serde; #[inline(never)] #[cold] fn abort() -> ! { #[cfg(feature = "std")] { std::process::abort(); } #[cfg(not(feature = "std"))] { struct Abort; impl Drop for Abort { fn drop(&mut self) { panic!(); } } let _a = Abort; panic!("abort"); } } bytes-1.5.0/src/loom.rs000064400000000000000000000014041046102023000130570ustar 00000000000000#[cfg(not(all(test, loom)))] pub(crate) mod sync { pub(crate) mod atomic { pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; pub(crate) trait AtomicMut { fn with_mut(&mut self, f: F) -> R where F: FnOnce(&mut *mut T) -> R; } impl AtomicMut for AtomicPtr { fn with_mut(&mut self, f: F) -> R where F: FnOnce(&mut *mut T) -> R, { f(self.get_mut()) } } } } #[cfg(all(test, loom))] pub(crate) mod sync { pub(crate) mod atomic { pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; pub(crate) trait AtomicMut {} } } bytes-1.5.0/src/serde.rs000064400000000000000000000050371046102023000132210ustar 00000000000000use super::{Bytes, BytesMut}; use alloc::string::String; use alloc::vec::Vec; use core::{cmp, fmt}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; macro_rules! serde_impl { ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => { impl Serialize for $ty { #[inline] fn serialize(&self, serializer: S) -> Result where S: Serializer, { serializer.serialize_bytes(&self) } } struct $visitor_ty; impl<'de> de::Visitor<'de> for $visitor_ty { type Value = $ty; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("byte array") } #[inline] fn visit_seq(self, mut seq: V) -> Result where V: de::SeqAccess<'de>, { let len = cmp::min(seq.size_hint().unwrap_or(0), 4096); let mut values: Vec = Vec::with_capacity(len); while let Some(value) = seq.next_element()? 
{ values.push(value); } Ok($ty::$from_vec(values)) } #[inline] fn visit_bytes(self, v: &[u8]) -> Result where E: de::Error, { Ok($ty::$from_slice(v)) } #[inline] fn visit_byte_buf(self, v: Vec) -> Result where E: de::Error, { Ok($ty::$from_vec(v)) } #[inline] fn visit_str(self, v: &str) -> Result where E: de::Error, { Ok($ty::$from_slice(v.as_bytes())) } #[inline] fn visit_string(self, v: String) -> Result where E: de::Error, { Ok($ty::$from_vec(v.into_bytes())) } } impl<'de> Deserialize<'de> for $ty { #[inline] fn deserialize(deserializer: D) -> Result<$ty, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_byte_buf($visitor_ty) } } }; } serde_impl!(Bytes, BytesVisitor, copy_from_slice, from); serde_impl!(BytesMut, BytesMutVisitor, from, from_vec); bytes-1.5.0/tests/test_buf.rs000064400000000000000000000051211046102023000142770ustar 00000000000000#![warn(rust_2018_idioms)] use bytes::Buf; #[cfg(feature = "std")] use std::io::IoSlice; #[test] fn test_fresh_cursor_vec() { let mut buf = &b"hello"[..]; assert_eq!(buf.remaining(), 5); assert_eq!(buf.chunk(), b"hello"); buf.advance(2); assert_eq!(buf.remaining(), 3); assert_eq!(buf.chunk(), b"llo"); buf.advance(3); assert_eq!(buf.remaining(), 0); assert_eq!(buf.chunk(), b""); } #[test] fn test_get_u8() { let mut buf = &b"\x21zomg"[..]; assert_eq!(0x21, buf.get_u8()); } #[test] fn test_get_u16() { let mut buf = &b"\x21\x54zomg"[..]; assert_eq!(0x2154, buf.get_u16()); let mut buf = &b"\x21\x54zomg"[..]; assert_eq!(0x5421, buf.get_u16_le()); } #[test] #[should_panic] fn test_get_u16_buffer_underflow() { let mut buf = &b"\x21"[..]; buf.get_u16(); } #[cfg(feature = "std")] #[test] fn test_bufs_vec() { let buf = &b"hello world"[..]; let b1: &[u8] = &mut []; let b2: &[u8] = &mut []; let mut dst = [IoSlice::new(b1), IoSlice::new(b2)]; assert_eq!(1, buf.chunks_vectored(&mut dst[..])); } #[test] fn test_vec_deque() { use std::collections::VecDeque; let mut buffer: VecDeque = VecDeque::new(); buffer.extend(b"hello world"); assert_eq!(11, buffer.remaining()); assert_eq!(b"hello world", buffer.chunk()); buffer.advance(6); assert_eq!(b"world", buffer.chunk()); buffer.extend(b" piece"); let mut out = [0; 11]; buffer.copy_to_slice(&mut out); assert_eq!(b"world piece", &out[..]); } #[allow(unused_allocation)] // This is intentional. #[test] fn test_deref_buf_forwards() { struct Special; impl Buf for Special { fn remaining(&self) -> usize { unreachable!("remaining"); } fn chunk(&self) -> &[u8] { unreachable!("chunk"); } fn advance(&mut self, _: usize) { unreachable!("advance"); } fn get_u8(&mut self) -> u8 { // specialized! 
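// Returning a sentinel byte instead of touching the (unimplemented) buffer
// methods shows that this override, not the default `get_u8`, was called.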
b'x' } } // these should all use the specialized method assert_eq!(Special.get_u8(), b'x'); assert_eq!((&mut Special as &mut dyn Buf).get_u8(), b'x'); assert_eq!((Box::new(Special) as Box).get_u8(), b'x'); assert_eq!(Box::new(Special).get_u8(), b'x'); } #[test] fn copy_to_bytes_less() { let mut buf = &b"hello world"[..]; let bytes = buf.copy_to_bytes(5); assert_eq!(bytes, &b"hello"[..]); assert_eq!(buf, &b" world"[..]) } #[test] #[should_panic] fn copy_to_bytes_overflow() { let mut buf = &b"hello world"[..]; let _bytes = buf.copy_to_bytes(12); } bytes-1.5.0/tests/test_buf_mut.rs000064400000000000000000000146711046102023000151760ustar 00000000000000#![warn(rust_2018_idioms)] use bytes::buf::UninitSlice; use bytes::{BufMut, BytesMut}; use core::fmt::Write; use core::mem::MaybeUninit; use core::usize; #[test] fn test_vec_as_mut_buf() { let mut buf = Vec::with_capacity(64); assert_eq!(buf.remaining_mut(), isize::MAX as usize); assert!(buf.chunk_mut().len() >= 64); buf.put(&b"zomg"[..]); assert_eq!(&buf, b"zomg"); assert_eq!(buf.remaining_mut(), isize::MAX as usize - 4); assert_eq!(buf.capacity(), 64); for _ in 0..16 { buf.put(&b"zomg"[..]); } assert_eq!(buf.len(), 68); } #[test] fn test_vec_put_bytes() { let mut buf = Vec::new(); buf.push(17); buf.put_bytes(19, 2); assert_eq!([17, 19, 19], &buf[..]); } #[test] fn test_put_u8() { let mut buf = Vec::with_capacity(8); buf.put_u8(33); assert_eq!(b"\x21", &buf[..]); } #[test] fn test_put_u16() { let mut buf = Vec::with_capacity(8); buf.put_u16(8532); assert_eq!(b"\x21\x54", &buf[..]); buf.clear(); buf.put_u16_le(8532); assert_eq!(b"\x54\x21", &buf[..]); } #[test] fn test_put_int() { let mut buf = Vec::with_capacity(8); buf.put_int(0x1020304050607080, 3); assert_eq!(b"\x60\x70\x80", &buf[..]); } #[test] #[should_panic] fn test_put_int_nbytes_overflow() { let mut buf = Vec::with_capacity(8); buf.put_int(0x1020304050607080, 9); } #[test] fn test_put_int_le() { let mut buf = Vec::with_capacity(8); buf.put_int_le(0x1020304050607080, 3); assert_eq!(b"\x80\x70\x60", &buf[..]); } #[test] #[should_panic] fn test_put_int_le_nbytes_overflow() { let mut buf = Vec::with_capacity(8); buf.put_int_le(0x1020304050607080, 9); } #[test] #[should_panic(expected = "cannot advance")] fn test_vec_advance_mut() { // Verify fix for #354 let mut buf = Vec::with_capacity(8); unsafe { buf.advance_mut(12); } } #[test] fn test_clone() { let mut buf = BytesMut::with_capacity(100); buf.write_str("this is a test").unwrap(); let buf2 = buf.clone(); buf.write_str(" of our emergency broadcast system").unwrap(); assert!(buf != buf2); } fn do_test_slice_small(make: impl Fn(&mut [u8]) -> &mut T) where for<'r> &'r mut T: BufMut, { let mut buf = [b'X'; 8]; let mut slice = make(&mut buf[..]); slice.put_bytes(b'A', 2); slice.put_u8(b'B'); slice.put_slice(b"BCC"); assert_eq!(2, slice.remaining_mut()); assert_eq!(b"AABBCCXX", &buf[..]); let mut slice = make(&mut buf[..]); slice.put_u32(0x61626364); assert_eq!(4, slice.remaining_mut()); assert_eq!(b"abcdCCXX", &buf[..]); let mut slice = make(&mut buf[..]); slice.put_u32_le(0x30313233); assert_eq!(4, slice.remaining_mut()); assert_eq!(b"3210CCXX", &buf[..]); } fn do_test_slice_large(make: impl Fn(&mut [u8]) -> &mut T) where for<'r> &'r mut T: BufMut, { const LEN: usize = 100; const FILL: [u8; LEN] = [b'Y'; LEN]; let test = |fill: &dyn Fn(&mut &mut T, usize)| { for buf_len in 0..LEN { let mut buf = [b'X'; LEN]; for fill_len in 0..=buf_len { let mut slice = make(&mut buf[..buf_len]); fill(&mut slice, fill_len); assert_eq!(buf_len - fill_len, 
slice.remaining_mut()); let (head, tail) = buf.split_at(fill_len); assert_eq!(&FILL[..fill_len], head); assert!(tail.iter().all(|b| *b == b'X')); } } }; test(&|slice, fill_len| slice.put_slice(&FILL[..fill_len])); test(&|slice, fill_len| slice.put_bytes(FILL[0], fill_len)); } fn do_test_slice_put_slice_panics(make: impl Fn(&mut [u8]) -> &mut T) where for<'r> &'r mut T: BufMut, { let mut buf = [b'X'; 4]; let mut slice = make(&mut buf[..]); slice.put_slice(b"12345"); } fn do_test_slice_put_bytes_panics(make: impl Fn(&mut [u8]) -> &mut T) where for<'r> &'r mut T: BufMut, { let mut buf = [b'X'; 4]; let mut slice = make(&mut buf[..]); slice.put_bytes(b'1', 5); } #[test] fn test_slice_buf_mut_small() { do_test_slice_small(|x| x); } #[test] fn test_slice_buf_mut_large() { do_test_slice_large(|x| x); } #[test] #[should_panic] fn test_slice_buf_mut_put_slice_overflow() { do_test_slice_put_slice_panics(|x| x); } #[test] #[should_panic] fn test_slice_buf_mut_put_bytes_overflow() { do_test_slice_put_bytes_panics(|x| x); } fn make_maybe_uninit_slice(slice: &mut [u8]) -> &mut [MaybeUninit] { // SAFETY: [u8] has the same layout as [MaybeUninit]. unsafe { core::mem::transmute(slice) } } #[test] fn test_maybe_uninit_buf_mut_small() { do_test_slice_small(make_maybe_uninit_slice); } #[test] fn test_maybe_uninit_buf_mut_large() { do_test_slice_large(make_maybe_uninit_slice); } #[test] #[should_panic] fn test_maybe_uninit_buf_mut_put_slice_overflow() { do_test_slice_put_slice_panics(make_maybe_uninit_slice); } #[test] #[should_panic] fn test_maybe_uninit_buf_mut_put_bytes_overflow() { do_test_slice_put_bytes_panics(make_maybe_uninit_slice); } #[allow(unused_allocation)] // This is intentional. #[test] fn test_deref_bufmut_forwards() { struct Special; unsafe impl BufMut for Special { fn remaining_mut(&self) -> usize { unreachable!("remaining_mut"); } fn chunk_mut(&mut self) -> &mut UninitSlice { unreachable!("chunk_mut"); } unsafe fn advance_mut(&mut self, _: usize) { unreachable!("advance"); } fn put_u8(&mut self, _: u8) { // specialized! 
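// Intentionally a no-op: the test below only needs to observe that this
// override is reached instead of the default implementation.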
} } // these should all use the specialized method Special.put_u8(b'x'); (&mut Special as &mut dyn BufMut).put_u8(b'x'); (Box::new(Special) as Box).put_u8(b'x'); Box::new(Special).put_u8(b'x'); } #[test] #[should_panic] fn write_byte_panics_if_out_of_bounds() { let mut data = [b'b', b'a', b'r']; let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; slice.write_byte(4, b'f'); } #[test] #[should_panic] fn copy_from_slice_panics_if_different_length_1() { let mut data = [b'b', b'a', b'r']; let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; slice.copy_from_slice(b"a"); } #[test] #[should_panic] fn copy_from_slice_panics_if_different_length_2() { let mut data = [b'b', b'a', b'r']; let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; slice.copy_from_slice(b"abcd"); } bytes-1.5.0/tests/test_bytes.rs000064400000000000000000000701551046102023000146620ustar 00000000000000#![warn(rust_2018_idioms)] use bytes::{Buf, BufMut, Bytes, BytesMut}; use std::usize; const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb"; const SHORT: &[u8] = b"hello world"; fn is_sync() {} fn is_send() {} #[test] fn test_bounds() { is_sync::(); is_sync::(); is_send::(); is_send::(); } #[test] fn test_layout() { use std::mem; assert_eq!( mem::size_of::(), mem::size_of::() * 4, "Bytes size should be 4 words", ); assert_eq!( mem::size_of::(), mem::size_of::() * 4, "BytesMut should be 4 words", ); assert_eq!( mem::size_of::(), mem::size_of::>(), "Bytes should be same size as Option", ); assert_eq!( mem::size_of::(), mem::size_of::>(), "BytesMut should be same size as Option", ); } #[test] fn from_slice() { let a = Bytes::from(&b"abcdefgh"[..]); assert_eq!(a, b"abcdefgh"[..]); assert_eq!(a, &b"abcdefgh"[..]); assert_eq!(a, Vec::from(&b"abcdefgh"[..])); assert_eq!(b"abcdefgh"[..], a); assert_eq!(&b"abcdefgh"[..], a); assert_eq!(Vec::from(&b"abcdefgh"[..]), a); let a = BytesMut::from(&b"abcdefgh"[..]); assert_eq!(a, b"abcdefgh"[..]); assert_eq!(a, &b"abcdefgh"[..]); assert_eq!(a, Vec::from(&b"abcdefgh"[..])); assert_eq!(b"abcdefgh"[..], a); assert_eq!(&b"abcdefgh"[..], a); assert_eq!(Vec::from(&b"abcdefgh"[..]), a); } #[test] fn fmt() { let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); let b = "b\"abcdefg\""; assert_eq!(a, b); let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); assert_eq!(a, b); } #[test] fn fmt_write() { use std::fmt::Write; use std::iter::FromIterator; let s = String::from_iter((0..10).map(|_| "abcdefg")); let mut a = BytesMut::with_capacity(64); write!(a, "{}", &s[..64]).unwrap(); assert_eq!(a, s[..64].as_bytes()); let mut b = BytesMut::with_capacity(64); write!(b, "{}", &s[..32]).unwrap(); write!(b, "{}", &s[32..64]).unwrap(); assert_eq!(b, s[..64].as_bytes()); let mut c = BytesMut::with_capacity(64); write!(c, "{}", s).unwrap(); assert_eq!(c, s[..].as_bytes()); } #[test] fn len() { let a = Bytes::from(&b"abcdefg"[..]); assert_eq!(a.len(), 7); let a = BytesMut::from(&b"abcdefg"[..]); assert_eq!(a.len(), 7); let a = Bytes::from(&b""[..]); assert!(a.is_empty()); let a = BytesMut::from(&b""[..]); assert!(a.is_empty()); } #[test] fn index() { let a = Bytes::from(&b"hello world"[..]); assert_eq!(a[0..5], *b"hello"); } #[test] fn slice() { let a = Bytes::from(&b"hello world"[..]); let b = a.slice(3..5); assert_eq!(b, b"lo"[..]); let b = a.slice(0..0); assert_eq!(b, b""[..]); let b = a.slice(3..3); assert_eq!(b, b""[..]); let b = a.slice(a.len()..a.len()); assert_eq!(b, b""[..]); let b = a.slice(..5); assert_eq!(b, 
b"hello"[..]); let b = a.slice(3..); assert_eq!(b, b"lo world"[..]); } #[test] #[should_panic] fn slice_oob_1() { let a = Bytes::from(&b"hello world"[..]); a.slice(5..44); } #[test] #[should_panic] fn slice_oob_2() { let a = Bytes::from(&b"hello world"[..]); a.slice(44..49); } #[test] fn split_off() { let mut hello = Bytes::from(&b"helloworld"[..]); let world = hello.split_off(5); assert_eq!(hello, &b"hello"[..]); assert_eq!(world, &b"world"[..]); let mut hello = BytesMut::from(&b"helloworld"[..]); let world = hello.split_off(5); assert_eq!(hello, &b"hello"[..]); assert_eq!(world, &b"world"[..]); } #[test] #[should_panic] fn split_off_oob() { let mut hello = Bytes::from(&b"helloworld"[..]); let _ = hello.split_off(44); } #[test] fn split_off_uninitialized() { let mut bytes = BytesMut::with_capacity(1024); let other = bytes.split_off(128); assert_eq!(bytes.len(), 0); assert_eq!(bytes.capacity(), 128); assert_eq!(other.len(), 0); assert_eq!(other.capacity(), 896); } #[test] fn split_off_to_loop() { let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; for i in 0..(s.len() + 1) { { let mut bytes = Bytes::from(&s[..]); let off = bytes.split_off(i); assert_eq!(i, bytes.len()); let mut sum = Vec::new(); sum.extend(bytes.iter()); sum.extend(off.iter()); assert_eq!(&s[..], &sum[..]); } { let mut bytes = BytesMut::from(&s[..]); let off = bytes.split_off(i); assert_eq!(i, bytes.len()); let mut sum = Vec::new(); sum.extend(&bytes); sum.extend(&off); assert_eq!(&s[..], &sum[..]); } { let mut bytes = Bytes::from(&s[..]); let off = bytes.split_to(i); assert_eq!(i, off.len()); let mut sum = Vec::new(); sum.extend(off.iter()); sum.extend(bytes.iter()); assert_eq!(&s[..], &sum[..]); } { let mut bytes = BytesMut::from(&s[..]); let off = bytes.split_to(i); assert_eq!(i, off.len()); let mut sum = Vec::new(); sum.extend(&off); sum.extend(&bytes); assert_eq!(&s[..], &sum[..]); } } } #[test] fn split_to_1() { // Static let mut a = Bytes::from_static(SHORT); let b = a.split_to(4); assert_eq!(SHORT[4..], a); assert_eq!(SHORT[..4], b); // Allocated let mut a = Bytes::copy_from_slice(LONG); let b = a.split_to(4); assert_eq!(LONG[4..], a); assert_eq!(LONG[..4], b); let mut a = Bytes::copy_from_slice(LONG); let b = a.split_to(30); assert_eq!(LONG[30..], a); assert_eq!(LONG[..30], b); } #[test] fn split_to_2() { let mut a = Bytes::from(LONG); assert_eq!(LONG, a); let b = a.split_to(1); assert_eq!(LONG[1..], a); drop(b); } #[test] #[should_panic] fn split_to_oob() { let mut hello = Bytes::from(&b"helloworld"[..]); let _ = hello.split_to(33); } #[test] #[should_panic] fn split_to_oob_mut() { let mut hello = BytesMut::from(&b"helloworld"[..]); let _ = hello.split_to(33); } #[test] #[should_panic] fn split_to_uninitialized() { let mut bytes = BytesMut::with_capacity(1024); let _other = bytes.split_to(128); } #[test] fn split_off_to_at_gt_len() { fn make_bytes() -> Bytes { let mut bytes = BytesMut::with_capacity(100); bytes.put_slice(&[10, 20, 30, 40]); bytes.freeze() } use std::panic; let _ = make_bytes().split_to(4); let _ = make_bytes().split_off(4); assert!(panic::catch_unwind(move || { let _ = make_bytes().split_to(5); }) .is_err()); assert!(panic::catch_unwind(move || { let _ = make_bytes().split_off(5); }) .is_err()); } #[test] fn truncate() { let s = &b"helloworld"[..]; let mut hello = Bytes::from(s); hello.truncate(15); assert_eq!(hello, s); hello.truncate(10); assert_eq!(hello, s); hello.truncate(5); assert_eq!(hello, "hello"); } #[test] fn freeze_clone_shared() { let s = &b"abcdefgh"[..]; let b = 
BytesMut::from(s).split().freeze(); assert_eq!(b, s); let c = b.clone(); assert_eq!(c, s); } #[test] fn freeze_clone_unique() { let s = &b"abcdefgh"[..]; let b = BytesMut::from(s).freeze(); assert_eq!(b, s); let c = b.clone(); assert_eq!(c, s); } #[test] fn freeze_after_advance() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); b.advance(1); assert_eq!(b, s[1..]); let b = b.freeze(); // Verify fix for #352. Previously, freeze would ignore the start offset // for BytesMuts in Vec mode. assert_eq!(b, s[1..]); } #[test] fn freeze_after_advance_arc() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); // Make b Arc let _ = b.split_to(0); b.advance(1); assert_eq!(b, s[1..]); let b = b.freeze(); assert_eq!(b, s[1..]); } #[test] fn freeze_after_split_to() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); let _ = b.split_to(1); assert_eq!(b, s[1..]); let b = b.freeze(); assert_eq!(b, s[1..]); } #[test] fn freeze_after_truncate() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); b.truncate(7); assert_eq!(b, s[..7]); let b = b.freeze(); assert_eq!(b, s[..7]); } #[test] fn freeze_after_truncate_arc() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); // Make b Arc let _ = b.split_to(0); b.truncate(7); assert_eq!(b, s[..7]); let b = b.freeze(); assert_eq!(b, s[..7]); } #[test] fn freeze_after_split_off() { let s = &b"abcdefgh"[..]; let mut b = BytesMut::from(s); let _ = b.split_off(7); assert_eq!(b, s[..7]); let b = b.freeze(); assert_eq!(b, s[..7]); } #[test] fn fns_defined_for_bytes_mut() { let mut bytes = BytesMut::from(&b"hello world"[..]); let _ = bytes.as_ptr(); let _ = bytes.as_mut_ptr(); // Iterator let v: Vec = bytes.as_ref().iter().cloned().collect(); assert_eq!(&v[..], bytes); } #[test] fn reserve_convert() { // Vec -> Vec let mut bytes = BytesMut::from(LONG); bytes.reserve(64); assert_eq!(bytes.capacity(), LONG.len() + 64); // Arc -> Vec let mut bytes = BytesMut::from(LONG); let a = bytes.split_to(30); bytes.reserve(128); assert!(bytes.capacity() >= bytes.len() + 128); drop(a); } #[test] fn reserve_growth() { let mut bytes = BytesMut::with_capacity(64); bytes.put("hello world".as_bytes()); let _ = bytes.split(); bytes.reserve(65); assert_eq!(bytes.capacity(), 117); } #[test] fn reserve_allocates_at_least_original_capacity() { let mut bytes = BytesMut::with_capacity(1024); for i in 0..1020 { bytes.put_u8(i as u8); } let _other = bytes.split(); bytes.reserve(16); assert_eq!(bytes.capacity(), 1024); } #[test] #[cfg_attr(miri, ignore)] // Miri is too slow fn reserve_max_original_capacity_value() { const SIZE: usize = 128 * 1024; let mut bytes = BytesMut::with_capacity(SIZE); for _ in 0..SIZE { bytes.put_u8(0u8); } let _other = bytes.split(); bytes.reserve(16); assert_eq!(bytes.capacity(), 64 * 1024); } #[test] fn reserve_vec_recycling() { let mut bytes = BytesMut::with_capacity(16); assert_eq!(bytes.capacity(), 16); let addr = bytes.as_ptr() as usize; bytes.put("0123456789012345".as_bytes()); assert_eq!(bytes.as_ptr() as usize, addr); bytes.advance(10); assert_eq!(bytes.capacity(), 6); bytes.reserve(8); assert_eq!(bytes.capacity(), 16); assert_eq!(bytes.as_ptr() as usize, addr); } #[test] fn reserve_in_arc_unique_does_not_overallocate() { let mut bytes = BytesMut::with_capacity(1000); let _ = bytes.split(); // now bytes is Arc and refcount == 1 assert_eq!(1000, bytes.capacity()); bytes.reserve(2001); assert_eq!(2001, bytes.capacity()); } #[test] fn reserve_in_arc_unique_doubles() { let mut bytes = BytesMut::with_capacity(1000); let _ = bytes.split(); 
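// Illustrative note (not in the original test): requesting one byte more than
// the current capacity is expected to grow the buffer geometrically, so the
// assertions below check for a doubled capacity of 2000 rather than 1001.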
// now bytes is Arc and refcount == 1 assert_eq!(1000, bytes.capacity()); bytes.reserve(1001); assert_eq!(2000, bytes.capacity()); } #[test] fn reserve_in_arc_unique_does_not_overallocate_after_split() { let mut bytes = BytesMut::from(LONG); let orig_capacity = bytes.capacity(); drop(bytes.split_off(LONG.len() / 2)); // now bytes is Arc and refcount == 1 let new_capacity = bytes.capacity(); bytes.reserve(orig_capacity - new_capacity); assert_eq!(bytes.capacity(), orig_capacity); } #[test] fn reserve_in_arc_unique_does_not_overallocate_after_multiple_splits() { let mut bytes = BytesMut::from(LONG); let orig_capacity = bytes.capacity(); for _ in 0..10 { drop(bytes.split_off(LONG.len() / 2)); // now bytes is Arc and refcount == 1 let new_capacity = bytes.capacity(); bytes.reserve(orig_capacity - new_capacity); } assert_eq!(bytes.capacity(), orig_capacity); } #[test] fn reserve_in_arc_nonunique_does_not_overallocate() { let mut bytes = BytesMut::with_capacity(1000); let _copy = bytes.split(); // now bytes is Arc and refcount == 2 assert_eq!(1000, bytes.capacity()); bytes.reserve(2001); assert_eq!(2001, bytes.capacity()); } /// This function tests `BytesMut::reserve_inner`, where `BytesMut` holds /// a unique reference to the shared vector and decide to reuse it /// by reallocating the `Vec`. #[test] fn reserve_shared_reuse() { let mut bytes = BytesMut::with_capacity(1000); bytes.put_slice(b"Hello, World!"); drop(bytes.split()); bytes.put_slice(b"!123ex123,sadchELLO,_wORLD!"); // Use split_off so that v.capacity() - self.cap != off drop(bytes.split_off(9)); assert_eq!(&*bytes, b"!123ex123"); bytes.reserve(2000); assert_eq!(&*bytes, b"!123ex123"); assert_eq!(bytes.capacity(), 2009); } #[test] fn extend_mut() { let mut bytes = BytesMut::with_capacity(0); bytes.extend(LONG); assert_eq!(*bytes, LONG[..]); } #[test] fn extend_from_slice_mut() { for &i in &[3, 34] { let mut bytes = BytesMut::new(); bytes.extend_from_slice(&LONG[..i]); bytes.extend_from_slice(&LONG[i..]); assert_eq!(LONG[..], *bytes); } } #[test] fn extend_mut_from_bytes() { let mut bytes = BytesMut::with_capacity(0); bytes.extend([Bytes::from(LONG)]); assert_eq!(*bytes, LONG[..]); } #[test] fn extend_mut_without_size_hint() { let mut bytes = BytesMut::with_capacity(0); let mut long_iter = LONG.iter(); // Use iter::from_fn since it doesn't know a size_hint bytes.extend(std::iter::from_fn(|| long_iter.next())); assert_eq!(*bytes, LONG[..]); } #[test] fn from_static() { let mut a = Bytes::from_static(b"ab"); let b = a.split_off(1); assert_eq!(a, b"a"[..]); assert_eq!(b, b"b"[..]); } #[test] fn advance_static() { let mut a = Bytes::from_static(b"hello world"); a.advance(6); assert_eq!(a, &b"world"[..]); } #[test] fn advance_vec() { let mut a = Bytes::from(b"hello world boooo yah world zomg wat wat".to_vec()); a.advance(16); assert_eq!(a, b"o yah world zomg wat wat"[..]); a.advance(4); assert_eq!(a, b"h world zomg wat wat"[..]); a.advance(6); assert_eq!(a, b"d zomg wat wat"[..]); } #[test] fn advance_bytes_mut() { let mut a = BytesMut::from("hello world boooo yah world zomg wat wat"); a.advance(16); assert_eq!(a, b"o yah world zomg wat wat"[..]); a.advance(4); assert_eq!(a, b"h world zomg wat wat"[..]); // Reserve some space. a.reserve(1024); assert_eq!(a, b"h world zomg wat wat"[..]); a.advance(6); assert_eq!(a, b"d zomg wat wat"[..]); } #[test] #[should_panic] fn advance_past_len() { let mut a = BytesMut::from("hello world"); a.advance(20); } #[test] // Only run these tests on little endian systems. 
CI uses qemu for testing // big endian... and qemu doesn't really support threading all that well. #[cfg(any(miri, target_endian = "little"))] fn stress() { // Tests promoting a buffer from a vec -> shared in a concurrent situation use std::sync::{Arc, Barrier}; use std::thread; const THREADS: usize = 8; const ITERS: usize = if cfg!(miri) { 100 } else { 1_000 }; for i in 0..ITERS { let data = [i as u8; 256]; let buf = Arc::new(Bytes::copy_from_slice(&data[..])); let barrier = Arc::new(Barrier::new(THREADS)); let mut joins = Vec::with_capacity(THREADS); for _ in 0..THREADS { let c = barrier.clone(); let buf = buf.clone(); joins.push(thread::spawn(move || { c.wait(); let buf: Bytes = (*buf).clone(); drop(buf); })); } for th in joins { th.join().unwrap(); } assert_eq!(*buf, data[..]); } } #[test] fn partial_eq_bytesmut() { let bytes = Bytes::from(&b"The quick red fox"[..]); let bytesmut = BytesMut::from(&b"The quick red fox"[..]); assert!(bytes == bytesmut); assert!(bytesmut == bytes); let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]); assert!(bytes2 != bytesmut); assert!(bytesmut != bytes2); } /* #[test] fn bytes_unsplit_basic() { let buf = Bytes::from(&b"aaabbbcccddd"[..]); let splitted = buf.split_off(6); assert_eq!(b"aaabbb", &buf[..]); assert_eq!(b"cccddd", &splitted[..]); buf.unsplit(splitted); assert_eq!(b"aaabbbcccddd", &buf[..]); } #[test] fn bytes_unsplit_empty_other() { let buf = Bytes::from(&b"aaabbbcccddd"[..]); // empty other let other = Bytes::new(); buf.unsplit(other); assert_eq!(b"aaabbbcccddd", &buf[..]); } #[test] fn bytes_unsplit_empty_self() { // empty self let mut buf = Bytes::new(); let mut other = Bytes::with_capacity(64); other.extend_from_slice(b"aaabbbcccddd"); buf.unsplit(other); assert_eq!(b"aaabbbcccddd", &buf[..]); } #[test] fn bytes_unsplit_arc_different() { let mut buf = Bytes::with_capacity(64); buf.extend_from_slice(b"aaaabbbbeeee"); buf.split_off(8); //arc let mut buf2 = Bytes::with_capacity(64); buf2.extend_from_slice(b"ccccddddeeee"); buf2.split_off(8); //arc buf.unsplit(buf2); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn bytes_unsplit_arc_non_contiguous() { let mut buf = Bytes::with_capacity(64); buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf.unsplit(buf3); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn bytes_unsplit_two_split_offs() { let mut buf = Bytes::with_capacity(64); buf.extend_from_slice(b"aaaabbbbccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf2.unsplit(buf3); buf.unsplit(buf2); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn bytes_unsplit_overlapping_references() { let mut buf = Bytes::with_capacity(64); buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz"); let mut buf0010 = buf.slice(0..10); let buf1020 = buf.slice(10..20); let buf0515 = buf.slice(5..15); buf0010.unsplit(buf1020); assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]); assert_eq!(b"fghijklmno", &buf0515[..]); } */ #[test] fn bytes_mut_unsplit_basic() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaabbbcccddd"); let splitted = buf.split_off(6); assert_eq!(b"aaabbb", &buf[..]); assert_eq!(b"cccddd", &splitted[..]); buf.unsplit(splitted); assert_eq!(b"aaabbbcccddd", &buf[..]); } #[test] fn bytes_mut_unsplit_empty_other() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaabbbcccddd"); // empty other let other = BytesMut::new(); buf.unsplit(other); assert_eq!(b"aaabbbcccddd", 
&buf[..]); } #[test] fn bytes_mut_unsplit_empty_self() { // empty self let mut buf = BytesMut::new(); let mut other = BytesMut::with_capacity(64); other.extend_from_slice(b"aaabbbcccddd"); buf.unsplit(other); assert_eq!(b"aaabbbcccddd", &buf[..]); } #[test] fn bytes_mut_unsplit_other_keeps_capacity() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aabb"); // non empty other created "from" buf let mut other = buf.split_off(buf.len()); other.extend_from_slice(b"ccddee"); buf.unsplit(other); assert_eq!(buf.capacity(), 64); } #[test] fn bytes_mut_unsplit_empty_other_keeps_capacity() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aabbccddee"); // empty other created "from" buf let other = buf.split_off(buf.len()); buf.unsplit(other); assert_eq!(buf.capacity(), 64); } #[test] fn bytes_mut_unsplit_arc_different() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaaabbbbeeee"); let _ = buf.split_off(8); //arc let mut buf2 = BytesMut::with_capacity(64); buf2.extend_from_slice(b"ccccddddeeee"); let _ = buf2.split_off(8); //arc buf.unsplit(buf2); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn bytes_mut_unsplit_arc_non_contiguous() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf.unsplit(buf3); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn bytes_mut_unsplit_two_split_offs() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaaabbbbccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf2.unsplit(buf3); buf.unsplit(buf2); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn from_iter_no_size_hint() { use std::iter; let mut expect = vec![]; let actual: Bytes = iter::repeat(b'x') .scan(100, |cnt, item| { if *cnt >= 1 { *cnt -= 1; expect.push(item); Some(item) } else { None } }) .collect(); assert_eq!(&actual[..], &expect[..]); } fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) { let slice = &(bytes.as_ref()[start..end]); let sub = bytes.slice_ref(slice); assert_eq!(&sub[..], expected); } #[test] fn slice_ref_works() { let bytes = Bytes::from(&b"012345678"[..]); test_slice_ref(&bytes, 0, 0, b""); test_slice_ref(&bytes, 0, 3, b"012"); test_slice_ref(&bytes, 2, 6, b"2345"); test_slice_ref(&bytes, 7, 9, b"78"); test_slice_ref(&bytes, 9, 9, b""); } #[test] fn slice_ref_empty() { let bytes = Bytes::from(&b""[..]); let slice = &(bytes.as_ref()[0..0]); let sub = bytes.slice_ref(slice); assert_eq!(&sub[..], b""); } #[test] fn slice_ref_empty_subslice() { let bytes = Bytes::from(&b"abcde"[..]); let subbytes = bytes.slice(0..0); let slice = &subbytes[..]; // The `slice` object is derived from the original `bytes` object // so `slice_ref` should work. 
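// For an empty range there is no data to share, so the call is expected to
// return a value equal to `Bytes::new()`, as asserted below.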
assert_eq!(Bytes::new(), bytes.slice_ref(slice)); } #[test] #[should_panic] fn slice_ref_catches_not_a_subset() { let bytes = Bytes::from(&b"012345678"[..]); let slice = &b"012345"[0..4]; bytes.slice_ref(slice); } #[test] fn slice_ref_not_an_empty_subset() { let bytes = Bytes::from(&b"012345678"[..]); let slice = &b""[0..0]; assert_eq!(Bytes::new(), bytes.slice_ref(slice)); } #[test] fn empty_slice_ref_not_an_empty_subset() { let bytes = Bytes::new(); let slice = &b"some other slice"[0..0]; assert_eq!(Bytes::new(), bytes.slice_ref(slice)); } #[test] fn bytes_buf_mut_advance() { let mut bytes = BytesMut::with_capacity(1024); unsafe { let ptr = bytes.chunk_mut().as_mut_ptr(); assert_eq!(1024, bytes.chunk_mut().len()); bytes.advance_mut(10); let next = bytes.chunk_mut().as_mut_ptr(); assert_eq!(1024 - 10, bytes.chunk_mut().len()); assert_eq!(ptr.offset(10), next); // advance to the end bytes.advance_mut(1024 - 10); // The buffer size is doubled assert_eq!(1024, bytes.chunk_mut().len()); } } #[test] fn bytes_buf_mut_reuse_when_fully_consumed() { use bytes::{Buf, BytesMut}; let mut buf = BytesMut::new(); buf.reserve(8192); buf.extend_from_slice(&[0u8; 100][..]); let p = &buf[0] as *const u8; buf.advance(100); buf.reserve(8192); buf.extend_from_slice(b" "); assert_eq!(&buf[0] as *const u8, p); } #[test] #[should_panic] fn bytes_reserve_overflow() { let mut bytes = BytesMut::with_capacity(1024); bytes.put_slice(b"hello world"); bytes.reserve(usize::MAX); } #[test] fn bytes_with_capacity_but_empty() { // See https://github.com/tokio-rs/bytes/issues/340 let vec = Vec::with_capacity(1); let _ = Bytes::from(vec); } #[test] fn bytes_put_bytes() { let mut bytes = BytesMut::new(); bytes.put_u8(17); bytes.put_bytes(19, 2); assert_eq!([17, 19, 19], bytes.as_ref()); } #[test] fn box_slice_empty() { // See https://github.com/tokio-rs/bytes/issues/340 let empty: Box<[u8]> = Default::default(); let b = Bytes::from(empty); assert!(b.is_empty()); } #[test] fn bytes_into_vec() { // Test kind == KIND_VEC let content = b"helloworld"; let mut bytes = BytesMut::new(); bytes.put_slice(content); let vec: Vec = bytes.into(); assert_eq!(&vec, content); // Test kind == KIND_ARC, shared.is_unique() == True let mut bytes = BytesMut::new(); bytes.put_slice(b"abcdewe23"); bytes.put_slice(content); // Overwrite the bytes to make sure only one reference to the underlying // Vec exists. 
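// Reassigning `bytes` below drops the handle covering the first 9 bytes, so
// only a single reference to the shared buffer remains afterwards.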
bytes = bytes.split_off(9); let vec: Vec = bytes.into(); assert_eq!(&vec, content); // Test kind == KIND_ARC, shared.is_unique() == False let prefix = b"abcdewe23"; let mut bytes = BytesMut::new(); bytes.put_slice(prefix); bytes.put_slice(content); let vec: Vec = bytes.split_off(prefix.len()).into(); assert_eq!(&vec, content); let vec: Vec = bytes.into(); assert_eq!(&vec, prefix); } #[test] fn test_bytes_into_vec() { // Test STATIC_VTABLE.to_vec let bs = b"1b23exfcz3r"; let vec: Vec = Bytes::from_static(bs).into(); assert_eq!(&*vec, bs); // Test bytes_mut.SHARED_VTABLE.to_vec impl eprintln!("1"); let mut bytes_mut: BytesMut = bs[..].into(); // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE eprintln!("2"); drop(bytes_mut.split_off(bs.len())); eprintln!("3"); let b1 = bytes_mut.freeze(); eprintln!("4"); let b2 = b1.clone(); eprintln!("{:#?}", (&*b1).as_ptr()); // shared.is_unique() = False eprintln!("5"); assert_eq!(&*Vec::from(b2), bs); // shared.is_unique() = True eprintln!("6"); assert_eq!(&*Vec::from(b1), bs); // Test bytes_mut.SHARED_VTABLE.to_vec impl where offset != 0 let mut bytes_mut1: BytesMut = bs[..].into(); let bytes_mut2 = bytes_mut1.split_off(9); let b1 = bytes_mut1.freeze(); let b2 = bytes_mut2.freeze(); assert_eq!(Vec::from(b2), bs[9..]); assert_eq!(Vec::from(b1), bs[..9]); } #[test] fn test_bytes_into_vec_promotable_even() { let vec = vec![33u8; 1024]; // Test cases where kind == KIND_VEC let b1 = Bytes::from(vec.clone()); assert_eq!(Vec::from(b1), vec); // Test cases where kind == KIND_ARC, ref_cnt == 1 let b1 = Bytes::from(vec.clone()); drop(b1.clone()); assert_eq!(Vec::from(b1), vec); // Test cases where kind == KIND_ARC, ref_cnt == 2 let b1 = Bytes::from(vec.clone()); let b2 = b1.clone(); assert_eq!(Vec::from(b1), vec); // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1 assert_eq!(Vec::from(b2), vec); // Test cases where offset != 0 let mut b1 = Bytes::from(vec.clone()); let b2 = b1.split_off(20); assert_eq!(Vec::from(b2), vec[20..]); assert_eq!(Vec::from(b1), vec[..20]); } #[test] fn test_bytes_vec_conversion() { let mut vec = Vec::with_capacity(10); vec.extend(b"abcdefg"); let b = Bytes::from(vec); let v = Vec::from(b); assert_eq!(v.len(), 7); assert_eq!(v.capacity(), 10); let mut b = Bytes::from(v); b.advance(1); let v = Vec::from(b); assert_eq!(v.len(), 6); assert_eq!(v.capacity(), 10); assert_eq!(v.as_slice(), b"bcdefg"); } #[test] fn test_bytes_mut_conversion() { let mut b1 = BytesMut::with_capacity(10); b1.extend(b"abcdefg"); let b2 = Bytes::from(b1); let v = Vec::from(b2); assert_eq!(v.len(), 7); assert_eq!(v.capacity(), 10); let mut b = Bytes::from(v); b.advance(1); let v = Vec::from(b); assert_eq!(v.len(), 6); assert_eq!(v.capacity(), 10); assert_eq!(v.as_slice(), b"bcdefg"); } #[test] fn test_bytes_capacity_len() { for cap in 0..100 { for len in 0..=cap { let mut v = Vec::with_capacity(cap); v.resize(len, 0); let _ = Bytes::from(v); } } } bytes-1.5.0/tests/test_bytes_odd_alloc.rs000064400000000000000000000052201046102023000166510ustar 00000000000000//! Test using `Bytes` with an allocator that hands out "odd" pointers for //! vectors (pointers where the LSB is set). 
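//!
//! (Illustrative background, not part of the original comment: `Bytes` keeps a
//! kind flag in the low bit of the pointer it stores for buffers created from
//! a `Vec<u8>`, so an allocator that hands out odd addresses exercises the
//! dedicated odd-pointer code path.)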
#![cfg(not(miri))] // Miri does not support custom allocators (also, Miri is "odd" by default with 50% chance)

use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr;

use bytes::Bytes;

#[global_allocator]
static ODD: Odd = Odd;

struct Odd;

unsafe impl GlobalAlloc for Odd {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if layout.align() == 1 && layout.size() > 0 {
            // Allocate slightly bigger so that we can offset the pointer by 1
            let size = layout.size() + 1;
            let new_layout = match Layout::from_size_align(size, 1) {
                Ok(layout) => layout,
                Err(_err) => return ptr::null_mut(),
            };
            let ptr = System.alloc(new_layout);
            if !ptr.is_null() {
                ptr.offset(1)
            } else {
                ptr
            }
        } else {
            System.alloc(layout)
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if layout.align() == 1 && layout.size() > 0 {
            let size = layout.size() + 1;
            let new_layout = match Layout::from_size_align(size, 1) {
                Ok(layout) => layout,
                Err(_err) => std::process::abort(),
            };
            System.dealloc(ptr.offset(-1), new_layout);
        } else {
            System.dealloc(ptr, layout);
        }
    }
}

#[test]
fn sanity_check_odd_allocator() {
    let vec = vec![33u8; 1024];
    let p = vec.as_ptr() as usize;
    assert!(p & 0x1 == 0x1, "{:#b}", p);
}

#[test]
fn test_bytes_from_vec_drop() {
    let vec = vec![33u8; 1024];
    let _b = Bytes::from(vec);
}

#[test]
fn test_bytes_clone_drop() {
    let vec = vec![33u8; 1024];
    let b1 = Bytes::from(vec);
    let _b2 = b1.clone();
}

#[test]
fn test_bytes_into_vec() {
    let vec = vec![33u8; 1024];

    // Test cases where kind == KIND_VEC
    let b1 = Bytes::from(vec.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test cases where kind == KIND_ARC, ref_cnt == 1
    let b1 = Bytes::from(vec.clone());
    drop(b1.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test cases where kind == KIND_ARC, ref_cnt == 2
    let b1 = Bytes::from(vec.clone());
    let b2 = b1.clone();
    assert_eq!(Vec::from(b1), vec);

    // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
    assert_eq!(Vec::from(b2), vec);

    // Test cases where offset != 0
    let mut b1 = Bytes::from(vec.clone());
    let b2 = b1.split_off(20);
    assert_eq!(Vec::from(b2), vec[20..]);
    assert_eq!(Vec::from(b1), vec[..20]);
}

bytes-1.5.0/tests/test_bytes_vec_alloc.rs

use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use bytes::{Buf, Bytes};

#[global_allocator]
static LEDGER: Ledger = Ledger::new();

const LEDGER_LENGTH: usize = 2048;

struct Ledger {
    alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
}

impl Ledger {
    const fn new() -> Self {
        const ELEM: (AtomicPtr<u8>, AtomicUsize) =
            (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
        let alloc_table = [ELEM; LEDGER_LENGTH];

        Self { alloc_table }
    }

    /// Iterate over our table until we find an open entry, then insert into said entry
    fn insert(&self, ptr: *mut u8, size: usize) {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // SeqCst is good enough here, we don't care about perf, i just want to be correct!
            if entry_ptr
                .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
            {
                entry_size.store(size, Ordering::SeqCst);
                break;
            }
        }
    }

    fn remove(&self, ptr: *mut u8) -> usize {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // set the value to be something that will never try and be deallocated, so that we
            // don't have any chance of a race condition
            //
            // dont worry, LEDGER_LENGTH is really long to compensate for us not reclaiming space
            if entry_ptr
                .compare_exchange(
                    ptr,
                    invalid_ptr(usize::MAX),
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                return entry_size.load(Ordering::SeqCst);
            }
        }

        panic!("Couldn't find a matching entry for {:x?}", ptr);
    }
}

unsafe impl GlobalAlloc for Ledger {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let size = layout.size();
        let ptr = System.alloc(layout);
        self.insert(ptr, size);
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let orig_size = self.remove(ptr);

        if orig_size != layout.size() {
            panic!(
                "bad dealloc: alloc size was {}, dealloc size is {}",
                orig_size,
                layout.size()
            );
        } else {
            System.dealloc(ptr, layout);
        }
    }
}

#[test]
fn test_bytes_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.advance(1);
    drop(bytes);
}

#[test]
fn test_bytes_truncate() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    drop(bytes);
}

#[test]
fn test_bytes_truncate_and_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    bytes.advance(1);
    drop(bytes);
}

/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}

#[test]
fn test_bytes_into_vec() {
    let vec = vec![33u8; 1024];

    // Test cases where kind == KIND_VEC
    let b1 = Bytes::from(vec.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test cases where kind == KIND_ARC, ref_cnt == 1
    let b1 = Bytes::from(vec.clone());
    drop(b1.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test cases where kind == KIND_ARC, ref_cnt == 2
    let b1 = Bytes::from(vec.clone());
    let b2 = b1.clone();
    assert_eq!(Vec::from(b1), vec);

    // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
    assert_eq!(Vec::from(b2), vec);

    // Test cases where offset != 0
    let mut b1 = Bytes::from(vec.clone());
    let b2 = b1.split_off(20);
    assert_eq!(Vec::from(b2), vec[20..]);
    assert_eq!(Vec::from(b1), vec[..20]);
}

bytes-1.5.0/tests/test_chain.rs

#![warn(rust_2018_idioms)]

use bytes::{Buf, BufMut, Bytes};
#[cfg(feature = "std")]
use std::io::IoSlice;

#[test]
fn collect_two_bufs() {
    let a = Bytes::from(&b"hello"[..]);
    let b = Bytes::from(&b"world"[..]);

    let res = a.chain(b).copy_to_bytes(10);
    assert_eq!(res, &b"helloworld"[..]);
}

#[test]
fn writing_chained() {
    let mut a = [0u8; 64];
    let mut b = [0u8; 64];

    {
        let mut buf = (&mut a[..]).chain_mut(&mut b[..]);

        for i in 0u8..128 {
            buf.put_u8(i);
        }
    }

    for i in 0..64 {
        let expect = i as u8;
        assert_eq!(expect, a[i]);
        assert_eq!(expect + 64, b[i]);
    }
}

#[test]
fn iterating_two_bufs() {
    let a = Bytes::from(&b"hello"[..]);
    let b = Bytes::from(&b"world"[..]);

    let res: Vec<u8> = a.chain(b).into_iter().collect();
    assert_eq!(res, &b"helloworld"[..]);
}

#[cfg(feature = "std")]
#[test]
fn vectored_read() {
    let a = Bytes::from(&b"hello"[..]);
    let b = Bytes::from(&b"world"[..]);

    let mut buf = a.chain(b);

    {
        let b1: &[u8] = &mut [];
        let b2: &[u8] = &mut [];
        let b3:
            &[u8] = &mut [];
        let b4: &[u8] = &mut [];
        let mut iovecs = [
            IoSlice::new(b1),
            IoSlice::new(b2),
            IoSlice::new(b3),
            IoSlice::new(b4),
        ];

        assert_eq!(2, buf.chunks_vectored(&mut iovecs));
        assert_eq!(iovecs[0][..], b"hello"[..]);
        assert_eq!(iovecs[1][..], b"world"[..]);
        assert_eq!(iovecs[2][..], b""[..]);
        assert_eq!(iovecs[3][..], b""[..]);
    }

    buf.advance(2);

    {
        let b1: &[u8] = &mut [];
        let b2: &[u8] = &mut [];
        let b3: &[u8] = &mut [];
        let b4: &[u8] = &mut [];
        let mut iovecs = [
            IoSlice::new(b1),
            IoSlice::new(b2),
            IoSlice::new(b3),
            IoSlice::new(b4),
        ];

        assert_eq!(2, buf.chunks_vectored(&mut iovecs));
        assert_eq!(iovecs[0][..], b"llo"[..]);
        assert_eq!(iovecs[1][..], b"world"[..]);
        assert_eq!(iovecs[2][..], b""[..]);
        assert_eq!(iovecs[3][..], b""[..]);
    }

    buf.advance(3);

    {
        let b1: &[u8] = &mut [];
        let b2: &[u8] = &mut [];
        let b3: &[u8] = &mut [];
        let b4: &[u8] = &mut [];
        let mut iovecs = [
            IoSlice::new(b1),
            IoSlice::new(b2),
            IoSlice::new(b3),
            IoSlice::new(b4),
        ];

        assert_eq!(1, buf.chunks_vectored(&mut iovecs));
        assert_eq!(iovecs[0][..], b"world"[..]);
        assert_eq!(iovecs[1][..], b""[..]);
        assert_eq!(iovecs[2][..], b""[..]);
        assert_eq!(iovecs[3][..], b""[..]);
    }

    buf.advance(3);

    {
        let b1: &[u8] = &mut [];
        let b2: &[u8] = &mut [];
        let b3: &[u8] = &mut [];
        let b4: &[u8] = &mut [];
        let mut iovecs = [
            IoSlice::new(b1),
            IoSlice::new(b2),
            IoSlice::new(b3),
            IoSlice::new(b4),
        ];

        assert_eq!(1, buf.chunks_vectored(&mut iovecs));
        assert_eq!(iovecs[0][..], b"ld"[..]);
        assert_eq!(iovecs[1][..], b""[..]);
        assert_eq!(iovecs[2][..], b""[..]);
        assert_eq!(iovecs[3][..], b""[..]);
    }
}

#[test]
fn chain_growing_buffer() {
    let mut buff = [' ' as u8; 10];
    let mut vec = b"wassup".to_vec();

    let mut chained = (&mut buff[..]).chain_mut(&mut vec).chain_mut(Vec::new());
    // Required for potential overflow because remaining_mut for Vec is isize::MAX - vec.len(), but for chain_mut is usize::MAX

    chained.put_slice(b"hey there123123");

    assert_eq!(&buff, b"hey there1");
    assert_eq!(&vec, b"wassup23123");
}

#[test]
fn chain_overflow_remaining_mut() {
    let mut chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());

    assert_eq!(chained.remaining_mut(), usize::MAX);
    chained.put_slice(&[0; 256]);
    assert_eq!(chained.remaining_mut(), usize::MAX);
}

#[test]
fn chain_get_bytes() {
    let mut ab = Bytes::copy_from_slice(b"ab");
    let mut cd = Bytes::copy_from_slice(b"cd");
    let ab_ptr = ab.as_ptr();
    let cd_ptr = cd.as_ptr();
    let mut chain = (&mut ab).chain(&mut cd);
    let a = chain.copy_to_bytes(1);
    let bc = chain.copy_to_bytes(2);
    let d = chain.copy_to_bytes(1);

    assert_eq!(Bytes::copy_from_slice(b"a"), a);
    assert_eq!(Bytes::copy_from_slice(b"bc"), bc);
    assert_eq!(Bytes::copy_from_slice(b"d"), d);

    // assert `get_bytes` did not allocate
    assert_eq!(ab_ptr, a.as_ptr());
    // assert `get_bytes` did not allocate
    assert_eq!(cd_ptr.wrapping_offset(1), d.as_ptr());
}

bytes-1.5.0/tests/test_debug.rs

#![warn(rust_2018_idioms)]

use bytes::Bytes;

#[test]
fn fmt() {
    let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect();
    let expected = "b\"\
        \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\
        \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
        \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
        \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
        \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
        @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
        `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
        \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
        \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
        \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\
        \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
        \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\
        \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
        \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\
        \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
        \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\
        \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
        \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\
        \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
        \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\
        \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
        \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\
        \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\"";

    assert_eq!(expected, format!("{:?}", Bytes::from(vec)));
}

bytes-1.5.0/tests/test_iter.rs

#![warn(rust_2018_idioms)]

use bytes::Bytes;

#[test]
fn iter_len() {
    let buf = Bytes::from_static(b"hello world");
    let iter = buf.iter();

    assert_eq!(iter.size_hint(), (11, Some(11)));
    assert_eq!(iter.len(), 11);
}

#[test]
fn empty_iter_len() {
    let buf = Bytes::from_static(b"");
    let iter = buf.iter();

    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.len(), 0);
}

bytes-1.5.0/tests/test_reader.rs

#![warn(rust_2018_idioms)]
#![cfg(feature = "std")]

use std::io::{BufRead, Read};

use bytes::Buf;

#[test]
fn read() {
    let buf1 = &b"hello "[..];
    let buf2 = &b"world"[..];
    let buf = Buf::chain(buf1, buf2); // Disambiguate with Read::chain
    let mut buffer = Vec::new();
    buf.reader().read_to_end(&mut buffer).unwrap();
    assert_eq!(b"hello world", &buffer[..]);
}

#[test]
fn buf_read() {
    let buf1 = &b"hell"[..];
    let buf2 = &b"o\nworld"[..];
    let mut reader = Buf::chain(buf1, buf2).reader();
    let mut line = String::new();
    reader.read_line(&mut line).unwrap();
    assert_eq!("hello\n", &line);
    line.clear();
    reader.read_line(&mut line).unwrap();
    assert_eq!("world", &line);
}

bytes-1.5.0/tests/test_serde.rs

#![cfg(feature = "serde")]
#![warn(rust_2018_idioms)]

use serde_test::{assert_tokens, Token};

#[test]
fn test_ser_de_empty() {
    let b = bytes::Bytes::new();
    assert_tokens(&b, &[Token::Bytes(b"")]);
    let b = bytes::BytesMut::with_capacity(0);
    assert_tokens(&b, &[Token::Bytes(b"")]);
}

#[test]
fn test_ser_de() {
    let b = bytes::Bytes::from(&b"bytes"[..]);
    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
    let b = bytes::BytesMut::from(&b"bytes"[..]);
    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
}

bytes-1.5.0/tests/test_take.rs

#![warn(rust_2018_idioms)]

use bytes::buf::Buf;
use bytes::Bytes;

#[test]
fn long_take() {
    // Tests that a take with a size greater than the buffer length will not
    // overrun the buffer. Regression test for #138.
    let buf = b"hello world".take(100);
    assert_eq!(11, buf.remaining());
    assert_eq!(b"hello world", buf.chunk());
}

#[test]
fn take_copy_to_bytes() {
    let mut abcd = Bytes::copy_from_slice(b"abcd");
    let abcd_ptr = abcd.as_ptr();
    let mut take = (&mut abcd).take(2);
    let a = take.copy_to_bytes(1);
    assert_eq!(Bytes::copy_from_slice(b"a"), a);
    // assert `to_bytes` did not allocate
    assert_eq!(abcd_ptr, a.as_ptr());
    assert_eq!(Bytes::copy_from_slice(b"bcd"), abcd);
}

#[test]
#[should_panic]
fn take_copy_to_bytes_panics() {
    let abcd = Bytes::copy_from_slice(b"abcd");
    abcd.take(2).copy_to_bytes(3);
}
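
// Illustrative sketch (not part of the original bytes-1.5.0 test suite): the two
// tests above exercise `Buf::take` together with `copy_to_bytes`. The sketch below
// shows the same pattern as a plain usage example: a `Take` adapter caps how much
// can be read, and since `Take<&mut Bytes>` forwards `copy_to_bytes` to the inner
// buffer, the original `Bytes` is advanced past the consumed prefix. The test name
// and the sample data are made up for illustration; it relies on the `Buf` and
// `Bytes` imports already in scope in this file.
#[test]
fn take_then_resume_reading_sketch() {
    let mut buf = Bytes::copy_from_slice(b"header:payload");

    // Limit reads to the 7-byte "header:" prefix.
    let mut head = (&mut buf).take(7);
    let header = head.copy_to_bytes(7);
    assert_eq!(&header[..], &b"header:"[..]);

    // The inner buffer resumes right after the bytes consumed through `Take`.
    assert_eq!(&buf[..], &b"payload"[..]);
}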