bytes-0.4.12/benches/bytes.rs010064400007650000024000000126361337670411000142600ustar0000000000000000#![feature(test)] extern crate bytes; extern crate test; use test::Bencher; use bytes::{Bytes, BytesMut, BufMut}; #[bench] fn alloc_small(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { test::black_box(BytesMut::with_capacity(12)); } }) } #[bench] fn alloc_mid(b: &mut Bencher) { b.iter(|| { test::black_box(BytesMut::with_capacity(128)); }) } #[bench] fn alloc_big(b: &mut Bencher) { b.iter(|| { test::black_box(BytesMut::with_capacity(4096)); }) } #[bench] fn split_off_and_drop(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { let v = vec![10; 200]; let mut b = Bytes::from(v); test::black_box(b.split_off(100)); test::black_box(b); } }) } #[bench] fn deref_unique(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_unique_unroll(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..128 { test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); test::black_box(&buf[..]); } }) } #[bench] fn deref_shared(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); buf.put(&[0u8; 1024][..]); let _b2 = buf.split_off(1024); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_inline(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(8); buf.put(&[0u8; 8][..]); b.iter(|| { for _ in 0..1024 { test::black_box(&buf[..]); } }) } #[bench] fn deref_two(b: &mut Bencher) { let mut buf1 = BytesMut::with_capacity(8); buf1.put(&[0u8; 8][..]); let mut buf2 = BytesMut::with_capacity(4096); buf2.put(&[0u8; 1024][..]); b.iter(|| { for _ in 0..512 { test::black_box(&buf1[..]); test::black_box(&buf2[..]); } }) } #[bench] fn clone_inline(b: &mut Bencher) { let bytes = Bytes::from_static(b"hello world"); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn clone_static(b: &mut Bencher) { let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn clone_arc(b: &mut Bencher) { let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes()); b.iter(|| { for _ in 0..1024 { test::black_box(&bytes.clone()); } }) } #[bench] fn alloc_write_split_to_mid(b: &mut Bencher) { b.iter(|| { let mut buf = BytesMut::with_capacity(128); buf.put_slice(&[0u8; 64]); test::black_box(buf.split_to(64)); }) } #[bench] fn drain_write_drain(b: &mut Bencher) { let data = [0u8; 128]; b.iter(|| { let mut buf = BytesMut::with_capacity(1024); let mut parts = Vec::with_capacity(8); for _ in 0..8 { buf.put(&data[..]); parts.push(buf.split_to(128)); } test::black_box(parts); }) } #[bench] fn fmt_write(b: &mut Bencher) { use std::fmt::Write; let mut buf = BytesMut::with_capacity(128); let s = "foo bar baz quux lorem ipsum dolor et"; b.bytes = s.len() as u64; b.iter(|| { let _ = write!(buf, "{}", s); test::black_box(&buf); unsafe { buf.set_len(0); } }) } #[bench] fn from_long_slice(b: &mut Bencher) { let data = [0u8; 128]; b.bytes = data.len() as u64; b.iter(|| { let buf = BytesMut::from(&data[..]); test::black_box(buf); }) } #[bench] fn slice_empty(b: &mut Bencher) { b.iter(|| { let b = Bytes::from(vec![17; 1024]).clone(); for 
i in 0..1000 { test::black_box(b.slice(i % 100, i % 100)); } }) } #[bench] fn slice_short_from_arc(b: &mut Bencher) { b.iter(|| { // `clone` is to convert to ARC let b = Bytes::from(vec![17; 1024]).clone(); for i in 0..1000 { test::black_box(b.slice(1, 2 + i % 10)); } }) } // Keep in sync with bytes.rs #[cfg(target_pointer_width = "64")] const INLINE_CAP: usize = 4 * 8 - 1; #[cfg(target_pointer_width = "32")] const INLINE_CAP: usize = 4 * 4 - 1; #[bench] fn slice_avg_le_inline_from_arc(b: &mut Bencher) { b.iter(|| { // `clone` is to convert to ARC let b = Bytes::from(vec![17; 1024]).clone(); for i in 0..1000 { // [1, INLINE_CAP] let len = 1 + i % (INLINE_CAP - 1); test::black_box(b.slice(i % 10, i % 10 + len)); } }) } #[bench] fn slice_large_le_inline_from_arc(b: &mut Bencher) { b.iter(|| { // `clone` is to convert to ARC let b = Bytes::from(vec![17; 1024]).clone(); for i in 0..1000 { // [INLINE_CAP - 10, INLINE_CAP] let len = INLINE_CAP - 9 + i % 10; test::black_box(b.slice(i % 10, i % 10 + len)); } }) } bytes-0.4.12/Cargo.toml.orig010064400007650000024000000017671344003026700140440ustar0000000000000000[package] name = "bytes" # When releasing to crates.io: # - Update html_root_url. # - Update CHANGELOG.md. # - Update doc URL. # - Create "v0.4.x" git tag. version = "0.4.12" license = "MIT" authors = ["Carl Lerche "] description = "Types and traits for working with bytes" documentation = "https://docs.rs/bytes/0.4.12/bytes" homepage = "https://github.com/carllerche/bytes" repository = "https://github.com/carllerche/bytes" readme = "README.md" keywords = ["buffers", "zero-copy", "io"] exclude = [ ".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*" ] categories = ["network-programming", "data-structures"] [package.metadata.docs.rs] features = ["i128"] [dependencies] byteorder = "1.1.0" iovec = "0.1" serde = { version = "1.0", optional = true } either = { version = "1.5", default-features = false, optional = true } [dev-dependencies] serde_test = "1.0" [features] i128 = ["byteorder/i128"] bytes-0.4.12/Cargo.toml0000644000000025500000000000000103050ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)

[package]
name = "bytes"
version = "0.4.12"
authors = ["Carl Lerche <me@carllerche.com>"]
exclude = [".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*"]
description = "Types and traits for working with bytes"
homepage = "https://github.com/carllerche/bytes"
documentation = "https://docs.rs/bytes/0.4.12/bytes"
readme = "README.md"
keywords = ["buffers", "zero-copy", "io"]
categories = ["network-programming", "data-structures"]
license = "MIT"
repository = "https://github.com/carllerche/bytes"

[package.metadata.docs.rs]
features = ["i128"]

[dependencies.byteorder]
version = "1.1.0"

[dependencies.either]
version = "1.5"
optional = true
default-features = false

[dependencies.iovec]
version = "0.1"

[dependencies.serde]
version = "1.0"
optional = true

[dev-dependencies.serde_test]
version = "1.0"

[features]
i128 = ["byteorder/i128"]

bytes-0.4.12/CHANGELOG.md

# 0.4.12 (March 6, 2019)

### Added

- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244).
- Implement `Buf` for `VecDeque<u8>` (#249).

# 0.4.11 (November 17, 2018)

* Use raw pointers for potentially racy loads (#233).
* Implement `BufRead` for `buf::Reader` (#232).
* Documentation tweaks (#234).

# 0.4.10 (September 4, 2018)

* impl `Buf` and `BufMut` for `Either` (#225).
* Add `Bytes::slice_ref` (#208).

# 0.4.9 (July 12, 2018)

* Add 128 bit number support behind a feature flag (#209).
* Implement `IntoBuf` for `&mut [u8]`

# 0.4.8 (May 25, 2018)

* Fix panic in `BytesMut` `FromIterator` implementation.
* Bytes: Recycle space when reserving space in vec mode (#197).
* Bytes: Add resize fn (#203).

# 0.4.7 (April 27, 2018)

* Make `Buf` and `BufMut` usable as trait objects (#186).
* impl BorrowMut for BytesMut (#185).
* Improve accessor performance (#195).

# 0.4.6 (January 8, 2018)

* Implement FromIterator for Bytes/BytesMut (#148).
* Add `advance` fn to Bytes/BytesMut (#166).
* Add `unsplit` fn to `BytesMut` (#162, #173).
* Improvements to Bytes split fns (#92).

# 0.4.5 (August 12, 2017)

* Fix range bug in `Take::bytes`
* Misc performance improvements
* Add extra `PartialEq` implementations.
* Add `Bytes::with_capacity`
* Implement `AsMut<[u8]>` for `BytesMut`

# 0.4.4 (May 26, 2017)

* Add serde support behind feature flag
* Add `extend_from_slice` on `Bytes` and `BytesMut`
* Add `truncate` and `clear` on `Bytes`
* Misc additional std trait implementations
* Misc performance improvements

# 0.4.3 (April 30, 2017)

* Fix Vec::advance_mut bug
* Bump minimum Rust version to 1.15
* Misc performance tweaks

# 0.4.2 (April 5, 2017)

* Misc performance tweaks
* Improved `Debug` implementation for `Bytes`
* Avoid some incorrect assert panics

# 0.4.1 (March 15, 2017)

* Expose `buf` module and have most types available from there vs. root.
* Implement `IntoBuf` for `T: Buf`.
* Add `FromBuf` and `Buf::collect`.
* Add iterator adapter for `Buf`.
* Add scatter/gather support to `Buf` and `BufMut`.
* Add `Buf::chain`.
* Reduce allocations on repeated calls to `BytesMut::reserve`.
* Implement `Debug` for more types.
* Remove `Source` in favor of `IntoBuf`.
* Implement `Extend` for `BytesMut`.
# 0.4.0 (February 24, 2017) * Initial release bytes-0.4.12/ci/before_deploy.ps1010064400007650000024000000011001337670411000147730ustar0000000000000000# This script takes care of packaging the build artifacts that will go in the # release zipfile $SRC_DIR = $PWD.Path $STAGE = [System.Guid]::NewGuid().ToString() Set-Location $ENV:Temp New-Item -Type Directory -Name $STAGE Set-Location $STAGE $ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip" # TODO Update this to package the right artifacts Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\hello.exe" '.\' 7z a "$ZIP" * Push-AppveyorArtifact "$ZIP" Remove-Item *.* -Force Set-Location .. Remove-Item $STAGE Set-Location $SRC_DIR bytes-0.4.12/ci/before_deploy.sh010064400007650000024000000012661337670411000147170ustar0000000000000000# This script takes care of building your crate and packaging it for release set -ex main() { local src=$(pwd) \ stage= case $TRAVIS_OS_NAME in linux) stage=$(mktemp -d) ;; osx) stage=$(mktemp -d -t tmp) ;; esac test -f Cargo.lock || cargo generate-lockfile # TODO Update this to build the artifacts that matter to you cross rustc --bin hello --target $TARGET --release -- -C lto # TODO Update this to package the right artifacts cp target/$TARGET/release/hello $stage/ cd $stage tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz * cd $src rm -rf $stage } main bytes-0.4.12/ci/install.sh010064400007650000024000000015661337670411000135520ustar0000000000000000set -ex main() { curl https://sh.rustup.rs -sSf | \ sh -s -- -y --default-toolchain $TRAVIS_RUST_VERSION local target= if [ $TRAVIS_OS_NAME = linux ]; then target=x86_64-unknown-linux-gnu sort=sort else target=x86_64-apple-darwin sort=gsort # for `sort --sort-version`, from brew's coreutils. fi # This fetches latest stable release local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \ | cut -d/ -f3 \ | grep -E '^v[0-9.]+$' \ | $sort --version-sort \ | tail -n1) echo cross version: $tag curl -LSfs https://japaric.github.io/trust/install.sh | \ sh -s -- \ --force \ --git japaric/cross \ --tag $tag \ --target $target } main bytes-0.4.12/ci/script.sh010064400007650000024000000004661337670411000134060ustar0000000000000000# This script takes care of testing your crate set -ex main() { cross build --target $TARGET $EXTRA_ARGS if [ ! -z $DISABLE_TESTS ]; then return fi cross test --target $TARGET $EXTRA_ARGS } # we don't run the "test phase" when doing deploys if [ -z $TRAVIS_TAG ]; then main fi bytes-0.4.12/ci/tsan010064400007650000024000000016461337670411000124370ustar0000000000000000# TSAN suppressions file for `bytes` # TSAN does not understand fences and `Arc::drop` is implemented using a fence. # This causes many false positives. race:Arc*drop race:arc*Weak*drop # `std` mpsc is not used in any Bytes code base. This race is triggered by some # rust runtime logic. race:std*mpsc_queue # Some test runtime races. Allocation should be race free race:alloc::alloc # Not sure why this is warning, but it is in the test harness and not the library. race:TestEvent*clone race:test::run_tests_console::*closure # Probably more fences in std. race:__call_tls_dtors # `is_inline_or_static` is explicitly called concurrently without synchronization. # The safety explanation can be found in a comment. race:Inner::is_inline_or_static # This ignores a false positive caused by `thread::park()`/`thread::unpark()`. 
# See: https://github.com/rust-lang/rust/pull/54806#issuecomment-436193353 race:pthread_cond_destroy bytes-0.4.12/LICENSE010064400007650000024000000020371337670411000121540ustar0000000000000000Copyright (c) 2018 Carl Lerche Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. bytes-0.4.12/README.md010064400007650000024000000017551344003026700124310ustar0000000000000000# Bytes A utility library for working with bytes. [![Crates.io](https://img.shields.io/crates/v/bytes.svg?maxAge=2592000)](https://crates.io/crates/bytes) [![Build Status](https://travis-ci.org/carllerche/bytes.svg?branch=master)](https://travis-ci.org/carllerche/bytes) [Documentation](https://docs.rs/bytes/0.4.12/bytes/) ## Usage To use `bytes`, first add this to your `Cargo.toml`: ```toml [dependencies] bytes = "0.4.12" ``` Next, add this to your crate: ```rust extern crate bytes; use bytes::{Bytes, BytesMut, Buf, BufMut}; ``` ## Serde support Serde support is optional and disabled by default. To enable use the feature `serde`. ```toml [dependencies] bytes = { version = "0.4.12", features = ["serde"] } ``` ## License This project is licensed under the [MIT license](LICENSE). ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in `bytes` by you, shall be licensed as MIT, without any additional terms or conditions. bytes-0.4.12/src/buf/buf.rs010064400007650000024000001001571337670411000136360ustar0000000000000000use super::{IntoBuf, Take, Reader, Iter, FromBuf, Chain}; use byteorder::{BigEndian, ByteOrder, LittleEndian}; use iovec::IoVec; use std::{cmp, io, ptr}; macro_rules! buf_get_impl { ($this:ident, $size:expr, $conv:path) => ({ // try to convert directly from the bytes let ret = { // this Option trick is to avoid keeping a borrow on self // when advance() is called (mut borrow) and to call bytes() only once if let Some(src) = $this.bytes().get(..($size)) { Some($conv(src)) } else { None } }; if let Some(ret) = ret { // if the direct convertion was possible, advance and return $this.advance($size); return ret; } else { // if not we copy the bytes in a temp buffer then convert let mut buf = [0; ($size)]; $this.copy_to_slice(&mut buf); // (do the advance) return $conv(&buf); } }); ($this:ident, $buf_size:expr, $conv:path, $len_to_read:expr) => ({ // The same trick as above does not improve the best case speed. 
// It seems to be linked to the way the method is optimised by the compiler let mut buf = [0; ($buf_size)]; $this.copy_to_slice(&mut buf[..($len_to_read)]); return $conv(&buf[..($len_to_read)], $len_to_read); }); } /// Read bytes from a buffer. /// /// A buffer stores bytes in memory such that read operations are infallible. /// The underlying storage may or may not be in contiguous memory. A `Buf` value /// is a cursor into the buffer. Reading from `Buf` advances the cursor /// position. It can be thought of as an efficient `Iterator` for collections of /// bytes. /// /// The simplest `Buf` is a `Cursor` wrapping a `[u8]`. /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world"); /// /// assert_eq!(b'h', buf.get_u8()); /// assert_eq!(b'e', buf.get_u8()); /// assert_eq!(b'l', buf.get_u8()); /// /// let mut rest = [0; 8]; /// buf.copy_to_slice(&mut rest); /// /// assert_eq!(&rest[..], b"lo world"); /// ``` pub trait Buf { /// Returns the number of bytes between the current position and the end of /// the buffer. /// /// This value is greater than or equal to the length of the slice returned /// by `bytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world"); /// /// assert_eq!(buf.remaining(), 11); /// /// buf.get_u8(); /// /// assert_eq!(buf.remaining(), 10); /// ``` /// /// # Implementer notes /// /// Implementations of `remaining` should ensure that the return value does /// not change unless a call is made to `advance` or any other function that /// is documented to change the `Buf`'s current position. fn remaining(&self) -> usize; /// Returns a slice starting at the current position and of length between 0 /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows /// non-continuous internal representation). /// /// This is a lower level function. Most operations are done with other /// functions. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world"); /// /// assert_eq!(buf.bytes(), b"hello world"); /// /// buf.advance(6); /// /// assert_eq!(buf.bytes(), b"world"); /// ``` /// /// # Implementer notes /// /// This function should never panic. Once the end of the buffer is reached, /// i.e., `Buf::remaining` returns 0, calls to `bytes` should return an /// empty slice. fn bytes(&self) -> &[u8]; /// Fills `dst` with potentially multiple slices starting at `self`'s /// current position. /// /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vec` enables /// fetching more than one slice at once. `dst` is a slice of `IoVec` /// references, enabling the slice to be directly used with [`writev`] /// without any further conversion. The sum of the lengths of all the /// buffers in `dst` will be less than or equal to `Buf::remaining()`. /// /// The entries in `dst` will be overwritten, but the data **contained** by /// the slices **will not** be modified. If `bytes_vec` does not fill every /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices /// in `self. /// /// This is a lower level function. Most operations are done with other /// functions. /// /// # Implementer notes /// /// This function should never panic. Once the end of the buffer is reached, /// i.e., `Buf::remaining` returns 0, calls to `bytes_vec` must return 0 /// without mutating `dst`. 
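///
/// (For example, a buffer backed by a single contiguous slice can simply
/// place its one `bytes()` slice into `dst[0]` and return 1, which is
/// exactly what the default implementation here does.)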
/// /// Implementations should also take care to properly handle being called /// with `dst` being a zero length slice. /// /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize { if dst.is_empty() { return 0; } if self.has_remaining() { dst[0] = self.bytes().into(); 1 } else { 0 } } /// Advance the internal cursor of the Buf /// /// The next call to `bytes` will return a slice starting `cnt` bytes /// further into the underlying buffer. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world"); /// /// assert_eq!(buf.bytes(), b"hello world"); /// /// buf.advance(6); /// /// assert_eq!(buf.bytes(), b"world"); /// ``` /// /// # Panics /// /// This function **may** panic if `cnt > self.remaining()`. /// /// # Implementer notes /// /// It is recommended for implementations of `advance` to panic if `cnt > /// self.remaining()`. If the implementation does not panic, the call must /// behave as if `cnt == self.remaining()`. /// /// A call with `cnt == 0` should never panic and be a no-op. fn advance(&mut self, cnt: usize); /// Returns true if there are any more bytes to consume /// /// This is equivalent to `self.remaining() != 0`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"a"); /// /// assert!(buf.has_remaining()); /// /// buf.get_u8(); /// /// assert!(!buf.has_remaining()); /// ``` fn has_remaining(&self) -> bool { self.remaining() > 0 } /// Copies bytes from `self` into `dst`. /// /// The cursor is advanced by the number of bytes copied. `self` must have /// enough remaining bytes to fill `dst`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world"); /// let mut dst = [0; 5]; /// /// buf.copy_to_slice(&mut dst); /// assert_eq!(b"hello", &dst); /// assert_eq!(6, buf.remaining()); /// ``` /// /// # Panics /// /// This function panics if `self.remaining() < dst.len()` fn copy_to_slice(&mut self, dst: &mut [u8]) { let mut off = 0; assert!(self.remaining() >= dst.len()); while off < dst.len() { let cnt; unsafe { let src = self.bytes(); cnt = cmp::min(src.len(), dst.len() - off); ptr::copy_nonoverlapping( src.as_ptr(), dst[off..].as_mut_ptr(), cnt); off += src.len(); } self.advance(cnt); } } /// Gets an unsigned 8 bit integer from `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08 hello"); /// assert_eq!(8, buf.get_u8()); /// ``` /// /// # Panics /// /// This function panics if there is no more remaining data in `self`. fn get_u8(&mut self) -> u8 { assert!(self.remaining() >= 1); let ret = self.bytes()[0]; self.advance(1); ret } /// Gets a signed 8 bit integer from `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08 hello"); /// assert_eq!(8, buf.get_i8()); /// ``` /// /// # Panics /// /// This function panics if there is no more remaining data in `self`. 
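///
/// Negative values are read back from their two's-complement encoding; as
/// a quick illustrative sketch:
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"\xFF");
/// assert_eq!(-1, buf.get_i8());
/// ```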
fn get_i8(&mut self) -> i8 { assert!(self.remaining() >= 1); let ret = self.bytes()[0] as i8; self.advance(1); ret } #[doc(hidden)] #[deprecated(note="use get_u16_be or get_u16_le")] fn get_u16(&mut self) -> u16 where Self: Sized { let mut buf = [0; 2]; self.copy_to_slice(&mut buf); T::read_u16(&buf) } /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09 hello"); /// assert_eq!(0x0809, buf.get_u16_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u16_be(&mut self) -> u16 { buf_get_impl!(self, 2, BigEndian::read_u16); } /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x09\x08 hello"); /// assert_eq!(0x0809, buf.get_u16_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u16_le(&mut self) -> u16 { buf_get_impl!(self, 2, LittleEndian::read_u16); } #[doc(hidden)] #[deprecated(note="use get_i16_be or get_i16_le")] fn get_i16(&mut self) -> i16 where Self: Sized { let mut buf = [0; 2]; self.copy_to_slice(&mut buf); T::read_i16(&buf) } /// Gets a signed 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09 hello"); /// assert_eq!(0x0809, buf.get_i16_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i16_be(&mut self) -> i16 { buf_get_impl!(self, 2, BigEndian::read_i16); } /// Gets a signed 16 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x09\x08 hello"); /// assert_eq!(0x0809, buf.get_i16_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i16_le(&mut self) -> i16 { buf_get_impl!(self, 2, LittleEndian::read_i16); } #[doc(hidden)] #[deprecated(note="use get_u32_be or get_u32_le")] fn get_u32(&mut self) -> u32 where Self: Sized { let mut buf = [0; 4]; self.copy_to_slice(&mut buf); T::read_u32(&buf) } /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); /// assert_eq!(0x0809A0A1, buf.get_u32_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u32_be(&mut self) -> u32 { buf_get_impl!(self, 4, BigEndian::read_u32); } /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); /// assert_eq!(0x0809A0A1, buf.get_u32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. 
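///
/// Byte order only changes how the four bytes are interpreted, not how
/// many are consumed; reading the same bytes big-endian yields a
/// different value (illustrative sketch):
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello");
/// assert_eq!(0xA1A00908, buf.get_u32_be());
/// ```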
fn get_u32_le(&mut self) -> u32 { buf_get_impl!(self, 4, LittleEndian::read_u32); } #[doc(hidden)] #[deprecated(note="use get_i32_be or get_i32_le")] fn get_i32(&mut self) -> i32 where Self: Sized { let mut buf = [0; 4]; self.copy_to_slice(&mut buf); T::read_i32(&buf) } /// Gets a signed 32 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); /// assert_eq!(0x0809A0A1, buf.get_i32_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i32_be(&mut self) -> i32 { buf_get_impl!(self, 4, BigEndian::read_i32); } /// Gets a signed 32 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); /// assert_eq!(0x0809A0A1, buf.get_i32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i32_le(&mut self) -> i32 { buf_get_impl!(self, 4, LittleEndian::read_i32); } #[doc(hidden)] #[deprecated(note="use get_u64_be or get_u64_le")] fn get_u64(&mut self) -> u64 where Self: Sized { let mut buf = [0; 8]; self.copy_to_slice(&mut buf); T::read_u64(&buf) } /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); /// assert_eq!(0x0102030405060708, buf.get_u64_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u64_be(&mut self) -> u64 { buf_get_impl!(self, 8, BigEndian::read_u64); } /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); /// assert_eq!(0x0102030405060708, buf.get_u64_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_u64_le(&mut self) -> u64 { buf_get_impl!(self, 8, LittleEndian::read_u64); } #[doc(hidden)] #[deprecated(note="use get_i64_be or get_i64_le")] fn get_i64(&mut self) -> i64 where Self: Sized { let mut buf = [0; 8]; self.copy_to_slice(&mut buf); T::read_i64(&buf) } /// Gets a signed 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); /// assert_eq!(0x0102030405060708, buf.get_i64_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i64_be(&mut self) -> i64 { buf_get_impl!(self, 8, BigEndian::read_i64); } /// Gets a signed 64 bit integer from `self` in little-endian byte order. /// /// The current position is advanced by 8. 
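/// Negative values are decoded from their little-endian two's-complement
/// representation.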
/// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); /// assert_eq!(0x0102030405060708, buf.get_i64_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_i64_le(&mut self) -> i64 { buf_get_impl!(self, 8, LittleEndian::read_i64); } /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. #[cfg(feature = "i128")] fn get_u128_be(&mut self) -> u128 { buf_get_impl!(self, 16, BigEndian::read_u128); } /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. #[cfg(feature = "i128")] fn get_u128_le(&mut self) -> u128 { buf_get_impl!(self, 16, LittleEndian::read_u128); } /// Gets a signed 128 bit integer from `self` in big-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. #[cfg(feature = "i128")] fn get_i128_be(&mut self) -> i128 { buf_get_impl!(self, 16, BigEndian::read_i128); } /// Gets a signed 128 bit integer from `self` in little-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. #[cfg(feature = "i128")] fn get_i128_le(&mut self) -> i128 { buf_get_impl!(self, 16, LittleEndian::read_i128); } #[doc(hidden)] #[deprecated(note="use get_uint_be or get_uint_le")] fn get_uint(&mut self, nbytes: usize) -> u64 where Self: Sized { let mut buf = [0; 8]; self.copy_to_slice(&mut buf[..nbytes]); T::read_uint(&buf[..nbytes], nbytes) } /// Gets an unsigned n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. 
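/// `nbytes` must be in the range `1..=8`; the result is zero-extended to
/// `u64`.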
/// /// # Examples /// /// ``` /// use bytes::{Buf, BigEndian}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); /// assert_eq!(0x010203, buf.get_uint_be(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_uint_be(&mut self, nbytes: usize) -> u64 { buf_get_impl!(self, 8, BigEndian::read_uint, nbytes); } /// Gets an unsigned n-byte integer from `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); /// assert_eq!(0x010203, buf.get_uint_le(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_uint_le(&mut self, nbytes: usize) -> u64 { buf_get_impl!(self, 8, LittleEndian::read_uint, nbytes); } #[doc(hidden)] #[deprecated(note="use get_int_be or get_int_le")] fn get_int(&mut self, nbytes: usize) -> i64 where Self: Sized { let mut buf = [0; 8]; self.copy_to_slice(&mut buf[..nbytes]); T::read_int(&buf[..nbytes], nbytes) } /// Gets a signed n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); /// assert_eq!(0x010203, buf.get_int_be(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_int_be(&mut self, nbytes: usize) -> i64 { buf_get_impl!(self, 8, BigEndian::read_int, nbytes); } /// Gets a signed n-byte integer from `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); /// assert_eq!(0x010203, buf.get_int_le(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_int_le(&mut self, nbytes: usize) -> i64 { buf_get_impl!(self, 8, LittleEndian::read_int, nbytes); } #[doc(hidden)] #[deprecated(note="use get_f32_be or get_f32_le")] fn get_f32(&mut self) -> f32 where Self: Sized { let mut buf = [0; 4]; self.copy_to_slice(&mut buf); T::read_f32(&buf) } /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello"); /// assert_eq!(1.2f32, buf.get_f32_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. fn get_f32_be(&mut self) -> f32 { buf_get_impl!(self, 4, BigEndian::read_f32); } /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x9A\x99\x99\x3F hello"); /// assert_eq!(1.2f32, buf.get_f32_le()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. 
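///
/// The four bytes are exactly the IEEE754 bit pattern; for instance
/// `1.0f32` is `00 00 80 3F` in little-endian order (illustrative
/// sketch):
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"\x00\x00\x80\x3F");
/// assert_eq!(1.0f32, buf.get_f32_le());
/// ```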
fn get_f32_le(&mut self) -> f32 {
    buf_get_impl!(self, 4, LittleEndian::read_f32);
}

#[doc(hidden)]
#[deprecated(note="use get_f64_be or get_f64_le")]
fn get_f64<T: ByteOrder>(&mut self) -> f64 where Self: Sized {
    let mut buf = [0; 8];
    self.copy_to_slice(&mut buf);
    T::read_f64(&buf)
}

/// Gets an IEEE754 double-precision (8 bytes) floating point number from
/// `self` in big-endian byte order.
///
/// The current position is advanced by 8.
///
/// # Examples
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello");
/// assert_eq!(1.2f64, buf.get_f64_be());
/// ```
///
/// # Panics
///
/// This function panics if there is not enough remaining data in `self`.
fn get_f64_be(&mut self) -> f64 {
    buf_get_impl!(self, 8, BigEndian::read_f64);
}

/// Gets an IEEE754 double-precision (8 bytes) floating point number from
/// `self` in little-endian byte order.
///
/// The current position is advanced by 8.
///
/// # Examples
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello");
/// assert_eq!(1.2f64, buf.get_f64_le());
/// ```
///
/// # Panics
///
/// This function panics if there is not enough remaining data in `self`.
fn get_f64_le(&mut self) -> f64 {
    buf_get_impl!(self, 8, LittleEndian::read_f64);
}

/// Transforms a `Buf` into a concrete buffer.
///
/// `collect()` can operate on any value that implements `Buf`, and turn it
/// into the relevant concrete buffer type.
///
/// # Examples
///
/// Collecting a buffer and loading the contents into a `Vec<u8>`.
///
/// ```
/// use bytes::{Buf, Bytes, IntoBuf};
///
/// let buf = Bytes::from(&b"hello world"[..]).into_buf();
/// let vec: Vec<u8> = buf.collect();
///
/// assert_eq!(vec, &b"hello world"[..]);
/// ```
fn collect<B>(self) -> B
    where Self: Sized,
          B: FromBuf,
{
    B::from_buf(self)
}

/// Creates an adaptor which will read at most `limit` bytes from `self`.
///
/// This function returns a new instance of `Buf` which will read at most
/// `limit` bytes.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, BufMut};
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new("hello world").take(5);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(dst, b"hello");
///
/// let mut buf = buf.into_inner();
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(dst, b" world");
/// ```
fn take(self, limit: usize) -> Take<Self>
    where Self: Sized
{
    super::take::new(self, limit)
}

/// Creates an adaptor which will chain this buffer with another.
///
/// The returned `Buf` instance will first consume all bytes from `self`.
/// Afterwards the output is equivalent to the output of next.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, Buf, IntoBuf};
/// use bytes::buf::Chain;
///
/// let buf = Bytes::from(&b"hello "[..]).into_buf()
///             .chain(Bytes::from(&b"world"[..]));
///
/// let full: Bytes = buf.collect();
/// assert_eq!(full[..], b"hello world"[..]);
/// ```
fn chain<U>(self, next: U) -> Chain<Self, U::Buf>
    where U: IntoBuf,
          Self: Sized,
{
    Chain::new(self, next.into_buf())
}

/// Creates a "by reference" adaptor for this instance of `Buf`.
///
/// The returned adaptor also implements `Buf` and will simply borrow `self`.
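/// This is useful when a function consumes a `Buf` by value but the
/// caller still needs the buffer afterwards.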
///
/// # Examples
///
/// ```
/// use bytes::{Buf, BufMut};
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new("hello world");
/// let mut dst = vec![];
///
/// {
///     let mut reference = buf.by_ref();
///     dst.put(&mut reference.take(5));
///     assert_eq!(dst, b"hello");
/// } // drop our &mut reference so we can use `buf` again
///
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(dst, b" world");
/// ```
fn by_ref(&mut self) -> &mut Self where Self: Sized {
    self
}

/// Creates an adaptor which implements the `Read` trait for `self`.
///
/// This function returns a new value which implements `Read` by adapting
/// the `Read` trait functions to the `Buf` trait functions. Given that
/// `Buf` operations are infallible, none of the `Read` functions will
/// return with `Err`.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, IntoBuf, Bytes};
/// use std::io::Read;
///
/// let buf = Bytes::from("hello world").into_buf();
///
/// let mut reader = buf.reader();
/// let mut dst = [0; 1024];
///
/// let num = reader.read(&mut dst).unwrap();
///
/// assert_eq!(11, num);
/// assert_eq!(&dst[..11], b"hello world");
/// ```
fn reader(self) -> Reader<Self> where Self: Sized {
    super::reader::new(self)
}

/// Returns an iterator over the bytes contained by the buffer.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, IntoBuf, Bytes};
///
/// let buf = Bytes::from(&b"abc"[..]).into_buf();
/// let mut iter = buf.iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
/// assert_eq!(iter.next(), Some(b'b'));
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
fn iter(self) -> Iter<Self> where Self: Sized {
    super::iter::new(self)
}
}

impl<'a, T: Buf + ?Sized> Buf for &'a mut T {
    fn remaining(&self) -> usize {
        (**self).remaining()
    }

    fn bytes(&self) -> &[u8] {
        (**self).bytes()
    }

    fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize {
        (**self).bytes_vec(dst)
    }

    fn advance(&mut self, cnt: usize) {
        (**self).advance(cnt)
    }
}

impl<T: Buf + ?Sized> Buf for Box<T> {
    fn remaining(&self) -> usize {
        (**self).remaining()
    }

    fn bytes(&self) -> &[u8] {
        (**self).bytes()
    }

    fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize {
        (**self).bytes_vec(dst)
    }

    fn advance(&mut self, cnt: usize) {
        (**self).advance(cnt)
    }
}

impl<T: AsRef<[u8]>> Buf for io::Cursor<T> {
    fn remaining(&self) -> usize {
        let len = self.get_ref().as_ref().len();
        let pos = self.position();

        if pos >= len as u64 {
            return 0;
        }

        len - pos as usize
    }

    fn bytes(&self) -> &[u8] {
        let len = self.get_ref().as_ref().len();
        let pos = self.position() as usize;

        if pos >= len {
            return Default::default();
        }

        &(self.get_ref().as_ref())[pos..]
    }

    fn advance(&mut self, cnt: usize) {
        let pos = (self.position() as usize)
            .checked_add(cnt).expect("overflow");

        assert!(pos <= self.get_ref().as_ref().len());

        self.set_position(pos as u64);
    }
}

impl Buf for Option<[u8; 1]> {
    fn remaining(&self) -> usize {
        if self.is_some() {
            1
        } else {
            0
        }
    }

    fn bytes(&self) -> &[u8] {
        self.as_ref().map(AsRef::as_ref)
            .unwrap_or(Default::default())
    }

    fn advance(&mut self, cnt: usize) {
        if cnt == 0 {
            return;
        }

        if self.is_none() {
            panic!("overflow");
        } else {
            assert_eq!(1, cnt);
            *self = None;
        }
    }
}

// The existence of this function makes the compiler catch if the Buf
// trait is "object-safe" or not.
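// (The generic methods and those bounded by `where Self: Sized` are
// excluded from the trait object; the remaining methods stay dispatchable
// through `&Buf`.)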
fn _assert_trait_object(_b: &Buf) {} bytes-0.4.12/src/buf/buf_mut.rs010064400007650000024000000767621337670411000145410ustar0000000000000000use super::{IntoBuf, Writer}; use byteorder::{LittleEndian, ByteOrder, BigEndian}; use iovec::IoVec; use std::{cmp, io, ptr, usize}; /// A trait for values that provide sequential write access to bytes. /// /// Write bytes to a buffer /// /// A buffer stores bytes in memory such that write operations are infallible. /// The underlying storage may or may not be in contiguous memory. A `BufMut` /// value is a cursor into the buffer. Writing to `BufMut` advances the cursor /// position. /// /// The simplest `BufMut` is a `Vec`. /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// /// buf.put("hello world"); /// /// assert_eq!(buf, b"hello world"); /// ``` pub trait BufMut { /// Returns the number of bytes that can be written from the current /// position until the end of the buffer is reached. /// /// This value is greater than or equal to the length of the slice returned /// by `bytes_mut`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// use std::io::Cursor; /// /// let mut dst = [0; 10]; /// let mut buf = Cursor::new(&mut dst[..]); /// /// assert_eq!(10, buf.remaining_mut()); /// buf.put("hello"); /// /// assert_eq!(5, buf.remaining_mut()); /// ``` /// /// # Implementer notes /// /// Implementations of `remaining_mut` should ensure that the return value /// does not change unless a call is made to `advance_mut` or any other /// function that is documented to change the `BufMut`'s current position. fn remaining_mut(&self) -> usize; /// Advance the internal cursor of the BufMut /// /// The next call to `bytes_mut` will return a slice starting `cnt` bytes /// further into the underlying buffer. /// /// This function is unsafe because there is no guarantee that the bytes /// being advanced past have been initialized. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = Vec::with_capacity(16); /// /// unsafe { /// buf.bytes_mut()[0] = b'h'; /// buf.bytes_mut()[1] = b'e'; /// /// buf.advance_mut(2); /// /// buf.bytes_mut()[0] = b'l'; /// buf.bytes_mut()[1..3].copy_from_slice(b"lo"); /// /// buf.advance_mut(3); /// } /// /// assert_eq!(5, buf.len()); /// assert_eq!(buf, b"hello"); /// ``` /// /// # Panics /// /// This function **may** panic if `cnt > self.remaining_mut()`. /// /// # Implementer notes /// /// It is recommended for implementations of `advance_mut` to panic if /// `cnt > self.remaining_mut()`. If the implementation does not panic, /// the call must behave as if `cnt == self.remaining_mut()`. /// /// A call with `cnt == 0` should never panic and be a no-op. unsafe fn advance_mut(&mut self, cnt: usize); /// Returns true if there is space in `self` for more bytes. /// /// This is equivalent to `self.remaining_mut() != 0`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// use std::io::Cursor; /// /// let mut dst = [0; 5]; /// let mut buf = Cursor::new(&mut dst); /// /// assert!(buf.has_remaining_mut()); /// /// buf.put("hello"); /// /// assert!(!buf.has_remaining_mut()); /// ``` fn has_remaining_mut(&self) -> bool { self.remaining_mut() > 0 } /// Returns a mutable slice starting at the current BufMut position and of /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the /// whole remainder of the buffer (this allows non-continuous implementation). /// /// This is a lower level function. Most operations are done with other /// functions. 
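/// Bytes written through the returned slice only become part of the
/// buffer once `advance_mut` is called with the number of bytes written.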
/// /// The returned byte slice may represent uninitialized memory. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = Vec::with_capacity(16); /// /// unsafe { /// buf.bytes_mut()[0] = b'h'; /// buf.bytes_mut()[1] = b'e'; /// /// buf.advance_mut(2); /// /// buf.bytes_mut()[0] = b'l'; /// buf.bytes_mut()[1..3].copy_from_slice(b"lo"); /// /// buf.advance_mut(3); /// } /// /// assert_eq!(5, buf.len()); /// assert_eq!(buf, b"hello"); /// ``` /// /// # Implementer notes /// /// This function should never panic. `bytes_mut` should return an empty /// slice **if and only if** `remaining_mut` returns 0. In other words, /// `bytes_mut` returning an empty slice implies that `remaining_mut` will /// return 0 and `remaining_mut` returning 0 implies that `bytes_mut` will /// return an empty slice. unsafe fn bytes_mut(&mut self) -> &mut [u8]; /// Fills `dst` with potentially multiple mutable slices starting at `self`'s /// current position. /// /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vec_mut` /// enables fetching more than one slice at once. `dst` is a slice of /// mutable `IoVec` references, enabling the slice to be directly used with /// [`readv`] without any further conversion. The sum of the lengths of all /// the buffers in `dst` will be less than or equal to /// `Buf::remaining_mut()`. /// /// The entries in `dst` will be overwritten, but the data **contained** by /// the slices **will not** be modified. If `bytes_vec_mut` does not fill every /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices /// in `self. /// /// This is a lower level function. Most operations are done with other /// functions. /// /// # Implementer notes /// /// This function should never panic. Once the end of the buffer is reached, /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vec_mut` must /// return 0 without mutating `dst`. /// /// Implementations should also take care to properly handle being called /// with `dst` being a zero length slice. /// /// [`readv`]: http://man7.org/linux/man-pages/man2/readv.2.html unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize { if dst.is_empty() { return 0; } if self.has_remaining_mut() { dst[0] = self.bytes_mut().into(); 1 } else { 0 } } /// Transfer bytes into `self` from `src` and advance the cursor by the /// number of bytes written. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// /// buf.put(b'h'); /// buf.put(&b"ello"[..]); /// buf.put(" world"); /// /// assert_eq!(buf, b"hello world"); /// ``` /// /// # Panics /// /// Panics if `self` does not have enough capacity to contain `src`. fn put(&mut self, src: T) where Self: Sized { use super::Buf; let mut src = src.into_buf(); assert!(self.remaining_mut() >= src.remaining()); while src.has_remaining() { let l; unsafe { let s = src.bytes(); let d = self.bytes_mut(); l = cmp::min(s.len(), d.len()); ptr::copy_nonoverlapping( s.as_ptr(), d.as_mut_ptr(), l); } src.advance(l); unsafe { self.advance_mut(l); } } } /// Transfer bytes into `self` from `src` and advance the cursor by the /// number of bytes written. /// /// `self` must have enough remaining capacity to contain all of `src`. 
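/// Unlike `put`, this accepts a plain byte slice directly rather than any
/// `T: IntoBuf`.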
/// /// ``` /// use bytes::BufMut; /// use std::io::Cursor; /// /// let mut dst = [0; 6]; /// /// { /// let mut buf = Cursor::new(&mut dst); /// buf.put_slice(b"hello"); /// /// assert_eq!(1, buf.remaining_mut()); /// } /// /// assert_eq!(b"hello\0", &dst); /// ``` fn put_slice(&mut self, src: &[u8]) { let mut off = 0; assert!(self.remaining_mut() >= src.len(), "buffer overflow"); while off < src.len() { let cnt; unsafe { let dst = self.bytes_mut(); cnt = cmp::min(dst.len(), src.len() - off); ptr::copy_nonoverlapping( src[off..].as_ptr(), dst.as_mut_ptr(), cnt); off += cnt; } unsafe { self.advance_mut(cnt); } } } /// Writes an unsigned 8 bit integer to `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u8(0x01); /// assert_eq!(buf, b"\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u8(&mut self, n: u8) { let src = [n]; self.put_slice(&src); } /// Writes a signed 8 bit integer to `self`. /// /// The current position is advanced by 1. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i8(0x01); /// assert_eq!(buf, b"\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i8(&mut self, n: i8) { let src = [n as u8]; self.put_slice(&src) } #[doc(hidden)] #[deprecated(note="use put_u16_be or put_u16_le")] fn put_u16(&mut self, n: u16) where Self: Sized { let mut buf = [0; 2]; T::write_u16(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u16_be(0x0809); /// assert_eq!(buf, b"\x08\x09"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u16_be(&mut self, n: u16) { let mut buf = [0; 2]; BigEndian::write_u16(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u16_le(0x0809); /// assert_eq!(buf, b"\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u16_le(&mut self, n: u16) { let mut buf = [0; 2]; LittleEndian::write_u16(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_i16_be or put_i16_le")] fn put_i16(&mut self, n: i16) where Self: Sized { let mut buf = [0; 2]; T::write_i16(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i16_be(0x0809); /// assert_eq!(buf, b"\x08\x09"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i16_be(&mut self, n: i16) { let mut buf = [0; 2]; BigEndian::write_i16(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 16 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 2. 
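/// The least-significant byte is written first.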
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i16_le(0x0809); /// assert_eq!(buf, b"\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i16_le(&mut self, n: i16) { let mut buf = [0; 2]; LittleEndian::write_i16(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_u32_be or put_u32_le")] fn put_u32(&mut self, n: u32) where Self: Sized { let mut buf = [0; 4]; T::write_u32(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u32_be(0x0809A0A1); /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u32_be(&mut self, n: u32) { let mut buf = [0; 4]; BigEndian::write_u32(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u32_le(0x0809A0A1); /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u32_le(&mut self, n: u32) { let mut buf = [0; 4]; LittleEndian::write_u32(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_i32_be or put_i32_le")] fn put_i32(&mut self, n: i32) where Self: Sized { let mut buf = [0; 4]; T::write_i32(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i32_be(0x0809A0A1); /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i32_be(&mut self, n: i32) { let mut buf = [0; 4]; BigEndian::write_i32(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 32 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i32_le(0x0809A0A1); /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i32_le(&mut self, n: i32) { let mut buf = [0; 4]; LittleEndian::write_i32(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_u64_be or put_u64_le")] fn put_u64(&mut self, n: u64) where Self: Sized { let mut buf = [0; 8]; T::write_u64(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u64_be(0x0102030405060708); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. 
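///
/// Writing a value and reading it back round-trips; as a quick
/// illustrative sketch:
///
/// ```
/// use bytes::{Buf, BufMut, IntoBuf};
///
/// let mut buf = vec![];
/// buf.put_u64_be(0x0102030405060708);
/// assert_eq!(0x0102030405060708, buf.into_buf().get_u64_be());
/// ```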
fn put_u64_be(&mut self, n: u64) { let mut buf = [0; 8]; BigEndian::write_u64(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u64_le(0x0102030405060708); /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_u64_le(&mut self, n: u64) { let mut buf = [0; 8]; LittleEndian::write_u64(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_i64_be or put_i64_le")] fn put_i64(&mut self, n: i64) where Self: Sized { let mut buf = [0; 8]; T::write_i64(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i64_be(0x0102030405060708); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i64_be(&mut self, n: i64) { let mut buf = [0; 8]; BigEndian::write_i64(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 64 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i64_le(0x0102030405060708); /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_i64_le(&mut self, n: i64) { let mut buf = [0; 8]; LittleEndian::write_i64(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u128_be(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. #[cfg(feature = "i128")] fn put_u128_be(&mut self, n: u128) { let mut buf = [0; 16]; BigEndian::write_u128(&mut buf, n); self.put_slice(&buf) } /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_u128_le(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. #[cfg(feature = "i128")] fn put_u128_le(&mut self, n: u128) { let mut buf = [0; 16]; LittleEndian::write_u128(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 128 bit integer to `self` in the big-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. 
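/// Negative values are written in two's-complement form.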
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i128_be(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. #[cfg(feature = "i128")] fn put_i128_be(&mut self, n: i128) { let mut buf = [0; 16]; BigEndian::write_i128(&mut buf, n); self.put_slice(&buf) } /// Writes a signed 128 bit integer to `self` in little-endian byte order. /// /// **NOTE:** This method requires the `i128` feature. /// The current position is advanced by 16. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_i128_le(0x01020304050607080910111213141516); /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. #[cfg(feature = "i128")] fn put_i128_le(&mut self, n: i128) { let mut buf = [0; 16]; LittleEndian::write_i128(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_uint_be or put_uint_le")] fn put_uint(&mut self, n: u64, nbytes: usize) where Self: Sized { let mut buf = [0; 8]; T::write_uint(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } /// Writes an unsigned n-byte integer to `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_uint_be(0x010203, 3); /// assert_eq!(buf, b"\x01\x02\x03"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_uint_be(&mut self, n: u64, nbytes: usize) { let mut buf = [0; 8]; BigEndian::write_uint(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_uint_le(0x010203, 3); /// assert_eq!(buf, b"\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_uint_le(&mut self, n: u64, nbytes: usize) { let mut buf = [0; 8]; LittleEndian::write_uint(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } #[doc(hidden)] #[deprecated(note="use put_int_be or put_int_le")] fn put_int(&mut self, n: i64, nbytes: usize) where Self: Sized { let mut buf = [0; 8]; T::write_int(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } /// Writes a signed n-byte integer to `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_int_be(0x010203, 3); /// assert_eq!(buf, b"\x01\x02\x03"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_int_be(&mut self, n: i64, nbytes: usize) { let mut buf = [0; 8]; BigEndian::write_int(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } /// Writes a signed n-byte integer to `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. 
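///
/// Negative values are written in two's complement, so the sign survives
/// the round trip through the low `nbytes` bytes. A sketch:
///
/// ```
/// use bytes::BufMut;
///
/// let mut buf = vec![];
/// buf.put_int_le(-2i64, 3);
/// assert_eq!(buf, b"\xFE\xFF\xFF");
/// ```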
/// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_int_le(0x010203, 3); /// assert_eq!(buf, b"\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_int_le(&mut self, n: i64, nbytes: usize) { let mut buf = [0; 8]; LittleEndian::write_int(&mut buf, n, nbytes); self.put_slice(&buf[0..nbytes]) } #[doc(hidden)] #[deprecated(note="use put_f32_be or put_f32_le")] fn put_f32(&mut self, n: f32) where Self: Sized { let mut buf = [0; 4]; T::write_f32(&mut buf, n); self.put_slice(&buf) } /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f32_be(1.2f32); /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f32_be(&mut self, n: f32) { let mut buf = [0; 4]; BigEndian::write_f32(&mut buf, n); self.put_slice(&buf) } /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in little-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f32_le(1.2f32); /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f32_le(&mut self, n: f32) { let mut buf = [0; 4]; LittleEndian::write_f32(&mut buf, n); self.put_slice(&buf) } #[doc(hidden)] #[deprecated(note="use put_f64_be or put_f64_le")] fn put_f64(&mut self, n: f64) where Self: Sized { let mut buf = [0; 8]; T::write_f64(&mut buf, n); self.put_slice(&buf) } /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f64_be(1.2f64); /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f64_be(&mut self, n: f64) { let mut buf = [0; 8]; BigEndian::write_f64(&mut buf, n); self.put_slice(&buf) } /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in little-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// /// let mut buf = vec![]; /// buf.put_f64_le(1.2f64); /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. fn put_f64_le(&mut self, n: f64) { let mut buf = [0; 8]; LittleEndian::write_f64(&mut buf, n); self.put_slice(&buf) } /// Creates a "by reference" adaptor for this instance of `BufMut`. /// /// The returned adapter also implements `BufMut` and will simply borrow /// `self`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// use std::io; /// /// let mut buf = vec![]; /// /// { /// let mut reference = buf.by_ref(); /// /// // Adapt reference to `std::io::Write`. 
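/// // (`writer()` consumes its receiver, which is why the example
/// // adapts the `&mut` borrow from `by_ref` rather than `buf` itself.)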
/// let mut writer = reference.writer(); /// /// // Use the buffer as a writer /// io::Write::write(&mut writer, &b"hello world"[..]).unwrap(); /// } // drop our &mut reference so that we can use `buf` again /// /// assert_eq!(buf, &b"hello world"[..]); /// ``` fn by_ref(&mut self) -> &mut Self where Self: Sized { self } /// Creates an adaptor which implements the `Write` trait for `self`. /// /// This function returns a new value which implements `Write` by adapting /// the `Write` trait functions to the `BufMut` trait functions. Given that /// `BufMut` operations are infallible, none of the `Write` functions will /// return with `Err`. /// /// # Examples /// /// ``` /// use bytes::BufMut; /// use std::io::Write; /// /// let mut buf = vec![].writer(); /// /// let num = buf.write(&b"hello world"[..]).unwrap(); /// assert_eq!(11, num); /// /// let buf = buf.into_inner(); /// /// assert_eq!(*buf, b"hello world"[..]); /// ``` fn writer(self) -> Writer<Self> where Self: Sized { super::writer::new(self) } } impl<'a, T: BufMut + ?Sized> BufMut for &'a mut T { fn remaining_mut(&self) -> usize { (**self).remaining_mut() } unsafe fn bytes_mut(&mut self) -> &mut [u8] { (**self).bytes_mut() } unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize { (**self).bytes_vec_mut(dst) } unsafe fn advance_mut(&mut self, cnt: usize) { (**self).advance_mut(cnt) } } impl<T: BufMut + ?Sized> BufMut for Box<T> { fn remaining_mut(&self) -> usize { (**self).remaining_mut() } unsafe fn bytes_mut(&mut self) -> &mut [u8] { (**self).bytes_mut() } unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize { (**self).bytes_vec_mut(dst) } unsafe fn advance_mut(&mut self, cnt: usize) { (**self).advance_mut(cnt) } } impl<T: AsMut<[u8]> + AsRef<[u8]>> BufMut for io::Cursor<T> { fn remaining_mut(&self) -> usize { use Buf; self.remaining() } /// Advance the internal cursor of the BufMut unsafe fn advance_mut(&mut self, cnt: usize) { use Buf; self.advance(cnt); } /// Returns a mutable slice starting at the current BufMut position and of /// length between 0 and `BufMut::remaining()`. /// /// The returned byte slice may represent uninitialized memory. unsafe fn bytes_mut(&mut self) -> &mut [u8] { let len = self.get_ref().as_ref().len(); let pos = self.position() as usize; if pos >= len { return Default::default(); } &mut (self.get_mut().as_mut())[pos..] } } impl BufMut for Vec<u8> { #[inline] fn remaining_mut(&self) -> usize { usize::MAX - self.len() } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { let len = self.len(); let remaining = self.capacity() - len; if cnt > remaining { // Reserve additional capacity, and ensure that the total length // will not overflow usize. self.reserve(cnt); } self.set_len(len + cnt); } #[inline] unsafe fn bytes_mut(&mut self) -> &mut [u8] { use std::slice; if self.capacity() == self.len() { self.reserve(64); // Grow the vec } let cap = self.capacity(); let len = self.len(); let ptr = self.as_mut_ptr(); &mut slice::from_raw_parts_mut(ptr, cap)[len..] } } // The existence of this function makes the compiler catch if the BufMut // trait is "object-safe" or not. fn _assert_trait_object(_b: &BufMut) {} bytes-0.4.12/src/buf/chain.rs010064400007650000024000000126751337670411000141510ustar0000000000000000use {Buf, BufMut}; use iovec::IoVec; /// A `Chain` sequences two buffers. /// /// `Chain` is an adapter that links two underlying buffers and provides a /// continuous view across both buffers. It is able to sequence either immutable /// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
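///
/// Reads drain the first buffer completely before continuing into the
/// second. A sketch:
///
/// ```
/// use bytes::{Buf, IntoBuf};
///
/// let mut buf = (&b"ab"[..]).into_buf().chain(&b"cd"[..]);
/// assert_eq!(buf.get_u8(), b'a');
/// assert_eq!(buf.get_u8(), b'b');
/// assert_eq!(buf.get_u8(), b'c'); // crossed into the second buffer
/// ```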
/// /// This struct is generally created by calling [`Buf::chain`]. Please see that /// function's documentation for more detail. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// use bytes::buf::Chain; /// /// let buf = Bytes::from(&b"hello "[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// let full: Bytes = buf.collect(); /// assert_eq!(full[..], b"hello world"[..]); /// ``` /// /// [`Buf::chain`]: trait.Buf.html#method.chain /// [`Buf`]: trait.Buf.html /// [`BufMut`]: trait.BufMut.html #[derive(Debug)] pub struct Chain { a: T, b: U, } impl Chain { /// Creates a new `Chain` sequencing the provided values. /// /// # Examples /// /// ``` /// use bytes::BytesMut; /// use bytes::buf::Chain; /// /// let buf = Chain::new( /// BytesMut::with_capacity(1024), /// BytesMut::with_capacity(1024)); /// /// // Use the chained buffer /// ``` pub fn new(a: T, b: U) -> Chain { Chain { a: a, b: b, } } /// Gets a reference to the first underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// /// let buf = Bytes::from(&b"hello"[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// assert_eq!(buf.first_ref().get_ref()[..], b"hello"[..]); /// ``` pub fn first_ref(&self) -> &T { &self.a } /// Gets a mutable reference to the first underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// /// let mut buf = Bytes::from(&b"hello "[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// buf.first_mut().set_position(1); /// /// let full: Bytes = buf.collect(); /// assert_eq!(full[..], b"ello world"[..]); /// ``` pub fn first_mut(&mut self) -> &mut T { &mut self.a } /// Gets a reference to the last underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// /// let buf = Bytes::from(&b"hello"[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// assert_eq!(buf.last_ref().get_ref()[..], b"world"[..]); /// ``` pub fn last_ref(&self) -> &U { &self.b } /// Gets a mutable reference to the last underlying `Buf`. /// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// /// let mut buf = Bytes::from(&b"hello "[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// buf.last_mut().set_position(1); /// /// let full: Bytes = buf.collect(); /// assert_eq!(full[..], b"hello orld"[..]); /// ``` pub fn last_mut(&mut self) -> &mut U { &mut self.b } /// Consumes this `Chain`, returning the underlying values. 
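///
/// Bytes already consumed through the `Chain` stay consumed, since the
/// two buffers come back exactly as they are at the moment of the call.
/// A sketch:
///
/// ```
/// use bytes::{Buf, IntoBuf};
///
/// let mut buf = (&b"ab"[..]).into_buf().chain(&b"cd"[..]);
/// buf.advance(3);
///
/// let (first, second) = buf.into_inner();
/// assert_eq!(first.remaining(), 0);
/// assert_eq!(second.remaining(), 1);
/// ```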
/// /// # Examples /// /// ``` /// use bytes::{Bytes, Buf, IntoBuf}; /// /// let buf = Bytes::from(&b"hello"[..]).into_buf() /// .chain(Bytes::from(&b"world"[..])); /// /// let (first, last) = buf.into_inner(); /// assert_eq!(first.get_ref()[..], b"hello"[..]); /// assert_eq!(last.get_ref()[..], b"world"[..]); /// ``` pub fn into_inner(self) -> (T, U) { (self.a, self.b) } } impl Buf for Chain where T: Buf, U: Buf, { fn remaining(&self) -> usize { self.a.remaining() + self.b.remaining() } fn bytes(&self) -> &[u8] { if self.a.has_remaining() { self.a.bytes() } else { self.b.bytes() } } fn advance(&mut self, mut cnt: usize) { let a_rem = self.a.remaining(); if a_rem != 0 { if a_rem >= cnt { self.a.advance(cnt); return; } // Consume what is left of a self.a.advance(a_rem); cnt -= a_rem; } self.b.advance(cnt); } fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize { let mut n = self.a.bytes_vec(dst); n += self.b.bytes_vec(&mut dst[n..]); n } } impl BufMut for Chain where T: BufMut, U: BufMut, { fn remaining_mut(&self) -> usize { self.a.remaining_mut() + self.b.remaining_mut() } unsafe fn bytes_mut(&mut self) -> &mut [u8] { if self.a.has_remaining_mut() { self.a.bytes_mut() } else { self.b.bytes_mut() } } unsafe fn advance_mut(&mut self, mut cnt: usize) { let a_rem = self.a.remaining_mut(); if a_rem != 0 { if a_rem >= cnt { self.a.advance_mut(cnt); return; } // Consume what is left of a self.a.advance_mut(a_rem); cnt -= a_rem; } self.b.advance_mut(cnt); } unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize { let mut n = self.a.bytes_vec_mut(dst); n += self.b.bytes_vec_mut(&mut dst[n..]); n } } bytes-0.4.12/src/buf/from_buf.rs010064400007650000024000000055121337670411000146600ustar0000000000000000use {Buf, BufMut, IntoBuf, Bytes, BytesMut}; /// Conversion from a [`Buf`] /// /// Implementing `FromBuf` for a type defines how it is created from a buffer. /// This is common for types which represent byte storage of some kind. /// /// [`FromBuf::from_buf`] is rarely called explicitly, and it is instead used /// through [`Buf::collect`]. See [`Buf::collect`] documentation for more examples. /// /// See also [`IntoBuf`]. /// /// # Examples /// /// Basic usage: /// /// ``` /// use bytes::{Bytes, IntoBuf}; /// use bytes::buf::FromBuf; /// /// let buf = Bytes::from(&b"hello world"[..]).into_buf(); /// let vec = Vec::from_buf(buf); /// /// assert_eq!(vec, &b"hello world"[..]); /// ``` /// /// Using [`Buf::collect`] to implicitly use `FromBuf`: /// /// ``` /// use bytes::{Buf, Bytes, IntoBuf}; /// /// let buf = Bytes::from(&b"hello world"[..]).into_buf(); /// let vec: Vec = buf.collect(); /// /// assert_eq!(vec, &b"hello world"[..]); /// ``` /// /// Implementing `FromBuf` for your type: /// /// ``` /// use bytes::{BufMut, Bytes}; /// use bytes::buf::{IntoBuf, FromBuf}; /// /// // A sample buffer, that's just a wrapper over Vec /// struct MyBuffer(Vec); /// /// impl FromBuf for MyBuffer { /// fn from_buf(buf: B) -> Self where B: IntoBuf { /// let mut v = Vec::new(); /// v.put(buf.into_buf()); /// MyBuffer(v) /// } /// } /// /// // Now we can make a new buf /// let buf = Bytes::from(&b"hello world"[..]); /// /// // And make a MyBuffer out of it /// let my_buf = MyBuffer::from_buf(buf); /// /// assert_eq!(my_buf.0, &b"hello world"[..]); /// ``` /// /// [`Buf`]: trait.Buf.html /// [`FromBuf::from_buf`]: #method.from_buf /// [`Buf::collect`]: trait.Buf.html#method.collect /// [`IntoBuf`]: trait.IntoBuf.html pub trait FromBuf { /// Creates a value from a buffer. 
/// /// See the [type-level documentation](#) for more details. /// /// # Examples /// /// Basic usage: /// /// ``` /// use bytes::{Bytes, IntoBuf}; /// use bytes::buf::FromBuf; /// /// let buf = Bytes::from(&b"hello world"[..]).into_buf(); /// let vec = Vec::from_buf(buf); /// /// assert_eq!(vec, &b"hello world"[..]); /// ``` fn from_buf(buf: T) -> Self where T: IntoBuf; } impl FromBuf for Vec { fn from_buf(buf: T) -> Self where T: IntoBuf { let buf = buf.into_buf(); let mut ret = Vec::with_capacity(buf.remaining()); ret.put(buf); ret } } impl FromBuf for Bytes { fn from_buf(buf: T) -> Self where T: IntoBuf { BytesMut::from_buf(buf).freeze() } } impl FromBuf for BytesMut { fn from_buf(buf: T) -> Self where T: IntoBuf { let buf = buf.into_buf(); let mut ret = BytesMut::with_capacity(buf.remaining()); ret.put(buf); ret } } bytes-0.4.12/src/buf/into_buf.rs010064400007650000024000000057301337670411000146700ustar0000000000000000use super::{Buf}; use std::io; /// Conversion into a `Buf` /// /// An `IntoBuf` implementation defines how to convert a value into a `Buf`. /// This is common for types that represent byte storage of some kind. `IntoBuf` /// may be implemented directly for types or on references for those types. /// /// # Examples /// /// ``` /// use bytes::{Buf, IntoBuf, BigEndian}; /// /// let bytes = b"\x00\x01hello world"; /// let mut buf = bytes.into_buf(); /// /// assert_eq!(1, buf.get_u16::()); /// /// let mut rest = [0; 11]; /// buf.copy_to_slice(&mut rest); /// /// assert_eq!(b"hello world", &rest); /// ``` pub trait IntoBuf { /// The `Buf` type that `self` is being converted into type Buf: Buf; /// Creates a `Buf` from a value. /// /// # Examples /// /// ``` /// use bytes::{Buf, IntoBuf, BigEndian}; /// /// let bytes = b"\x00\x01hello world"; /// let mut buf = bytes.into_buf(); /// /// assert_eq!(1, buf.get_u16::()); /// /// let mut rest = [0; 11]; /// buf.copy_to_slice(&mut rest); /// /// assert_eq!(b"hello world", &rest); /// ``` fn into_buf(self) -> Self::Buf; } impl IntoBuf for T { type Buf = Self; fn into_buf(self) -> Self { self } } impl<'a> IntoBuf for &'a [u8] { type Buf = io::Cursor<&'a [u8]>; fn into_buf(self) -> Self::Buf { io::Cursor::new(self) } } impl<'a> IntoBuf for &'a mut [u8] { type Buf = io::Cursor<&'a mut [u8]>; fn into_buf(self) -> Self::Buf { io::Cursor::new(self) } } impl<'a> IntoBuf for &'a str { type Buf = io::Cursor<&'a [u8]>; fn into_buf(self) -> Self::Buf { self.as_bytes().into_buf() } } impl IntoBuf for Vec { type Buf = io::Cursor>; fn into_buf(self) -> Self::Buf { io::Cursor::new(self) } } impl<'a> IntoBuf for &'a Vec { type Buf = io::Cursor<&'a [u8]>; fn into_buf(self) -> Self::Buf { io::Cursor::new(&self[..]) } } // Kind of annoying... but this impl is required to allow passing `&'static // [u8]` where for<'a> &'a T: IntoBuf is required. 
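// For instance (a sketch; `takes_buf` is a hypothetical helper, not part
// of the crate):
//
//     fn takes_buf<T>(_t: T) where for<'a> &'a T: IntoBuf {}
//     takes_buf::<&'static [u8]>(b"hi"); // relies on the impl below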
impl<'a> IntoBuf for &'a &'static [u8] { type Buf = io::Cursor<&'static [u8]>; fn into_buf(self) -> Self::Buf { io::Cursor::new(self) } } impl<'a> IntoBuf for &'a &'static str { type Buf = io::Cursor<&'static [u8]>; fn into_buf(self) -> Self::Buf { self.as_bytes().into_buf() } } impl IntoBuf for String { type Buf = io::Cursor>; fn into_buf(self) -> Self::Buf { self.into_bytes().into_buf() } } impl<'a> IntoBuf for &'a String { type Buf = io::Cursor<&'a [u8]>; fn into_buf(self) -> Self::Buf { self.as_bytes().into_buf() } } impl IntoBuf for u8 { type Buf = Option<[u8; 1]>; fn into_buf(self) -> Self::Buf { Some([self]) } } impl IntoBuf for i8 { type Buf = Option<[u8; 1]>; fn into_buf(self) -> Self::Buf { Some([self as u8; 1]) } } bytes-0.4.12/src/buf/iter.rs010064400007650000024000000051561337670411000140300ustar0000000000000000use Buf; /// Iterator over the bytes contained by the buffer. /// /// This struct is created by the [`iter`] method on [`Buf`]. /// /// # Examples /// /// Basic usage: /// /// ``` /// use bytes::{Buf, IntoBuf, Bytes}; /// /// let buf = Bytes::from(&b"abc"[..]).into_buf(); /// let mut iter = buf.iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// assert_eq!(iter.next(), Some(b'b')); /// assert_eq!(iter.next(), Some(b'c')); /// assert_eq!(iter.next(), None); /// ``` /// /// [`iter`]: trait.Buf.html#method.iter /// [`Buf`]: trait.Buf.html #[derive(Debug)] pub struct Iter { inner: T, } impl Iter { /// Consumes this `Iter`, returning the underlying value. /// /// # Examples /// /// ```rust /// use bytes::{Buf, IntoBuf, Bytes}; /// /// let buf = Bytes::from(&b"abc"[..]).into_buf(); /// let mut iter = buf.iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// let buf = iter.into_inner(); /// assert_eq!(2, buf.remaining()); /// ``` pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, IntoBuf, Bytes}; /// /// let buf = Bytes::from(&b"abc"[..]).into_buf(); /// let mut iter = buf.iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// assert_eq!(2, iter.get_ref().remaining()); /// ``` pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, IntoBuf, BytesMut}; /// /// let buf = BytesMut::from(&b"abc"[..]).into_buf(); /// let mut iter = buf.iter(); /// /// assert_eq!(iter.next(), Some(b'a')); /// /// iter.get_mut().set_position(0); /// /// assert_eq!(iter.next(), Some(b'a')); /// ``` pub fn get_mut(&mut self) -> &mut T { &mut self.inner } } pub fn new(inner: T) -> Iter { Iter { inner: inner } } impl Iterator for Iter { type Item = u8; fn next(&mut self) -> Option { if !self.inner.has_remaining() { return None; } let b = self.inner.bytes()[0]; self.inner.advance(1); Some(b) } fn size_hint(&self) -> (usize, Option) { let rem = self.inner.remaining(); (rem, Some(rem)) } } impl ExactSizeIterator for Iter { } bytes-0.4.12/src/buf/mod.rs010064400007650000024000000021361344002305400136300ustar0000000000000000//! Utilities for working with buffers. //! //! A buffer is any structure that contains a sequence of bytes. The bytes may //! or may not be stored in contiguous memory. This module contains traits used //! to abstract over buffers as well as utilities for working with buffer types. //! //! # `Buf`, `BufMut` //! //! 
These are the two foundational traits for abstractly working with buffers. //! They can be thought of as iterators for byte structures. They offer additional //! performance over `Iterator` by providing an API optimized for byte slices. //! //! See [`Buf`] and [`BufMut`] for more details. //! //! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) //! [`Buf`]: trait.Buf.html //! [`BufMut`]: trait.BufMut.html mod buf; mod buf_mut; mod from_buf; mod chain; mod into_buf; mod iter; mod reader; mod take; mod vec_deque; mod writer; pub use self::buf::Buf; pub use self::buf_mut::BufMut; pub use self::from_buf::FromBuf; pub use self::chain::Chain; pub use self::into_buf::IntoBuf; pub use self::iter::Iter; pub use self::reader::Reader; pub use self::take::Take; pub use self::writer::Writer; bytes-0.4.12/src/buf/reader.rs010064400007650000024000000045061337670411000143250ustar0000000000000000use {Buf}; use std::{cmp, io}; /// A `Buf` adapter which implements `io::Read` for the inner value. /// /// This struct is generally created by calling `reader()` on `Buf`. See /// documentation of [`reader()`](trait.Buf.html#method.reader) for more /// details. #[derive(Debug)] pub struct Reader<B> { buf: B, } pub fn new<B>(buf: B) -> Reader<B> { Reader { buf: buf } } impl<B: Buf> Reader<B> { /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io::{self, Cursor}; /// /// let mut buf = Cursor::new(b"hello world").reader(); /// /// assert_eq!(0, buf.get_ref().position()); /// ``` pub fn get_ref(&self) -> &B { &self.buf } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io::{self, Cursor}; /// /// let mut buf = Cursor::new(b"hello world").reader(); /// let mut dst = vec![]; /// /// buf.get_mut().set_position(2); /// io::copy(&mut buf, &mut dst).unwrap(); /// /// assert_eq!(*dst, b"llo world"[..]); /// ``` pub fn get_mut(&mut self) -> &mut B { &mut self.buf } /// Consumes this `Reader`, returning the underlying value. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io::{self, Cursor}; /// /// let mut buf = Cursor::new(b"hello world").reader(); /// let mut dst = vec![]; /// /// io::copy(&mut buf, &mut dst).unwrap(); /// /// let buf = buf.into_inner(); /// assert_eq!(0, buf.remaining()); /// ``` pub fn into_inner(self) -> B { self.buf } } impl<B: Buf + Sized> io::Read for Reader<B> { fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { let len = cmp::min(self.buf.remaining(), dst.len()); Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); Ok(len) } } impl<B: Buf + Sized> io::BufRead for Reader<B> { fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(self.buf.bytes()) } fn consume(&mut self, amt: usize) { self.buf.advance(amt) } } bytes-0.4.12/src/buf/take.rs010064400007650000024000000072001337670411000140010ustar0000000000000000use {Buf}; use std::cmp; /// A `Buf` adapter which limits the bytes read from an underlying buffer. /// /// This struct is generally created by calling `take()` on `Buf`. See /// documentation of [`take()`](trait.Buf.html#method.take) for more details. #[derive(Debug)] pub struct Take<T> { inner: T, limit: usize, } pub fn new<T>(inner: T, limit: usize) -> Take<T> { Take { inner: inner, limit: limit, } } impl<T> Take<T> { /// Consumes this `Take`, returning the underlying value.
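///
/// The limit is discarded along with the adapter; only the inner buffer
/// (and its current position) survives. A sketch:
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let buf = Cursor::new(b"hello world").take(2);
/// assert_eq!(11, buf.into_inner().remaining());
/// ```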
/// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// let mut buf = buf.into_inner(); /// /// dst.clear(); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo world"[..]); /// ``` pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// /// assert_eq!(0, buf.get_ref().position()); /// ``` pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// buf.get_mut().set_position(2); /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"ll"[..]); /// ``` pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than indicated by this method then /// that is the actual number of available bytes. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// /// assert_eq!(2, buf.limit()); /// assert_eq!(b'h', buf.get_u8()); /// assert_eq!(1, buf.limit()); /// ``` pub fn limit(&self) -> usize { self.limit } /// Sets the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than `lim` then that is the actual /// number of available bytes. 
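///
/// `remaining()` reports the clamped value described in the note above.
/// A sketch:
///
/// ```
/// use bytes::Buf;
/// use std::io::Cursor;
///
/// let mut buf = Cursor::new(b"hi").take(1);
/// buf.set_limit(5);
///
/// // Only 2 bytes exist, so the limit of 5 cannot be reached.
/// assert_eq!(2, buf.remaining());
/// ```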
/// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// dst.clear(); /// /// buf.set_limit(3); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo"[..]); /// ``` pub fn set_limit(&mut self, lim: usize) { self.limit = lim } } impl Buf for Take { fn remaining(&self) -> usize { cmp::min(self.inner.remaining(), self.limit) } fn bytes(&self) -> &[u8] { let bytes = self.inner.bytes(); &bytes[..cmp::min(bytes.len(), self.limit)] } fn advance(&mut self, cnt: usize) { assert!(cnt <= self.limit); self.inner.advance(cnt); self.limit -= cnt; } } bytes-0.4.12/src/buf/vec_deque.rs010064400007650000024000000015121344002305400150060ustar0000000000000000use std::collections::VecDeque; use super::Buf; impl Buf for VecDeque { fn remaining(&self) -> usize { self.len() } fn bytes(&self) -> &[u8] { let (s1, s2) = self.as_slices(); if s1.is_empty() { s2 } else { s1 } } fn advance(&mut self, cnt: usize) { self.drain(..cnt); } } #[cfg(test)] mod tests { use super::*; #[test] fn hello_world() { let mut buffer: VecDeque = VecDeque::new(); buffer.extend(b"hello world"); assert_eq!(11, buffer.remaining()); assert_eq!(b"hello world", buffer.bytes()); buffer.advance(6); assert_eq!(b"world", buffer.bytes()); buffer.extend(b" piece"); assert_eq!(b"world piece" as &[u8], &buffer.collect::>()[..]); } } bytes-0.4.12/src/buf/writer.rs010064400007650000024000000040321337670411000143710ustar0000000000000000use BufMut; use std::{cmp, io}; /// A `BufMut` adapter which implements `io::Write` for the inner value. /// /// This struct is generally created by calling `writer()` on `BufMut`. See /// documentation of [`writer()`](trait.BufMut.html#method.writer) for more /// details. #[derive(Debug)] pub struct Writer { buf: B, } pub fn new(buf: B) -> Writer { Writer { buf: buf } } impl Writer { /// Gets a reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. /// /// # Examples /// /// ```rust /// use bytes::BufMut; /// /// let mut buf = Vec::with_capacity(1024).writer(); /// /// assert_eq!(1024, buf.get_ref().capacity()); /// ``` pub fn get_ref(&self) -> &B { &self.buf } /// Gets a mutable reference to the underlying `BufMut`. /// /// It is inadvisable to directly write to the underlying `BufMut`. /// /// # Examples /// /// ```rust /// use bytes::BufMut; /// /// let mut buf = vec![].writer(); /// /// buf.get_mut().reserve(1024); /// /// assert_eq!(1024, buf.get_ref().capacity()); /// ``` pub fn get_mut(&mut self) -> &mut B { &mut self.buf } /// Consumes this `Writer`, returning the underlying value. 
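///
/// The writer can also sit on a fixed-capacity buffer; a sketch using the
/// `io::Cursor` `BufMut` impl from this crate:
///
/// ```
/// use bytes::BufMut;
/// use std::io::{Cursor, Write};
///
/// let mut writer = Cursor::new([0u8; 4]).writer();
///
/// // `io::Write` writes are clamped to the remaining capacity.
/// assert_eq!(4, writer.write(b"hello!").unwrap());
///
/// let cursor = writer.into_inner();
/// assert_eq!(&cursor.get_ref()[..], b"hell");
/// ```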
/// /// # Examples /// /// ```rust /// use bytes::BufMut; /// use std::io::{self, Cursor}; /// /// let mut buf = vec![].writer(); /// let mut src = Cursor::new(b"hello world"); /// /// io::copy(&mut src, &mut buf).unwrap(); /// /// let buf = buf.into_inner(); /// assert_eq!(*buf, b"hello world"[..]); /// ``` pub fn into_inner(self) -> B { self.buf } } impl io::Write for Writer { fn write(&mut self, src: &[u8]) -> io::Result { let n = cmp::min(self.buf.remaining_mut(), src.len()); self.buf.put(&src[0..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } bytes-0.4.12/src/bytes.rs010064400007650000024000002510641344002305400134310ustar0000000000000000use {IntoBuf, Buf, BufMut}; use buf::Iter; use debug; use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize}; use std::borrow::{Borrow, BorrowMut}; use std::io::Cursor; use std::sync::atomic::{self, AtomicUsize, AtomicPtr}; use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel}; use std::iter::{FromIterator, Iterator}; /// A reference counted contiguous slice of memory. /// /// `Bytes` is an efficient container for storing and operating on contiguous /// slices of memory. It is intended for use primarily in networking code, but /// could have applications elsewhere as well. /// /// `Bytes` values facilitate zero-copy network programming by allowing multiple /// `Bytes` objects to point to the same underlying memory. This is managed by /// using a reference count to track when the memory is no longer needed and can /// be freed. /// /// ``` /// use bytes::Bytes; /// /// let mut mem = Bytes::from(&b"Hello world"[..]); /// let a = mem.slice(0, 5); /// /// assert_eq!(&a[..], b"Hello"); /// /// let b = mem.split_to(6); /// /// assert_eq!(&mem[..], b"world"); /// assert_eq!(&b[..], b"Hello "); /// ``` /// /// # Memory layout /// /// The `Bytes` struct itself is fairly small, limited to a pointer to the /// memory and 4 `usize` fields used to track information about which segment of /// the underlying memory the `Bytes` handle has access to. /// /// The memory layout looks like this: /// /// ```text /// +-------+ /// | Bytes | /// +-------+ /// / \_____ /// | \ /// v v /// +-----+------------------------------------+ /// | Arc | | Data | | /// +-----+------------------------------------+ /// ``` /// /// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory /// slice and a pointer to the start of the region visible by the handle. /// `Bytes` also tracks the length of its view into the memory. /// /// # Sharing /// /// The memory itself is reference counted, and multiple `Bytes` objects may /// point to the same region. Each `Bytes` handle point to different sections within /// the memory region, and `Bytes` handle may or may not have overlapping views /// into the memory. /// /// /// ```text /// /// Arc ptrs +---------+ /// ________________________ / | Bytes 2 | /// / +---------+ /// / +-----------+ | | /// |_________/ | Bytes 1 | | | /// | +-----------+ | | /// | | | ___/ data | tail /// | data | tail |/ | /// v v v v /// +-----+---------------------------------+-----+ /// | Arc | | | | | /// +-----+---------------------------------+-----+ /// ``` /// /// # Mutating /// /// While `Bytes` handles may potentially represent overlapping views of the /// underlying memory slice and may not be mutated, `BytesMut` handles are /// guaranteed to be the only handle able to view that slice of memory. As such, /// `BytesMut` handles are able to mutate the underlying memory. 
Note that /// holding a unique view to a region of memory does not mean that there are no /// other `Bytes` and `BytesMut` handles with disjoint views of the underlying /// memory. /// /// # Inline bytes /// /// As an optimization, when the slice referenced by a `Bytes` or `BytesMut` /// handle is small enough [^1], `with_capacity` will avoid the allocation by /// inlining the slice directly in the handle. In this case, a clone is no /// longer "shallow" and the data will be copied. Converting from a `Vec<u8>` will /// never use inlining. /// /// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems. /// pub struct Bytes { inner: Inner, } /// A unique reference to a contiguous slice of memory. /// /// `BytesMut` represents a unique view into a potentially shared memory region. /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to /// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and /// allocations. /// /// For more detail, see [Bytes](struct.Bytes.html). /// /// # Growth /// /// One key difference from `Vec<u8>` is that most operations **do not /// implicitly grow the buffer**. This means that calling `my_bytes.put("hello /// world");` could panic if `my_bytes` does not have enough capacity. Before /// writing to the buffer, ensure that there is enough remaining capacity by /// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve` /// is preferable. /// /// The only exception is `extend` which implicitly reserves required capacity. /// /// # Examples /// /// ``` /// use bytes::{BytesMut, BufMut}; /// /// let mut buf = BytesMut::with_capacity(64); /// /// buf.put(b'h'); /// buf.put(b'e'); /// buf.put("llo"); /// /// assert_eq!(&buf[..], b"hello"); /// /// // Freeze the buffer so that it can be shared /// let a = buf.freeze(); /// /// // This does not allocate, instead `b` points to the same memory. /// let b = a.clone(); /// /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b"hello"); /// ``` pub struct BytesMut { inner: Inner, } // Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated // to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions // that mutate the underlying buffer are only performed when the data range // being mutated is only available via a single `BytesMut` handle. // // # Data storage modes // // The goal of `bytes` is to be as efficient as possible across a wide range of // potential usage patterns. As such, `bytes` needs to be able to handle buffers // that are never shared, shared on a single thread, and shared across many // threads. `bytes` also needs to handle both tiny buffers as well as very large // buffers. For example, [Cassandra](http://cassandra.apache.org) values have // been known to be in the hundreds of megabytes, and HTTP header values can be a // few characters in size. // // To achieve high performance in these various situations, `Bytes` and // `BytesMut` use different strategies for storing the buffer depending on the // usage pattern. // // ## Delayed `Arc` allocation // // When a `Bytes` or `BytesMut` is first created, there is only one outstanding // handle referencing the buffer. Since sharing is not yet required, an `Arc`* is // not used and the buffer is backed by a `Vec<u8>` directly. Using an // `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being // shared, that allocation is avoided.
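// A sketch of that lifecycle as seen from the public API (the promotion
// itself is internal and not directly observable):
//
//     use bytes::Bytes;
//
//     let a = Bytes::from(vec![1u8, 2, 3]); // Vec-backed; no `Arc` yet
//     let b = a.clone();                    // first share promotes to `Arc`
//     assert_eq!(a, b);                     // both handles see the same bytes
//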
// // When sharing does become necessary (`clone`, `split_to`, `split_off`), that // is when the buffer is promoted to being shareable. The `Vec` is moved // into an `Arc` and both the original handle and the new handle use the same // buffer via the `Arc`. // // * `Arc` is being used to signify an atomically reference counted cell. We // don't use the `Arc` implementation provided by `std` and instead use our own. // This ends up simplifying a number of the `unsafe` code snippets. // // ## Inlining small buffers // // The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit // systems, this ends up being 32 bytes, which is actually a lot of storage for // cases where `Bytes` is being used to represent small byte strings, such as // HTTP header names and values. // // To avoid any allocation at all in these cases, `Bytes` will use the struct // itself for storing the buffer, reserving 1 byte for meta data. This means // that, on 64 bit systems, 31 byte buffers require no allocation at all. // // The byte used for metadata stores a 2 bits flag used to indicate that the // buffer is stored inline as well as 6 bits for tracking the buffer length (the // return value of `Bytes::len`). // // ## Static buffers // // `Bytes` can also represent a static buffer, which is created with // `Bytes::from_static`. No copying or allocations are required for tracking // static buffers. The pointer to the `&'static [u8]`, the length, and a flag // tracking that the `Bytes` instance represents a static buffer is stored in // the `Bytes` struct. // // # Struct layout // // Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the // data fields as well as all of the function implementations. // // The `Inner` struct is carefully laid out in order to support the // functionality described above as well as being as small as possible. Size is // important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes // added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header // map structure. // // The `Inner` struct contains the following fields: // // * `ptr: *mut u8` // * `len: usize` // * `cap: usize` // * `arc: AtomicPtr` // // ## `ptr: *mut u8` // // A pointer to start of the handle's buffer view. When backed by a `Vec`, // this is always the `Vec`'s pointer. When backed by an `Arc>`, `ptr` // may have been shifted to point somewhere inside the buffer. // // When in "inlined" mode, `ptr` is used as part of the inlined buffer. // // ## `len: usize` // // The length of the handle's buffer view. When backed by a `Vec`, this is // always the `Vec`'s length. The slice represented by `ptr` and `len` should // (ideally) always be initialized memory. // // When in "inlined" mode, `len` is used as part of the inlined buffer. // // ## `cap: usize` // // The capacity of the handle's buffer view. When backed by a `Vec`, this is // always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len` // may or may not be initialized memory. // // When in "inlined" mode, `cap` is used as part of the inlined buffer. // // ## `arc: AtomicPtr` // // When `Inner` is in allocated mode (backed by Vec or Arc>), this // will be the pointer to the `Arc` structure tracking the ref count for the // underlying buffer. When the pointer is null, then the `Arc` has not been // allocated yet and `self` is the only outstanding handle for the underlying // buffer. // // The lower two bits of `arc` are used to track the storage mode of `Inner`. 
// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11` // indicates vector storage, not yet promoted to Arc. Since pointers to // allocated structures are aligned, the lower two bits of a pointer will always // be 0. This allows disambiguating between a pointer and the two flags. // // When in "inlined" mode, the least significant byte of `arc` is also used to // store the length of the buffer view (vs. the capacity, which is a constant). // // The rest of `arc`'s bytes are used as part of the inline buffer, which means // that those bytes need to be located next to the `ptr`, `len`, and `cap` // fields, which make up the rest of the inline buffer. This requires special // casing the layout of `Inner` depending on if the target platform is big or // little endian. // // On little endian platforms, the `arc` field must be the first field in the // struct. On big endian platforms, the `arc` field must be the last field in // the struct. Since a deterministic struct layout is required, `Inner` is // annotated with `#[repr(C)]`. // // # Thread safety // // `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done // by bumping the buffer ref count and returning a new struct pointing to the // same buffer. However, the `Arc` structure is lazily allocated. This means // that if `Bytes` is stored itself in an `Arc` (`Arc`), the `clone` // function can be called concurrently from multiple threads. This is why an // `AtomicPtr` is used for the `arc` field vs. a `*const`. // // Care is taken to ensure that the need for synchronization is minimized. Most // operations do not require any synchronization. // #[cfg(target_endian = "little")] #[repr(C)] struct Inner { // WARNING: Do not access the fields directly unless you know what you are // doing. Instead, use the fns. See implementation comment above. arc: AtomicPtr, ptr: *mut u8, len: usize, cap: usize, } #[cfg(target_endian = "big")] #[repr(C)] struct Inner { // WARNING: Do not access the fields directly unless you know what you are // doing. Instead, use the fns. See implementation comment above. ptr: *mut u8, len: usize, cap: usize, arc: AtomicPtr, } // Thread-safe reference-counted container for the shared storage. This mostly // the same as `std::sync::Arc` but without the weak counter. The ref counting // fns are based on the ones found in `std`. // // The main reason to use `Shared` instead of `std::sync::Arc` is that it ends // up making the overall code simpler and easier to reason about. This is due to // some of the logic around setting `Inner::arc` and other ways the `arc` field // is used. Using `Arc` ended up requiring a number of funky transmutes and // other shenanigans to make it work. struct Shared { vec: Vec, original_capacity_repr: usize, ref_count: AtomicUsize, } // Buffer storage strategy flags. const KIND_ARC: usize = 0b00; const KIND_INLINE: usize = 0b01; const KIND_STATIC: usize = 0b10; const KIND_VEC: usize = 0b11; const KIND_MASK: usize = 0b11; // The max original capacity value. Any `Bytes` allocated with a greater initial // capacity will default to this. const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; // The original capacity algorithm will not take effect unless the originally // allocated capacity was at least 1kb in size. const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; // The original capacity is stored in powers of 2 starting at 1kb to a max of // 64kb. Representing it as such requires only 3 bits of storage. 
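// (So a 3-bit repr value `r` decodes to an original capacity of
// `1kb << r`; a sketch of the encoding, inferred from the two width
// constants above.)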
const ORIGINAL_CAPACITY_MASK: usize = 0b11100; const ORIGINAL_CAPACITY_OFFSET: usize = 2; // When the storage is in the `Vec` representation, the pointer can be advanced // at most this value. This is due to the amount of storage available to track // the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY // bits. const VEC_POS_OFFSET: usize = 5; const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; const NOT_VEC_POS_MASK: usize = 0b11111; // Bit op constants for extracting the inline length value from the `arc` field. const INLINE_LEN_MASK: usize = 0b11111100; const INLINE_LEN_OFFSET: usize = 2; // Byte offset from the start of `Inner` to where the inline buffer data // starts. On little endian platforms, the first byte of the struct is the // storage flag, so the data is shifted by a byte. On big endian systems, the // data starts at the beginning of the struct. #[cfg(target_endian = "little")] const INLINE_DATA_OFFSET: isize = 1; #[cfg(target_endian = "big")] const INLINE_DATA_OFFSET: isize = 0; #[cfg(target_pointer_width = "64")] const PTR_WIDTH: usize = 64; #[cfg(target_pointer_width = "32")] const PTR_WIDTH: usize = 32; // Inline buffer capacity. This is the size of `Inner` minus 1 byte for the // metadata. #[cfg(target_pointer_width = "64")] const INLINE_CAP: usize = 4 * 8 - 1; #[cfg(target_pointer_width = "32")] const INLINE_CAP: usize = 4 * 4 - 1; /* * * ===== Bytes ===== * */ impl Bytes { /// Creates a new `Bytes` with the specified capacity. /// /// The returned `Bytes` will be able to hold at least `capacity` bytes /// without reallocating. If `capacity` is under `4 * size_of::() - 1`, /// then `BytesMut` will not allocate. /// /// It is important to note that this function does not specify the length /// of the returned `Bytes`, but only the capacity. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut bytes = Bytes::with_capacity(64); /// /// // `bytes` contains no data, even though there is capacity /// assert_eq!(bytes.len(), 0); /// /// bytes.extend_from_slice(&b"hello world"[..]); /// /// assert_eq!(&bytes[..], b"hello world"); /// ``` #[inline] pub fn with_capacity(capacity: usize) -> Bytes { Bytes { inner: Inner::with_capacity(capacity), } } /// Creates a new empty `Bytes`. /// /// This will not allocate and the returned `Bytes` handle will be empty. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::new(); /// assert_eq!(&b[..], b""); /// ``` #[inline] pub fn new() -> Bytes { Bytes::with_capacity(0) } /// Creates a new `Bytes` from a static slice. /// /// The returned `Bytes` will point directly to the static slice. There is /// no allocating or copying. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::from_static(b"hello"); /// assert_eq!(&b[..], b"hello"); /// ``` #[inline] pub fn from_static(bytes: &'static [u8]) -> Bytes { Bytes { inner: Inner::from_static(bytes), } } /// Returns the number of bytes contained in this `Bytes`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::from(&b"hello"[..]); /// assert_eq!(b.len(), 5); /// ``` #[inline] pub fn len(&self) -> usize { self.inner.len() } /// Returns true if the `Bytes` has a length of 0. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let b = Bytes::new(); /// assert!(b.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.inner.is_empty() } /// Returns a slice of self for the index range `[begin..end)`. 
/// /// This will increment the reference count for the underlying memory and /// return a new `Bytes` handle set to the slice. /// /// This operation is `O(1)`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let a = Bytes::from(&b"hello world"[..]); /// let b = a.slice(2, 5); /// /// assert_eq!(&b[..], b"llo"); /// ``` /// /// # Panics /// /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing /// will panic. pub fn slice(&self, begin: usize, end: usize) -> Bytes { assert!(begin <= end); assert!(end <= self.len()); if end - begin <= INLINE_CAP { return Bytes::from(&self[begin..end]); } let mut ret = self.clone(); unsafe { ret.inner.set_end(end); ret.inner.set_start(begin); } ret } /// Returns a slice of self for the index range `[begin..self.len())`. /// /// This will increment the reference count for the underlying memory and /// return a new `Bytes` handle set to the slice. /// /// This operation is `O(1)` and is equivalent to `self.slice(begin, /// self.len())`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let a = Bytes::from(&b"hello world"[..]); /// let b = a.slice_from(6); /// /// assert_eq!(&b[..], b"world"); /// ``` /// /// # Panics /// /// Requires that `begin <= self.len()`, otherwise slicing will panic. pub fn slice_from(&self, begin: usize) -> Bytes { self.slice(begin, self.len()) } /// Returns a slice of self for the index range `[0..end)`. /// /// This will increment the reference count for the underlying memory and /// return a new `Bytes` handle set to the slice. /// /// This operation is `O(1)` and is equivalent to `self.slice(0, end)`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let a = Bytes::from(&b"hello world"[..]); /// let b = a.slice_to(5); /// /// assert_eq!(&b[..], b"hello"); /// ``` /// /// # Panics /// /// Requires that `end <= self.len()`, otherwise slicing will panic. pub fn slice_to(&self, end: usize) -> Bytes { self.slice(0, end) } /// Returns a slice of self that is equivalent to the given `subset`. /// /// When processing a `Bytes` buffer with other tools, one often gets a /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. /// This function turns that `&[u8]` into another `Bytes`, as if one had /// called `self.slice()` with the offsets that correspond to `subset`. /// /// This operation is `O(1)`. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let bytes = Bytes::from(&b"012345678"[..]); /// let as_slice = bytes.as_ref(); /// let subset = &as_slice[2..6]; /// let subslice = bytes.slice_ref(&subset); /// assert_eq!(&subslice[..], b"2345"); /// ``` /// /// # Panics /// /// Requires that the given `sub` slice is in fact contained within the /// `Bytes` buffer; otherwise this function will panic. pub fn slice_ref(&self, subset: &[u8]) -> Bytes { let bytes_p = self.as_ptr() as usize; let bytes_len = self.len(); let sub_p = subset.as_ptr() as usize; let sub_len = subset.len(); assert!(sub_p >= bytes_p); assert!(sub_p + sub_len <= bytes_p + bytes_len); let sub_offset = sub_p - bytes_p; self.slice(sub_offset, sub_offset + sub_len) } /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` /// contains elements `[at, len)`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. 
/// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut a = Bytes::from(&b"hello world"[..]); /// let b = a.split_off(5); /// /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b" world"); /// ``` /// /// # Panics /// /// Panics if `at > len`. pub fn split_off(&mut self, at: usize) -> Bytes { assert!(at <= self.len()); if at == self.len() { return Bytes::new(); } if at == 0 { return mem::replace(self, Bytes::new()); } Bytes { inner: self.inner.split_off(at), } } /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[at, len)`, and the returned /// `Bytes` contains elements `[0, at)`. /// /// This is an `O(1)` operation that just increases the reference count and /// sets a few indices. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut a = Bytes::from(&b"hello world"[..]); /// let b = a.split_to(5); /// /// assert_eq!(&a[..], b" world"); /// assert_eq!(&b[..], b"hello"); /// ``` /// /// # Panics /// /// Panics if `at > len`. pub fn split_to(&mut self, at: usize) -> Bytes { assert!(at <= self.len()); if at == self.len() { return mem::replace(self, Bytes::new()); } if at == 0 { return Bytes::new(); } Bytes { inner: self.inner.split_to(at), } } #[deprecated(since = "0.4.1", note = "use split_to instead")] #[doc(hidden)] pub fn drain_to(&mut self, at: usize) -> Bytes { self.split_to(at) } /// Shortens the buffer, keeping the first `len` bytes and dropping the /// rest. /// /// If `len` is greater than the buffer's current length, this has no /// effect. /// /// The [`split_off`] method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut buf = Bytes::from(&b"hello world"[..]); /// buf.truncate(5); /// assert_eq!(buf, b"hello"[..]); /// ``` /// /// [`split_off`]: #method.split_off pub fn truncate(&mut self, len: usize) { self.inner.truncate(len); } /// Shortens the buffer, dropping the first `cnt` bytes and keeping the /// rest. /// /// This is the same function as `Buf::advance`, and in the next breaking /// release of `bytes`, this implementation will be removed in favor of /// having `Bytes` implement `Buf`. /// /// # Panics /// /// This function panics if `cnt` is greater than `self.len()` #[inline] pub fn advance(&mut self, cnt: usize) { assert!(cnt <= self.len(), "cannot advance past `remaining`"); unsafe { self.inner.set_start(cnt); } } /// Clears the buffer, removing all data. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut buf = Bytes::from(&b"hello world"[..]); /// buf.clear(); /// assert!(buf.is_empty()); /// ``` pub fn clear(&mut self) { self.truncate(0); } /// Attempts to convert into a `BytesMut` handle. /// /// This will only succeed if there are no other outstanding references to /// the underlying chunk of memory. `Bytes` handles that contain inlined /// bytes will always be convertable to `BytesMut`. 
/// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let a = Bytes::from(&b"Mary had a little lamb, little lamb, little lamb..."[..]); /// /// // Create a shallow clone /// let b = a.clone(); /// /// // This will fail because `b` shares a reference with `a` /// let a = a.try_mut().unwrap_err(); /// /// drop(b); /// /// // This will succeed /// let mut a = a.try_mut().unwrap(); /// /// a[0] = b'b'; /// /// assert_eq!(&a[..4], b"bary"); /// ``` pub fn try_mut(mut self) -> Result<BytesMut, Bytes> { if self.inner.is_mut_safe() { Ok(BytesMut { inner: self.inner }) } else { Err(self) } } /// Appends given bytes to this object. /// /// If this `Bytes` object does not have enough capacity, it is resized first. /// If it is shared (`refcount > 1`), it is copied first. /// /// This operation can be less efficient than the similar operation on /// `BytesMut`, especially on small additions. /// /// # Examples /// /// ``` /// use bytes::Bytes; /// /// let mut buf = Bytes::from("aabb"); /// buf.extend_from_slice(b"ccdd"); /// buf.extend_from_slice(b"eeff"); /// /// assert_eq!(b"aabbccddeeff", &buf[..]); /// ``` pub fn extend_from_slice(&mut self, extend: &[u8]) { if extend.is_empty() { return; } let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow"); let result = match mem::replace(self, Bytes::new()).try_mut() { Ok(mut bytes_mut) => { bytes_mut.extend_from_slice(extend); bytes_mut }, Err(bytes) => { let mut bytes_mut = BytesMut::with_capacity(new_cap); bytes_mut.put_slice(&bytes); bytes_mut.put_slice(extend); bytes_mut } }; mem::replace(self, result.freeze()); } } impl IntoBuf for Bytes { type Buf = Cursor<Self>; fn into_buf(self) -> Self::Buf { Cursor::new(self) } } impl<'a> IntoBuf for &'a Bytes { type Buf = Cursor<Self>; fn into_buf(self) -> Self::Buf { Cursor::new(self) } } impl Clone for Bytes { fn clone(&self) -> Bytes { Bytes { inner: unsafe { self.inner.shallow_clone(false) }, } } } impl AsRef<[u8]> for Bytes { #[inline] fn as_ref(&self) -> &[u8] { self.inner.as_ref() } } impl ops::Deref for Bytes { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.inner.as_ref() } } impl From<BytesMut> for Bytes { fn from(src: BytesMut) -> Bytes { src.freeze() } } impl From<Vec<u8>> for Bytes { fn from(src: Vec<u8>) -> Bytes { BytesMut::from(src).freeze() } } impl From<String> for Bytes { fn from(src: String) -> Bytes { BytesMut::from(src).freeze() } } impl<'a> From<&'a [u8]> for Bytes { fn from(src: &'a [u8]) -> Bytes { BytesMut::from(src).freeze() } } impl<'a> From<&'a str> for Bytes { fn from(src: &'a str) -> Bytes { BytesMut::from(src).freeze() } } impl FromIterator<u8> for BytesMut { fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { let iter = into_iter.into_iter(); let (min, maybe_max) = iter.size_hint(); let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min)); for i in iter { out.reserve(1); out.put(i); } out } } impl FromIterator<u8> for Bytes { fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { BytesMut::from_iter(into_iter).freeze() } } impl<'a> FromIterator<&'a u8> for BytesMut { fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self { BytesMut::from_iter(into_iter.into_iter().map(|b| *b)) } } impl<'a> FromIterator<&'a u8> for Bytes { fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self { BytesMut::from_iter(into_iter).freeze() } } impl PartialEq for Bytes { fn eq(&self, other: &Bytes) -> bool { self.inner.as_ref() == other.inner.as_ref() } } impl PartialOrd for Bytes { fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { self.inner.as_ref().partial_cmp(other.inner.as_ref()) } } impl Ord for Bytes { fn cmp(&self, other: &Bytes) -> cmp::Ordering {
impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.inner.as_ref().cmp(other.inner.as_ref())
    }
}

impl Eq for Bytes {}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl fmt::Debug for Bytes {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = Iter<Cursor<Bytes>>;

    fn into_iter(self) -> Self::IntoIter {
        self.into_buf().iter()
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = u8;
    type IntoIter = Iter<Cursor<&'a Bytes>>;

    fn into_iter(self) -> Self::IntoIter {
        self.into_buf().iter()
    }
}

impl Extend<u8> for Bytes {
    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
        let iter = iter.into_iter();

        let (lower, upper) = iter.size_hint();

        // Avoid possible conversion into mut if there's nothing to add
        if let Some(0) = upper {
            return;
        }

        let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
            Ok(bytes_mut) => bytes_mut,
            Err(bytes) => {
                let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower);
                bytes_mut.put_slice(&bytes);
                bytes_mut
            }
        };

        bytes_mut.extend(iter);

        mem::replace(self, bytes_mut.freeze());
    }
}

impl<'a> Extend<&'a u8> for Bytes {
    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
        self.extend(iter.into_iter().map(|b| *b))
    }
}

/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
    /// then `BytesMut` will not allocate.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut {
            inner: Inner::with_capacity(capacity),
        }
    }

    /// Creates a new `BytesMut` with default capacity.
    ///
    /// The resulting object has length 0 and unspecified capacity.
    /// This function does not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(0)
    }

    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
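    // A worked example of the inline threshold documented on `with_capacity`
    // above: on a 64-bit target, `4 * size_of::<usize>() - 1` is 31, so
    // requests up to 31 bytes are stored inline in the handle itself.
    //
    //     let small = BytesMut::with_capacity(16); // inline, no allocation
    //     let big   = BytesMut::with_capacity(64); // heap-allocated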
    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.inner.capacity()
    }

    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put("hello world");
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(self) -> Bytes {
        Bytes { inner: self.inner }
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        BytesMut {
            inner: self.inner.split_off(at),
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.take();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    pub fn take(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }

    #[deprecated(since = "0.4.1", note = "use take instead")]
    #[doc(hidden)]
    pub fn drain(&mut self) -> BytesMut {
        self.take()
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        BytesMut {
            inner: self.inner.split_to(at),
        }
    }

    #[deprecated(since = "0.4.1", note = "use split_to instead")]
    #[doc(hidden)]
    pub fn drain_to(&mut self, at: usize) -> BytesMut {
        self.split_to(at)
    }
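    // A minimal framing sketch built on `split_to` + `freeze` above:
    // fixed-size frames are carved off the front of a fill buffer without
    // copying the bytes.
    //
    //     let mut buf = BytesMut::from(&b"aaaabbbbcccc"[..]);
    //     let mut frames = Vec::new();
    //     while buf.len() >= 4 {
    //         frames.push(buf.split_to(4).freeze());
    //     }
    //     assert_eq!(frames[1], b"bbbb"[..]);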
    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        self.inner.truncate(len);
    }

    /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
    /// rest.
    ///
    /// This is the same function as `Buf::advance`, and in the next breaking
    /// release of `bytes`, this implementation will be removed in favor of
    /// having `BytesMut` implement `Buf`.
    ///
    /// # Panics
    ///
    /// This function panics if `cnt` is greater than `self.len()`
    #[inline]
    pub fn advance(&mut self, cnt: usize) {
        assert!(cnt <= self.len(), "cannot advance past `remaining`");
        unsafe { self.inner.set_start(cnt); }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
        self.inner.resize(new_len, value);
    }

    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    ///
    /// # Panics
    ///
    /// This method will panic if `len` is out of bounds for the underlying
    /// slice or if it comes after the `end` of the configured window.
    pub unsafe fn set_len(&mut self, len: usize) {
        self.inner.set_len(len)
    }
    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a small
    /// view in the original buffer and all other handles have been dropped,
    /// and the requested capacity is less than or equal to the existing
    /// buffer's capacity, then the current view will be copied to the front of
    /// the buffer and the handle will take ownership of the full buffer.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.take();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    pub fn reserve(&mut self, additional: usize) {
        self.inner.reserve(additional)
    }

    /// Appends given bytes to this object.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first. So unlike the `put_slice` operation, `extend_from_slice` does
    /// not panic.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        self.reserve(extend.len());
        self.put_slice(extend);
    }

    /// Combines split `BytesMut` objects back into a contiguous buffer.
    ///
    /// If the `BytesMut` objects were not contiguous originally, the data
    /// from `other` is copied onto the end of `self` instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let split = buf.split_off(6);
    /// assert_eq!(b"aaabbb", &buf[..]);
    /// assert_eq!(b"cccddd", &split[..]);
    ///
    /// buf.unsplit(split);
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
        let ptr;

        if other.is_empty() {
            return;
        }

        if self.is_empty() {
            *self = other;
            return;
        }

        unsafe {
            ptr = self.inner.ptr.offset(self.inner.len as isize);
        }
        if ptr == other.inner.ptr &&
           self.inner.kind() == KIND_ARC &&
           other.inner.kind() == KIND_ARC
        {
            debug_assert_eq!(self.inner.arc.load(Acquire),
                             other.inner.arc.load(Acquire));
            // Contiguous blocks, just combine directly
            self.inner.len += other.inner.len;
            self.inner.cap += other.inner.cap;
        } else {
            self.extend_from_slice(&other);
        }
    }
}
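// A minimal sketch of the fast path in `unsplit` above: when the two views
// are still adjacent in the same promoted (KIND_ARC) buffer, recombining is
// pure pointer/length bookkeeping; otherwise it falls back to a copy via
// `extend_from_slice`.
//
//     let mut a = BytesMut::from(&b"hello world, this is a long buffer"[..]);
//     let b = a.split_off(5); // promotes to a shared, adjacent pair of views
//     a.unsplit(b);           // contiguous: no copy
//     assert_eq!(&a[..], b"hello world, this is a long buffer");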
impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        self.capacity() - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;

        // This call will panic if `cnt` is too big
        self.inner.set_len(new_len);
    }

    #[inline]
    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
        let len = self.len();

        // This will never panic as `len` can never become invalid
        &mut self.inner.as_raw()[len..]
    }

    #[inline]
    fn put_slice(&mut self, src: &[u8]) {
        assert!(self.remaining_mut() >= src.len());

        let len = src.len();

        unsafe {
            self.bytes_mut()[..len].copy_from_slice(src);
            self.advance_mut(len);
        }
    }

    #[inline]
    fn put_u8(&mut self, n: u8) {
        self.inner.put_u8(n);
    }

    #[inline]
    fn put_i8(&mut self, n: i8) {
        self.put_u8(n as u8);
    }
}

impl IntoBuf for BytesMut {
    type Buf = Cursor<Self>;

    fn into_buf(self) -> Self::Buf {
        Cursor::new(self)
    }
}

impl<'a> IntoBuf for &'a BytesMut {
    type Buf = Cursor<&'a BytesMut>;

    fn into_buf(self) -> Self::Buf {
        Cursor::new(self)
    }
}

impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

impl ops::Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.as_mut()
    }
}

impl ops::DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.inner.as_mut()
    }
}

impl From<Vec<u8>> for BytesMut {
    fn from(src: Vec<u8>) -> BytesMut {
        BytesMut {
            inner: Inner::from_vec(src),
        }
    }
}

impl From<String> for BytesMut {
    fn from(src: String) -> BytesMut {
        BytesMut::from(src.into_bytes())
    }
}

impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        let len = src.len();

        if len == 0 {
            BytesMut::new()
        } else if len <= INLINE_CAP {
            unsafe {
                let mut inner: Inner = mem::uninitialized();

                // Set inline mask
                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
                inner.set_inline_len(len);
                inner.as_raw()[0..len].copy_from_slice(src);

                BytesMut {
                    inner: inner,
                }
            }
        } else {
            BytesMut::from(src.to_vec())
        }
    }
}

impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<Bytes> for BytesMut {
    fn from(src: Bytes) -> BytesMut {
        src.try_mut()
            .unwrap_or_else(|src| BytesMut::from(&src[..]))
    }
}

impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
        self.inner.as_ref() == other.inner.as_ref()
    }
}

impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(other.inner.as_ref())
    }
}

impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
        self.inner.as_ref().cmp(other.inner.as_ref())
    }
}

impl Eq for BytesMut {}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl fmt::Debug for BytesMut {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
    }
}

impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
        fmt::write(self, args)
    }
}

impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
        BytesMut::from(&self[..])
    }
}

impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = Iter<Cursor<BytesMut>>;

    fn into_iter(self) -> Self::IntoIter {
        self.into_buf().iter()
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = u8;
    type IntoIter = Iter<Cursor<&'a BytesMut>>;

    fn into_iter(self) -> Self::IntoIter {
        self.into_buf().iter()
    }
}
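// A minimal sketch of the `fmt::Write` impl above: formatted writes must fit
// in `remaining_mut()`; they report `fmt::Error` rather than reallocating
// (assuming `from(&[u8])` here allocates exactly `len` bytes, which `to_vec`
// does).
//
//     use std::fmt::Write;
//     let mut buf = BytesMut::from(&[0u8; 64][..]); // len == capacity
//     assert!(write!(buf, "hi").is_err()); // remaining_mut() == 0
//     buf.clear();
//     assert!(write!(buf, "hi").is_ok());  // now fits in spare capacity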
impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
        let iter = iter.into_iter();

        let (lower, _) = iter.size_hint();
        self.reserve(lower);

        for b in iter {
            unsafe {
                self.bytes_mut()[0] = b;
                self.advance_mut(1);
            }
        }
    }
}

impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
        self.extend(iter.into_iter().map(|b| *b))
    }
}

/*
 *
 * ===== Inner =====
 *
 */

impl Inner {
    #[inline]
    fn from_static(bytes: &'static [u8]) -> Inner {
        let ptr = bytes.as_ptr() as *mut u8;

        Inner {
            // `arc` won't ever store a pointer. Instead, use it to
            // track the fact that the `Bytes` handle is backed by a
            // static buffer.
            arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
            ptr: ptr,
            len: bytes.len(),
            cap: bytes.len(),
        }
    }

    #[inline]
    fn from_vec(mut src: Vec<u8>) -> Inner {
        let len = src.len();
        let cap = src.capacity();
        let ptr = src.as_mut_ptr();

        mem::forget(src);

        let original_capacity_repr = original_capacity_to_repr(cap);
        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        Inner {
            arc: AtomicPtr::new(arc as *mut Shared),
            ptr: ptr,
            len: len,
            cap: cap,
        }
    }

    #[inline]
    fn with_capacity(capacity: usize) -> Inner {
        if capacity <= INLINE_CAP {
            unsafe {
                // Using uninitialized memory is ~30% faster
                let mut inner: Inner = mem::uninitialized();
                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
                inner
            }
        } else {
            Inner::from_vec(Vec::with_capacity(capacity))
        }
    }

    /// Return a slice for the handle's view into the shared buffer
    #[inline]
    fn as_ref(&self) -> &[u8] {
        unsafe {
            if self.is_inline() {
                slice::from_raw_parts(self.inline_ptr(), self.inline_len())
            } else {
                slice::from_raw_parts(self.ptr, self.len)
            }
        }
    }

    /// Return a mutable slice for the handle's view into the shared buffer
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        debug_assert!(!self.is_static());

        unsafe {
            if self.is_inline() {
                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
            } else {
                slice::from_raw_parts_mut(self.ptr, self.len)
            }
        }
    }

    /// Return a mutable slice for the handle's view into the shared buffer
    /// including potentially uninitialized bytes.
    #[inline]
    unsafe fn as_raw(&mut self) -> &mut [u8] {
        debug_assert!(!self.is_static());

        if self.is_inline() {
            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
        } else {
            slice::from_raw_parts_mut(self.ptr, self.cap)
        }
    }

    /// Insert a byte into the next slot and advance the len by 1.
    #[inline]
    fn put_u8(&mut self, n: u8) {
        if self.is_inline() {
            let len = self.inline_len();
            assert!(len < INLINE_CAP);
            unsafe {
                *self.inline_ptr().offset(len as isize) = n;
            }
            self.set_inline_len(len + 1);
        } else {
            assert!(self.len < self.cap);
            unsafe {
                *self.ptr.offset(self.len as isize) = n;
            }
            self.len += 1;
        }
    }

    #[inline]
    fn len(&self) -> usize {
        if self.is_inline() {
            self.inline_len()
        } else {
            self.len
        }
    }

    /// Pointer to the start of the inline buffer
    #[inline]
    unsafe fn inline_ptr(&self) -> *mut u8 {
        (self as *const Inner as *mut Inner as *mut u8)
            .offset(INLINE_DATA_OFFSET)
    }

    #[inline]
    fn inline_len(&self) -> usize {
        let p: &usize = unsafe { mem::transmute(&self.arc) };
        (p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
    }

    /// Set the length of the inline buffer. This is done by writing to the
    /// least significant byte of the `arc` field.
    #[inline]
    fn set_inline_len(&mut self, len: usize) {
        debug_assert!(len <= INLINE_CAP);
        let p = self.arc.get_mut();
        *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
    }
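    // A worked example of the tagging above (assuming KIND_INLINE == 0b01 and
    // INLINE_LEN_OFFSET == 2): an inline handle holding 5 bytes stores
    // `(5 << 2) | 0b01 == 0b10101` in the low byte of `arc`; `inline_len`
    // masks and shifts those bits back out, and the kind flag bits are never
    // disturbed by length updates.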
    /// Set the length of the underlying slice.
    #[inline]
    unsafe fn set_len(&mut self, len: usize) {
        if self.is_inline() {
            assert!(len <= INLINE_CAP);
            self.set_inline_len(len);
        } else {
            assert!(len <= self.cap);
            self.len = len;
        }
    }

    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[inline]
    fn capacity(&self) -> usize {
        if self.is_inline() {
            INLINE_CAP
        } else {
            self.cap
        }
    }

    fn split_off(&mut self, at: usize) -> Inner {
        let mut other = unsafe { self.shallow_clone(true) };

        unsafe {
            other.set_start(at);
            self.set_end(at);
        }

        return other
    }

    fn split_to(&mut self, at: usize) -> Inner {
        let mut other = unsafe { self.shallow_clone(true) };

        unsafe {
            other.set_end(at);
            self.set_start(at);
        }

        return other
    }

    fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            unsafe { self.set_len(len); }
        }
    }

    fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();

        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.as_raw()[len..].as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    unsafe fn set_start(&mut self, start: usize) {
        // Setting the start to 0 is a no-op, so return early if this is the
        // case.
        if start == 0 {
            return;
        }

        let kind = self.kind();

        // Always check `inline` first, because if the handle is using inline
        // data storage, all of the `Inner` struct fields will be gibberish.
        if kind == KIND_INLINE {
            assert!(start <= INLINE_CAP);

            let len = self.inline_len();

            if len <= start {
                self.set_inline_len(0);
            } else {
                // `set_start` is essentially shifting data off the front of the
                // view. Inlined buffers only track the length of the slice.
                // So, to update the start, the data at the new starting point
                // is copied to the beginning of the buffer.
                let new_len = len - start;

                let dst = self.inline_ptr();
                let src = (dst as *const u8).offset(start as isize);

                ptr::copy(src, dst, new_len);

                self.set_inline_len(new_len);
            }
        } else {
            assert!(start <= self.cap);

            if kind == KIND_VEC {
                // Setting the start when in vec representation is a little more
                // complicated. First, we have to track how far ahead the
                // "start" of the byte buffer is from the beginning of the vec.
                // We also have to ensure that we don't exceed the maximum shift.
                let (mut pos, prev) = self.uncoordinated_get_vec_pos();
                pos += start;

                if pos <= MAX_VEC_POS {
                    self.uncoordinated_set_vec_pos(pos, prev);
                } else {
                    // The repr must be upgraded to ARC. This will never happen
                    // on 64 bit systems and will only happen on 32 bit systems
                    // when shifting past 134,217,727 bytes. As such, we don't
                    // worry too much about performance here.
                    let _ = self.shallow_clone(true);
                }
            }

            // Updating the start of the view is setting `ptr` to point to the
            // new start and updating the `len` field to reflect the new length
            // of the view.
            self.ptr = self.ptr.offset(start as isize);

            if self.len >= start {
                self.len -= start;
            } else {
                self.len = 0;
            }

            self.cap -= start;
        }
    }
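    // A worked example of the inline branch in `set_start` above: for an
    // inline handle holding b"hello", `set_start(2)` copies "llo" to the
    // front of the inline buffer and sets the inline length to 3. Inline
    // views track only a length, so the data must actually move; pointer-based
    // views just advance `ptr` instead.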
    unsafe fn set_end(&mut self, end: usize) {
        debug_assert!(self.is_shared());

        // Always check `inline` first, because if the handle is using inline
        // data storage, all of the `Inner` struct fields will be gibberish.
        if self.is_inline() {
            assert!(end <= INLINE_CAP);
            let new_len = cmp::min(self.inline_len(), end);
            self.set_inline_len(new_len);
        } else {
            assert!(end <= self.cap);

            self.cap = end;
            self.len = cmp::min(self.len, end);
        }
    }

    /// Checks if it is safe to mutate the memory
    fn is_mut_safe(&mut self) -> bool {
        let kind = self.kind();

        // Always check `inline` first, because if the handle is using inline
        // data storage, all of the `Inner` struct fields will be gibberish.
        if kind == KIND_INLINE {
            // Inlined buffers can always be mutated as the data is never shared
            // across handles.
            true
        } else if kind == KIND_VEC {
            true
        } else if kind == KIND_STATIC {
            false
        } else {
            // Otherwise, the underlying buffer is potentially shared with other
            // handles, so the ref_count needs to be checked.
            unsafe { (**self.arc.get_mut()).is_unique() }
        }
    }

    /// Increments the ref count. This should only be done if it is known that
    /// it can be done safely. As such, this fn is not public, instead other
    /// fns will use this one while maintaining the guarantees.
    /// Parameter `mut_self` should only be set to `true` if the caller holds
    /// a `&mut self` reference.
    ///
    /// "Safely" is defined as not exposing two `BytesMut` values that point to
    /// the same byte window.
    ///
    /// This function is thread safe.
    unsafe fn shallow_clone(&self, mut_self: bool) -> Inner {
        // Always check `inline` first, because if the handle is using inline
        // data storage, all of the `Inner` struct fields will be gibberish.
        //
        // Additionally, if kind is STATIC, then Arc is *never* changed, making
        // it safe and faster to check for it now before an atomic acquire.

        if self.is_inline_or_static() {
            // In this case, a shallow_clone still involves copying the data.
            let mut inner: Inner = mem::uninitialized();
            ptr::copy_nonoverlapping(
                self,
                &mut inner,
                1,
            );
            inner
        } else {
            self.shallow_clone_sync(mut_self)
        }
    }

    #[cold]
    unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner {
        // The function requires `&self`, this means that `shallow_clone`
        // could be called concurrently.
        //
        // The first step is to load the value of `arc`. This will determine
        // how to proceed. The `Acquire` ordering synchronizes with the
        // `compare_and_swap` that comes later in this function. The goal is
        // to ensure that if `arc` is currently set to point to a `Shared`,
        // that the current thread acquires the associated memory.
        let arc = self.arc.load(Acquire);
        let kind = arc as usize & KIND_MASK;

        if kind == KIND_ARC {
            self.shallow_clone_arc(arc)
        } else {
            assert!(kind == KIND_VEC);
            self.shallow_clone_vec(arc as usize, mut_self)
        }
    }

    unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner {
        debug_assert!(arc as usize & KIND_MASK == KIND_ARC);

        let old_size = (*arc).ref_count.fetch_add(1, Relaxed);

        if old_size == usize::MAX {
            abort();
        }

        Inner {
            arc: AtomicPtr::new(arc),
            .. *self
        }
    }
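    // The `fetch_add(1, Relaxed)` in `shallow_clone_arc` above mirrors
    // `std::sync::Arc::clone`: Relaxed suffices because a new reference can
    // only be created through an already-valid existing one, and the
    // `usize::MAX` check aborts before the counter could ever overflow.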
    #[cold]
    unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner {
        // If the buffer is still tracked in a `Vec<u8>`, it is time to
        // promote the vec to an `Arc`. This could potentially be called
        // concurrently, so some care must be taken.
        debug_assert!(arc & KIND_MASK == KIND_VEC);

        let original_capacity_repr =
            (arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;

        // The vec offset cannot be concurrently mutated, so there
        // should be no danger reading it.
        let off = (arc as usize) >> VEC_POS_OFFSET;

        // First, allocate a new `Shared` instance containing the
        // `Vec` fields. It's important to note that `ptr`, `len`,
        // and `cap` cannot be mutated without having `&mut self`.
        // This means that these fields will not be concurrently
        // updated and since the buffer hasn't been promoted to an
        // `Arc`, those three fields still are the components of the
        // vector.
        let shared = Box::new(Shared {
            vec: rebuild_vec(self.ptr, self.len, self.cap, off),
            original_capacity_repr: original_capacity_repr,
            // Initialize refcount to 2. One for this reference, and one
            // for the new clone that will be returned from
            // `shallow_clone`.
            ref_count: AtomicUsize::new(2),
        });

        let shared = Box::into_raw(shared);

        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert!(0 == (shared as usize & 0b11));

        // If there are no references to self in other threads,
        // expensive atomic operations can be avoided.
        if mut_self {
            self.arc.store(shared, Relaxed);
            return Inner {
                arc: AtomicPtr::new(shared),
                .. *self
            };
        }

        // Try compare & swapping the pointer into the `arc` field.
        // `Release` is used to synchronize with other threads that
        // will load the `arc` field.
        //
        // If the `compare_and_swap` fails, then the thread lost the
        // race to promote the buffer to shared. The `Acquire`
        // ordering will synchronize with the `compare_and_swap`
        // that happened in the other thread and the `Shared`
        // pointed to by `actual` will be visible.
        let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel);

        if actual as usize == arc {
            // The upgrade was successful, the new handle can be
            // returned.
            return Inner {
                arc: AtomicPtr::new(shared),
                .. *self
            };
        }

        // The upgrade failed, a concurrent clone happened. Release
        // the allocation that was made in this thread, it will not
        // be needed.
        let shared = Box::from_raw(shared);
        mem::forget(*shared);

        // Buffer already promoted to shared storage, so increment ref
        // count.
        self.shallow_clone_arc(actual)
    }
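    // Race sketch for `shallow_clone_vec` above: if two threads promote the
    // same KIND_VEC handle at once, both allocate a `Shared`, one CAS wins,
    // and the loser moves its `Shared` back out of the box (freeing the box
    // allocation) while `mem::forget` keeps the wrapped `Vec` from being
    // dropped, since the winner's `Shared` now owns that same buffer.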
    #[inline]
    fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        let kind = self.kind();

        // Always check `inline` first, because if the handle is using inline
        // data storage, all of the `Inner` struct fields will be gibberish.
        if kind == KIND_INLINE {
            let new_cap = len + additional;

            // Promote to a vector
            let mut v = Vec::with_capacity(new_cap);
            v.extend_from_slice(self.as_ref());

            self.ptr = v.as_mut_ptr();
            self.len = v.len();
            self.cap = v.capacity();

            // Since the minimum capacity is `INLINE_CAP`, don't bother encoding
            // the original capacity as INLINE_CAP
            self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);

            mem::forget(v);
            return;
        }

        if kind == KIND_VEC {
            // If there's enough free space before the start of the buffer, then
            // just copy the data backwards and reuse the already-allocated
            // space.
            //
            // Otherwise, since backed by a vector, use `Vec::reserve`
            unsafe {
                let (off, prev) = self.uncoordinated_get_vec_pos();

                // Only reuse space if we stand to gain at least capacity/2
                // bytes of space back
                if off >= additional && off >= (self.cap / 2) {
                    // There's space - reuse it
                    //
                    // Just move the pointer back to the start after copying
                    // data back.
                    let base_ptr = self.ptr.offset(-(off as isize));
                    ptr::copy(self.ptr, base_ptr, self.len);
                    self.ptr = base_ptr;
                    self.uncoordinated_set_vec_pos(0, prev);

                    // Length stays constant, but since we moved backwards we
                    // can gain capacity back.
                    self.cap += off;
                } else {
                    // No space - allocate more
                    let mut v = rebuild_vec(self.ptr, self.len, self.cap, off);
                    v.reserve(additional);

                    // Update the info
                    self.ptr = v.as_mut_ptr().offset(off as isize);
                    self.len = v.len() - off;
                    self.cap = v.capacity() - off;

                    // Drop the vec reference
                    mem::forget(v);
                }
                return;
            }
        }

        let arc = *self.arc.get_mut();

        debug_assert!(kind == KIND_ARC);

        // Reserving involves abandoning the currently shared buffer and
        // allocating a new vector with the requested capacity.
        //
        // Compute the new capacity
        let mut new_cap = len + additional;
        let original_capacity;
        let original_capacity_repr;

        unsafe {
            original_capacity_repr = (*arc).original_capacity_repr;
            original_capacity = original_capacity_from_repr(original_capacity_repr);

            // First, try to reclaim the buffer. This is possible if the current
            // handle is the only outstanding handle pointing to the buffer.
            if (*arc).is_unique() {
                // This is the only handle to the buffer. It can be reclaimed.
                // However, before doing the work of copying data, check to make
                // sure that the vector has enough capacity.
                let v = &mut (*arc).vec;

                if v.capacity() >= new_cap {
                    // The capacity is sufficient, reclaim the buffer
                    let ptr = v.as_mut_ptr();

                    ptr::copy(self.ptr, ptr, len);

                    self.ptr = ptr;
                    self.cap = v.capacity();

                    return;
                }

                // The vector capacity is not sufficient. The reserve request is
                // asking for more than the initial buffer capacity. Allocate more
                // than requested if `new_cap` is not much bigger than the current
                // capacity.
                //
                // There are some situations, using `reserve_exact`, where the
                // buffer capacity could be below `original_capacity`, so do a
                // check.
                new_cap = cmp::max(
                    cmp::max(v.capacity() << 1, new_cap),
                    original_capacity);
            } else {
                new_cap = cmp::max(new_cap, original_capacity);
            }
        }

        // Create a new vector to store the data
        let mut v = Vec::with_capacity(new_cap);

        // Copy the bytes
        v.extend_from_slice(self.as_ref());

        // Release the shared handle. This must be done *after* the bytes are
        // copied.
        release_shared(arc);

        // Update self
        self.ptr = v.as_mut_ptr();
        self.len = v.len();
        self.cap = v.capacity();

        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        self.arc = AtomicPtr::new(arc as *mut Shared);

        // Forget the vector handle
        mem::forget(v);
    }

    /// Returns true if the buffer is stored inline
    #[inline]
    fn is_inline(&self) -> bool {
        self.kind() == KIND_INLINE
    }

    #[inline]
    fn is_inline_or_static(&self) -> bool {
        // The value returned by `kind` isn't itself safe, but the value could
        // inform what operations to take, and unsafely do something without
        // synchronization.
        //
        // KIND_INLINE and KIND_STATIC will *never* change, so branching on
        // that information is safe.
        let kind = self.kind();
        kind == KIND_INLINE || kind == KIND_STATIC
    }

    /// Used for `debug_assert` statements. &mut is used to guarantee that it is
    /// safe to check VEC_KIND
    #[inline]
    fn is_shared(&mut self) -> bool {
        match self.kind() {
            KIND_VEC => false,
            _ => true,
        }
    }

    /// Used for `debug_assert` statements
    #[inline]
    fn is_static(&mut self) -> bool {
        match self.kind() {
            KIND_STATIC => true,
            _ => false,
        }
    }
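    // A worked example of the growth policy in `reserve` above: a unique
    // shared handle whose vec capacity is 64 bytes, asked for 70 bytes total,
    // allocates `max(64 << 1, 70) == 128` (and never less than the original
    // capacity), which keeps repeated reserves amortized O(1).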
    #[inline]
    fn kind(&self) -> usize {
        // This function is going to probably raise some eyebrows. The function
        // returns true if the buffer is stored inline. This is done by checking
        // the least significant bit in the `arc` field.
        //
        // Now, you may notice that `arc` is an `AtomicPtr` and this is
        // accessing it as a normal field without performing an atomic load...
        //
        // Again, the function only cares about the least significant bit, and
        // this bit is set when `Inner` is created and never changed after that.
        // All platforms have atomic "word" operations and won't randomly flip
        // bits, so even without any explicit atomic operations, reading the
        // flag will be correct.
        //
        // This is undefined behavior due to a data race, but experimental
        // evidence shows that it works in practice (discussion:
        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
        //
        // This function is very critical performance wise as it is called for
        // every operation. Performing an atomic load would mess with the
        // compiler's ability to optimize. Simple benchmarks show up to a 10%
        // slowdown using a `Relaxed` atomic load on x86.

        #[cfg(target_endian = "little")]
        #[inline]
        fn imp(arc: &AtomicPtr<Shared>) -> usize {
            unsafe {
                let p: *const u8 = mem::transmute(arc);
                (*p as usize) & KIND_MASK
            }
        }

        #[cfg(target_endian = "big")]
        #[inline]
        fn imp(arc: &AtomicPtr<Shared>) -> usize {
            unsafe {
                let p: *const usize = mem::transmute(arc);
                *p & KIND_MASK
            }
        }

        imp(&self.arc)
    }

    #[inline]
    fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) {
        // Similar to above, this is a pretty crazed function. This should only
        // be called when in the KIND_VEC mode. This + the &mut self argument
        // guarantees that there is no possibility of concurrent calls to this
        // function.
        let prev = unsafe {
            let p: &AtomicPtr<Shared> = &self.arc;
            let p: *const usize = mem::transmute(p);
            *p
        };

        (prev >> VEC_POS_OFFSET, prev)
    }

    #[inline]
    fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) {
        // Once more... crazy
        debug_assert!(pos <= MAX_VEC_POS);

        unsafe {
            let p: &mut AtomicPtr<Shared> = &mut self.arc;
            let p: &mut usize = mem::transmute(p);
            *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
        }
    }
}

fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    unsafe {
        let ptr = ptr.offset(-(off as isize));
        len += off;
        cap += off;

        Vec::from_raw_parts(ptr, len, cap)
    }
}
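// A worked example tying `uncoordinated_set_vec_pos` and `rebuild_vec`
// together: advancing a KIND_VEC handle by 10 bytes moves `ptr` forward and
// packs the offset 10 into the high bits of `arc`; on drop, `rebuild_vec`
// walks `ptr` back by that offset so the original `Vec` allocation is freed
// from its true start.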
impl Drop for Inner {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == KIND_VEC {
            let (off, _) = self.uncoordinated_get_vec_pos();

            // Vector storage, free the vector
            let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
        } else if kind == KIND_ARC {
            release_shared(*self.arc.get_mut());
        }
    }
}

fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    unsafe {
        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        // Drop the data
        Box::from_raw(ptr);
    }
}

impl Shared {
    fn is_unique(&self) -> bool {
        // The goal is to check if the current handle is the only handle
        // that currently has access to the buffer. This is done by
        // checking if the `ref_count` is currently 1.
        //
        // The `Acquire` ordering synchronizes with the `Release` as
        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
        // operation guarantees that any mutations done in other threads
        // are ordered before the `ref_count` is decremented. As such,
        // this `Acquire` will guarantee that those mutations are
        // visible to the current thread.
        self.ref_count.load(Acquire) == 1
    }
}

fn original_capacity_to_repr(cap: usize) -> usize {
    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
    cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
}

fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        return 0;
    }

    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
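// A worked example of the capacity encoding above (assuming
// MIN_ORIGINAL_CAPACITY_WIDTH == 10): an original capacity of 4096 == 1 << 12
// maps to repr 3, and `original_capacity_from_repr(3)` decodes it back to
// `1 << (3 + 9) == 4096`. A capacity of 5000 also maps to repr 3; that is,
// the round trip rounds down to a power of two.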
#[test]
fn test_original_capacity_to_repr() {
    assert_eq!(original_capacity_to_repr(0), 0);

    let max_width = 32;

    for width in 1..(max_width + 1) {
        let cap = 1 << (width - 1);

        let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
            0
        } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
            width - MIN_ORIGINAL_CAPACITY_WIDTH
        } else {
            MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
        };

        assert_eq!(original_capacity_to_repr(cap), expected);

        if width > 1 {
            assert_eq!(original_capacity_to_repr(cap + 1), expected);
        }

        // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
        if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
            assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
            assert_eq!(original_capacity_to_repr(cap + 76), expected);
        } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
            assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
            assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
        }
    }
}

#[test]
fn test_original_capacity_from_repr() {
    assert_eq!(0, original_capacity_from_repr(0));

    let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;

    assert_eq!(min_cap, original_capacity_from_repr(1));
    assert_eq!(min_cap * 2, original_capacity_from_repr(2));
    assert_eq!(min_cap * 4, original_capacity_from_repr(3));
    assert_eq!(min_cap * 8, original_capacity_from_repr(4));
    assert_eq!(min_cap * 16, original_capacity_from_repr(5));
    assert_eq!(min_cap * 32, original_capacity_from_repr(6));
    assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}

unsafe impl Send for Inner {}
unsafe impl Sync for Inner {}

/*
 *
 * ===== PartialEq / PartialOrd =====
 *
 */

impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
        &**self == other
    }
}

impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other)
    }
}

impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
        &**self == other.as_bytes()
    }
}

impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&other[..])
    }
}

impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for BytesMut {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for String {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for String {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
    where BytesMut: PartialEq<T>
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
    where BytesMut: PartialOrd<T>
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(*other)
    }
}

impl<'a> PartialEq<BytesMut> for &'a [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl<'a> PartialOrd<BytesMut> for &'a [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl<'a> PartialEq<BytesMut> for &'a str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl<'a> PartialOrd<BytesMut> for &'a str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.inner.as_ref() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.inner.as_ref() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}
impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl<'a> PartialEq<Bytes> for &'a [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl<'a> PartialOrd<Bytes> for &'a [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl<'a> PartialEq<Bytes> for &'a str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl<'a> PartialOrd<Bytes> for &'a str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
    where Bytes: PartialEq<T>
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
    where Bytes: PartialOrd<T>
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        &other[..] == &self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        &other[..] == &self[..]
    }
}

// While there is `std::process::abort`, it's only available in Rust 1.17, and
// our minimum supported version is currently 1.15. So, this acts as an abort
// by triggering a double panic, which always aborts in Rust.
struct Abort;

impl Drop for Abort {
    fn drop(&mut self) {
        panic!();
    }
}

#[inline(never)]
#[cold]
fn abort() {
    let _a = Abort;
    panic!();
}

bytes-0.4.12/src/debug.rs010064400007650000024000000026321337670411000133730ustar0000000000000000
use std::fmt;

/// Alternative implementation of `fmt::Debug` for byte slice.
///
/// The standard `Debug` implementation for `[u8]` is a comma separated
/// list of numbers. Since large amounts of byte strings are in fact
/// ASCII strings or contain a lot of ASCII strings (e.g. HTTP), it is
/// convenient to print strings as ASCII when possible.
///
/// This struct wraps `&[u8]` just to override `fmt::Debug`.
///
/// `BsDebug` is not a part of the public API of the bytes crate.
pub struct BsDebug<'a>(pub &'a [u8]);

impl<'a> fmt::Debug for BsDebug<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        try!(write!(fmt, "b\""));
        for &c in self.0 {
            // https://doc.rust-lang.org/reference.html#byte-escapes
            if c == b'\n' {
                try!(write!(fmt, "\\n"));
            } else if c == b'\r' {
                try!(write!(fmt, "\\r"));
            } else if c == b'\t' {
                try!(write!(fmt, "\\t"));
            } else if c == b'\\' || c == b'"' {
                try!(write!(fmt, "\\{}", c as char));
            } else if c == b'\0' {
                try!(write!(fmt, "\\0"));
            // ASCII printable
            } else if c >= 0x20 && c < 0x7f {
                try!(write!(fmt, "{}", c as char));
            } else {
                try!(write!(fmt, "\\x{:02x}", c));
            }
        }
        try!(write!(fmt, "\""));
        Ok(())
    }
}

bytes-0.4.12/src/either.rs010064400007650000024000000040431337670411000135630ustar0000000000000000
extern crate either;

use {Buf, BufMut};

use self::either::Either;
use self::either::Either::*;
use iovec::IoVec;

impl<L, R> Buf for Either<L, R>
    where L: Buf,
          R: Buf,
{
    fn remaining(&self) -> usize {
        match *self {
            Left(ref b) => b.remaining(),
            Right(ref b) => b.remaining(),
        }
    }

    fn bytes(&self) -> &[u8] {
        match *self {
            Left(ref b) => b.bytes(),
            Right(ref b) => b.bytes(),
        }
    }

    fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize {
        match *self {
            Left(ref b) => b.bytes_vec(dst),
            Right(ref b) => b.bytes_vec(dst),
        }
    }

    fn advance(&mut self, cnt: usize) {
        match *self {
            Left(ref mut b) => b.advance(cnt),
            Right(ref mut b) => b.advance(cnt),
        }
    }

    fn copy_to_slice(&mut self, dst: &mut [u8]) {
        match *self {
            Left(ref mut b) => b.copy_to_slice(dst),
            Right(ref mut b) => b.copy_to_slice(dst),
        }
    }
}

impl<L, R> BufMut for Either<L, R>
    where L: BufMut,
          R: BufMut,
{
    fn remaining_mut(&self) -> usize {
        match *self {
            Left(ref b) => b.remaining_mut(),
            Right(ref b) => b.remaining_mut(),
        }
    }

    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
        match *self {
            Left(ref mut b) => b.bytes_mut(),
            Right(ref mut b) => b.bytes_mut(),
        }
    }

    unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize {
        match *self {
            Left(ref mut b) => b.bytes_vec_mut(dst),
            Right(ref mut b) => b.bytes_vec_mut(dst),
        }
    }

    unsafe fn advance_mut(&mut self, cnt: usize) {
        match *self {
            Left(ref mut b) => b.advance_mut(cnt),
            Right(ref mut b) => b.advance_mut(cnt),
        }
    }

    fn put_slice(&mut self, src: &[u8]) {
        match *self {
            Left(ref mut b) => b.put_slice(src),
            Right(ref mut b) => b.put_slice(src),
        }
    }
}
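// A minimal sketch of the delegating impls above (assuming the `either`
// feature is enabled): either variant can be consumed through the `Buf` API.
//
//     use std::io::Cursor;
//     use bytes::Buf;
//     use either::Either;
//
//     let buf: Either<Cursor<&[u8]>, Cursor<Vec<u8>>> =
//         Either::Left(Cursor::new(&b"hello"[..]));
//     assert_eq!(buf.remaining(), 5);
//     assert_eq!(buf.bytes(), b"hello");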
bytes-0.4.12/src/lib.rs010064400007650000024000000063571344003026700130600ustar0000000000000000
//! Provides abstractions for working with bytes.
//!
//! The `bytes` crate provides an efficient byte buffer structure
//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer
//! implementations ([`Buf`], [`BufMut`]).
//!
//! [`Buf`]: trait.Buf.html
//! [`BufMut`]: trait.BufMut.html
//!
//! # `Bytes`
//!
//! `Bytes` is an efficient container for storing and operating on contiguous
//! slices of memory. It is intended for use primarily in networking code, but
//! could have applications elsewhere as well.
//!
//! `Bytes` values facilitate zero-copy network programming by allowing multiple
//! `Bytes` objects to point to the same underlying memory. This is managed by
//! using a reference count to track when the memory is no longer needed and can
//! be freed.
//!
//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
//! example:
//!
//! ```rust
//! use bytes::{BytesMut, BufMut, BigEndian};
//!
//! let mut buf = BytesMut::with_capacity(1024);
//! buf.put(&b"hello world"[..]);
//! buf.put_u16::<BigEndian>(1234);
//!
//! let a = buf.take();
//! assert_eq!(a, b"hello world\x04\xD2"[..]);
//!
//! buf.put(&b"goodbye world"[..]);
//!
//! let b = buf.take();
//! assert_eq!(b, b"goodbye world"[..]);
//!
//! assert_eq!(buf.capacity(), 998);
//! ```
//!
//! In the above example, only a single buffer of 1024 is allocated. The handles
//! `a` and `b` will share the underlying buffer and maintain indices tracking
//! the view into the buffer represented by the handle.
//!
//! See the [struct docs] for more details.
//!
//! [struct docs]: struct.Bytes.html
//!
//! # `Buf`, `BufMut`
//!
//! These two traits provide read and write access to buffers. The underlying
//! storage may or may not be in contiguous memory. For example, `Bytes` is a
//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in
//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current
//! position in the underlying byte storage. When bytes are read or written, the
//! cursor is advanced.
//!
//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
//!
//! ## Relation with `Read` and `Write`
//!
//! At first glance, it may seem that `Buf` and `BufMut` overlap in
//! functionality with `std::io::Read` and `std::io::Write`. However, they
//! serve different purposes. A buffer is the value that is provided as an
//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then
//! perform a syscall, which has the potential of failing. Operations on `Buf`
//! and `BufMut` are infallible.

#![deny(warnings, missing_docs, missing_debug_implementations)]
#![doc(html_root_url = "https://docs.rs/bytes/0.4.12")]

extern crate byteorder;
extern crate iovec;

pub mod buf;
pub use buf::{
    Buf,
    BufMut,
    IntoBuf,
};

#[deprecated(since = "0.4.1", note = "moved to `buf` module")]
#[doc(hidden)]
pub use buf::{
    Reader,
    Writer,
    Take,
};

mod bytes;
mod debug;
pub use bytes::{Bytes, BytesMut};

#[deprecated]
pub use byteorder::{ByteOrder, BigEndian, LittleEndian};

// Optional Serde support
#[cfg(feature = "serde")]
#[doc(hidden)]
pub mod serde;

// Optional `Either` support
#[cfg(feature = "either")]
mod either;

bytes-0.4.12/src/serde.rs010064400007650000024000000044421337670411000134100ustar0000000000000000
extern crate serde;

use std::{cmp, fmt};

use self::serde::{Serialize, Serializer, Deserialize, Deserializer, de};

use super::{Bytes, BytesMut};
macro_rules! serde_impl {
    ($ty:ident, $visitor_ty:ident) => (
        impl Serialize for $ty {
            #[inline]
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
                where S: Serializer
            {
                serializer.serialize_bytes(&self)
            }
        }

        struct $visitor_ty;

        impl<'de> de::Visitor<'de> for $visitor_ty {
            type Value = $ty;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("byte array")
            }

            #[inline]
            fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
                where V: de::SeqAccess<'de>
            {
                let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
                let mut values = Vec::with_capacity(len);

                while let Some(value) = try!(seq.next_element()) {
                    values.push(value);
                }

                Ok(values.into())
            }

            #[inline]
            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
                where E: de::Error
            {
                Ok($ty::from(v))
            }

            #[inline]
            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
                where E: de::Error
            {
                Ok($ty::from(v))
            }

            #[inline]
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
                where E: de::Error
            {
                Ok($ty::from(v))
            }

            #[inline]
            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
                where E: de::Error
            {
                Ok($ty::from(v))
            }
        }

        impl<'de> Deserialize<'de> for $ty {
            #[inline]
            fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
                where D: Deserializer<'de>
            {
                deserializer.deserialize_byte_buf($visitor_ty)
            }
        }
    );
}

serde_impl!(Bytes, BytesVisitor);
serde_impl!(BytesMut, BytesMutVisitor);
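// A minimal round-trip sketch for the impls above (assuming the `serde`
// feature plus a self-describing format such as `serde_json`):
//
//     let b = Bytes::from(&b"abc"[..]);
//     let json = serde_json::to_string(&b).unwrap(); // "[97,98,99]"
//     let back: Bytes = serde_json::from_str(&json).unwrap();
//     assert_eq!(b, back);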
bytes-0.4.12/tests/test_buf.rs010064400007650000024000000021521337670411000144700ustar0000000000000000
extern crate bytes;
extern crate byteorder;
extern crate iovec;

use bytes::Buf;
use iovec::IoVec;
use std::io::Cursor;

#[test]
fn test_fresh_cursor_vec() {
    let mut buf = Cursor::new(b"hello".to_vec());

    assert_eq!(buf.remaining(), 5);
    assert_eq!(buf.bytes(), b"hello");

    buf.advance(2);

    assert_eq!(buf.remaining(), 3);
    assert_eq!(buf.bytes(), b"llo");

    buf.advance(3);

    assert_eq!(buf.remaining(), 0);
    assert_eq!(buf.bytes(), b"");
}

#[test]
fn test_get_u8() {
    let mut buf = Cursor::new(b"\x21zomg");
    assert_eq!(0x21, buf.get_u8());
}

#[test]
fn test_get_u16() {
    let buf = b"\x21\x54zomg";
    assert_eq!(0x2154, Cursor::new(buf).get_u16_be());
    assert_eq!(0x5421, Cursor::new(buf).get_u16_le());
}

#[test]
#[should_panic]
fn test_get_u16_buffer_underflow() {
    let mut buf = Cursor::new(b"\x21");
    buf.get_u16_be();
}

#[test]
fn test_bufs_vec() {
    let buf = Cursor::new(b"hello world");

    let b1: &[u8] = &mut [0];
    let b2: &[u8] = &mut [0];
    let mut dst: [&IoVec; 2] = [b1.into(), b2.into()];

    assert_eq!(1, buf.bytes_vec(&mut dst[..]));
}

bytes-0.4.12/tests/test_buf_mut.rs010064400007650000024000000032471337670411000153630ustar0000000000000000
extern crate bytes;
extern crate byteorder;
extern crate iovec;

use bytes::{BufMut, BytesMut};
use iovec::IoVec;
use std::usize;
use std::fmt::Write;

#[test]
fn test_vec_as_mut_buf() {
    let mut buf = Vec::with_capacity(64);

    assert_eq!(buf.remaining_mut(), usize::MAX);

    unsafe {
        assert!(buf.bytes_mut().len() >= 64);
    }

    buf.put(&b"zomg"[..]);

    assert_eq!(&buf, b"zomg");

    assert_eq!(buf.remaining_mut(), usize::MAX - 4);
    assert_eq!(buf.capacity(), 64);

    for _ in 0..16 {
        buf.put(&b"zomg"[..]);
    }

    assert_eq!(buf.len(), 68);
}

#[test]
fn test_put_u8() {
    let mut buf = Vec::with_capacity(8);
    buf.put::<u8>(33);
    assert_eq!(b"\x21", &buf[..]);
}

#[test]
fn test_put_u16() {
    let mut buf = Vec::with_capacity(8);
    buf.put_u16_be(8532);
    assert_eq!(b"\x21\x54", &buf[..]);

    buf.clear();
    buf.put_u16_le(8532);
    assert_eq!(b"\x54\x21", &buf[..]);
}

#[test]
fn test_vec_advance_mut() {
    // Regression test for carllerche/bytes#108.
    let mut buf = Vec::with_capacity(8);
    unsafe {
        buf.advance_mut(12);
        assert_eq!(buf.len(), 12);
        assert!(buf.capacity() >= 12, "capacity: {}", buf.capacity());
    }
}

#[test]
fn test_clone() {
    let mut buf = BytesMut::with_capacity(100);
    buf.write_str("this is a test").unwrap();
    let buf2 = buf.clone();

    buf.write_str(" of our emergency broadcast system").unwrap();
    assert!(buf != buf2);
}

#[test]
fn test_bufs_vec_mut() {
    use std::mem;

    let mut buf = BytesMut::from(&b"hello world"[..]);

    unsafe {
        let mut dst: [&mut IoVec; 2] = mem::zeroed();
        assert_eq!(1, buf.bytes_vec_mut(&mut dst[..]));
    }
}

bytes-0.4.12/tests/test_bytes.rs010064400007650000024000000425341337670411000150500ustar0000000000000000
extern crate bytes;

use bytes::{Bytes, BytesMut, BufMut, IntoBuf};

const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &'static [u8] = b"hello world";

fn inline_cap() -> usize {
    use std::mem;
    4 * mem::size_of::<usize>() - 1
}

fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}

#[test]
fn test_bounds() {
    is_sync::<Bytes>();
    is_sync::<BytesMut>();
    is_send::<Bytes>();
    is_send::<BytesMut>();
}

#[test]
fn from_slice() {
    let a = Bytes::from(&b"abcdefgh"[..]);
    assert_eq!(a, b"abcdefgh"[..]);
    assert_eq!(a, &b"abcdefgh"[..]);
    assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
    assert_eq!(b"abcdefgh"[..], a);
    assert_eq!(&b"abcdefgh"[..], a);
    assert_eq!(Vec::from(&b"abcdefgh"[..]), a);

    let a = BytesMut::from(&b"abcdefgh"[..]);
    assert_eq!(a, b"abcdefgh"[..]);
    assert_eq!(a, &b"abcdefgh"[..]);
    assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
    assert_eq!(b"abcdefgh"[..], a);
    assert_eq!(&b"abcdefgh"[..], a);
    assert_eq!(Vec::from(&b"abcdefgh"[..]), a);
}

#[test]
fn fmt() {
    let a = format!("{:?}", Bytes::from(&b"abcdefg"[..]));
    let b = "b\"abcdefg\"";

    assert_eq!(a, b);

    let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..]));
    assert_eq!(a, b);
}

#[test]
fn fmt_write() {
    use std::fmt::Write;
    use std::iter::FromIterator;
    let s = String::from_iter((0..10).map(|_| "abcdefg"));

    let mut a = BytesMut::with_capacity(64);
    write!(a, "{}", &s[..64]).unwrap();
    assert_eq!(a, s[..64].as_bytes());

    let mut b = BytesMut::with_capacity(64);
    write!(b, "{}", &s[..32]).unwrap();
    write!(b, "{}", &s[32..64]).unwrap();
    assert_eq!(b, s[..64].as_bytes());

    let mut c = BytesMut::with_capacity(64);
    write!(c, "{}", s).unwrap_err();
    assert!(c.is_empty());
}

#[test]
fn len() {
    let a = Bytes::from(&b"abcdefg"[..]);
    assert_eq!(a.len(), 7);

    let a = BytesMut::from(&b"abcdefg"[..]);
    assert_eq!(a.len(), 7);

    let a = Bytes::from(&b""[..]);
    assert!(a.is_empty());

    let a = BytesMut::from(&b""[..]);
    assert!(a.is_empty());
}

#[test]
fn index() {
    let a = Bytes::from(&b"hello world"[..]);
    assert_eq!(a[0..5], *b"hello");
}

#[test]
fn slice() {
    let a = Bytes::from(&b"hello world"[..]);

    let b = a.slice(3, 5);
    assert_eq!(b, b"lo"[..]);

    let b = a.slice(0, 0);
    assert_eq!(b, b""[..]);

    let b = a.slice(3, 3);
    assert_eq!(b, b""[..]);

    let b = a.slice(a.len(), a.len());
    assert_eq!(b, b""[..]);

    let b = a.slice_to(5);
    assert_eq!(b, b"hello"[..]);

    let b = a.slice_from(3);
    assert_eq!(b, b"lo world"[..]);
}

#[test]
#[should_panic]
fn slice_oob_1() {
    let a = Bytes::from(&b"hello world"[..]);
    a.slice(5, inline_cap() + 1);
}

#[test]
#[should_panic]
fn slice_oob_2() {
    let a = Bytes::from(&b"hello world"[..]);
    a.slice(inline_cap() + 1, inline_cap() + 5);
}

#[test]
fn split_off() {
    let mut hello = Bytes::from(&b"helloworld"[..]);
    let world = hello.split_off(5);

    assert_eq!(hello, &b"hello"[..]);
    assert_eq!(world, &b"world"[..]);

    let mut hello = BytesMut::from(&b"helloworld"[..]);
    let world = hello.split_off(5);

    assert_eq!(hello, &b"hello"[..]);
    assert_eq!(world, &b"world"[..]);
}
&b"hello"[..]); assert_eq!(world, &b"world"[..]); } #[test] #[should_panic] fn split_off_oob() { let mut hello = Bytes::from(&b"helloworld"[..]); hello.split_off(inline_cap() + 1); } #[test] fn split_off_uninitialized() { let mut bytes = BytesMut::with_capacity(1024); let other = bytes.split_off(128); assert_eq!(bytes.len(), 0); assert_eq!(bytes.capacity(), 128); assert_eq!(other.len(), 0); assert_eq!(other.capacity(), 896); } #[test] fn split_off_to_loop() { let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; for i in 0..(s.len() + 1) { { let mut bytes = Bytes::from(&s[..]); let off = bytes.split_off(i); assert_eq!(i, bytes.len()); let mut sum = Vec::new(); sum.extend(&bytes); sum.extend(&off); assert_eq!(&s[..], &sum[..]); } { let mut bytes = BytesMut::from(&s[..]); let off = bytes.split_off(i); assert_eq!(i, bytes.len()); let mut sum = Vec::new(); sum.extend(&bytes); sum.extend(&off); assert_eq!(&s[..], &sum[..]); } { let mut bytes = Bytes::from(&s[..]); let off = bytes.split_to(i); assert_eq!(i, off.len()); let mut sum = Vec::new(); sum.extend(&off); sum.extend(&bytes); assert_eq!(&s[..], &sum[..]); } { let mut bytes = BytesMut::from(&s[..]); let off = bytes.split_to(i); assert_eq!(i, off.len()); let mut sum = Vec::new(); sum.extend(&off); sum.extend(&bytes); assert_eq!(&s[..], &sum[..]); } } } #[test] fn split_to_1() { // Inline let mut a = Bytes::from(SHORT); let b = a.split_to(4); assert_eq!(SHORT[4..], a); assert_eq!(SHORT[..4], b); // Allocated let mut a = Bytes::from(LONG); let b = a.split_to(4); assert_eq!(LONG[4..], a); assert_eq!(LONG[..4], b); let mut a = Bytes::from(LONG); let b = a.split_to(30); assert_eq!(LONG[30..], a); assert_eq!(LONG[..30], b); } #[test] fn split_to_2() { let mut a = Bytes::from(LONG); assert_eq!(LONG, a); let b = a.split_to(1); assert_eq!(LONG[1..], a); drop(b); } #[test] #[should_panic] fn split_to_oob() { let mut hello = Bytes::from(&b"helloworld"[..]); hello.split_to(inline_cap() + 1); } #[test] #[should_panic] fn split_to_oob_mut() { let mut hello = BytesMut::from(&b"helloworld"[..]); hello.split_to(inline_cap() + 1); } #[test] fn split_to_uninitialized() { let mut bytes = BytesMut::with_capacity(1024); let other = bytes.split_to(128); assert_eq!(bytes.len(), 0); assert_eq!(bytes.capacity(), 896); assert_eq!(other.len(), 0); assert_eq!(other.capacity(), 128); } #[test] fn split_off_to_at_gt_len() { fn make_bytes() -> Bytes { let mut bytes = BytesMut::with_capacity(100); bytes.put_slice(&[10, 20, 30, 40]); bytes.freeze() } use std::panic; make_bytes().split_to(4); make_bytes().split_off(4); assert!(panic::catch_unwind(move || { make_bytes().split_to(5); }).is_err()); assert!(panic::catch_unwind(move || { make_bytes().split_off(5); }).is_err()); } #[test] fn fns_defined_for_bytes_mut() { let mut bytes = BytesMut::from(&b"hello world"[..]); bytes.as_ptr(); bytes.as_mut_ptr(); // Iterator let v: Vec = bytes.iter().map(|b| *b).collect(); assert_eq!(&v[..], bytes); } #[test] fn mut_into_buf() { let mut v = vec![0, 0, 0, 0]; let s = &mut v[..]; s.into_buf().put_u32_le(42); } #[test] fn reserve_convert() { // Inline -> Vec let mut bytes = BytesMut::with_capacity(8); bytes.put("hello"); bytes.reserve(40); assert_eq!(bytes.capacity(), 45); assert_eq!(bytes, "hello"); // Inline -> Inline let mut bytes = BytesMut::with_capacity(inline_cap()); bytes.put("abcdefghijkl"); let a = bytes.split_to(10); bytes.reserve(inline_cap() - 3); assert_eq!(inline_cap(), bytes.capacity()); assert_eq!(bytes, "kl"); assert_eq!(a, "abcdefghij"); // Vec -> Vec let mut 
#[test]
fn reserve_convert() {
    // Inline -> Vec
    let mut bytes = BytesMut::with_capacity(8);
    bytes.put("hello");
    bytes.reserve(40);
    assert_eq!(bytes.capacity(), 45);
    assert_eq!(bytes, "hello");

    // Inline -> Inline
    let mut bytes = BytesMut::with_capacity(inline_cap());
    bytes.put("abcdefghijkl");

    let a = bytes.split_to(10);
    bytes.reserve(inline_cap() - 3);
    assert_eq!(inline_cap(), bytes.capacity());

    assert_eq!(bytes, "kl");
    assert_eq!(a, "abcdefghij");

    // Vec -> Vec
    let mut bytes = BytesMut::from(LONG);
    bytes.reserve(64);
    assert_eq!(bytes.capacity(), LONG.len() + 64);

    // Arc -> Vec
    let mut bytes = BytesMut::from(LONG);
    let a = bytes.split_to(30);

    bytes.reserve(128);
    assert!(bytes.capacity() >= bytes.len() + 128);

    drop(a);
}

#[test]
fn reserve_growth() {
    let mut bytes = BytesMut::with_capacity(64);
    bytes.put("hello world");
    let _ = bytes.take();

    bytes.reserve(65);
    assert_eq!(bytes.capacity(), 128);
}

#[test]
fn reserve_allocates_at_least_original_capacity() {
    let mut bytes = BytesMut::with_capacity(1024);

    for i in 0..1020 {
        bytes.put(i as u8);
    }

    let _other = bytes.take();

    bytes.reserve(16);
    assert_eq!(bytes.capacity(), 1024);
}

#[test]
fn reserve_max_original_capacity_value() {
    const SIZE: usize = 128 * 1024;

    let mut bytes = BytesMut::with_capacity(SIZE);

    for _ in 0..SIZE {
        bytes.put(0u8);
    }

    let _other = bytes.take();

    bytes.reserve(16);
    assert_eq!(bytes.capacity(), 64 * 1024);
}

// Without either looking at the internals of the BytesMut or doing weird stuff
// with the memory allocator, there's no good way to automatically verify from
// within the program that this actually recycles memory. Instead, just exercise
// the code path to ensure that the results are correct.
#[test]
fn reserve_vec_recycling() {
    let mut bytes = BytesMut::from(Vec::with_capacity(16));
    assert_eq!(bytes.capacity(), 16);
    bytes.put("0123456789012345");
    bytes.advance(10);
    assert_eq!(bytes.capacity(), 6);
    bytes.reserve(8);
    assert_eq!(bytes.capacity(), 16);
}

#[test]
fn reserve_in_arc_unique_does_not_overallocate() {
    let mut bytes = BytesMut::with_capacity(1000);
    bytes.take();

    // now bytes is Arc and refcount == 1

    assert_eq!(1000, bytes.capacity());
    bytes.reserve(2001);
    assert_eq!(2001, bytes.capacity());
}

#[test]
fn reserve_in_arc_unique_doubles() {
    let mut bytes = BytesMut::with_capacity(1000);
    bytes.take();

    // now bytes is Arc and refcount == 1

    assert_eq!(1000, bytes.capacity());
    bytes.reserve(1001);
    assert_eq!(2000, bytes.capacity());
}

#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
    let mut bytes = BytesMut::with_capacity(1000);
    let _copy = bytes.take();

    // now bytes is Arc and refcount == 2

    assert_eq!(1000, bytes.capacity());
    bytes.reserve(2001);
    assert_eq!(2001, bytes.capacity());
}
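// Illustrative addition, not part of the original suite, restating the
// guarantee exercised by the reserve tests above: `reserve` ensures room for
// at least the requested additional bytes, and (as `reserve_growth` and
// `reserve_in_arc_unique_doubles` suggest) small requests may be rounded up
// by doubling the current allocation. Only the guarantee is asserted here.
#[test]
fn reserve_guarantee_sketch() {
    let mut bytes = BytesMut::with_capacity(100);
    bytes.put(&[0u8; 100][..]);

    bytes.reserve(150); // needs room for 250 bytes in total
    assert!(bytes.capacity() >= 250);
}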
#[test]
fn inline_storage() {
    let mut bytes = BytesMut::with_capacity(inline_cap());
    let zero = [0u8; 64];

    bytes.put(&zero[0..inline_cap()]);
    assert_eq!(*bytes, zero[0..inline_cap()]);
}

#[test]
fn extend_mut() {
    let mut bytes = BytesMut::with_capacity(0);
    bytes.extend(LONG);
    assert_eq!(*bytes, LONG[..]);
}

#[test]
fn extend_shr() {
    let mut bytes = Bytes::new();
    bytes.extend(LONG);
    assert_eq!(*bytes, LONG[..]);
}

#[test]
fn extend_from_slice_mut() {
    for &i in &[3, 34] {
        let mut bytes = BytesMut::new();
        bytes.extend_from_slice(&LONG[..i]);
        bytes.extend_from_slice(&LONG[i..]);
        assert_eq!(LONG[..], *bytes);
    }
}

#[test]
fn extend_from_slice_shr() {
    for &i in &[3, 34] {
        let mut bytes = Bytes::new();
        bytes.extend_from_slice(&LONG[..i]);
        bytes.extend_from_slice(&LONG[i..]);
        assert_eq!(LONG[..], *bytes);
    }
}

#[test]
fn from_static() {
    let mut a = Bytes::from_static(b"ab");
    let b = a.split_off(1);

    assert_eq!(a, b"a"[..]);
    assert_eq!(b, b"b"[..]);
}

#[test]
fn advance_inline() {
    let mut a = Bytes::from(&b"hello world"[..]);
    a.advance(6);
    assert_eq!(a, &b"world"[..]);
}

#[test]
fn advance_static() {
    let mut a = Bytes::from_static(b"hello world");
    a.advance(6);
    assert_eq!(a, &b"world"[..]);
}

#[test]
fn advance_vec() {
    let mut a = BytesMut::from(b"hello world boooo yah world zomg wat wat".to_vec());
    a.advance(16);
    assert_eq!(a, b"o yah world zomg wat wat"[..]);

    a.advance(4);
    assert_eq!(a, b"h world zomg wat wat"[..]);

    // Reserve some space.
    a.reserve(1024);
    assert_eq!(a, b"h world zomg wat wat"[..]);

    a.advance(6);
    assert_eq!(a, b"d zomg wat wat"[..]);
}

#[test]
#[should_panic]
fn advance_past_len() {
    let mut a = BytesMut::from(b"hello world".to_vec());
    a.advance(20);
}

#[test]
// Only run these tests on little endian systems. CI uses qemu for testing
// big endian... and qemu doesn't really support threading all that well.
#[cfg(target_endian = "little")]
fn stress() {
    // Tests promoting a buffer from a vec -> shared in a concurrent situation
    use std::sync::{Arc, Barrier};
    use std::thread;

    const THREADS: usize = 8;
    const ITERS: usize = 1_000;

    for i in 0..ITERS {
        let data = [i as u8; 256];
        let buf = Arc::new(Bytes::from(&data[..]));

        let barrier = Arc::new(Barrier::new(THREADS));
        let mut joins = Vec::with_capacity(THREADS);

        for _ in 0..THREADS {
            let c = barrier.clone();
            let buf = buf.clone();

            joins.push(thread::spawn(move || {
                c.wait();
                let buf: Bytes = (*buf).clone();
                drop(buf);
            }));
        }

        for th in joins {
            th.join().unwrap();
        }

        assert_eq!(*buf, data[..]);
    }
}

#[test]
fn partial_eq_bytesmut() {
    let bytes = Bytes::from(&b"The quick red fox"[..]);
    let bytesmut = BytesMut::from(&b"The quick red fox"[..]);
    assert!(bytes == bytesmut);
    assert!(bytesmut == bytes);
    let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]);
    assert!(bytes2 != bytesmut);
    assert!(bytesmut != bytes2);
}

#[test]
fn unsplit_basic() {
    let mut buf = BytesMut::with_capacity(64);
    buf.extend_from_slice(b"aaabbbcccddd");

    let splitted = buf.split_off(6);
    assert_eq!(b"aaabbb", &buf[..]);
    assert_eq!(b"cccddd", &splitted[..]);

    buf.unsplit(splitted);
    assert_eq!(b"aaabbbcccddd", &buf[..]);
}

#[test]
fn unsplit_empty_other() {
    let mut buf = BytesMut::with_capacity(64);
    buf.extend_from_slice(b"aaabbbcccddd");

    // empty other
    let other = BytesMut::new();

    buf.unsplit(other);
    assert_eq!(b"aaabbbcccddd", &buf[..]);
}

#[test]
fn unsplit_empty_self() {
    // empty self
    let mut buf = BytesMut::new();

    let mut other = BytesMut::with_capacity(64);
    other.extend_from_slice(b"aaabbbcccddd");

    buf.unsplit(other);
    assert_eq!(b"aaabbbcccddd", &buf[..]);
}

#[test]
fn unsplit_inline_arc() {
    let mut buf = BytesMut::with_capacity(8); //inline
    buf.extend_from_slice(b"aaaabbbb");

    let mut buf2 = BytesMut::with_capacity(64);
    buf2.extend_from_slice(b"ccccddddeeee");

    buf2.split_off(8); //arc

    buf.unsplit(buf2);
    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}

#[test]
fn unsplit_arc_inline() {
    let mut buf = BytesMut::with_capacity(64);
    buf.extend_from_slice(b"aaaabbbbeeee");

    buf.split_off(8); //arc

    let mut buf2 = BytesMut::with_capacity(8); //inline
    buf2.extend_from_slice(b"ccccdddd");

    buf.unsplit(buf2);
    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}

#[test]
fn unsplit_both_inline() {
    let mut buf = BytesMut::with_capacity(16); //inline
    buf.extend_from_slice(b"aaaabbbbccccdddd");

    let splitted = buf.split_off(8); // both inline
    assert_eq!(b"aaaabbbb", &buf[..]);
    assert_eq!(b"ccccdddd", &splitted[..]);

    buf.unsplit(splitted);
    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}

#[test]
fn unsplit_arc_different() {
    let mut buf = BytesMut::with_capacity(64);
    buf.extend_from_slice(b"aaaabbbbeeee");

    buf.split_off(8); //arc

    let mut buf2 = BytesMut::with_capacity(64);
    buf2.extend_from_slice(b"ccccddddeeee");

    buf2.split_off(8); //arc

    buf.unsplit(buf2);
    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
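// Illustrative addition, not part of the original suite: `unsplit` is the
// inverse of `split_off`. When the two halves still sit adjacently in one
// allocation it reattaches them in place; otherwise (as below, with two
// unrelated buffers, or in `unsplit_arc_non_contiguous` next) it falls back
// to copying, so the observable result is concatenation either way.
#[test]
fn unsplit_unrelated_buffers_sketch() {
    let mut buf = BytesMut::with_capacity(64);
    buf.extend_from_slice(b"front-");

    let mut other = BytesMut::with_capacity(64);
    other.extend_from_slice(b"back");

    buf.unsplit(other);
    assert_eq!(b"front-back", &buf[..]);
}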
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf.unsplit(buf3); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn unsplit_two_split_offs() { let mut buf = BytesMut::with_capacity(64); buf.extend_from_slice(b"aaaabbbbccccdddd"); let mut buf2 = buf.split_off(8); //arc let buf3 = buf2.split_off(4); //arc buf2.unsplit(buf3); buf.unsplit(buf2); assert_eq!(b"aaaabbbbccccdddd", &buf[..]); } #[test] fn from_iter_no_size_hint() { use std::iter; let mut expect = vec![]; let actual: Bytes = iter::repeat(b'x') .scan(100, |cnt, item| { if *cnt >= 1 { *cnt -= 1; expect.push(item); Some(item) } else { None } }) .collect(); assert_eq!(&actual[..], &expect[..]); } fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) { let slice = &(bytes.as_ref()[start..end]); let sub = bytes.slice_ref(&slice); assert_eq!(&sub[..], expected); } #[test] fn slice_ref_works() { let bytes = Bytes::from(&b"012345678"[..]); test_slice_ref(&bytes, 0, 0, b""); test_slice_ref(&bytes, 0, 3, b"012"); test_slice_ref(&bytes, 2, 6, b"2345"); test_slice_ref(&bytes, 7, 9, b"78"); test_slice_ref(&bytes, 9, 9, b""); } #[test] fn slice_ref_empty() { let bytes = Bytes::from(&b""[..]); let slice = &(bytes.as_ref()[0..0]); let sub = bytes.slice_ref(&slice); assert_eq!(&sub[..], b""); } #[test] #[should_panic] fn slice_ref_catches_not_a_subset() { let bytes = Bytes::from(&b"012345678"[..]); let slice = &b"012345"[0..4]; bytes.slice_ref(slice); } #[test] #[should_panic] fn slice_ref_catches_not_an_empty_subset() { let bytes = Bytes::from(&b"012345678"[..]); let slice = &b""[0..0]; bytes.slice_ref(slice); } #[test] #[should_panic] fn empty_slice_ref_catches_not_an_empty_subset() { let bytes = Bytes::from(&b""[..]); let slice = &b""[0..0]; bytes.slice_ref(slice); } bytes-0.4.12/tests/test_chain.rs010064400007650000024000000062301337670411000147770ustar0000000000000000extern crate bytes; extern crate iovec; use bytes::{Buf, BufMut, Bytes, BytesMut}; use bytes::buf::Chain; use iovec::IoVec; use std::io::Cursor; #[test] fn collect_two_bufs() { let a = Cursor::new(Bytes::from(&b"hello"[..])); let b = Cursor::new(Bytes::from(&b"world"[..])); let res: Vec = a.chain(b).collect(); assert_eq!(res, &b"helloworld"[..]); } #[test] fn writing_chained() { let mut a = BytesMut::with_capacity(64); let mut b = BytesMut::with_capacity(64); { let mut buf = Chain::new(&mut a, &mut b); for i in 0..128 { buf.put(i as u8); } } assert_eq!(64, a.len()); assert_eq!(64, b.len()); for i in 0..64 { let expect = i as u8; assert_eq!(expect, a[i]); assert_eq!(expect + 64, b[i]); } } #[test] fn iterating_two_bufs() { let a = Cursor::new(Bytes::from(&b"hello"[..])); let b = Cursor::new(Bytes::from(&b"world"[..])); let res: Vec = a.chain(b).iter().collect(); assert_eq!(res, &b"helloworld"[..]); } #[test] fn vectored_read() { let a = Cursor::new(Bytes::from(&b"hello"[..])); let b = Cursor::new(Bytes::from(&b"world"[..])); let mut buf = a.chain(b); { let b1: &[u8] = &mut [0]; let b2: &[u8] = &mut [0]; let b3: &[u8] = &mut [0]; let b4: &[u8] = &mut [0]; let mut iovecs: [&IoVec; 4] = [b1.into(), b2.into(), b3.into(), b4.into()]; assert_eq!(2, buf.bytes_vec(&mut iovecs)); assert_eq!(iovecs[0][..], b"hello"[..]); assert_eq!(iovecs[1][..], b"world"[..]); assert_eq!(iovecs[2][..], b"\0"[..]); assert_eq!(iovecs[3][..], b"\0"[..]); } buf.advance(2); { let b1: &[u8] = &mut [0]; let b2: &[u8] = &mut [0]; let b3: &[u8] = &mut [0]; let b4: &[u8] = &mut [0]; let mut iovecs: [&IoVec; 4] = 
#[test]
fn vectored_read() {
    let a = Cursor::new(Bytes::from(&b"hello"[..]));
    let b = Cursor::new(Bytes::from(&b"world"[..]));

    let mut buf = a.chain(b);

    {
        let b1: &[u8] = &mut [0];
        let b2: &[u8] = &mut [0];
        let b3: &[u8] = &mut [0];
        let b4: &[u8] = &mut [0];
        let mut iovecs: [&IoVec; 4] = [b1.into(), b2.into(), b3.into(), b4.into()];

        assert_eq!(2, buf.bytes_vec(&mut iovecs));
        assert_eq!(iovecs[0][..], b"hello"[..]);
        assert_eq!(iovecs[1][..], b"world"[..]);
        assert_eq!(iovecs[2][..], b"\0"[..]);
        assert_eq!(iovecs[3][..], b"\0"[..]);
    }

    buf.advance(2);

    {
        let b1: &[u8] = &mut [0];
        let b2: &[u8] = &mut [0];
        let b3: &[u8] = &mut [0];
        let b4: &[u8] = &mut [0];
        let mut iovecs: [&IoVec; 4] = [b1.into(), b2.into(), b3.into(), b4.into()];

        assert_eq!(2, buf.bytes_vec(&mut iovecs));
        assert_eq!(iovecs[0][..], b"llo"[..]);
        assert_eq!(iovecs[1][..], b"world"[..]);
        assert_eq!(iovecs[2][..], b"\0"[..]);
        assert_eq!(iovecs[3][..], b"\0"[..]);
    }

    buf.advance(3);

    {
        let b1: &[u8] = &mut [0];
        let b2: &[u8] = &mut [0];
        let b3: &[u8] = &mut [0];
        let b4: &[u8] = &mut [0];
        let mut iovecs: [&IoVec; 4] = [b1.into(), b2.into(), b3.into(), b4.into()];

        assert_eq!(1, buf.bytes_vec(&mut iovecs));
        assert_eq!(iovecs[0][..], b"world"[..]);
        assert_eq!(iovecs[1][..], b"\0"[..]);
        assert_eq!(iovecs[2][..], b"\0"[..]);
        assert_eq!(iovecs[3][..], b"\0"[..]);
    }

    buf.advance(3);

    {
        let b1: &[u8] = &mut [0];
        let b2: &[u8] = &mut [0];
        let b3: &[u8] = &mut [0];
        let b4: &[u8] = &mut [0];
        let mut iovecs: [&IoVec; 4] = [b1.into(), b2.into(), b3.into(), b4.into()];

        assert_eq!(1, buf.bytes_vec(&mut iovecs));
        assert_eq!(iovecs[0][..], b"ld"[..]);
        assert_eq!(iovecs[1][..], b"\0"[..]);
        assert_eq!(iovecs[2][..], b"\0"[..]);
        assert_eq!(iovecs[3][..], b"\0"[..]);
    }
}

bytes-0.4.12/tests/test_debug.rs

extern crate bytes;

use bytes::Bytes;

#[test]
fn fmt() {
    let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect();

    let expected = "b\"\
        \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\
        \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
        \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
        \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
        \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
        @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
        `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
        \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
        \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
        \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\
        \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
        \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\
        \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
        \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\
        \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
        \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\
        \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
        \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\
        \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
        \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\
        \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
        \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\
        \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\"";

    assert_eq!(expected, format!("{:?}", Bytes::from(vec)));
}

bytes-0.4.12/tests/test_from_buf.rs

extern crate bytes;

use bytes::{Buf, Bytes, BytesMut};
use std::io::Cursor;

const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &'static [u8] = b"hello world";

#[test]
fn collect_to_vec() {
    let buf: Vec<u8> = Cursor::new(SHORT).collect();
    assert_eq!(buf, SHORT);

    let buf: Vec<u8> = Cursor::new(LONG).collect();
    assert_eq!(buf, LONG);
}

#[test]
fn collect_to_bytes() {
    let buf: Bytes = Cursor::new(SHORT).collect();
    assert_eq!(buf, SHORT);

    let buf: Bytes = Cursor::new(LONG).collect();
    assert_eq!(buf, LONG);
}

#[test]
fn collect_to_bytes_mut() {
    let buf: BytesMut = Cursor::new(SHORT).collect();
    assert_eq!(buf, SHORT);

    let buf: BytesMut = Cursor::new(LONG).collect();
    assert_eq!(buf, LONG);
}

bytes-0.4.12/tests/test_iter.rs

extern crate bytes;

use bytes::{Buf, IntoBuf, Bytes};

#[test]
fn iter_len() {
    let buf = Bytes::from(&b"hello world"[..]).into_buf();
    let iter = buf.iter();

    assert_eq!(iter.size_hint(), (11, Some(11)));
    assert_eq!(iter.len(), 11);
}
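// Illustrative addition, not part of the original suite: the iterator
// returned by `Buf::iter` yields the remaining bytes in order, so it can be
// collected like any other `Iterator<Item = u8>`.
#[test]
fn iter_collect_sketch() {
    let buf = Bytes::from(&b"abc"[..]).into_buf();
    let v: Vec<u8> = buf.iter().collect();
    assert_eq!(&v[..], b"abc");
}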
#[test]
fn empty_iter_len() {
    let buf = Bytes::from(&b""[..]).into_buf();
    let iter = buf.iter();

    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.len(), 0);
}

bytes-0.4.12/tests/test_reader.rs

extern crate bytes;

use std::io::{BufRead, Cursor, Read};
use bytes::Buf;

#[test]
fn read() {
    let buf1 = Cursor::new(b"hello ");
    let buf2 = Cursor::new(b"world");
    let buf = Buf::chain(buf1, buf2); // Disambiguate with Read::chain
    let mut buffer = Vec::new();
    buf.reader().read_to_end(&mut buffer).unwrap();
    assert_eq!(b"hello world", &buffer[..]);
}

#[test]
fn buf_read() {
    let buf1 = Cursor::new(b"hell");
    let buf2 = Cursor::new(b"o\nworld");
    let mut reader = Buf::chain(buf1, buf2).reader();
    let mut line = String::new();
    reader.read_line(&mut line).unwrap();
    assert_eq!("hello\n", &line);
    line.clear();
    reader.read_line(&mut line).unwrap();
    assert_eq!("world", &line);
}

bytes-0.4.12/tests/test_serde.rs

#![cfg(feature = "serde")]

extern crate bytes;
extern crate serde_test;
use serde_test::{Token, assert_tokens};

#[test]
fn test_ser_de_empty() {
    let b = bytes::Bytes::new();
    assert_tokens(&b, &[Token::Bytes(b"")]);
    let b = bytes::BytesMut::with_capacity(0);
    assert_tokens(&b, &[Token::Bytes(b"")]);
}

#[test]
fn test_ser_de() {
    let b = bytes::Bytes::from(&b"bytes"[..]);
    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
    let b = bytes::BytesMut::from(&b"bytes"[..]);
    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
}

bytes-0.4.12/tests/test_take.rs

extern crate bytes;

use bytes::Buf;
use std::io::Cursor;

#[test]
fn long_take() {
    // Tests that take with a size greater than the buffer length will not
    // overrun the buffer. Regression test for #138.
    let buf = Cursor::new(b"hello world").take(100);
    assert_eq!(11, buf.remaining());
    assert_eq!(b"hello world", buf.bytes());
}

bytes-0.4.12/.cargo_vcs_info.json

{
  "git": {
    "sha1": "4948b1053b1af8f474a107b958dd0086ada06b17"
  }
}