capnp-futures-0.19.0/.cargo_vcs_info.json0000644000000001530000000000100137160ustar { "git": { "sha1": "4e1a30a1ea658d7c8f5bf9a91238fe7953058da4" }, "path_in_vcs": "capnp-futures" }capnp-futures-0.19.0/CHANGELOG.md000064400000000000000000000037511046102023000143260ustar 00000000000000## v0.19.0 Follow v0.19.0 release of other capnp crates. ## v0.18.2 - Fix overflow bug in read_message that could potentially lead to denial of service attacks on 32-bit targets. ## v0.18.1 - Fix two bugs in serialize_packed::PackedRead where a premature end-of-file could trigger an infinite loop. ## v0.18.0 - Follow v0.18.0 release of other capnp crates. ## v0.17.0 - Follow v0.17.0 release of other capnp crates. ## v0.16.0 - Follow v0.16.0 release of other capnp crates. ## v0.15.1 - Fill in unimiplemented len() method of write_queue::Sender. - Add is_empty() method to write_queue::Sender. - Apply a bunch of formatting and style fixes that should have no observable effects. ## v0.15.0 - Follow v0.15.0 release of other capnp crates. ## v0.14.2 - Add serialize_packed module. ## v0.14.1 - Include LICENSE in published crate. ## v0.14.0 - Make `read_message()` return an error on EOF, to match the behavior of `capnp::serialize::read_message()`. ## v0.13.2 - Rename `read_message()` to `try_read_message()`, for consistency with `capnp::serialize::try_read_message()`. ## v0.13.1 - Remove unneeded dependency on 'executor' feature of the future crate. ## v0.13.0 - Remove some requirements for 'static lifetimes. ## v0.12.0 - Use new capnp::serialize::SegmentLengthsBuilder API. ## v0.11.0 - Remove serialize::Transport. - Switch to std::future::Future. - Bump minimum supported rustc version to 1.39.0. ## v0.10.1 - Remove dependency on byteorder crate, in favor of from_le_bytes() and to_le_bytes(). ## v0.10.0 - Update to 2018 edition. - Update minimum required rustc version to 1.35. ## v0.9.1 - Call flush() after writing each message, to allow usage with a std::io::BufWriter wrapper. 
## v0.9.0 - No changes -- just a version bump to match the rest of the capnp crates. ## v0.1.1 - Add `serialize::Transport`. - Update byteorder dependency. ## v0.1.0 - Add `WriteQueue`. ## v0.0.2 - Add `ReadStream`. ## v0.0.1 - Code pulled in from https://github.com/dwrensha/capnproto-rust/pull/66. capnp-futures-0.19.0/CONTRIBUTORS000064400000000000000000000005671046102023000143770ustar 00000000000000The following people have made large code contributions to this repository. Those contributions are copyright the respective authors and licensed by them under the same MIT license terms as the rest of the library. David Renshaw: Primary Author Dan Burkert: The [initial implementation](https://github.com/dwrensha/capnproto-rust/pull/42) of nonblocking reading and writing.capnp-futures-0.19.0/Cargo.toml0000644000000022060000000000100117150ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" name = "capnp-futures" version = "0.19.0" authors = ["David Renshaw "] description = "async serialization for Cap'n Proto messages" documentation = "https://docs.rs/capnp-futures/" readme = "README.md" keywords = ["async"] license = "MIT" repository = "https://github.com/dwrensha/capnproto-rust" [dependencies.capnp] version = "0.19.0" [dependencies.futures] version = "0.3.0" features = ["std"] default-features = false [dev-dependencies.capnp] version = "0.19.0" features = ["quickcheck"] [dev-dependencies.futures] version = "0.3.0" features = ["executor"] default-features = false [dev-dependencies.quickcheck] version = "1" [lints] capnp-futures-0.19.0/Cargo.toml.orig000064400000000000000000000013121046102023000153730ustar 00000000000000[package] name = "capnp-futures" version = "0.19.0" authors = [ "David Renshaw " ] license = "MIT" description = "async serialization for Cap'n Proto messages" repository = "https://github.com/dwrensha/capnproto-rust" documentation = "https://docs.rs/capnp-futures/" edition = "2021" keywords = ["async"] [dependencies] capnp = { version = "0.19.0", path = "../capnp" } [dependencies.futures] version = "0.3.0" default-features = false features = ["std"] [dev-dependencies.futures] version = "0.3.0" default-features = false features = ["executor"] [dev-dependencies] capnp = { version = "0.19.0", path = "../capnp", features = ["quickcheck"] } quickcheck = "1" [lints] workspace = true capnp-futures-0.19.0/LICENSE000064400000000000000000000021121046102023000135100ustar 00000000000000Copyright (c) 2013-2018 Sandstorm Development Group, Inc. 
and contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.capnp-futures-0.19.0/README.md000064400000000000000000000003361046102023000137700ustar 00000000000000[![crates.io](https://img.shields.io/crates/v/capnp-futures.svg)](https://crates.io/crates/capnp-futures) [documentation](https://docs.rs/capnp-futures/) Asynchronous reading and writing of Cap'n Proto messages in Rust. capnp-futures-0.19.0/src/lib.rs000064400000000000000000000024371046102023000144200ustar 00000000000000// Copyright (c) 2013-2016 Sandstorm Development Group, Inc. 
and contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. pub use read_stream::ReadStream; pub use write_queue::{write_queue, Sender}; mod read_stream; pub mod serialize; pub mod serialize_packed; mod write_queue; capnp-futures-0.19.0/src/read_stream.rs000064400000000000000000000056761046102023000161500ustar 00000000000000// Copyright (c) 2016 Sandstorm Development Group, Inc. 
and contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use futures::future::Future; use futures::stream::Stream; use futures::AsyncRead; use std::pin::Pin; use std::task::{Context, Poll}; use capnp::{message, Error}; async fn read_next_message( mut reader: R, options: message::ReaderOptions, ) -> Result<(R, Option>), Error> where R: AsyncRead + Unpin, { let m = crate::serialize::try_read_message(&mut reader, options).await?; Ok((reader, m)) } type ReadStreamResult = Result<(R, Option>), Error>; /// An incoming sequence of messages. 
#[must_use = "streams do nothing unless polled"] pub struct ReadStream<'a, R> where R: AsyncRead + Unpin, { options: message::ReaderOptions, read: Pin> + 'a>>, } impl<'a, R> Unpin for ReadStream<'a, R> where R: AsyncRead + Unpin {} impl<'a, R> ReadStream<'a, R> where R: AsyncRead + Unpin + 'a, { pub fn new(reader: R, options: message::ReaderOptions) -> Self { ReadStream { read: Box::pin(read_next_message(reader, options)), options, } } } impl<'a, R> Stream for ReadStream<'a, R> where R: AsyncRead + Unpin + 'a, { type Item = Result, Error>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let (r, m) = match Future::poll(self.read.as_mut(), cx) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), Poll::Ready(Ok(x)) => x, }; self.read = Box::pin(read_next_message(r, self.options)); match m { Some(message) => Poll::Ready(Some(Ok(message))), None => Poll::Ready(None), } } } capnp-futures-0.19.0/src/serialize.rs000064400000000000000000000521551046102023000156430ustar 00000000000000// Copyright (c) 2013-2016 Sandstorm Development Group, Inc. and contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //! Asynchronous reading and writing of messages using the //! [standard stream framing](https://capnproto.org/encoding.html#serialization-over-a-stream). use capnp::serialize::{OwnedSegments, SegmentLengthsBuilder}; use capnp::{message, Error, OutputSegments, Result}; use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; /// Asynchronously reads a message from `reader`. pub async fn read_message( reader: R, options: message::ReaderOptions, ) -> Result> where R: AsyncRead + Unpin, { match try_read_message(reader, options).await? { Some(s) => Ok(s), None => Err(Error::failed("Premature end of file".to_string())), } } /// Asynchronously reads a message from `reader`. Returns `None` if `reader` /// has zero bytes left (i.e. is at end-of-file). To read a stream /// containing an unknown number of messages, you could call this function /// repeatedly until it returns `None`. pub async fn try_read_message( mut reader: R, options: message::ReaderOptions, ) -> Result>> where R: AsyncRead + Unpin, { let Some(segment_lengths_builder) = read_segment_table(&mut reader, options).await? 
else { return Ok(None); }; Ok(Some( read_segments( reader, segment_lengths_builder.into_owned_segments(), options, ) .await?, )) } async fn read_segment_table( mut reader: R, options: message::ReaderOptions, ) -> Result> where R: AsyncRead + Unpin, { let mut buf: [u8; 8] = [0; 8]; { let n = reader.read(&mut buf[..]).await?; if n == 0 { return Ok(None); } else if n < 8 { reader.read_exact(&mut buf[n..]).await?; } } let (segment_count, first_segment_length) = parse_segment_table_first(&buf[..])?; let mut segment_lengths_builder = SegmentLengthsBuilder::with_capacity(segment_count); segment_lengths_builder.try_push_segment(first_segment_length)?; if segment_count > 1 { if segment_count < 4 { // small enough that we can reuse our existing buffer reader.read_exact(&mut buf).await?; for idx in 0..(segment_count - 1) { let segment_len = u32::from_le_bytes(buf[(idx * 4)..(idx + 1) * 4].try_into().unwrap()) as usize; segment_lengths_builder.try_push_segment(segment_len)?; } } else { let mut segment_sizes = vec![0u8; (segment_count & !1) * 4]; reader.read_exact(&mut segment_sizes[..]).await?; for idx in 0..(segment_count - 1) { let segment_len = u32::from_le_bytes(segment_sizes[(idx * 4)..(idx + 1) * 4].try_into().unwrap()) as usize; segment_lengths_builder.try_push_segment(segment_len)?; } } } // Don't accept a message which the receiver couldn't possibly traverse without hitting the // traversal limit. Without this check, a malicious client could transmit a very large segment // size to make the receiver allocate excessive space and possibly crash. if let Some(traversal_limit_in_words) = options.traversal_limit_in_words { if segment_lengths_builder.total_words() > traversal_limit_in_words { return Err(Error::failed(format!( "Message has {} words, which is too large. To increase the limit on the \ receiving end, see capnp::message::ReaderOptions.", segment_lengths_builder.total_words() ))); } } Ok(Some(segment_lengths_builder)) } /// Reads segments from `read`. 
async fn read_segments( mut read: R, mut owned_segments: OwnedSegments, options: message::ReaderOptions, ) -> Result> where R: AsyncRead + Unpin, { read.read_exact(&mut owned_segments[..]).await?; Ok(message::Reader::new(owned_segments, options)) } /// Parses the first word of the segment table. /// /// The segment table format for streams is defined in the Cap'n Proto /// [encoding spec](https://capnproto.org/encoding.html#serialization-over-a-stream) /// /// Returns the segment count and first segment length, or a state if the /// read would block. fn parse_segment_table_first(buf: &[u8]) -> Result<(usize, usize)> { let segment_count = u32::from_le_bytes(buf[0..4].try_into().unwrap()).wrapping_add(1); if segment_count >= 512 { return Err(Error::failed(format!("Too many segments: {segment_count}"))); } else if segment_count == 0 { return Err(Error::failed(format!("Too few segments: {segment_count}"))); } let first_segment_len = u32::from_le_bytes(buf[4..8].try_into().unwrap()); Ok((segment_count as usize, first_segment_len as usize)) } /// Something that contains segments ready to be written out. pub trait AsOutputSegments { fn as_output_segments(&self) -> OutputSegments; } impl<'a, M> AsOutputSegments for &'a M where M: AsOutputSegments, { fn as_output_segments(&self) -> OutputSegments { (*self).as_output_segments() } } impl AsOutputSegments for message::Builder where A: message::Allocator, { fn as_output_segments(&self) -> OutputSegments { self.get_segments_for_output() } } /*impl <'a, A> AsOutputSegments for &'a message::Builder where A: message::Allocator { fn as_output_segments<'b>(&'b self) -> OutputSegments<'b> { self.get_segments_for_output() } }*/ impl AsOutputSegments for ::std::rc::Rc> where A: message::Allocator, { fn as_output_segments(&self) -> OutputSegments { self.get_segments_for_output() } } /// Writes the provided message to `writer`. Does not call `flush()`. 
pub async fn write_message(mut writer: W, message: M) -> Result<()> where W: AsyncWrite + Unpin, M: AsOutputSegments, { let segments = message.as_output_segments(); write_segment_table(&mut writer, &segments[..]).await?; write_segments(writer, &segments[..]).await?; Ok(()) } async fn write_segment_table(mut write: W, segments: &[&[u8]]) -> ::std::io::Result<()> where W: AsyncWrite + Unpin, { let mut buf: [u8; 8] = [0; 8]; let segment_count = segments.len(); // write the first Word, which contains segment_count and the 1st segment length buf[0..4].copy_from_slice(&(segment_count as u32 - 1).to_le_bytes()); buf[4..8].copy_from_slice(&((segments[0].len() / 8) as u32).to_le_bytes()); write.write_all(&buf).await?; if segment_count > 1 { if segment_count < 4 { for idx in 1..segment_count { buf[(idx - 1) * 4..idx * 4] .copy_from_slice(&((segments[idx].len() / 8) as u32).to_le_bytes()); } if segment_count == 2 { for value in &mut buf[4..8] { *value = 0; } } write.write_all(&buf).await?; } else { let mut buf = vec![0; (segment_count & !1) * 4]; for idx in 1..segment_count { buf[(idx - 1) * 4..idx * 4] .copy_from_slice(&((segments[idx].len() / 8) as u32).to_le_bytes()); } if segment_count % 2 == 0 { for idx in (buf.len() - 4)..(buf.len()) { buf[idx] = 0 } } write.write_all(&buf).await?; } } Ok(()) } /// Writes segments to `write`. 
async fn write_segments(mut write: W, segments: &[&[u8]]) -> Result<()> where W: AsyncWrite + Unpin, { for segment in segments { write.write_all(segment).await?; } Ok(()) } #[cfg(test)] pub mod test { use std::cmp; use std::io::{self, Read, Write}; use std::pin::Pin; use std::task::{Context, Poll}; use futures::io::Cursor; use futures::{AsyncRead, AsyncWrite}; use quickcheck::{quickcheck, TestResult}; use capnp::message::ReaderSegments; use capnp::{message, OutputSegments}; use super::{read_segment_table, try_read_message, write_message, AsOutputSegments}; #[test] fn test_read_segment_table() { let mut exec = futures::executor::LocalPool::new(); let mut buf = vec![]; buf.extend( [ 0, 0, 0, 0, // 1 segments 0, 0, 0, 0, ], // 0 length ); let segment_lengths = exec .run_until(read_segment_table( Cursor::new(&buf[..]), message::ReaderOptions::new(), )) .unwrap() .unwrap(); assert_eq!(0, segment_lengths.total_words()); assert_eq!(vec![(0, 0)], segment_lengths.to_segment_indices()); buf.clear(); buf.extend( [ 0, 0, 0, 0, // 1 segments 1, 0, 0, 0, ], // 1 length ); let segment_lengths = exec .run_until(read_segment_table( &mut Cursor::new(&buf[..]), message::ReaderOptions::new(), )) .unwrap() .unwrap(); assert_eq!(1, segment_lengths.total_words()); assert_eq!(vec![(0, 1)], segment_lengths.to_segment_indices()); buf.clear(); buf.extend( [ 1, 0, 0, 0, // 2 segments 1, 0, 0, 0, // 1 length 1, 0, 0, 0, // 1 length 0, 0, 0, 0, ], // padding ); let segment_lengths = exec .run_until(read_segment_table( &mut Cursor::new(&buf[..]), message::ReaderOptions::new(), )) .unwrap() .unwrap(); assert_eq!(2, segment_lengths.total_words()); assert_eq!(vec![(0, 1), (1, 2)], segment_lengths.to_segment_indices()); buf.clear(); buf.extend( [ 2, 0, 0, 0, // 3 segments 1, 0, 0, 0, // 1 length 1, 0, 0, 0, // 1 length 0, 1, 0, 0, ], // 256 length ); let segment_lengths = exec .run_until(read_segment_table( &mut Cursor::new(&buf[..]), message::ReaderOptions::new(), )) .unwrap() .unwrap(); 
assert_eq!(258, segment_lengths.total_words()); assert_eq!( vec![(0, 1), (1, 2), (2, 258)], segment_lengths.to_segment_indices() ); buf.clear(); buf.extend( [ 3, 0, 0, 0, // 4 segments 77, 0, 0, 0, // 77 length 23, 0, 0, 0, // 23 length 1, 0, 0, 0, // 1 length 99, 0, 0, 0, // 99 length 0, 0, 0, 0, ], // padding ); let segment_lengths = exec .run_until(read_segment_table( &mut Cursor::new(&buf[..]), message::ReaderOptions::new(), )) .unwrap() .unwrap(); assert_eq!(200, segment_lengths.total_words()); assert_eq!( vec![(0, 77), (77, 100), (100, 101), (101, 200)], segment_lengths.to_segment_indices() ); buf.clear(); } #[test] fn test_read_invalid_segment_table() { let mut exec = futures::executor::LocalPool::new(); let mut buf = vec![]; buf.extend([0, 2, 0, 0]); // 513 segments buf.extend([0; 513 * 4]); assert!(exec .run_until(read_segment_table( Cursor::new(&buf[..]), message::ReaderOptions::new() )) .is_err()); buf.clear(); buf.extend([0, 0, 0, 0]); // 1 segments assert!(exec .run_until(read_segment_table( Cursor::new(&buf[..]), message::ReaderOptions::new() )) .is_err()); buf.clear(); buf.extend([0, 0, 0, 0]); // 1 segments buf.extend([0; 3]); assert!(exec .run_until(read_segment_table( Cursor::new(&buf[..]), message::ReaderOptions::new() )) .is_err()); buf.clear(); buf.extend([255, 255, 255, 255]); // 0 segments assert!(exec .run_until(read_segment_table( Cursor::new(&buf[..]), message::ReaderOptions::new() )) .is_err()); buf.clear(); } fn construct_segment_table(segments: &[&[u8]]) -> Vec { let mut exec = futures::executor::LocalPool::new(); let mut buf = vec![]; exec.run_until(super::write_segment_table(&mut buf, segments)) .unwrap(); buf } #[test] fn test_construct_segment_table() { let segment_0: [u8; 0] = []; let segment_1 = [1, 0, 0, 0, 0, 0, 0, 0]; let segment_199 = [197; 199 * 8]; let buf = construct_segment_table(&[&segment_0]); assert_eq!( &[ 0, 0, 0, 0, // 1 segments 0, 0, 0, 0 ], // 0 length &buf[..] 
); let buf = construct_segment_table(&[&segment_1]); assert_eq!( &[ 0, 0, 0, 0, // 1 segments 1, 0, 0, 0 ], // 1 length &buf[..] ); let buf = construct_segment_table(&[&segment_199]); assert_eq!( &[ 0, 0, 0, 0, // 1 segments 199, 0, 0, 0 ], // 199 length &buf[..] ); let buf = construct_segment_table(&[&segment_0, &segment_1]); assert_eq!( &[ 1, 0, 0, 0, // 2 segments 0, 0, 0, 0, // 0 length 1, 0, 0, 0, // 1 length 0, 0, 0, 0 ], // padding &buf[..] ); let buf = construct_segment_table(&[&segment_199, &segment_1, &segment_199, &segment_0]); assert_eq!( &[ 3, 0, 0, 0, // 4 segments 199, 0, 0, 0, // 199 length 1, 0, 0, 0, // 1 length 199, 0, 0, 0, // 199 length 0, 0, 0, 0, // 0 length 0, 0, 0, 0 ], // padding &buf[..] ); let buf = construct_segment_table(&[ &segment_199, &segment_1, &segment_199, &segment_0, &segment_1, ]); assert_eq!( &[ 4, 0, 0, 0, // 5 segments 199, 0, 0, 0, // 199 length 1, 0, 0, 0, // 1 length 199, 0, 0, 0, // 199 length 0, 0, 0, 0, // 0 length 1, 0, 0, 0 ], // 1 length &buf[..] ); } impl AsOutputSegments for Vec> { fn as_output_segments(&self) -> OutputSegments { if self.is_empty() { OutputSegments::SingleSegment([&[]]) } else if self.len() == 1 { OutputSegments::SingleSegment([capnp::Word::words_to_bytes(&self[0][..])]) } else { OutputSegments::MultiSegment( self.iter() .map(|segment| capnp::Word::words_to_bytes(&segment[..])) .collect::>(), ) } } } /// Wraps a `Read` instance and introduces blocking. 
pub(crate) struct BlockingRead where R: Read, { /// The wrapped reader pub read: R, /// Number of bytes to read before blocking blocking_period: usize, /// Number of bytes read since last blocking idx: usize, } impl BlockingRead where R: Read, { pub(crate) fn new(read: R, blocking_period: usize) -> Self { Self { read, blocking_period, idx: 0, } } } impl AsyncRead for BlockingRead where R: Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8], ) -> Poll> { if self.idx == 0 { self.idx = self.blocking_period; cx.waker().wake_by_ref(); Poll::Pending } else { let len = cmp::min(self.idx, buf.len()); let bytes_read = match self.read.read(&mut buf[..len]) { Err(e) => return Poll::Ready(Err(e)), Ok(n) => n, }; self.idx -= bytes_read; Poll::Ready(Ok(bytes_read)) } } } /// Wraps a `Write` instance and introduces blocking. pub(crate) struct BlockingWrite where W: Write, { /// The wrapped writer writer: W, /// Number of bytes to write before blocking blocking_period: usize, /// Number of bytes written since last blocking idx: usize, } impl BlockingWrite where W: Write, { pub(crate) fn new(writer: W, blocking_period: usize) -> Self { Self { writer, blocking_period, idx: 0, } } pub(crate) fn into_writer(self) -> W { self.writer } } impl AsyncWrite for BlockingWrite where W: Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll> { if self.idx == 0 { self.idx = self.blocking_period; cx.waker().wake_by_ref(); Poll::Pending } else { let len = cmp::min(self.idx, buf.len()); let bytes_written = match self.writer.write(&buf[..len]) { Err(e) => return Poll::Ready(Err(e)), Ok(n) => n, }; self.idx -= bytes_written; Poll::Ready(Ok(bytes_written)) } } fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(self.writer.flush()) } fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } } #[cfg_attr(miri, ignore)] // Miri takes a long time with quickcheck 
#[test] fn check_round_trip_async() { fn round_trip( read_blocking_period: usize, write_blocking_period: usize, segments: Vec>, ) -> TestResult { if segments.is_empty() || read_blocking_period == 0 || write_blocking_period == 0 { return TestResult::discard(); } let (mut read, segments) = { let cursor = std::io::Cursor::new(Vec::new()); let mut writer = BlockingWrite::new(cursor, write_blocking_period); futures::executor::block_on(Box::pin(write_message(&mut writer, &segments))) .expect("writing"); let mut cursor = writer.into_writer(); cursor.set_position(0); (BlockingRead::new(cursor, read_blocking_period), segments) }; let message = futures::executor::block_on(Box::pin(try_read_message( &mut read, Default::default(), ))) .expect("reading") .unwrap(); let message_segments = message.into_segments(); TestResult::from_bool(segments.iter().enumerate().all(|(i, segment)| { capnp::Word::words_to_bytes(&segment[..]) == message_segments.get_segment(i as u32).unwrap() })) } quickcheck(round_trip as fn(usize, usize, Vec>) -> TestResult); } } capnp-futures-0.19.0/src/serialize_packed.rs000064400000000000000000000635211046102023000171510ustar 00000000000000// Copyright (c) 2013-2015 Sandstorm Development Group, Inc. and contributors // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. 
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //! Asynchronous reading and writing of messages using the //! [packed stream encoding](https://capnproto.org/encoding.html#packing). use std::pin::Pin; use std::task::{Context, Poll}; use capnp::serialize::OwnedSegments; use capnp::{message, Result}; use futures::{AsyncRead, AsyncWrite}; use crate::serialize::AsOutputSegments; enum PackedReadStage { Start, WritingZeroes, BufferingWord, DrainingBuffer, WritingPassthrough, } /// An `AsyncRead` wrapper that unpacks packed data. pub struct PackedRead where R: AsyncRead + Unpin, { inner: R, stage: PackedReadStage, // 10 = tag byte, up to 8 word bytes, and possibly one pass-through count buf: [u8; 10], buf_pos: usize, // number of bytes that we actually want to read into the buffer buf_size: usize, num_run_bytes_remaining: usize, } impl PackedRead where R: AsyncRead + Unpin, { /// Creates a new `PackedRead` from a `AsyncRead`. For optimal performance, /// `inner` should be a buffered `AsyncRead`. pub fn new(inner: R) -> Self { Self { inner, stage: PackedReadStage::Start, buf: [0; 10], buf_pos: 0, buf_size: 10, num_run_bytes_remaining: 0, } } } impl AsyncRead for PackedRead where R: AsyncRead + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, outbuf: &mut [u8], ) -> Poll> { let Self { stage, inner, buf, buf_pos, num_run_bytes_remaining, buf_size, .. } = &mut *self; loop { match *stage { PackedReadStage::Start => { match Pin::new(&mut *inner).poll_read(cx, &mut buf[*buf_pos..2])? 
{ Poll::Pending => return Poll::Pending, Poll::Ready(n) => { if n == 0 { return Poll::Ready(Ok(0)); } *buf_pos += n; if *buf_pos >= 2 { let tag = buf[0]; let count = buf[1]; if tag == 0 { *stage = PackedReadStage::WritingZeroes; *num_run_bytes_remaining = (count as usize + 1) * 8; } else { *stage = PackedReadStage::BufferingWord; *buf_size = buf[0].count_ones() as usize + 1; if *buf_size == 9 { // add a byte for the count of pass-through words *buf_size = 10 } if *buf_pos >= *buf_size { // Skip the BufferingWord stage, because // there is nothing left to buffer. *stage = PackedReadStage::DrainingBuffer; *buf_pos = 1; } } } } } } PackedReadStage::WritingZeroes => { let num_zeroes = std::cmp::min(outbuf.len(), *num_run_bytes_remaining); for value in outbuf.iter_mut().take(num_zeroes) { *value = 0; } if num_zeroes >= *num_run_bytes_remaining { *buf_pos = 0; *stage = PackedReadStage::Start; } else { *num_run_bytes_remaining -= num_zeroes; } return Poll::Ready(Ok(num_zeroes)); } PackedReadStage::BufferingWord => { match Pin::new(&mut *inner).poll_read(cx, &mut buf[*buf_pos..*buf_size])? { Poll::Pending => return Poll::Pending, Poll::Ready(0) => { return Poll::Ready(Err(std::io::Error::from( std::io::ErrorKind::UnexpectedEof, ))) } Poll::Ready(n) => { *buf_pos += n; if *buf_pos >= *buf_size { *stage = PackedReadStage::DrainingBuffer; *buf_pos = 1; } } } } PackedReadStage::DrainingBuffer => { let mut ii = 0; let mut bitnum = *buf_pos - 1; while ii < outbuf.len() && bitnum < 8 { let is_nonzero = (buf[0] & (1u8 << bitnum)) != 0; outbuf[ii] = buf[*buf_pos] & ((-i8::from(is_nonzero)) as u8); ii += 1; *buf_pos += usize::from(is_nonzero); bitnum += 1; } if bitnum == 8 { // We finished the word. if *buf_pos == *buf_size { // There are no passthrough words. *stage = PackedReadStage::Start; } else { // We need to read some passthrough words. 
*num_run_bytes_remaining = (buf[*buf_pos] as usize) * 8; *stage = PackedReadStage::WritingPassthrough; } *buf_pos = 0; } else { // We did not finish the word. } return Poll::Ready(Ok(ii)); } PackedReadStage::WritingPassthrough => { let upper_bound = std::cmp::min(*num_run_bytes_remaining, outbuf.len()); if upper_bound == 0 { *stage = PackedReadStage::Start; } else { match Pin::new(&mut *inner).poll_read(cx, &mut outbuf[0..upper_bound])? { Poll::Pending => return Poll::Pending, Poll::Ready(n) => { if n == 0 { return Poll::Ready(Ok(0)); } if n >= *num_run_bytes_remaining { *stage = PackedReadStage::Start; } *num_run_bytes_remaining -= n; return Poll::Ready(Ok(n)); } } } } } } } } /// Asynchronously reads a packed message from `read`. Returns `None` if `read` /// has zero bytes left (i.e. is at end-of-file). To read a stream /// containing an unknown number of messages, you could call this function /// repeatedly until it returns `None`. pub async fn try_read_message( read: R, options: message::ReaderOptions, ) -> Result>> where R: AsyncRead + Unpin, { let packed_read = PackedRead::new(read); crate::serialize::try_read_message(packed_read, options).await } /// Asynchronously reads a message from `reader`. pub async fn read_message( reader: R, options: message::ReaderOptions, ) -> Result> where R: AsyncRead + Unpin, { match try_read_message(reader, options).await? { Some(s) => Ok(s), None => Err(capnp::Error::failed("Premature end of file".to_string())), } } #[derive(PartialEq, Debug)] enum PackedWriteStage { Start, WriteWord, WriteRunWordCount, WriteUncompressedRun, } /// An `AsyncWrite` wrapper that packs any data passed into it. 
// NOTE(review): generic parameter lists (e.g. `PackedWrite<W>`) appear to have
// been stripped from this extraction; `W` below is used as if declared.
pub struct PackedWrite
where
    W: AsyncWrite + Unpin,
{
    // The underlying writer that receives the packed bytes.
    inner: W,
    // Current state of the packing state machine.
    stage: PackedWriteStage,
    // Accumulates one 8-byte input word before it is packed.
    buf: [u8; 8],
    // Read/write cursor, reused across stages.
    buf_pos: usize,
    // tag and packed word
    packed_buf: [u8; 9],
    packed_buf_size: usize,
    // Bytes left in the current zero-run or uncompressed run.
    run_bytes_remaining: usize,
}

/// Future that drives a `PackedWrite`'s internal state machine to quiescence,
/// so no buffered output is lost when the `PackedWrite` is dropped.
struct FinishPendingWrites
where
    W: AsyncWrite + Unpin,
{
    inner: PackedWrite,
}

impl FinishPendingWrites
where
    W: AsyncWrite + Unpin,
{
    fn new(inner: PackedWrite) -> Self {
        Self { inner }
    }
}

impl std::future::Future for FinishPendingWrites
where
    W: AsyncWrite + Unpin,
{
    type Output = std::result::Result<(), capnp::Error>;

    // Delegates to `finish_pending_writes`, converting its `Poll` of a result
    // into this future's output.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll {
        match self.inner.finish_pending_writes(cx)? {
            Poll::Ready(()) => Poll::Ready(Ok(())),
            Poll::Pending => Poll::Pending,
        }
    }
}

/// Writes the provided message to `writer`. Does not call `writer.flush()`,
/// so that multiple successive calls can amortize work when `writer` is
/// buffered.
pub async fn write_message(writer: W, message: M) -> Result<()>
where
    W: AsyncWrite + Unpin,
    M: AsOutputSegments,
{
    let mut packed_write = PackedWrite::new(writer);
    crate::serialize::write_message(&mut packed_write, message).await?;

    // Finish any pending work, so that nothing gets lost when we drop
    // the `PackedWrite`.
    FinishPendingWrites::new(packed_write).await
}

impl PackedWrite
where
    W: AsyncWrite + Unpin,
{
    /// Creates a new `PackedWrite` from a `AsyncWrite`. For optimal performance,
    /// `inner` should be a buffered `AsyncWrite`.
    pub fn new(inner: W) -> Self {
        Self {
            inner,
            stage: PackedWriteStage::Start,
            buf: [0; 8],
            buf_pos: 0,
            packed_buf: [0; 9],
            packed_buf_size: 0,
            run_bytes_remaining: 0,
        }
    }

    /// Core packing state machine. Consumes bytes from `inbuf`, writes packed
    /// output to `self.inner`, and returns how many input bytes were consumed.
    /// When the inner writer returns `Pending` after some input was already
    /// consumed, reports the partial consumption instead of `Pending`.
    fn poll_write_aux(
        &mut self,
        cx: &mut Context<'_>,
        mut inbuf: &[u8],
    ) -> Poll> {
        let mut inbuf_bytes_consumed: usize = 0;
        let Self {
            stage,
            inner,
            buf,
            buf_pos,
            packed_buf,
            packed_buf_size,
            run_bytes_remaining,
        } = self;
        loop {
            match *stage {
                PackedWriteStage::Start => {
                    if inbuf.is_empty() {
                        return Poll::Ready(Ok(inbuf_bytes_consumed));
                    }
                    // copy inbuf into buf
                    let buf_bytes_remaining = 8 - *buf_pos;
                    let bytes_to_copy = std::cmp::min(buf_bytes_remaining, inbuf.len());
                    buf[*buf_pos..(*buf_pos + bytes_to_copy)]
                        .copy_from_slice(&inbuf[..bytes_to_copy]);
                    inbuf = &inbuf[bytes_to_copy..];
                    inbuf_bytes_consumed += bytes_to_copy;
                    *buf_pos += bytes_to_copy;
                    if *buf_pos == 8 {
                        // compute tag
                        // One bit per nonzero byte; nonzero bytes are copied
                        // after the tag byte in `packed_buf`.
                        packed_buf[0] = 0;
                        let mut packed_buf_idx: usize = 1;
                        for (ii, b) in buf.iter().enumerate() {
                            if *b != 0 {
                                packed_buf[0] |= 1 << ii;
                                packed_buf[packed_buf_idx] = *b;
                                packed_buf_idx += 1;
                            }
                        }
                        *buf_pos = 0;
                        *packed_buf_size = packed_buf_idx;
                        *stage = PackedWriteStage::WriteWord;
                    }
                }
                PackedWriteStage::WriteWord => {
                    match Pin::new(&mut *inner)
                        .poll_write(cx, &packed_buf[*buf_pos..*packed_buf_size])?
                    {
                        Poll::Pending => {
                            // Report partial progress rather than Pending if we
                            // already consumed input bytes this call.
                            if inbuf_bytes_consumed == 0 {
                                return Poll::Pending;
                            } else {
                                return Poll::Ready(Ok(inbuf_bytes_consumed));
                            }
                        }
                        Poll::Ready(n) => {
                            *buf_pos += n;
                        }
                    }
                    if *buf_pos == *packed_buf_size {
                        if packed_buf[0] == 0 {
                            // see how long of a run we can make
                            // (tag 0x00: run of all-zero words)
                            let mut words_in_run = inbuf.len() / 8;
                            for (idx, inb) in inbuf.iter().enumerate() {
                                if *inb != 0 {
                                    words_in_run = idx / 8;
                                    break;
                                }
                            }
                            *run_bytes_remaining = words_in_run * 8;
                            *stage = PackedWriteStage::WriteRunWordCount;
                        } else if packed_buf[0] == 255 {
                            // See how long of a run we can make.
                            // We look for at least two zeros because that's the point
                            // where our compression scheme becomes a net win.
                            let mut words_in_run = inbuf.len() / 8;
                            let mut zero_bytes_in_word = 0;
                            for (idx, inb) in inbuf.iter().enumerate() {
                                if idx % 8 == 0 {
                                    zero_bytes_in_word = 0;
                                }
                                if *inb == 0 {
                                    zero_bytes_in_word += 1;
                                    if zero_bytes_in_word > 1 {
                                        words_in_run = idx / 8;
                                        break;
                                    }
                                }
                            }
                            *run_bytes_remaining = words_in_run * 8;
                            *stage = PackedWriteStage::WriteRunWordCount;
                        } else {
                            *buf_pos = 0;
                            *stage = PackedWriteStage::Start;
                        }
                    }
                }
                PackedWriteStage::WriteRunWordCount => {
                    // Emit the single count byte that follows a 0x00 or 0xff tag.
                    match Pin::new(&mut *inner)
                        .poll_write(cx, &[(*run_bytes_remaining / 8) as u8])?
                    {
                        Poll::Pending => {
                            if inbuf_bytes_consumed == 0 {
                                return Poll::Pending;
                            } else {
                                return Poll::Ready(Ok(inbuf_bytes_consumed));
                            }
                        }
                        Poll::Ready(1) => {
                            if packed_buf[0] == 0 {
                                // we're done here
                                // (the zero words are implied; skip them in inbuf)
                                inbuf = &inbuf[(*run_bytes_remaining)..];
                                inbuf_bytes_consumed += *run_bytes_remaining;
                                *buf_pos = 0;
                                *stage = PackedWriteStage::Start;
                            } else {
                                // need to forward the uncompressed words
                                *stage = PackedWriteStage::WriteUncompressedRun;
                            }
                        }
                        Poll::Ready(0) => {
                            // just loop around and try again
                        }
                        Poll::Ready(_) => panic!("should not be possible"),
                    }
                }
                PackedWriteStage::WriteUncompressedRun => {
                    match Pin::new(&mut *inner).poll_write(cx, &inbuf[..*run_bytes_remaining])? {
                        Poll::Pending => {
                            if inbuf_bytes_consumed == 0 {
                                return Poll::Pending;
                            } else {
                                return Poll::Ready(Ok(inbuf_bytes_consumed));
                            }
                        }
                        Poll::Ready(n) => {
                            inbuf_bytes_consumed += n;
                            inbuf = &inbuf[n..];
                            if n < *run_bytes_remaining {
                                *run_bytes_remaining -= n;
                            } else {
                                *buf_pos = 0;
                                *stage = PackedWriteStage::Start;
                            }
                        }
                    }
                }
            }
        }
    }

    /// Finish any work that we can do without any new bytes.
    fn finish_pending_writes(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll> {
        // Only WriteWord and WriteRunWordCount can make progress with an empty
        // input; drive them with a zero-length buffer until quiescent.
        while self.stage == PackedWriteStage::WriteWord
            || self.stage == PackedWriteStage::WriteRunWordCount
        {
            match self.poll_write_aux(cx, &[])? {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(_) => (),
            }
        }
        Poll::Ready(Ok(()))
    }
}

impl AsyncWrite for PackedWrite
where
    W: AsyncWrite + Unpin,
{
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        inbuf: &[u8],
    ) -> Poll> {
        (*self).poll_write_aux(cx, inbuf)
    }

    // Drains the packing state machine before flushing the inner writer, so
    // no partially-written packed word is left behind.
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll> {
        match (*self).finish_pending_writes(cx)? {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(_) => (),
        }
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_close(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll> {
        Pin::new(&mut self.inner).poll_close(cx)
    }
}

#[cfg(test)]
pub mod test {
    use crate::serialize::test::{BlockingRead, BlockingWrite};
    use crate::serialize_packed::{try_read_message, PackedRead, PackedWrite};
    use capnp::message::ReaderSegments;
    use futures::{AsyncReadExt, AsyncWriteExt};
    use quickcheck::{quickcheck, TestResult};

    /// Asserts that `packed` decodes to exactly `unpacked`, with the reader
    /// returning Pending every `blocking_period` reads.
    // NOTE(review): `Vec` below was presumably `Vec<u8>` before extraction
    // stripped the generic arguments.
    pub fn check_unpacks_to(blocking_period: usize, packed: &[u8], unpacked: &[u8]) {
        let mut packed_read = PackedRead::new(crate::serialize::test::BlockingRead::new(
            packed,
            blocking_period,
        ));

        let mut bytes: Vec = vec![0; unpacked.len()];
        futures::executor::block_on(Box::pin(packed_read.read_exact(&mut bytes))).expect("reading");

        assert!(packed_read.inner.read.is_empty()); // nothing left to read
        assert_eq!(bytes, unpacked);
    }

    /// Round-trips `unpacked` through the packer and unpacker with the given
    /// artificial blocking periods, asserting the exact packed bytes.
    pub fn check_packing_with_periods(
        read_blocking_period: usize,
        write_blocking_period: usize,
        unpacked: &[u8],
        packed: &[u8],
    ) {
        // --------
        // write
        let mut bytes: Vec = vec![0; packed.len()];
        {
            let mut packed_write = PackedWrite::new(crate::serialize::test::BlockingWrite::new(
                &mut bytes[..],
                write_blocking_period,
            ));
            futures::executor::block_on(Box::pin(packed_write.write_all(unpacked)))
                .expect("writing");
            futures::executor::block_on(Box::pin(packed_write.flush())).expect("flushing");
        }

        assert_eq!(bytes, packed);

        // --------
        // read
        check_unpacks_to(read_blocking_period, packed, unpacked);
    }

    /// Exercises `check_packing_with_periods` over a grid of blocking periods.
    pub fn check_packing(unpacked: &[u8], packed: &[u8]) {
        for ii in 1..10 {
            for jj in 1..10 {
                check_packing_with_periods(ii, jj, unpacked, packed);
            }
        }
    }

    // Known input/output pairs for the packed encoding.
    #[test]
    pub fn simple_packing() {
        check_packing(&[], &[]);
        check_packing(&[0; 8], &[0, 0]);
        check_packing(&[0, 0, 12, 0, 0, 34, 0, 0], &[0x24, 12, 34]);
        check_packing(
            &[1, 3, 2, 4, 5, 7, 6, 8],
            &[0xff, 1, 3, 2, 4, 5, 7, 6, 8, 0],
        );
        check_packing(
            &[0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 2, 4, 5, 7, 6, 8],
            &[0, 0, 0xff, 1, 3, 2, 4, 5, 7, 6, 8, 0],
        );
        check_packing(
            &[0, 0, 12, 0, 0, 34, 0, 0, 1, 3, 2, 4, 5, 7, 6, 8],
            &[0x24, 12, 34, 0xff, 1, 3, 2, 4, 5, 7, 6, 8, 0],
        );
        check_packing(
            &[1, 3, 2, 4, 5, 7, 6, 8, 8, 6, 7, 4, 5, 2, 3, 1],
            &[0xff, 1, 3, 2, 4, 5, 7, 6, 8, 1, 8, 6, 7, 4, 5, 2, 3, 1],
        );
        check_packing(
            &[
                1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3,
                4, 5, 6, 7, 8, 0, 2, 4, 0, 9, 0, 5, 1,
            ],
            &[
                0xff, 1, 2, 3, 4, 5, 6, 7, 8, 3, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
                1, 2, 3, 4, 5, 6, 7, 8, 0xd6, 2, 4, 9, 5, 1,
            ],
        );
        check_packing(
            &[
                1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 6, 2, 4, 3, 9, 0, 5, 1, 1, 2, 3,
                4, 5, 6, 7, 8, 0, 2, 4, 0, 9, 0, 5, 1,
            ],
            &[
                0xff, 1, 2, 3, 4, 5, 6, 7, 8, 3, 1, 2, 3, 4, 5, 6, 7, 8, 6, 2, 4, 3, 9, 0, 5, 1,
                1, 2, 3, 4, 5, 6, 7, 8, 0xd6, 2, 4, 9, 5, 1,
            ],
        );
        check_packing(
            &[
                8, 0, 100, 6, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 1,
            ],
            &[0xed, 8, 100, 6, 1, 1, 2, 0, 2, 0xd4, 1, 2, 3, 1],
        );
        check_packing(&[0; 16], &[0, 1]);
        check_packing(
            &[
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            ],
            &[0, 2],
        );
    }

    /// quickcheck property: a message written packed and read back yields the
    /// same segment bytes.
    // NOTE(review): `Vec>` was presumably `Vec<Vec<capnp::Word>>` before the
    // generic arguments were stripped in extraction.
    fn round_trip(
        read_blocking_period: usize,
        write_blocking_period: usize,
        segments: Vec>,
    ) -> TestResult {
        if segments.is_empty() || read_blocking_period == 0 || write_blocking_period == 0 {
            return TestResult::discard();
        }
        let (mut read, segments) = {
            let cursor = std::io::Cursor::new(Vec::new());
            let mut writer = BlockingWrite::new(cursor, write_blocking_period);
            futures::executor::block_on(Box::pin(crate::serialize_packed::write_message(
                &mut writer,
                &segments,
            )))
            .expect("writing");
            futures::executor::block_on(Box::pin(writer.flush())).expect("writing");

            let mut cursor = writer.into_writer();
            cursor.set_position(0);
            (BlockingRead::new(cursor, read_blocking_period), segments)
        };

        let message = futures::executor::block_on(Box::pin(
            crate::serialize_packed::try_read_message(&mut read, Default::default()),
        ))
        .expect("reading")
        .unwrap();
        let message_segments = message.into_segments();

        TestResult::from_bool(segments.iter().enumerate().all(|(i, segment)| {
            capnp::Word::words_to_bytes(&segment[..])
                == message_segments.get_segment(i as u32).unwrap()
        }))
    }

    // Regression test for a specific async round-trip failure case.
    #[test]
    fn check_packed_round_trip_async_bug() {
        assert!(!round_trip(
            1,
            1,
            vec![vec![
                capnp::word(8, 14, 90, 7, 21, 13, 59, 17),
                capnp::word(0, 31, 21, 73, 0, 54, 61, 12)
            ]]
        )
        .is_failure());
    }

    #[cfg_attr(miri, ignore)]
    #[test]
    fn check_packed_round_trip_async() {
        quickcheck(round_trip as fn(usize, usize, Vec>) -> TestResult);
    }

    #[test]
    fn read_empty() {
        let words = [];
        // Before https://github.com/capnproto/capnproto-rust/pull/446
        // this would loop forever.
        let message =
            futures::executor::block_on(Box::pin(try_read_message(&words[..], Default::default())))
                .expect("reading");
        assert!(message.is_none());
    }

    // A truncated stream must surface PrematureEndOfFile, not hang or succeed.
    #[test]
    fn eof_mid_message() {
        let words = [0xfe, 0x3, 0x3];
        let result =
            futures::executor::block_on(Box::pin(try_read_message(&words[..], Default::default())));

        match result {
            Ok(_) => panic!("expected error"),
            Err(e) => assert_eq!(e.kind, capnp::ErrorKind::PrematureEndOfFile),
        }
    }
}
// NOTE(review): the line below is tar-archive member-header residue from the
// crate extraction (start of src/write_queue.rs), not Rust source; left as-is.
capnp-futures-0.19.0/src/write_queue.rs000064400000000000000000000106601046102023000162050ustar 00000000000000// Copyright (c) 2016 Sandstorm Development Group, Inc.
// and contributors
// (NOTE(review): "and contributors" continues the copyright line above; the
// comment marker was lost when the tar header fused with the license text.)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

use futures::channel::oneshot;
use futures::future::Future;
use futures::{AsyncWrite, AsyncWriteExt, StreamExt, TryFutureExt};

use capnp::Error;

use crate::serialize::AsOutputSegments;

/// An entry in the write queue: either a message to write (handed back to the
/// sender's oneshot when done) or a request to shut the queue down.
// NOTE(review): generic arguments appear stripped by extraction here
// (`oneshot::Sender` was presumably `oneshot::Sender<M>`); verify upstream.
enum Item
where
    M: AsOutputSegments,
{
    Message(M, oneshot::Sender),
    Done(Result<(), Error>, oneshot::Sender<()>),
}

/// A handle that allows messages to be sent to a write queue.
pub struct Sender
where
    M: AsOutputSegments,
{
    sender: futures::channel::mpsc::UnboundedSender>,
    // Count of messages accepted but not yet written; read by `len()`.
    // NOTE(review): `Arc` was presumably `Arc<AtomicI32>`, given the
    // `AtomicI32::new(0)` in `write_queue()`.
    in_flight: std::sync::Arc,
}

// Manual Clone so that `M` itself does not need to implement `Clone`.
impl Clone for Sender
where
    M: AsOutputSegments,
{
    fn clone(&self) -> Self {
        Self {
            sender: self.sender.clone(),
            in_flight: self.in_flight.clone(),
        }
    }
}

/// Creates a new write queue that wraps the given `AsyncWrite`.
pub fn write_queue(mut writer: W) -> (Sender, impl Future>) where W: AsyncWrite + Unpin, M: AsOutputSegments, { let (tx, mut rx) = futures::channel::mpsc::unbounded(); let in_flight = std::sync::Arc::new(std::sync::atomic::AtomicI32::new(0)); let sender = Sender { sender: tx, in_flight: in_flight.clone(), }; let queue = async move { while let Some(item) = rx.next().await { match item { Item::Message(m, returner) => { let result = crate::serialize::write_message(&mut writer, &m).await; in_flight.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); result?; writer.flush().await?; let _ = returner.send(m); } Item::Done(r, finisher) => { let _ = finisher.send(()); return r; } } } Ok(()) }; (sender, queue) } impl Sender where M: AsOutputSegments, { /// Enqueues a message to be written. The returned future resolves once the write /// has completed. pub fn send(&mut self, message: M) -> impl Future> + Unpin { let (complete, oneshot) = oneshot::channel(); let _ = self.sender.unbounded_send(Item::Message(message, complete)); oneshot.map_err(|oneshot::Canceled| Error::disconnected("WriteQueue has terminated".into())) } /// Returns the number of messages queued to be written. pub fn len(&self) -> usize { let result = self.in_flight.load(std::sync::atomic::Ordering::SeqCst); assert!(result >= 0); result as usize } pub fn is_empty(&self) -> bool { self.len() == 0 } /// Commands the queue to stop writing messages once it is empty. After this method has been called, /// any new calls to `send()` will return a future that immediately resolves to an error. /// If the passed-in `result` is an error, then the `WriteQueue` will resolve to that error. pub fn terminate( &mut self, result: Result<(), Error>, ) -> impl Future> + Unpin { let (complete, receiver) = oneshot::channel(); let _ = self.sender.unbounded_send(Item::Done(result, complete)); receiver .map_err(|oneshot::Canceled| Error::disconnected("WriteQueue has terminated".into())) } }