h3-0.0.6/.cargo_vcs_info.json0000644000000001400000000000100113440ustar { "git": { "sha1": "4525caec836501181f6eb307cae38469ab6a7159" }, "path_in_vcs": "h3" }h3-0.0.6/Cargo.toml0000644000000045520000000000100073550ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.63" name = "h3" version = "0.0.6" authors = [ "Sean McArthur ", "Jean-Christophe BEGUE ", ] description = "An async HTTP/3 implementation." documentation = "https://docs.rs/h3" readme = "README.md" keywords = [ "http3", "quic", "h3", ] categories = [ "network-programming", "web-programming::http-client", "web-programming::http-server", ] license = "MIT" repository = "https://github.com/hyperium/h3" [dependencies.bytes] version = "1" [dependencies.fastrand] version = "2.0.1" [dependencies.futures-util] version = "0.3" features = ["io"] default-features = false [dependencies.http] version = "1" [dependencies.pin-project-lite] version = "0.2" default_features = false [dependencies.tokio] version = "1" features = ["sync"] [dependencies.tracing] version = "0.1.40" optional = true [dev-dependencies.assert_matches] version = "1.5.0" [dev-dependencies.futures] version = "0.3.28" [dev-dependencies.futures-util] version = "0.3" features = ["io"] default-features = false [dev-dependencies.proptest] version = "1" [dev-dependencies.quinn] version = "0.11" features = [ "runtime-tokio", "rustls", "ring", ] default-features = false [dev-dependencies.quinn-proto] version = "0.11" default-features = false [dev-dependencies.rcgen] version = "0.13" [dev-dependencies.rustls] version = "0.23" features = [ "logging", "ring", "std", ] default-features = false [dev-dependencies.tokio] version = "1" features = [ "rt", "macros", "io-util", "io-std", ] [dev-dependencies.tokio-util] version = "0.7.9" [dev-dependencies.tracing-subscriber] version = "0.3" features = [ "fmt", "ansi", "env-filter", "time", "tracing-log", ] default-features = false [features] i-implement-a-third-party-backend-and-opt-into-breaking-changes = [] tracing = ["dep:tracing"] h3-0.0.6/Cargo.toml.orig000064400000000000000000000031461046102023000130340ustar 00000000000000[package] name = "h3" version = "0.0.6" rust-version = "1.63" authors = [ "Sean McArthur ", "Jean-Christophe BEGUE ", ] license = "MIT" edition = "2021" documentation = "https://docs.rs/h3" repository = "https://github.com/hyperium/h3" readme = "../README.md" description = "An async HTTP/3 implementation." 
keywords = ["http3", "quic", "h3"] categories = [ "network-programming", "web-programming::http-client", "web-programming::http-server", ] [features] i-implement-a-third-party-backend-and-opt-into-breaking-changes = [] tracing = ["dep:tracing"] [dependencies] bytes = "1" futures-util = { version = "0.3", default-features = false, features = ["io"] } http = "1" tokio = { version = "1", features = ["sync"] } pin-project-lite = { version = "0.2", default_features = false } tracing = {version = "0.1.40", optional = true} fastrand = "2.0.1" [dev-dependencies] assert_matches = "1.5.0" futures-util = { version = "0.3", default-features = false, features = ["io"] } proptest = "1" quinn = { version = "0.11", default-features = false, features = [ "runtime-tokio", "rustls", "ring", ] } quinn-proto = { version = "0.11", default-features = false } rcgen = "0.13" rustls = { version = "0.23", default-features = false, features = ["logging", "ring", "std"] } tokio = { version = "1", features = ["rt", "macros", "io-util", "io-std"] } tracing-subscriber = { version = "0.3", default-features = false, features = [ "fmt", "ansi", "env-filter", "time", "tracing-log", ] } futures = { version = "0.3.28" } tokio-util = { version = "0.7.9" } h3-0.0.6/LICENSE000064400000000000000000000020361046102023000111470ustar 00000000000000Copyright (c) 2020 h3 authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. h3-0.0.6/README.md000064400000000000000000000121271046102023000114230ustar 00000000000000# h3 An async HTTP/3 implementation. [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) [![CI](https://github.com/hyperium/h3/workflows/CI/badge.svg)](https://github.com/hyperium/h3/actions?query=workflow%3ACI) [![Discord chat](https://img.shields.io/discord/500028886025895936.svg?logo=discord)](https://discord.gg/q5mVhMD) This crate provides an [HTTP/3][spec] implementation that is generic over a provided QUIC transport. This allows the project to focus on just HTTP/3, while letting users pick their QUIC implementation based on their specific needs. It includes client and server APIs. Check the original [design][] for more details. [spec]: https://www.rfc-editor.org/rfc/rfc9114 [design]: docs/PROPOSAL.md ## Status The `h3` crate is still very experimental. While the client and servers do work, there may still be bugs. And the API could change as we continue to explore. That said, we eagerly welcome contributions, trying it out in test environments, and using at your own risk. 
The eventual goal is to use `h3` as an internal dependency of [hyper][]. [hyper]: https://hyper.rs ### Duvet This create uses the [duvet crate][] to check compliance of the [spec][]. The generated [report][] displays the current status of the requirements of the spec. Get more information about this tool in the [contributing][] document. [duvet crate]: https://crates.io/crates/duvet [spec]: https://www.rfc-editor.org/rfc/rfc9114 [report]: https://hyper.rs/h3/ci/compliance/report.html#/ [contributing]: CONTRIBUTING.md ## Features * HTTP/3 client and server implementation * Async only API * QUIC transport abstraction via traits in the [`quic`](./h3/src/quic.rs) module * Runtime independent (h3 does not spawn tasks and works with any runtime) * Supported QUIC implementations to date are [Quinn](https://github.com/quinn-rs/quinn) ([h3-quinn](./h3-quinn/)) and [s2n-quic](https://github.com/aws/s2n-quic) ([s2n-quic-h3](https://github.com/aws/s2n-quic/tree/main/quic/s2n-quic-h3)) ## Overview * **h3** HTTP/3 implementation * **h3-quinn** QUIC transport implementation based on [Quinn](https://github.com/quinn-rs/quinn/) ## Getting Started The [examples](./examples) directory can help get started in two ways: - There are ready-to-use `client` and `server` binaries to interact with _other_ HTTP/3 peers. Check the README in that directory. - The source code of those examples can help teach how to use `h3` as either a client or a server. ### Server ```rust let (endpoint, mut incoming) = h3_quinn::quinn::Endpoint::server(server_config, "[::]:443".parse()?)?; while let Some((req, stream)) = h3_conn.accept().await? { loop { match h3_conn.accept().await { Ok(Some((req, mut stream))) => { let resp = http::Response::builder().status(Status::OK).body(())?; stream.send_response(resp).await?; stream.send_data(Bytes::new("It works!")).await?; stream.finish().await?; } Ok(None) => { break; } Err(err) => { match err.get_error_level() { ErrorLevel::ConnectionError => break, ErrorLevel::StreamError => continue, } } } } } endpoint.wait_idle(); ``` You can find a full server example in [`examples/server.rs`](./examples/server.rs) ### Client ``` rust let addr: SocketAddr = "[::1]:443".parse()?; let quic = h3_quinn::Connection::new(client_endpoint.connect(addr, "server")?.await?); let (mut driver, mut send_request) = h3::client::new(quinn_conn).await?; let drive = async move { future::poll_fn(|cx| driver.poll_close(cx)).await?; Ok::<(), Box>(()) }; let request = async move { let req = http::Request::builder().uri(dest).body(())?; let mut stream = send_request.send_request(req).await?; stream.finish().await?; let resp = stream.recv_response().await?; while let Some(mut chunk) = stream.recv_data().await? { let mut out = tokio::io::stdout(); out.write_all_buf(&mut chunk).await?; out.flush().await?; } Ok::<_, Box>(()) }; let (req_res, drive_res) = tokio::join!(request, drive); req_res?; drive_res?; client_endpoint.wait_idle().await; ``` You can find a full client example in [`examples/client.rs`](./examples/client.rs) ## QUIC Generic As mentioned, the goal of this library is to be generic over a QUIC implementation. To that effect, integrations with QUIC libraries exist: - [`h3-quinn`](./h3-quinn/): in this same repository. 
- [`s2n-quic-h3`](https://github.com/aws/s2n-quic/tree/main/quic/s2n-quic-h3) ## Interoperability This crate as well as the quic implementations are tested ([quinn](https://github.com/quinn-rs/quinn-interop), [s2n-quic](https://github.com/aws/s2n-quic/tree/main/scripts/interop)) for interoperability and performance in the [quic-interop-runner](https://github.com/marten-seemann/quic-interop-runner). You can see the results at (https://interop.seemann.io/). ## License h3 is provided under the MIT license. See [LICENSE](LICENSE). h3-0.0.6/src/buf.rs000064400000000000000000000103141046102023000120510ustar 00000000000000use std::collections::VecDeque; use std::io::IoSlice; use bytes::{Buf, Bytes}; #[derive(Debug)] pub(crate) struct BufList { bufs: VecDeque, } impl BufList { pub(crate) fn new() -> BufList { BufList { bufs: VecDeque::new(), } } #[inline] #[allow(dead_code)] pub(crate) fn push(&mut self, buf: T) { debug_assert!(buf.has_remaining()); self.bufs.push_back(buf); } pub fn cursor(&self) -> Cursor { Cursor { buf: self, pos_total: 0, index: 0, pos_front: 0, } } } impl BufList { pub fn take_first_chunk(&mut self) -> Option { self.bufs.pop_front() } pub fn take_chunk(&mut self, max_len: usize) -> Option { let chunk = self .bufs .front_mut() .map(|chunk| chunk.split_to(usize::min(max_len, chunk.remaining()))); if let Some(front) = self.bufs.front() { if front.remaining() == 0 { let _ = self.bufs.pop_front(); } } chunk } pub fn push_bytes(&mut self, buf: &mut T) where T: Buf, { debug_assert!(buf.has_remaining()); self.bufs.push_back(buf.copy_to_bytes(buf.remaining())) } } #[cfg(test)] impl From for BufList { fn from(b: T) -> Self { let mut buf = Self::new(); buf.push(b); buf } } impl Buf for BufList { #[inline] fn remaining(&self) -> usize { self.bufs.iter().map(|buf| buf.remaining()).sum() } #[inline] fn chunk(&self) -> &[u8] { self.bufs.front().map(Buf::chunk).unwrap_or_default() } #[inline] fn advance(&mut self, mut cnt: usize) { while cnt > 0 { { let front = &mut self.bufs[0]; let rem = front.remaining(); if rem > cnt { front.advance(cnt); return; } else { front.advance(rem); cnt -= rem; } } self.bufs.pop_front(); } } #[inline] fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { if dst.is_empty() { return 0; } let mut vecs = 0; for buf in &self.bufs { vecs += buf.chunks_vectored(&mut dst[vecs..]); if vecs == dst.len() { break; } } vecs } } pub struct Cursor<'a, B> { buf: &'a BufList, pos_total: usize, // position amongst all bytes pos_front: usize, // position in the current front buffer index: usize, // current front buffer index } impl<'a, B: Buf> Cursor<'a, B> { pub fn position(&self) -> usize { self.pos_total } } impl<'a, B: Buf> Buf for Cursor<'a, B> { #[inline] fn remaining(&self) -> usize { self.buf.remaining() - self.pos_total } #[inline] fn chunk(&self) -> &[u8] { &self.buf.bufs[self.index].chunk()[self.pos_front..] 
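        // `index` points at the chunk the cursor is currently reading and
        // `pos_front` is the offset already consumed within it, so the slice
        // above is exactly the unread remainder of the current chunk.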
} #[inline] fn advance(&mut self, mut cnt: usize) { assert!(cnt <= self.buf.remaining() - self.pos_total); while cnt > 0 { { let front = &self.buf.bufs[self.index]; let rem = front.remaining() - self.pos_front; if rem > cnt { self.pos_total += cnt; self.pos_front += cnt; return; } else { self.pos_total += rem; self.pos_front = 0; cnt -= rem; } } self.index += 1; } } #[inline] fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { self.buf.chunks_vectored(dst) } } #[cfg(test)] mod tests { use super::*; use bytes::Bytes; #[test] fn cursor_advance() { let buf = BufList::from(Bytes::from_static(&[1u8, 2, 3, 4])); let mut cur = buf.cursor(); cur.advance(2); assert_eq!(cur.remaining(), 2); cur.advance(2); assert_eq!(cur.remaining(), 0); } } h3-0.0.6/src/client/builder.rs000064400000000000000000000073311046102023000142060ustar 00000000000000//! HTTP/3 client builder use std::{ marker::PhantomData, sync::{atomic::AtomicUsize, Arc}, task::Poll, }; use bytes::{Buf, Bytes}; use futures_util::future; use crate::{ config::Config, connection::{ConnectionInner, SharedStateRef}, error::Error, quic::{self}, }; use super::connection::{Connection, SendRequest}; /// Start building a new HTTP/3 client pub fn builder() -> Builder { Builder::new() } /// Create a new HTTP/3 client with default settings pub async fn new(conn: C) -> Result<(Connection, SendRequest), Error> where C: quic::Connection, O: quic::OpenStreams, { //= https://www.rfc-editor.org/rfc/rfc9114#section-3.3 //= type=implication //# Clients SHOULD NOT open more than one HTTP/3 connection to a given IP //# address and UDP port, where the IP address and port might be derived //# from a URI, a selected alternative service ([ALTSVC]), a configured //# proxy, or name resolution of any of these. Builder::new().build(conn).await } /// HTTP/3 client builder /// /// Set the configuration for a new client. /// /// # Examples /// ```rust /// # use h3::quic; /// # async fn doc(quic: C) /// # where /// # C: quic::Connection, /// # O: quic::OpenStreams, /// # B: bytes::Buf, /// # { /// let h3_conn = h3::client::builder() /// .max_field_section_size(8192) /// .build(quic) /// .await /// .expect("Failed to build connection"); /// # } /// ``` pub struct Builder { config: Config, } impl Builder { pub(super) fn new() -> Self { Builder { config: Default::default(), } } #[cfg(test)] pub fn send_settings(&mut self, value: bool) -> &mut Self { self.config.send_settings = value; self } /// Set the maximum header size this client is willing to accept /// /// See [header size constraints] section of the specification for details. /// /// [header size constraints]: https://www.rfc-editor.org/rfc/rfc9114.html#name-header-size-constraints pub fn max_field_section_size(&mut self, value: u64) -> &mut Self { self.config.settings.max_field_section_size = value; self } /// Just like in HTTP/2, HTTP/3 also uses the concept of "grease" /// to prevent potential interoperability issues in the future. /// In HTTP/3, the concept of grease is used to ensure that the protocol can evolve /// and accommodate future changes without breaking existing implementations. 
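    /// Grease is enabled by default. A minimal sketch of opting out, for
    /// example while debugging against a peer that mishandles unknown
    /// settings or frames:
    ///
    /// ```rust
    /// let mut builder = h3::client::builder();
    /// builder.send_grease(false);
    /// ```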
pub fn send_grease(&mut self, enabled: bool) -> &mut Self { self.config.send_grease = enabled; self } /// Create a new HTTP/3 client from a `quic` connection pub async fn build( &mut self, quic: C, ) -> Result<(Connection, SendRequest), Error> where C: quic::Connection, O: quic::OpenStreams, B: Buf, { let open = quic.opener(); let conn_state = SharedStateRef::default(); let conn_waker = Some(future::poll_fn(|cx| Poll::Ready(cx.waker().clone())).await); Ok(( Connection { inner: ConnectionInner::new(quic, conn_state.clone(), self.config).await?, sent_closing: None, recv_closing: None, }, SendRequest { open, conn_state, conn_waker, max_field_section_size: self.config.settings.max_field_section_size, sender_count: Arc::new(AtomicUsize::new(1)), send_grease_frame: self.config.send_grease, _buf: PhantomData, }, )) } } h3-0.0.6/src/client/connection.rs000064400000000000000000000412021046102023000147120ustar 00000000000000//! Client implementation of the HTTP/3 protocol use std::{ marker::PhantomData, sync::{atomic::AtomicUsize, Arc}, task::{Context, Poll, Waker}, }; use bytes::{Buf, BytesMut}; use futures_util::future; use http::request; #[cfg(feature = "tracing")] use tracing::{info, instrument, trace}; use crate::{ connection::{self, ConnectionInner, ConnectionState, SharedStateRef}, error::{Code, Error, ErrorLevel}, frame::FrameStream, proto::{frame::Frame, headers::Header, push::PushId}, qpack, quic::{self, StreamId}, stream::{self, BufRecvStream}, }; use super::stream::RequestStream; /// HTTP/3 request sender /// /// [`send_request()`] initiates a new request and will resolve when it is ready to be sent /// to the server. Then a [`RequestStream`] will be returned to send a request body (for /// POST, PUT methods) and receive a response. After the whole body is sent, it is necessary /// to call [`RequestStream::finish()`] to let the server know the request transfer is complete. /// This includes the cases where no body is sent at all. /// /// This struct is cloneable so multiple requests can be sent concurrently. /// /// Existing instances are atomically counted internally, so whenever all of them have been /// dropped, the connection will be automatically closed with HTTP/3 connection error code /// `HTTP_NO_ERROR = 0`. /// /// # Examples /// /// ## Sending a request with no body /// /// ```rust /// # use h3::{quic, client::*}; /// # use http::{Request, Response}; /// # use bytes::Buf; /// # async fn doc(mut send_request: SendRequest) -> Result<(), Box> /// # where /// # T: quic::OpenStreams, /// # B: Buf, /// # { /// // Prepare the HTTP request to send to the server /// let request = Request::get("https://www.example.com/").body(())?; /// /// // Send the request to the server /// let mut req_stream: RequestStream<_, _> = send_request.send_request(request).await?; /// // Don't forget to end up the request by finishing the send stream. /// req_stream.finish().await?; /// // Receive the response /// let response: Response<()> = req_stream.recv_response().await?; /// // Process the response... 
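/// // e.g. inspect `response.status()` before deciding how to proceed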
/// # Ok(()) /// # } /// # pub fn main() {} /// ``` /// /// ## Sending a request with a body and trailers /// /// ```rust /// # use h3::{quic, client::*}; /// # use http::{Request, Response, HeaderMap}; /// # use bytes::{Buf, Bytes}; /// # async fn doc(mut send_request: SendRequest) -> Result<(), Box> /// # where /// # T: quic::OpenStreams, /// # { /// // Prepare the HTTP request to send to the server /// let request = Request::get("https://www.example.com/").body(())?; /// /// // Send the request to the server /// let mut req_stream = send_request.send_request(request).await?; /// // Send some data /// req_stream.send_data("body".into()).await?; /// // Prepare the trailers /// let mut trailers = HeaderMap::new(); /// trailers.insert("trailer", "value".parse()?); /// // Send them and finish the send stream /// req_stream.send_trailers(trailers).await?; /// // We don't need to finish the send stream, as `send_trailers()` did it for us /// /// // Receive the response. /// let response = req_stream.recv_response().await?; /// // Process the response... /// # Ok(()) /// # } /// # pub fn main() {} /// ``` /// /// [`send_request()`]: struct.SendRequest.html#method.send_request /// [`RequestStream`]: struct.RequestStream.html /// [`RequestStream::finish()`]: struct.RequestStream.html#method.finish pub struct SendRequest where T: quic::OpenStreams, B: Buf, { pub(super) open: T, pub(super) conn_state: SharedStateRef, pub(super) max_field_section_size: u64, // maximum size for a header we receive // counts instances of SendRequest to close the connection when the last is dropped. pub(super) sender_count: Arc, pub(super) conn_waker: Option, pub(super) _buf: PhantomData, pub(super) send_grease_frame: bool, } impl SendRequest where T: quic::OpenStreams, B: Buf, { /// Send an HTTP/3 request to the server #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_request( &mut self, req: http::Request<()>, ) -> Result, Error> { let (peer_max_field_section_size, closing) = { let state = self.conn_state.read("send request lock state"); (state.peer_config.max_field_section_size, state.closing) }; if closing { return Err(Error::closing()); } let (parts, _) = req.into_parts(); let request::Parts { method, uri, headers, extensions, .. } = parts; let headers = Header::request(method, uri, headers, extensions)?; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //= type=implication //# A //# client MUST send only a single request on a given stream. let mut stream = future::poll_fn(|cx| self.open.poll_open_bidi(cx)) .await .map_err(|e| self.maybe_conn_err(e))?; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2 //= type=TODO //# Characters in field names MUST be //# converted to lowercase prior to their encoding. //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.1 //= type=TODO //# To allow for better compression efficiency, the Cookie header field //# ([COOKIES]) MAY be split into separate field lines, each with one or //# more cookie-pairs, before compression. let mut block = BytesMut::new(); let mem_size = qpack::encode_stateless(&mut block, headers)?; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. 
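        // Respect the peer's maximum field section size (advertised via
        // SETTINGS, or the default if none was received): rather than send a
        // header block the peer will refuse, fail the request locally with a
        // "header too big" error.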
if mem_size > peer_max_field_section_size { return Err(Error::header_too_big(mem_size, peer_max_field_section_size)); } stream::write(&mut stream, Frame::Headers(block.freeze())) .await .map_err(|e| self.maybe_conn_err(e))?; let request_stream = RequestStream { inner: connection::RequestStream::new( FrameStream::new(BufRecvStream::new(stream)), self.max_field_section_size, self.conn_state.clone(), self.send_grease_frame, ), }; // send the grease frame only once self.send_grease_frame = false; Ok(request_stream) } } impl ConnectionState for SendRequest where T: quic::OpenStreams, B: Buf, { fn shared_state(&self) -> &SharedStateRef { &self.conn_state } } impl Clone for SendRequest where T: quic::OpenStreams + Clone, B: Buf, { fn clone(&self) -> Self { self.sender_count .fetch_add(1, std::sync::atomic::Ordering::Release); Self { open: self.open.clone(), conn_state: self.conn_state.clone(), max_field_section_size: self.max_field_section_size, sender_count: self.sender_count.clone(), conn_waker: self.conn_waker.clone(), _buf: PhantomData, send_grease_frame: self.send_grease_frame, } } } impl Drop for SendRequest where T: quic::OpenStreams, B: Buf, { fn drop(&mut self) { if self .sender_count .fetch_sub(1, std::sync::atomic::Ordering::AcqRel) == 1 { if let Some(w) = Option::take(&mut self.conn_waker) { w.wake() } self.shared_state().write("SendRequest drop").error = Some(Error::closed()); self.open.close(Code::H3_NO_ERROR, b""); } } } /// Client connection driver /// /// Maintains the internal state of an HTTP/3 connection, including control and QPACK. /// It needs to be polled continuously via [`poll_close()`]. On connection closure, this /// will resolve to `Ok(())` if the peer sent `HTTP_NO_ERROR`, or `Err()` if a connection-level /// error occurred. /// /// [`shutdown()`] initiates a graceful shutdown of this connection. After calling it, no request /// initiation will be further allowed. Then [`poll_close()`] will resolve when all ongoing requests /// and push streams complete. Finally, a connection closure with `HTTP_NO_ERROR` code will be /// sent to the server. /// /// # Examples /// /// ## Drive a connection concurrently /// /// ```rust /// # use bytes::Buf; /// # use futures_util::future; /// # use h3::{client::*, quic}; /// # use tokio::task::JoinHandle; /// # async fn doc(mut connection: Connection) /// # -> JoinHandle>> /// # where /// # C: quic::Connection + Send + 'static, /// # C::SendStream: Send + 'static, /// # C::RecvStream: Send + 'static, /// # B: Buf + Send + 'static, /// # { /// // Run the driver on a different task /// tokio::spawn(async move { /// future::poll_fn(|cx| connection.poll_close(cx)).await?; /// Ok::<(), Box>(()) /// }) /// # } /// ``` /// /// ## Shutdown a connection gracefully /// /// ```rust /// # use bytes::Buf; /// # use futures_util::future; /// # use h3::quic; /// # use h3::client::Connection; /// # use h3::client::SendRequest; /// # use tokio::{self, sync::oneshot, task::JoinHandle}; /// # async fn doc(mut connection: Connection) /// # -> Result<(), Box> /// # where /// # C: quic::Connection + Send + 'static, /// # C::SendStream: Send + 'static, /// # C::RecvStream: Send + 'static, /// # B: Buf + Send + 'static, /// # { /// // Prepare a channel to stop the driver thread /// let (shutdown_tx, shutdown_rx) = oneshot::channel(); /// /// // Run the driver on a different task /// let driver = tokio::spawn(async move { /// tokio::select! 
{ /// // Drive the connection /// closed = future::poll_fn(|cx| connection.poll_close(cx)) => closed?, /// // Listen for shutdown condition /// max_streams = shutdown_rx => { /// // Initiate shutdown /// connection.shutdown(max_streams?); /// // Wait for ongoing work to complete /// future::poll_fn(|cx| connection.poll_close(cx)).await?; /// } /// }; /// /// Ok::<(), Box>(()) /// }); /// /// // Do client things, wait for close condition... /// /// // Initiate shutdown /// shutdown_tx.send(2); /// // Wait for the connection to be closed /// driver.await? /// # } /// ``` /// [`poll_close()`]: struct.Connection.html#method.poll_close /// [`shutdown()`]: struct.Connection.html#method.shutdown pub struct Connection where C: quic::Connection, B: Buf, { pub(super) inner: ConnectionInner, // Has a GOAWAY frame been sent? If so, this PushId is the last we are willing to accept. pub(super) sent_closing: Option, // Has a GOAWAY frame been received? If so, this is StreamId the last the remote will accept. pub(super) recv_closing: Option, } impl Connection where C: quic::Connection, B: Buf, { /// Initiate a graceful shutdown, accepting `max_push` potentially in-flight server pushes #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn shutdown(&mut self, _max_push: usize) -> Result<(), Error> { // TODO: Calculate remaining pushes once server push is implemented. self.inner.shutdown(&mut self.sent_closing, PushId(0)).await } /// Wait until the connection is closed #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn wait_idle(&mut self) -> Result<(), Error> { future::poll_fn(|cx| self.poll_close(cx)).await } /// Maintain the connection state until it is closed #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { while let Poll::Ready(result) = self.inner.poll_control(cx) { match result { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.2 //= type=TODO //# When a 0-RTT QUIC connection is being used, the initial value of each //# server setting is the value used in the previous session. Clients //# SHOULD store the settings the server provided in the HTTP/3 //# connection where resumption information was provided, but they MAY //# opt not to store settings in certain cases (e.g., if the session //# ticket is received before the SETTINGS frame). //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.2 //= type=TODO //# A client MUST comply //# with stored settings -- or default values if no values are stored -- //# when attempting 0-RTT. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.2 //= type=TODO //# Once a server has provided new settings, //# clients MUST comply with those values. Ok(Frame::Settings(_)) => { #[cfg(feature = "tracing")] trace!("Got settings"); () } Ok(Frame::Goaway(id)) => { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.6 //# The GOAWAY frame is always sent on the control stream. In the //# server-to-client direction, it carries a QUIC stream ID for a client- //# initiated bidirectional stream encoded as a variable-length integer. //# A client MUST treat receipt of a GOAWAY frame containing a stream ID //# of any other type as a connection error of type H3_ID_ERROR. 
if !StreamId::from(id).is_request() { return Poll::Ready(Err(Code::H3_ID_ERROR.with_reason( format!("non-request StreamId in a GoAway frame: {}", id), ErrorLevel::ConnectionError, ))); } self.inner.process_goaway(&mut self.recv_closing, id)?; #[cfg(feature = "tracing")] info!("Server initiated graceful shutdown, last: StreamId({})", id); } //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.5 //# If a PUSH_PROMISE frame is received on the control stream, the client //# MUST respond with a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.7 //# A client MUST treat the //# receipt of a MAX_PUSH_ID frame as a connection error of type //# H3_FRAME_UNEXPECTED. Ok(frame) => { return Poll::Ready(Err(Code::H3_FRAME_UNEXPECTED.with_reason( format!("on client control stream: {:?}", frame), ErrorLevel::ConnectionError, ))) } Err(e) => { let connection_error = self.inner.shared.read("poll_close").error.clone(); let connection_error = match connection_error { Some(e) => e, None => { self.inner.shared.write("poll_close error").error = Some(e.clone()); e } }; if connection_error.is_closed() { return Poll::Ready(Ok(())); } return Poll::Ready(Err(connection_error)); } } } //= https://www.rfc-editor.org/rfc/rfc9114#section-6.1 //# Clients MUST treat //# receipt of a server-initiated bidirectional stream as a connection //# error of type H3_STREAM_CREATION_ERROR unless such an extension has //# been negotiated. if self.inner.poll_accept_request(cx).is_ready() { return Poll::Ready(Err(self.inner.close( Code::H3_STREAM_CREATION_ERROR, "client received a bidirectional stream", ))); } Poll::Pending } } h3-0.0.6/src/client/mod.rs000064400000000000000000000003261046102023000133340ustar 00000000000000//! HTTP/3 client mod connection; mod stream; mod builder; pub use builder::builder; pub use builder::new; pub use builder::Builder; pub use connection::{Connection, SendRequest}; pub use stream::RequestStream; h3-0.0.6/src/client/stream.rs000064400000000000000000000203071046102023000140510ustar 00000000000000use bytes::Buf; use futures_util::future; use http::{HeaderMap, Response}; #[cfg(feature = "tracing")] use tracing::instrument; use crate::{ connection::{self, ConnectionState, SharedStateRef}, error::{Code, Error, ErrorLevel}, proto::{frame::Frame, headers::Header}, qpack, quic::{self}, }; use std::convert::TryFrom; /// Manage request bodies transfer, response and trailers. /// /// Once a request has been sent via [`send_request()`], a response can be awaited by calling /// [`recv_response()`]. A body for this request can be sent with [`send_data()`], then the request /// shall be completed by either sending trailers with [`send_trailers()`], or [`finish()`]. /// /// After receiving the response's headers, it's body can be read by [`recv_data()`] until it returns /// `None`. Then the trailers will eventually be available via [`recv_trailers()`]. /// /// TODO: If data is polled before the response has been received, an error will be thrown. /// /// TODO: If trailers are polled but the body hasn't been fully received, an UNEXPECT_FRAME error will be /// thrown /// /// Whenever the client wants to cancel this request, it can call [`stop_sending()`], which will /// put an end to any transfer concerning it. 
/// /// # Examples /// /// ```rust /// # use h3::{quic, client::*}; /// # use http::{Request, Response}; /// # use bytes::Buf; /// # use tokio::io::AsyncWriteExt; /// # async fn doc(mut req_stream: RequestStream) -> Result<(), Box> /// # where /// # T: quic::RecvStream, /// # { /// // Prepare the HTTP request to send to the server /// let request = Request::get("https://www.example.com/").body(())?; /// /// // Receive the response /// let response = req_stream.recv_response().await?; /// // Receive the body /// while let Some(mut chunk) = req_stream.recv_data().await? { /// let mut out = tokio::io::stdout(); /// out.write_all_buf(&mut chunk).await?; /// out.flush().await?; /// } /// # Ok(()) /// # } /// # pub fn main() {} /// ``` /// /// [`send_request()`]: struct.SendRequest.html#method.send_request /// [`recv_response()`]: #method.recv_response /// [`recv_data()`]: #method.recv_data /// [`send_data()`]: #method.send_data /// [`send_trailers()`]: #method.send_trailers /// [`recv_trailers()`]: #method.recv_trailers /// [`finish()`]: #method.finish /// [`stop_sending()`]: #method.stop_sending pub struct RequestStream { pub(super) inner: connection::RequestStream, } impl ConnectionState for RequestStream { fn shared_state(&self) -> &SharedStateRef { &self.inner.conn_state } } impl RequestStream where S: quic::RecvStream, { /// Receive the HTTP/3 response /// /// This should be called before trying to receive any data with [`recv_data()`]. /// /// [`recv_data()`]: #method.recv_data #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_response(&mut self) -> Result, Error> { let mut frame = future::poll_fn(|cx| self.inner.stream.poll_next(cx)) .await .map_err(|e| self.maybe_conn_err(e))? .ok_or_else(|| { Code::H3_GENERAL_PROTOCOL_ERROR.with_reason( "Did not receive response headers", ErrorLevel::ConnectionError, ) })?; //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.5 //= type=TODO //# A client MUST treat //# receipt of a PUSH_PROMISE frame that contains a larger push ID than //# the client has advertised as a connection error of H3_ID_ERROR. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.5 //= type=TODO //# If a client //# receives a push ID that has already been promised and detects a //# mismatch, it MUST respond with a connection error of type //# H3_GENERAL_PROTOCOL_ERROR. let decoded = if let Frame::Headers(ref mut encoded) = frame { match qpack::decode_stateless(encoded, self.inner.max_field_section_size) { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. Err(qpack::DecoderError::HeaderTooLong(cancel_size)) => { self.inner.stop_sending(Code::H3_REQUEST_CANCELLED); return Err(Error::header_too_big( cancel_size, self.inner.max_field_section_size, )); } Ok(decoded) => decoded, Err(e) => return Err(e.into()), } } else { return Err(Code::H3_FRAME_UNEXPECTED.with_reason( "First response frame is not headers", ErrorLevel::ConnectionError, )); }; let qpack::Decoded { fields, .. } = decoded; let (status, headers) = Header::try_from(fields)?.into_response_parts()?; let mut resp = Response::new(()); *resp.status_mut() = status; *resp.headers_mut() = headers; *resp.version_mut() = http::Version::HTTP_3; Ok(resp) } /// Receive some of the request body. // TODO what if called before recv_response ? 
#[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_data(&mut self) -> Result, Error> { self.inner.recv_data().await } /// Receive an optional set of trailers for the response. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_trailers(&mut self) -> Result, Error> { let res = self.inner.recv_trailers().await; if let Err(ref e) = res { if e.is_header_too_big() { self.inner.stream.stop_sending(Code::H3_REQUEST_CANCELLED); } } res } /// Tell the peer to stop sending into the underlying QUIC stream #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn stop_sending(&mut self, error_code: crate::error::Code) { // TODO take by value to prevent any further call as this request is cancelled // rename `cancel()` ? self.inner.stream.stop_sending(error_code) } } impl RequestStream where S: quic::SendStream, B: Buf, { /// Send some data on the request body. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_data(&mut self, buf: B) -> Result<(), Error> { self.inner.send_data(buf).await } /// Send a set of trailers to end the request. /// /// Either [`RequestStream::finish`] or /// [`RequestStream::send_trailers`] must be called to finalize a /// request. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), Error> { self.inner.send_trailers(trailers).await } /// End the request without trailers. /// /// Either [`RequestStream::finish`] or /// [`RequestStream::send_trailers`] must be called to finalize a /// request. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn finish(&mut self) -> Result<(), Error> { self.inner.finish().await } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1.1 //= type=TODO //# Implementations SHOULD cancel requests by abruptly terminating any //# directions of a stream that are still open. To do so, an //# implementation resets the sending parts of streams and aborts reading //# on the receiving parts of streams; see Section 2.4 of //# [QUIC-TRANSPORT]. } impl RequestStream where S: quic::BidiStream, B: Buf, { /// Split this stream into two halves that can be driven independently. pub fn split( self, ) -> ( RequestStream, RequestStream, ) { let (send, recv) = self.inner.split(); (RequestStream { inner: send }, RequestStream { inner: recv }) } } h3-0.0.6/src/config.rs000064400000000000000000000156331046102023000125530ustar 00000000000000use std::convert::TryFrom; use crate::proto::{frame, varint::VarInt}; /// Configures the HTTP/3 connection #[derive(Debug, Clone, Copy)] #[non_exhaustive] pub struct Config { /// Just like in HTTP/2, HTTP/3 also uses the concept of "grease" /// to prevent potential interoperability issues in the future. /// In HTTP/3, the concept of grease is used to ensure that the protocol can evolve /// and accommodate future changes without breaking existing implementations. pub(crate) send_grease: bool, #[cfg(test)] pub(crate) send_settings: bool, /// HTTP/3 Settings pub settings: Settings, } /// HTTP/3 Settings #[derive(Debug, Clone, Copy)] pub struct Settings { /// The MAX_FIELD_SECTION_SIZE in HTTP/3 refers to the maximum size of the dynamic table used in HPACK compression. /// HPACK is the compression algorithm used in HTTP/3 to reduce the size of the header fields in HTTP requests and responses. /// In HTTP/3, the MAX_FIELD_SECTION_SIZE is set to 12. 
/// This means that the dynamic table used for HPACK compression can have a maximum size of 2^12 bytes, which is 4KB. pub(crate) max_field_section_size: u64, /// https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-3.1 /// Sets `SETTINGS_ENABLE_WEBTRANSPORT` if enabled pub(crate) enable_webtransport: bool, /// https://www.rfc-editor.org/info/rfc8441 defines an extended CONNECT method in Section 4, /// enabled by the SETTINGS_ENABLE_CONNECT_PROTOCOL parameter. /// That parameter is only defined for HTTP/2. /// for extended CONNECT in HTTP/3; instead, the SETTINGS_ENABLE_WEBTRANSPORT setting implies that an endpoint supports extended CONNECT. pub(crate) enable_extended_connect: bool, /// Enable HTTP Datagrams, see https://datatracker.ietf.org/doc/rfc9297/ for details pub(crate) enable_datagram: bool, /// The maximum number of concurrent streams that can be opened by the peer. pub(crate) max_webtransport_sessions: u64, } impl From<&frame::Settings> for Settings { fn from(settings: &frame::Settings) -> Self { let defaults: Self = Default::default(); Self { max_field_section_size: settings .get(frame::SettingId::MAX_HEADER_LIST_SIZE) .unwrap_or(defaults.max_field_section_size), enable_webtransport: settings .get(frame::SettingId::ENABLE_WEBTRANSPORT) .map(|value| value != 0) .unwrap_or(defaults.enable_webtransport), max_webtransport_sessions: settings .get(frame::SettingId::WEBTRANSPORT_MAX_SESSIONS) .unwrap_or(defaults.max_webtransport_sessions), enable_datagram: settings .get(frame::SettingId::H3_DATAGRAM) .map(|value| value != 0) .unwrap_or(defaults.enable_datagram), enable_extended_connect: settings .get(frame::SettingId::ENABLE_CONNECT_PROTOCOL) .map(|value| value != 0) .unwrap_or(defaults.enable_extended_connect), } } } impl TryFrom for frame::Settings { type Error = frame::SettingsError; fn try_from(value: Config) -> Result { let mut settings = frame::Settings::default(); let Config { send_grease, #[cfg(test)] send_settings: _, settings: Settings { max_field_section_size, enable_webtransport, enable_extended_connect, enable_datagram, max_webtransport_sessions, }, } = value; if send_grease { // Grease Settings (https://www.rfc-editor.org/rfc/rfc9114.html#name-defined-settings-parameters) //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //# Setting identifiers of the format 0x1f * N + 0x21 for non-negative //# integer values of N are reserved to exercise the requirement that //# unknown identifiers be ignored. Such settings have no defined //# meaning. Endpoints SHOULD include at least one such setting in their //# SETTINGS frame. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //# Setting identifiers that were defined in [HTTP/2] where there is no //# corresponding HTTP/3 setting have also been reserved //# (Section 11.2.2). These reserved settings MUST NOT be sent, and //# their receipt MUST be treated as a connection error of type //# H3_SETTINGS_ERROR. match settings.insert(frame::SettingId::grease(), 0) { Ok(_) => (), Err(_err) => { #[cfg(feature = "tracing")] tracing::warn!("Error when adding the grease Setting. 
Reason {}", _err); } } } settings.insert( frame::SettingId::MAX_HEADER_LIST_SIZE, max_field_section_size, )?; settings.insert( frame::SettingId::ENABLE_CONNECT_PROTOCOL, enable_extended_connect as u64, )?; settings.insert( frame::SettingId::ENABLE_WEBTRANSPORT, enable_webtransport as u64, )?; settings.insert(frame::SettingId::H3_DATAGRAM, enable_datagram as u64)?; settings.insert( frame::SettingId::WEBTRANSPORT_MAX_SESSIONS, max_webtransport_sessions, )?; Ok(settings) } } impl Default for Settings { fn default() -> Self { Self { max_field_section_size: VarInt::MAX.0, enable_webtransport: false, enable_extended_connect: false, enable_datagram: false, max_webtransport_sessions: 0, } } } impl Settings { /// https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-3.1 /// Sets `SETTINGS_ENABLE_WEBTRANSPORT` if enabled pub fn enable_webtransport(&self) -> bool { self.enable_webtransport } /// Enable HTTP Datagrams, see https://datatracker.ietf.org/doc/rfc9297/ for details pub fn enable_datagram(&self) -> bool { self.enable_datagram } /// https://www.rfc-editor.org/info/rfc8441 defines an extended CONNECT method in Section 4, /// enabled by the SETTINGS_ENABLE_CONNECT_PROTOCOL parameter. /// That parameter is only defined for HTTP/2. /// for extended CONNECT in HTTP/3; instead, the SETTINGS_ENABLE_WEBTRANSPORT setting implies that an endpoint supports extended CONNECT. pub fn enable_extended_connect(&self) -> bool { self.enable_extended_connect } } impl Default for Config { fn default() -> Self { Self { send_grease: true, #[cfg(test)] send_settings: true, settings: Default::default(), } } } h3-0.0.6/src/connection.rs000064400000000000000000001141311046102023000134360ustar 00000000000000use std::{ convert::TryFrom, marker::PhantomData, sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, task::{Context, Poll}, }; use bytes::{Buf, Bytes, BytesMut}; use futures_util::{future, ready}; use http::HeaderMap; use stream::WriteBuf; #[cfg(feature = "tracing")] use tracing::{instrument, warn}; use crate::{ config::{Config, Settings}, error::{Code, Error}, frame::FrameStream, proto::{ frame::{self, Frame, PayloadLen}, headers::Header, stream::StreamType, varint::VarInt, }, qpack, quic::{self, SendStream}, stream::{self, AcceptRecvStream, AcceptedRecvStream, BufRecvStream, UniStreamHeader}, webtransport::SessionId, }; #[doc(hidden)] #[non_exhaustive] pub struct SharedState { // Peer settings pub peer_config: Settings, // connection-wide error, concerns all RequestStreams and drivers pub error: Option, // Has a GOAWAY frame been sent or received? 
pub closing: bool, } #[derive(Clone)] #[doc(hidden)] pub struct SharedStateRef(Arc>); impl SharedStateRef { pub fn read(&self, panic_msg: &'static str) -> RwLockReadGuard { self.0.read().expect(panic_msg) } pub fn write(&self, panic_msg: &'static str) -> RwLockWriteGuard { self.0.write().expect(panic_msg) } } impl Default for SharedStateRef { fn default() -> Self { Self(Arc::new(RwLock::new(SharedState { peer_config: Default::default(), error: None, closing: false, }))) } } #[allow(missing_docs)] pub trait ConnectionState { fn shared_state(&self) -> &SharedStateRef; fn maybe_conn_err>(&self, err: E) -> Error { if let Some(ref e) = self.shared_state().0.read().unwrap().error { e.clone() } else { err.into() } } } #[allow(missing_docs)] pub struct AcceptedStreams where C: quic::Connection, B: Buf, { #[allow(missing_docs)] pub wt_uni_streams: Vec<(SessionId, BufRecvStream)>, } impl Default for AcceptedStreams where C: quic::Connection, B: Buf, { fn default() -> Self { Self { wt_uni_streams: Default::default(), } } } #[allow(missing_docs)] pub struct ConnectionInner where C: quic::Connection, B: Buf, { pub(super) shared: SharedStateRef, /// TODO: breaking encapsulation just to see if we can get this to work, will fix before merging pub conn: C, control_send: C::SendStream, control_recv: Option>, decoder_send: Option, decoder_recv: Option>, encoder_send: Option, encoder_recv: Option>, /// Buffers incoming uni/recv streams which have yet to be claimed. /// /// This is opposed to discarding them by returning in `poll_accept_recv`, which may cause them to be missed by something else polling. /// /// See: /// /// In WebTransport over HTTP/3, the client MAY send its SETTINGS frame, as well as /// multiple WebTransport CONNECT requests, WebTransport data streams and WebTransport /// datagrams, all within a single flight. As those can arrive out of order, a WebTransport /// server could be put into a situation where it receives a stream or a datagram without a /// corresponding session. Similarly, a client may receive a server-initiated stream or a /// datagram before receiving the CONNECT response headers from the server.To handle this /// case, WebTransport endpoints SHOULD buffer streams and datagrams until those can be /// associated with an established session. To avoid resource exhaustion, the endpoints /// MUST limit the number of buffered streams and datagrams. When the number of buffered /// streams is exceeded, a stream SHALL be closed by sending a RESET_STREAM and/or /// STOP_SENDING with the H3_WEBTRANSPORT_BUFFERED_STREAM_REJECTED error code. When the /// number of buffered datagrams is exceeded, a datagram SHALL be dropped. It is up to an /// implementation to choose what stream or datagram to discard. 
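    ///
    /// Streams buffered here can be retrieved later via
    /// `accepted_streams_mut()` once the session they belong to is known.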
accepted_streams: AcceptedStreams, pending_recv_streams: Vec>, got_peer_settings: bool, pub send_grease_frame: bool, // tells if the grease steam should be sent send_grease_stream_flag: bool, // step of the grease sending poll fn grease_step: GreaseStatus, pub config: Config, } enum GreaseStatus where S: SendStream, B: Buf, { /// Grease stream is not started NotStarted(PhantomData), /// Grease steam is started without data Started(Option), /// Grease stream is started with data DataPrepared(Option), /// Data is sent on grease stream DataSent(S), /// Grease stream is finished Finished, } impl ConnectionInner where C: quic::Connection, B: Buf, { /// Sends the settings and initializes the control streams #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_control_stream_headers(&mut self) -> Result<(), Error> { #[cfg(test)] if !self.config.send_settings { return Ok(()); } let settings = frame::Settings::try_from(self.config) .map_err(|e| Code::H3_INTERNAL_ERROR.with_cause(e))?; #[cfg(feature = "tracing")] tracing::debug!("Sending server settings: {:#x?}", settings); //= https://www.rfc-editor.org/rfc/rfc9114#section-3.2 //# After the QUIC connection is //# established, a SETTINGS frame MUST be sent by each endpoint as the //# initial frame of their respective HTTP control stream. //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //# Each side MUST initiate a single control stream at the beginning of //# the connection and send its SETTINGS frame as the first frame on this //# stream. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //# A SETTINGS frame MUST be sent as the first frame of //# each control stream (see Section 6.2.1) by each peer, and it MUST NOT //# be sent subsequently. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //= type=implication //# SETTINGS frames MUST NOT be sent on any stream other than the control //# stream. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.2 //= type=implication //# Endpoints MUST NOT require any data to be received from //# the peer prior to sending the SETTINGS frame; settings MUST be sent //# as soon as the transport is ready to send data. let mut decoder_send = Option::take(&mut self.decoder_send); let mut encoder_send = Option::take(&mut self.encoder_send); let (control, ..) 
= future::join3( stream::write( &mut self.control_send, WriteBuf::from(UniStreamHeader::Control(settings)), ), async { if let Some(stream) = &mut decoder_send { let _ = stream::write(stream, WriteBuf::from(UniStreamHeader::Decoder)).await; } }, async { if let Some(stream) = &mut encoder_send { let _ = stream::write(stream, WriteBuf::from(UniStreamHeader::Encoder)).await; } }, ) .await; self.decoder_send = decoder_send; self.encoder_send = encoder_send; control } /// Initiates the connection and opens a control stream #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn new(mut conn: C, shared: SharedStateRef, config: Config) -> Result { //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2 //# Endpoints SHOULD create the HTTP control stream as well as the //# unidirectional streams required by mandatory extensions (such as the //# QPACK encoder and decoder streams) first, and then create additional // start streams let (control_send, qpack_encoder, qpack_decoder) = ( future::poll_fn(|cx| conn.poll_open_send(cx)).await, future::poll_fn(|cx| conn.poll_open_send(cx)).await, future::poll_fn(|cx| conn.poll_open_send(cx)).await, ); let control_send = control_send.map_err(|e| Code::H3_STREAM_CREATION_ERROR.with_transport(e))?; let qpack_encoder = match qpack_encoder { Ok(stream) => Some(stream), Err(_) => None, }; let qpack_decoder = match qpack_decoder { Ok(stream) => Some(stream), Err(_) => None, }; //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //= type=implication //# The //# sender MUST NOT close the control stream, and the receiver MUST NOT //# request that the sender close the control stream. let mut conn_inner = Self { shared, conn, control_send, control_recv: None, decoder_recv: None, encoder_recv: None, pending_recv_streams: Vec::with_capacity(3), got_peer_settings: false, send_grease_frame: config.send_grease, config, accepted_streams: Default::default(), decoder_send: qpack_decoder, encoder_send: qpack_encoder, // send grease stream if configured send_grease_stream_flag: config.send_grease, // start at first step grease_step: GreaseStatus::NotStarted(PhantomData), }; conn_inner.send_control_stream_headers().await?; Ok(conn_inner) } /// Send GOAWAY with specified max_id, iff max_id is smaller than the previous one. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn shutdown( &mut self, sent_closing: &mut Option, max_id: T, ) -> Result<(), Error> where T: From + PartialOrd + Copy, VarInt: From, { if let Some(sent_id) = sent_closing { if *sent_id <= max_id { return Ok(()); } } *sent_closing = Some(max_id); self.shared.write("shutdown").closing = true; //= https://www.rfc-editor.org/rfc/rfc9114#section-3.3 //# When either endpoint chooses to close the HTTP/3 //# connection, the terminating endpoint SHOULD first send a GOAWAY frame //# (Section 5.2) so that both endpoints can reliably determine whether //# previously sent frames have been processed and gracefully complete or //# terminate any necessary remaining tasks. 
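        // Announce the shutdown to the peer: write a GOAWAY frame on our
        // control stream carrying the last stream/push ID we are still
        // willing to process.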
stream::write(&mut self.control_send, Frame::Goaway(max_id.into())).await } #[allow(missing_docs)] #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_accept_request( &mut self, cx: &mut Context<'_>, ) -> Poll, Error>> { { let state = self.shared.read("poll_accept_request"); if let Some(ref e) = state.error { return Poll::Ready(Err(e.clone())); } } // Accept the request by accepting the next bidirectional stream // .into().into() converts the impl QuicError into crate::error::Error. // The `?` operator doesn't work here for some reason. self.conn.poll_accept_bidi(cx).map_err(|e| e.into().into()) } /// Polls incoming streams /// /// Accepted streams which are not control, decoder, or encoder streams are buffer in `accepted_recv_streams` #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_accept_recv(&mut self, cx: &mut Context<'_>) -> Result<(), Error> { if let Some(ref e) = self.shared.read("poll_accept_request").error { return Err(e.clone()); } // Get all currently pending streams loop { match self.conn.poll_accept_recv(cx)? { Poll::Ready(Some(stream)) => self .pending_recv_streams .push(AcceptRecvStream::new(stream)), Poll::Ready(None) => { return Err(Code::H3_GENERAL_PROTOCOL_ERROR.with_reason( "Connection closed unexpected", crate::error::ErrorLevel::ConnectionError, )) } Poll::Pending => break, } } let mut resolved = vec![]; for (index, pending) in self.pending_recv_streams.iter_mut().enumerate() { match pending.poll_type(cx)? { Poll::Ready(()) => resolved.push(index), Poll::Pending => (), } } for (removed, index) in resolved.into_iter().enumerate() { //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2 //= type=implication //# As certain stream types can affect connection state, a recipient //# SHOULD NOT discard data from incoming unidirectional streams prior to //# reading the stream type. let stream = self .pending_recv_streams .remove(index - removed) .into_stream()?; match stream { //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //# Only one control stream per peer is permitted; //# receipt of a second stream claiming to be a control stream MUST be //# treated as a connection error of type H3_STREAM_CREATION_ERROR. AcceptedRecvStream::Control(s) => { if self.control_recv.is_some() { return Err( self.close(Code::H3_STREAM_CREATION_ERROR, "got two control streams") ); } self.control_recv = Some(s); } enc @ AcceptedRecvStream::Encoder(_) => { if let Some(_prev) = self.encoder_recv.replace(enc) { return Err( self.close(Code::H3_STREAM_CREATION_ERROR, "got two encoder streams") ); } } dec @ AcceptedRecvStream::Decoder(_) => { if let Some(_prev) = self.decoder_recv.replace(dec) { return Err( self.close(Code::H3_STREAM_CREATION_ERROR, "got two decoder streams") ); } } AcceptedRecvStream::WebTransportUni(id, s) if self.config.settings.enable_webtransport => { // Store until someone else picks it up, like a webtransport session which is // not yet established. self.accepted_streams.wt_uni_streams.push((id, s)) } //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.3 //= type=implication //# Endpoints MUST NOT consider these streams to have any meaning upon //# receipt. _ => (), } } Ok(()) } /// Waits for the control stream to be received and reads subsequent frames. 
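    ///
    /// Resolves with the next frame read from the peer's control stream
    /// (e.g. SETTINGS or GOAWAY), or with an error if that stream is closed
    /// or carries a frame that is not allowed there.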
#[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_control(&mut self, cx: &mut Context<'_>) -> Poll, Error>> { if let Some(ref e) = self.shared.read("poll_accept_request").error { return Poll::Ready(Err(e.clone())); } let recv = { // TODO self.poll_accept_recv(cx)?; if let Some(v) = &mut self.control_recv { v } else { // Try later return Poll::Pending; } }; let recvd = ready!(recv.poll_next(cx))?; let res = match recvd { //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //# If either control //# stream is closed at any point, this MUST be treated as a connection //# error of type H3_CLOSED_CRITICAL_STREAM. None => Err(self.close(Code::H3_CLOSED_CRITICAL_STREAM, "control stream closed")), Some(frame) => { match frame { Frame::Settings(settings) if !self.got_peer_settings => { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //= type=TODO //# A receiver MAY treat the presence of duplicate //# setting identifiers as a connection error of type H3_SETTINGS_ERROR. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //= type=TODO //# Setting identifiers that were defined in [HTTP/2] where there is no //# corresponding HTTP/3 setting have also been reserved //# (Section 11.2.2). These reserved settings MUST NOT be sent, and //# their receipt MUST be treated as a connection error of type //# H3_SETTINGS_ERROR. self.got_peer_settings = true; //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //= type=implication //# An implementation MUST ignore any parameter with an identifier it //# does not understand. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //= type=implication //# Endpoints MUST NOT consider such settings to have //# any meaning upon receipt. let mut shared = self.shared.write("connection settings write"); shared.peer_config = (&settings).into(); Ok(Frame::Settings(settings)) } f @ Frame::Goaway(_) => Ok(f), f @ Frame::CancelPush(_) | f @ Frame::MaxPushId(_) => { if self.got_peer_settings { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.3 //= type=TODO //# If a CANCEL_PUSH frame is received that //# references a push ID greater than currently allowed on the //# connection, this MUST be treated as a connection error of type //# H3_ID_ERROR. Ok(f) } else { //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //# If the first frame of the control stream is any other frame //# type, this MUST be treated as a connection error of type //# H3_MISSING_SETTINGS. Err(self.close( Code::H3_MISSING_SETTINGS, format!("received {:?} before settings on control stream", f), )) } } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# Receipt of an invalid sequence of frames MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.1 //= type=implication //# DATA frames MUST be associated with an HTTP request or response. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.1 //# If //# a DATA frame is received on a control stream, the recipient MUST //# respond with a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.2 //# If a HEADERS frame is received on a control stream, the recipient //# MUST respond with a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //# If an endpoint receives a second SETTINGS //# frame on the control stream, the endpoint MUST respond with a //# connection error of type H3_FRAME_UNEXPECTED. 
frame => Err(self.close( Code::H3_FRAME_UNEXPECTED, format!("on control stream: {:?}", frame), )), } } }; if self.send_grease_stream_flag { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.8 //= type=implication //# Frame types of the format 0x1f * N + 0x21 for non-negative integer //# values of N are reserved to exercise the requirement that unknown //# types be ignored (Section 9). These frames have no semantics, and //# they MAY be sent on any stream where frames are allowed to be sent. ready!(self.poll_grease_stream(cx)); } Poll::Ready(res) } #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub(crate) fn process_goaway( &mut self, recv_closing: &mut Option, id: VarInt, ) -> Result<(), Error> where T: From + Copy, VarInt: From, { { //= https://www.rfc-editor.org/rfc/rfc9114#section-5.2 //# An endpoint MAY send multiple GOAWAY frames indicating different //# identifiers, but the identifier in each frame MUST NOT be greater //# than the identifier in any previous frame, since clients might //# already have retried unprocessed requests on another HTTP connection. //= https://www.rfc-editor.org/rfc/rfc9114#section-5.2 //# Like the server, //# the client MAY send subsequent GOAWAY frames so long as the specified //# push ID is no greater than any previously sent value. if let Some(prev_id) = recv_closing.map(VarInt::from) { if prev_id < id { //= https://www.rfc-editor.org/rfc/rfc9114#section-5.2 //# Receiving a GOAWAY containing a larger identifier than previously //# received MUST be treated as a connection error of type H3_ID_ERROR. return Err(self.close( Code::H3_ID_ERROR, format!( "received a GoAway({}) greater than the former one ({})", id, prev_id ), )); } } *recv_closing = Some(id.into()); if !self.shared.read("connection goaway read").closing { self.shared.write("connection goaway overwrite").closing = true; } Ok(()) } } /// Closes a Connection with code and reason. /// It returns an [`Error`] which can be returned. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn close>(&mut self, code: Code, reason: T) -> Error { self.shared.write("connection close err").error = Some(code.with_reason(reason.as_ref(), crate::error::ErrorLevel::ConnectionError)); self.conn.close(code, reason.as_ref().as_bytes()); code.with_reason(reason.as_ref(), crate::error::ErrorLevel::ConnectionError) } // start grease stream and send data #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] fn poll_grease_stream(&mut self, cx: &mut Context<'_>) -> Poll<()> { if matches!(self.grease_step, GreaseStatus::NotStarted(_)) { self.grease_step = match self.conn.poll_open_send(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(_)) => { // could not create grease stream // don't try again self.send_grease_stream_flag = false; #[cfg(feature = "tracing")] warn!("grease stream creation failed with"); return Poll::Ready(()); } Poll::Ready(Ok(stream)) => GreaseStatus::Started(Some(stream)), }; }; //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.3 //# Stream types of the format 0x1f * N + 0x21 for non-negative integer //# values of N are reserved to exercise the requirement that unknown //# types be ignored. These streams have no semantics, and they can be //# sent when application-layer padding is desired. They MAY also be //# sent on connections where no data is currently being transferred. 
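        // For illustration, the reserved formula yields 0x21 for N = 0, 0x40 for
        // N = 1, 0x5f for N = 2, and so on; `StreamType::grease()` picks one of
        // these values at random for the stream opened above.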
if let GreaseStatus::Started(stream) = &mut self.grease_step { if let Some(stream) = stream { if stream .send_data((StreamType::grease(), Frame::Grease)) .is_err() { self.send_grease_stream_flag = false; #[cfg(feature = "tracing")] warn!("write data on grease stream failed with"); return Poll::Ready(()); }; } self.grease_step = GreaseStatus::DataPrepared(stream.take()); }; if let GreaseStatus::DataPrepared(stream) = &mut self.grease_step { if let Some(stream) = stream { match stream.poll_ready(cx) { Poll::Ready(Ok(_)) => (), Poll::Pending => return Poll::Pending, Poll::Ready(Err(_)) => { // could not write grease frame // don't try again self.send_grease_stream_flag = false; #[cfg(feature = "tracing")] warn!("write data on grease stream failed with"); return Poll::Ready(()); } }; } self.grease_step = GreaseStatus::DataSent(match stream.take() { Some(stream) => stream, None => { // this should never happen self.send_grease_stream_flag = false; return Poll::Ready(()); } }); }; //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.3 //# When sending a reserved stream type, //# the implementation MAY either terminate the stream cleanly or reset //# it. if let GreaseStatus::DataSent(stream) = &mut self.grease_step { match stream.poll_finish(cx) { Poll::Ready(Ok(_)) => (), Poll::Pending => return Poll::Pending, Poll::Ready(Err(_)) => { // could not finish grease stream // don't try again self.send_grease_stream_flag = false; #[cfg(feature = "tracing")] warn!("finish grease stream failed with"); return Poll::Ready(()); } }; self.grease_step = GreaseStatus::Finished; }; // grease stream is closed // don't do another one self.send_grease_stream_flag = false; Poll::Ready(()) } #[allow(missing_docs)] #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn accepted_streams_mut(&mut self) -> &mut AcceptedStreams { &mut self.accepted_streams } } #[allow(missing_docs)] pub struct RequestStream { pub(super) stream: FrameStream, pub(super) trailers: Option, pub(super) conn_state: SharedStateRef, pub(super) max_field_section_size: u64, send_grease_frame: bool, } impl RequestStream { #[allow(missing_docs)] pub fn new( stream: FrameStream, max_field_section_size: u64, conn_state: SharedStateRef, grease: bool, ) -> Self { Self { stream, conn_state, max_field_section_size, trailers: None, send_grease_frame: grease, } } } impl ConnectionState for RequestStream { fn shared_state(&self) -> &SharedStateRef { &self.conn_state } } impl RequestStream where S: quic::RecvStream, { /// Receive some of the request body. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_recv_data( &mut self, cx: &mut Context<'_>, ) -> Poll, Error>> { if !self.stream.has_data() { let frame = self .stream .poll_next(cx) .map_err(|e| self.maybe_conn_err(e))?; match ready!(frame) { Some(Frame::Data { .. }) => (), Some(Frame::Headers(encoded)) => { self.trailers = Some(encoded); return Poll::Ready(Ok(None)); } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# Receipt of an invalid sequence of frames MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.3 //# Receiving a //# CANCEL_PUSH frame on a stream other than the control stream MUST be //# treated as a connection error of type H3_FRAME_UNEXPECTED. 
//= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //# If an endpoint receives a SETTINGS frame on a different //# stream, the endpoint MUST respond with a connection error of type //# H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.6 //# A client MUST treat a GOAWAY frame on a stream other than //# the control stream as a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.7 //# The MAX_PUSH_ID frame is always sent on the control stream. Receipt //# of a MAX_PUSH_ID frame on any other stream MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. Some(_) => return Poll::Ready(Err(Code::H3_FRAME_UNEXPECTED.into())), None => return Poll::Ready(Ok(None)), } } self.stream .poll_data(cx) .map_err(|e| self.maybe_conn_err(e)) } /// Receive some of the request body. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_data(&mut self) -> Result, Error> { future::poll_fn(|cx| self.poll_recv_data(cx)).await } /// Receive trailers #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_trailers(&mut self) -> Result, Error> { let mut trailers = if let Some(encoded) = self.trailers.take() { encoded } else { let frame = future::poll_fn(|cx| self.stream.poll_next(cx)) .await .map_err(|e| self.maybe_conn_err(e))?; match frame { Some(Frame::Headers(encoded)) => encoded, //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# Receipt of an invalid sequence of frames MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.3 //# Receiving a //# CANCEL_PUSH frame on a stream other than the control stream MUST be //# treated as a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //# If an endpoint receives a SETTINGS frame on a different //# stream, the endpoint MUST respond with a connection error of type //# H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.6 //# A client MUST treat a GOAWAY frame on a stream other than //# the control stream as a connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.7 //# The MAX_PUSH_ID frame is always sent on the control stream. Receipt //# of a MAX_PUSH_ID frame on any other stream MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. Some(_) => return Err(Code::H3_FRAME_UNEXPECTED.into()), None => return Ok(None), } }; if !self.stream.is_eos() { // Get the trailing frame let trailing_frame = future::poll_fn(|cx| self.stream.poll_next(cx)) .await .map_err(|e| self.maybe_conn_err(e))?; if trailing_frame.is_some() { // if it's not unknown or reserved, fail. return Err(Code::H3_FRAME_UNEXPECTED.into()); } } let qpack::Decoded { fields, .. } = match qpack::decode_stateless(&mut trailers, self.max_field_section_size) { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. 
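                // A trailer section larger than our advertised limit is surfaced as
                // `Error::header_too_big(actual, max)` so the caller can see both
                // the offending size and the configured `max_field_section_size`.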
Err(qpack::DecoderError::HeaderTooLong(cancel_size)) => { return Err(Error::header_too_big( cancel_size, self.max_field_section_size, )) } Ok(decoded) => decoded, Err(e) => return Err(e.into()), }; Ok(Some(Header::try_from(fields)?.into_fields())) } #[allow(missing_docs)] #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn stop_sending(&mut self, err_code: Code) { self.stream.stop_sending(err_code); } } impl RequestStream where S: quic::SendStream, B: Buf, { /// Send some data on the response body. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_data(&mut self, buf: B) -> Result<(), Error> { let frame = Frame::Data(buf); stream::write(&mut self.stream, frame) .await .map_err(|e| self.maybe_conn_err(e))?; Ok(()) } /// Send a set of trailers to end the request. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), Error> { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2 //= type=TODO //# Characters in field names MUST be //# converted to lowercase prior to their encoding. let mut block = BytesMut::new(); let mem_size = qpack::encode_stateless(&mut block, Header::trailer(trailers))?; let max_mem_size = self .conn_state .read("send_trailers shared state read") .peer_config .max_field_section_size; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. if mem_size > max_mem_size { return Err(Error::header_too_big(mem_size, max_mem_size)); } stream::write(&mut self.stream, Frame::Headers(block.freeze())) .await .map_err(|e| self.maybe_conn_err(e))?; Ok(()) } /// Stops a stream with an error code #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn stop_stream(&mut self, code: Code) { self.stream.reset(code.into()); } #[allow(missing_docs)] #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn finish(&mut self) -> Result<(), Error> { if self.send_grease_frame { // send a grease frame once per Connection stream::write(&mut self.stream, Frame::Grease) .await .map_err(|e| self.maybe_conn_err(e))?; self.send_grease_frame = false; } future::poll_fn(|cx| self.stream.poll_finish(cx)) .await .map_err(|e| self.maybe_conn_err(e)) } } impl RequestStream where S: quic::BidiStream, B: Buf, { #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub(crate) fn split( self, ) -> ( RequestStream, RequestStream, ) { let (send, recv) = self.stream.split(); ( RequestStream { stream: send, trailers: None, conn_state: self.conn_state.clone(), max_field_section_size: 0, send_grease_frame: self.send_grease_frame, }, RequestStream { stream: recv, trailers: self.trailers, conn_state: self.conn_state, max_field_section_size: self.max_field_section_size, send_grease_frame: self.send_grease_frame, }, ) } } h3-0.0.6/src/error.rs000064400000000000000000000342521046102023000124350ustar 00000000000000//! HTTP/3 Error types use std::{fmt, sync::Arc}; use crate::{frame, proto, qpack, quic}; /// Cause of an error thrown by our own h3 layer type Cause = Box; /// Error thrown by the underlying QUIC impl pub(crate) type TransportError = Box; /// A general error that can occur when handling the HTTP/3 protocol. #[derive(Clone)] pub struct Error { /// The error kind. 
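    ///
    /// Boxed so that `Error` itself stays pointer-sized (see `test_size_of` below).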
pub(crate) inner: Box, } /// An HTTP/3 "application error code". #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct Code { code: u64, } impl Code { /// Numerical error code /// /// See /// and pub fn value(&self) -> u64 { self.code } } impl PartialEq for Code { fn eq(&self, other: &u64) -> bool { *other == self.code } } /// The error kind. #[derive(Clone)] pub(crate) struct ErrorImpl { pub(crate) kind: Kind, cause: Option>, } /// Some errors affect the whole connection, others only one Request or Stream. /// See [errors](https://www.rfc-editor.org/rfc/rfc9114.html#errors) for mor details. #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum ErrorLevel { /// Error that will close the whole connection ConnectionError, /// Error scoped to a single stream StreamError, } // Warning: this enum is public only for testing purposes. Do not use it in // downstream code or be prepared to refactor as changes happen. #[doc(hidden)] #[non_exhaustive] #[derive(Clone, Debug)] pub enum Kind { #[non_exhaustive] Application { code: Code, reason: Option>, level: ErrorLevel, }, #[non_exhaustive] HeaderTooBig { actual_size: u64, max_size: u64, }, // Error from QUIC layer #[non_exhaustive] Transport(Arc), // Connection has been closed with `Code::NO_ERROR` Closed, // Currently in a graceful shutdown procedure Closing, Timeout, } // ===== impl Code ===== macro_rules! codes { ( $( $(#[$docs:meta])* ($num:expr, $name:ident); )+ ) => { impl Code { $( $(#[$docs])* pub const $name: Code = Code{code: $num}; )+ } impl fmt::Debug for Code { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.code { $( $num => f.write_str(stringify!($name)), )+ other => write!(f, "{:#x}", other), } } } } } codes! { /// Datagram or capsule parse error /// See: (0x33, H3_DATAGRAM_ERROR); /// No error. This is used when the connection or stream needs to be /// closed, but there is no error to signal. (0x100, H3_NO_ERROR); /// Peer violated protocol requirements in a way that does not match a more /// specific error code, or endpoint declines to use the more specific /// error code. (0x101, H3_GENERAL_PROTOCOL_ERROR); /// An internal error has occurred in the HTTP stack. (0x102, H3_INTERNAL_ERROR); /// The endpoint detected that its peer created a stream that it will not /// accept. (0x103, H3_STREAM_CREATION_ERROR); /// A stream required by the HTTP/3 connection was closed or reset. (0x104, H3_CLOSED_CRITICAL_STREAM); /// A frame was received that was not permitted in the current state or on /// the current stream. (0x105, H3_FRAME_UNEXPECTED); /// A frame that fails to satisfy layout requirements or with an invalid /// size was received. (0x106, H3_FRAME_ERROR); /// The endpoint detected that its peer is exhibiting a behavior that might /// be generating excessive load. (0x107, H3_EXCESSIVE_LOAD); /// A Stream ID or Push ID was used incorrectly, such as exceeding a limit, /// reducing a limit, or being reused. (0x108, H3_ID_ERROR); /// An endpoint detected an error in the payload of a SETTINGS frame. (0x109, H3_SETTINGS_ERROR); /// No SETTINGS frame was received at the beginning of the control stream. (0x10a, H3_MISSING_SETTINGS); /// A server rejected a request without performing any application /// processing. (0x10b, H3_REQUEST_REJECTED); /// The request or its response (including pushed response) is cancelled. (0x10c, H3_REQUEST_CANCELLED); /// The client's stream terminated without containing a fully-formed /// request. 
(0x10d, H3_REQUEST_INCOMPLETE); /// An HTTP message was malformed and cannot be processed. (0x10e, H3_MESSAGE_ERROR); /// The TCP connection established in response to a CONNECT request was /// reset or abnormally closed. (0x10f, H3_CONNECT_ERROR); /// The requested operation cannot be served over HTTP/3. The peer should /// retry over HTTP/1.1. (0x110, H3_VERSION_FALLBACK); /// The decoder failed to interpret an encoded field section and is not /// able to continue decoding that field section. (0x200, QPACK_DECOMPRESSION_FAILED); /// The decoder failed to interpret an encoder instruction received on the /// encoder stream. (0x201, QPACK_ENCODER_STREAM_ERROR); /// The encoder failed to interpret a decoder instruction received on the /// decoder stream. (0x202, QPACK_DECODER_STREAM_ERROR); } impl Code { pub(crate) fn with_reason>>(self, reason: S, level: ErrorLevel) -> Error { Error::new(Kind::Application { code: self, reason: Some(reason.into()), level, }) } pub(crate) fn with_cause>(self, cause: E) -> Error { Error::from(self).with_cause(cause) } pub(crate) fn with_transport>>(self, err: E) -> Error { Error::new(Kind::Transport(Arc::new(err.into()))) } } impl From for u64 { fn from(code: Code) -> u64 { code.code } } // ===== impl Error ===== impl Error { fn new(kind: Kind) -> Self { Error { inner: Box::new(ErrorImpl { kind, cause: None }), } } /// Returns the error code from the error if available pub fn try_get_code(&self) -> Option { match self.inner.kind { Kind::Application { code, .. } => Some(code), _ => None, } } /// returns the [`ErrorLevel`] of an [`Error`] /// This indicates weather an accept loop should continue. pub fn get_error_level(&self) -> ErrorLevel { match self.inner.kind { Kind::Application { code: _, reason: _, level, } => level, // return Connection error on other kinds _ => ErrorLevel::ConnectionError, } } pub(crate) fn header_too_big(actual_size: u64, max_size: u64) -> Self { Error::new(Kind::HeaderTooBig { actual_size, max_size, }) } pub(crate) fn with_cause>(mut self, cause: E) -> Self { self.inner.cause = Some(Arc::new(cause.into())); self } pub(crate) fn closing() -> Self { Self::new(Kind::Closing) } pub(crate) fn closed() -> Self { Self::new(Kind::Closed) } pub(crate) fn is_closed(&self) -> bool { if let Kind::Closed = self.inner.kind { return true; } false } pub(crate) fn is_header_too_big(&self) -> bool { matches!(&self.inner.kind, Kind::HeaderTooBig { .. }) } #[doc(hidden)] pub fn kind(&self) -> Kind { self.inner.kind.clone() } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut builder = f.debug_struct("h3::Error"); match self.inner.kind { Kind::Closed => { builder.field("connection closed", &true); } Kind::Closing => { builder.field("closing", &true); } Kind::Timeout => { builder.field("timeout", &true); } Kind::Application { code, ref reason, .. 
} => { builder.field("code", &code); if let Some(reason) = reason { builder.field("reason", reason); } } Kind::Transport(ref e) => { builder.field("kind", &e); builder.field("code: ", &e.err_code()); } Kind::HeaderTooBig { actual_size, max_size, } => { builder.field("header_size", &actual_size); builder.field("max_size", &max_size); } } if let Some(ref cause) = self.inner.cause { builder.field("cause", cause); } builder.finish() } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.inner.kind { Kind::Closed => write!(f, "connection is closed")?, Kind::Closing => write!(f, "connection is gracefully closing")?, Kind::Transport(ref e) => write!(f, "quic transport error: {}", e)?, Kind::Timeout => write!(f, "timeout",)?, Kind::Application { code, ref reason, .. } => { if let Some(reason) = reason { write!(f, "application error: {}", reason)? } else { write!(f, "application error {:?}", code)? } } Kind::HeaderTooBig { actual_size, max_size, } => write!( f, "issued header size {} o is beyond peer's limit {} o", actual_size, max_size )?, }; if let Some(ref cause) = self.inner.cause { write!(f, "cause: {}", cause)? } Ok(()) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { self.inner.cause.as_ref().map(|e| &***e as _) } } impl From for Error { fn from(code: Code) -> Error { Error::new(Kind::Application { code, reason: None, level: ErrorLevel::ConnectionError, }) } } impl From for Error { fn from(e: qpack::EncoderError) -> Self { Self::from(Code::QPACK_ENCODER_STREAM_ERROR).with_cause(e) } } impl From for Error { fn from(e: qpack::DecoderError) -> Self { match e { qpack::DecoderError::InvalidStaticIndex(_) => { Self::from(Code::QPACK_DECOMPRESSION_FAILED).with_cause(e) } _ => Self::from(Code::QPACK_DECODER_STREAM_ERROR).with_cause(e), } } } impl From for Error { fn from(e: proto::headers::HeaderError) -> Self { Error::new(Kind::Application { code: Code::H3_MESSAGE_ERROR, reason: None, level: ErrorLevel::StreamError, }) .with_cause(e) } } impl From for Error { fn from(e: frame::FrameStreamError) -> Self { match e { frame::FrameStreamError::Quic(e) => e.into(), //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1 //# When a stream terminates cleanly, if the last frame on the stream was //# truncated, this MUST be treated as a connection error of type //# H3_FRAME_ERROR. frame::FrameStreamError::UnexpectedEnd => Code::H3_FRAME_ERROR .with_reason("received incomplete frame", ErrorLevel::ConnectionError), frame::FrameStreamError::Proto(e) => match e { proto::frame::FrameError::InvalidStreamId(_) | proto::frame::FrameError::InvalidPushId(_) => Code::H3_ID_ERROR, proto::frame::FrameError::Settings(_) => Code::H3_SETTINGS_ERROR, proto::frame::FrameError::UnsupportedFrame(_) | proto::frame::FrameError::UnknownFrame(_) => Code::H3_FRAME_UNEXPECTED, //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1 //# A frame payload that contains additional bytes //# after the identified fields or a frame payload that terminates before //# the end of the identified fields MUST be treated as a connection //# error of type H3_FRAME_ERROR. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1 //# In particular, redundant length //# encodings MUST be verified to be self-consistent; see Section 10.8. 
proto::frame::FrameError::Incomplete(_) | proto::frame::FrameError::InvalidFrameValue | proto::frame::FrameError::Malformed => Code::H3_FRAME_ERROR, } .with_cause(e), } } } impl From for Box { fn from(e: Error) -> Self { Box::new(e) } } impl From for Error where T: Into, { fn from(e: T) -> Self { let quic_error: TransportError = e.into(); if quic_error.is_timeout() { return Error::new(Kind::Timeout); } match quic_error.err_code() { Some(c) if Code::H3_NO_ERROR == c => Error::new(Kind::Closed), Some(c) => Error::new(Kind::Application { code: Code { code: c }, reason: None, level: ErrorLevel::ConnectionError, }), None => Error::new(Kind::Transport(Arc::new(quic_error))), } } } impl From for Error { fn from(e: proto::stream::InvalidStreamId) -> Self { Self::from(Code::H3_ID_ERROR).with_cause(format!("{}", e)) } } #[cfg(test)] mod tests { use super::Error; use std::mem; #[test] fn test_size_of() { assert_eq!(mem::size_of::(), mem::size_of::()); } } h3-0.0.6/src/ext.rs000064400000000000000000000072051046102023000121020ustar 00000000000000//! Extensions for the HTTP/3 protocol. use std::convert::TryFrom; use std::str::FromStr; use bytes::{Buf, Bytes}; use crate::{ error::Code, proto::{stream::StreamId, varint::VarInt}, Error, }; /// Describes the `:protocol` pseudo-header for extended connect /// /// See: #[derive(Copy, PartialEq, Debug, Clone)] pub struct Protocol(ProtocolInner); impl Protocol { /// WebTransport protocol pub const WEB_TRANSPORT: Protocol = Protocol(ProtocolInner::WebTransport); /// RFC 9298 protocol pub const CONNECT_UDP: Protocol = Protocol(ProtocolInner::ConnectUdp); /// Return a &str representation of the `:protocol` pseudo-header value #[inline] pub fn as_str(&self) -> &str { match self.0 { ProtocolInner::WebTransport => "webtransport", ProtocolInner::ConnectUdp => "connect-udp", } } } #[derive(Copy, PartialEq, Debug, Clone)] enum ProtocolInner { WebTransport, ConnectUdp, } /// Error when parsing the protocol pub struct InvalidProtocol; impl FromStr for Protocol { type Err = InvalidProtocol; fn from_str(s: &str) -> Result { match s { "webtransport" => Ok(Self(ProtocolInner::WebTransport)), "connect-udp" => Ok(Self(ProtocolInner::ConnectUdp)), _ => Err(InvalidProtocol), } } } /// HTTP datagram frames /// See: pub struct Datagram { /// Stream id divided by 4 stream_id: StreamId, /// The data contained in the datagram payload: B, } impl Datagram where B: Buf, { /// Creates a new datagram frame pub fn new(stream_id: StreamId, payload: B) -> Self { assert!( stream_id.into_inner() % 4 == 0, "StreamId is not divisible by 4" ); Self { stream_id, payload } } /// Decodes a datagram frame from the QUIC datagram pub fn decode(mut buf: B) -> Result { let q_stream_id = VarInt::decode(&mut buf) .map_err(|_| Code::H3_DATAGRAM_ERROR.with_cause("Malformed datagram frame"))?; //= https://www.rfc-editor.org/rfc/rfc9297#section-2.1 // Quarter Stream ID: A variable-length integer that contains the value of the client-initiated bidirectional // stream that this datagram is associated with divided by four (the division by four stems // from the fact that HTTP requests are sent on client-initiated bidirectional streams, // which have stream IDs that are divisible by four). The largest legal QUIC stream ID // value is 262-1, so the largest legal value of the Quarter Stream ID field is 260-1. // Receipt of an HTTP/3 Datagram that includes a larger value MUST be treated as an HTTP/3 // connection error of type H3_DATAGRAM_ERROR (0x33). 
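        // Worked example: Quarter Stream ID 0 maps to request stream 0,
        // 1 maps to stream 4, and 2 maps to stream 8; the result is always a
        // client-initiated bidirectional stream ID (a multiple of four).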
let stream_id = StreamId::try_from(u64::from(q_stream_id) * 4) .map_err(|_| Code::H3_DATAGRAM_ERROR.with_cause("Invalid stream id"))?; let payload = buf; Ok(Self { stream_id, payload }) } #[inline] /// Returns the associated stream id of the datagram pub fn stream_id(&self) -> StreamId { self.stream_id } #[inline] /// Returns the datagram payload pub fn payload(&self) -> &B { &self.payload } /// Encode the datagram to wire format pub fn encode(self, buf: &mut D) { (VarInt::from(self.stream_id) / 4).encode(buf); buf.put(self.payload); } /// Returns the datagram payload pub fn into_payload(self) -> B { self.payload } } h3-0.0.6/src/frame.rs000064400000000000000000000436621046102023000124030ustar 00000000000000use std::task::{Context, Poll}; use bytes::Buf; #[cfg(feature = "tracing")] use tracing::trace; use crate::stream::{BufRecvStream, WriteBuf}; use crate::{ buf::BufList, error::TransportError, proto::{ frame::{self, Frame, PayloadLen}, stream::StreamId, }, quic::{BidiStream, RecvStream, SendStream}, }; /// Decodes Frames from the underlying QUIC stream pub struct FrameStream { pub stream: BufRecvStream, // Already read data from the stream decoder: FrameDecoder, remaining_data: usize, } impl FrameStream { pub fn new(stream: BufRecvStream) -> Self { Self { stream, decoder: FrameDecoder::default(), remaining_data: 0, } } /// Unwraps the Framed streamer and returns the underlying stream **without** data loss for /// partially received/read frames. pub fn into_inner(self) -> BufRecvStream { self.stream } } impl FrameStream where S: RecvStream, { pub fn poll_next( &mut self, cx: &mut Context<'_>, ) -> Poll>, FrameStreamError>> { assert!( self.remaining_data == 0, "There is still data to read, please call poll_data() until it returns None." ); loop { let end = self.try_recv(cx)?; return match self.decoder.decode(self.stream.buf_mut())? { Some(Frame::Data(PayloadLen(len))) => { self.remaining_data = len; Poll::Ready(Ok(Some(Frame::Data(PayloadLen(len))))) } frame @ Some(Frame::WebTransportStream(_)) => { self.remaining_data = usize::MAX; Poll::Ready(Ok(frame)) } Some(frame) => Poll::Ready(Ok(Some(frame))), None => match end { // Received a chunk but frame is incomplete, poll until we get `Pending`. Poll::Ready(false) => continue, Poll::Pending => Poll::Pending, Poll::Ready(true) => { if self.stream.buf_mut().has_remaining() { // Reached the end of receive stream, but there is still some data: // The frame is incomplete. Poll::Ready(Err(FrameStreamError::UnexpectedEnd)) } else { Poll::Ready(Ok(None)) } } }, }; } } /// Retrieves the next piece of data in an incoming data packet or webtransport stream /// /// /// WebTransport bidirectional payload has no finite length and is processed until the end of the stream. 
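    ///
    /// Yields `Ok(None)` once the current DATA payload (or, for WebTransport,
    /// the whole stream) has been consumed; callers then return to `poll_next`
    /// for the next frame header.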
pub fn poll_data( &mut self, cx: &mut Context<'_>, ) -> Poll, FrameStreamError>> { if self.remaining_data == 0 { return Poll::Ready(Ok(None)); }; let end = match self.try_recv(cx) { Poll::Ready(Ok(end)) => end, Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), Poll::Pending => false, }; let data = self.stream.buf_mut().take_chunk(self.remaining_data); match (data, end) { (None, true) => Poll::Ready(Ok(None)), (None, false) => Poll::Pending, (Some(d), true) if d.remaining() < self.remaining_data && !self.stream.buf_mut().has_remaining() => { Poll::Ready(Err(FrameStreamError::UnexpectedEnd)) } (Some(d), _) => { self.remaining_data -= d.remaining(); Poll::Ready(Ok(Some(d))) } } } /// Stops the underlying stream with the provided error code pub(crate) fn stop_sending(&mut self, error_code: crate::error::Code) { self.stream.stop_sending(error_code.into()); } pub(crate) fn has_data(&self) -> bool { self.remaining_data != 0 } pub(crate) fn is_eos(&self) -> bool { self.stream.is_eos() && !self.stream.buf().has_remaining() } fn try_recv(&mut self, cx: &mut Context<'_>) -> Poll> { if self.stream.is_eos() { return Poll::Ready(Ok(true)); } match self.stream.poll_read(cx) { Poll::Ready(Err(e)) => Poll::Ready(Err(FrameStreamError::Quic(e.into()))), Poll::Pending => Poll::Pending, Poll::Ready(Ok(eos)) => Poll::Ready(Ok(eos)), } } pub fn id(&self) -> StreamId { self.stream.recv_id() } } impl SendStream for FrameStream where T: SendStream, B: Buf, { type Error = >::Error; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.stream.poll_ready(cx) } fn send_data>>(&mut self, data: D) -> Result<(), Self::Error> { self.stream.send_data(data) } fn poll_finish(&mut self, cx: &mut Context<'_>) -> Poll> { self.stream.poll_finish(cx) } fn reset(&mut self, reset_code: u64) { self.stream.reset(reset_code) } fn send_id(&self) -> StreamId { self.stream.send_id() } } impl FrameStream where S: BidiStream, B: Buf, { pub(crate) fn split(self) -> (FrameStream, FrameStream) { let (send, recv) = self.stream.split(); ( FrameStream { stream: send, decoder: FrameDecoder::default(), remaining_data: 0, }, FrameStream { stream: recv, decoder: self.decoder, remaining_data: self.remaining_data, }, ) } } #[derive(Default)] pub struct FrameDecoder { expected: Option, } impl FrameDecoder { fn decode( &mut self, src: &mut BufList, ) -> Result>, FrameStreamError> { // Decode in a loop since we ignore unknown frames, and there may be // other frames already in our BufList. 
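        // `self.expected` remembers the minimum length reported by a previous
        // `Incomplete` error, so a partial frame is not re-decoded until at
        // least that many bytes have been buffered.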
loop { if !src.has_remaining() { return Ok(None); } if let Some(min) = self.expected { if src.remaining() < min { return Ok(None); } } let (pos, decoded) = { let mut cur = src.cursor(); let decoded = Frame::decode(&mut cur); (cur.position(), decoded) }; match decoded { Err(frame::FrameError::UnknownFrame(_ty)) => { #[cfg(feature = "tracing")] trace!("ignore unknown frame type {:#x}", _ty); src.advance(pos); self.expected = None; continue; } Err(frame::FrameError::Incomplete(min)) => { self.expected = Some(min); return Ok(None); } Err(e) => return Err(e.into()), Ok(frame) => { src.advance(pos); self.expected = None; return Ok(Some(frame)); } } } } } #[derive(Debug)] pub enum FrameStreamError { Proto(frame::FrameError), Quic(TransportError), UnexpectedEnd, } impl From for FrameStreamError { fn from(err: frame::FrameError) -> Self { FrameStreamError::Proto(err) } } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use bytes::{BufMut, Bytes, BytesMut}; use futures_util::future::poll_fn; use std::{collections::VecDeque, fmt, sync::Arc}; use crate::{ proto::{coding::Encode, frame::FrameType, varint::VarInt}, quic, }; // Decoder #[test] fn one_frame() { let mut buf = BytesMut::with_capacity(16); Frame::headers(&b"salut"[..]).encode_with_payload(&mut buf); let mut buf = BufList::from(buf); let mut decoder = FrameDecoder::default(); assert_matches!(decoder.decode(&mut buf), Ok(Some(Frame::Headers(_)))); } #[test] fn incomplete_frame() { let frame = Frame::headers(&b"salut"[..]); let mut buf = BytesMut::with_capacity(16); frame.encode(&mut buf); buf.truncate(buf.len() - 1); let mut buf = BufList::from(buf); let mut decoder = FrameDecoder::default(); assert_matches!(decoder.decode(&mut buf), Ok(None)); } #[test] fn header_spread_multiple_buf() { let mut buf = BytesMut::with_capacity(16); Frame::headers(&b"salut"[..]).encode_with_payload(&mut buf); let mut buf_list = BufList::new(); // Cut buffer between type and length buf_list.push(&buf[..1]); buf_list.push(&buf[1..]); let mut decoder = FrameDecoder::default(); assert_matches!(decoder.decode(&mut buf_list), Ok(Some(Frame::Headers(_)))); } #[test] fn varint_spread_multiple_buf() { let mut buf = BytesMut::with_capacity(16); Frame::headers("salut".repeat(1024)).encode_with_payload(&mut buf); let mut buf_list = BufList::new(); // Cut buffer in the middle of length's varint buf_list.push(&buf[..2]); buf_list.push(&buf[2..]); let mut decoder = FrameDecoder::default(); assert_matches!(decoder.decode(&mut buf_list), Ok(Some(Frame::Headers(_)))); } #[test] fn two_frames_then_incomplete() { let mut buf = BytesMut::with_capacity(64); Frame::headers(&b"header"[..]).encode_with_payload(&mut buf); Frame::Data(&b"body"[..]).encode_with_payload(&mut buf); Frame::headers(&b"trailer"[..]).encode_with_payload(&mut buf); buf.truncate(buf.len() - 1); let mut buf = BufList::from(buf); let mut decoder = FrameDecoder::default(); assert_matches!(decoder.decode(&mut buf), Ok(Some(Frame::Headers(_)))); assert_matches!( decoder.decode(&mut buf), Ok(Some(Frame::Data(PayloadLen(4)))) ); assert_matches!(decoder.decode(&mut buf), Ok(None)); } // FrameStream macro_rules! 
assert_poll_matches { ($poll_fn:expr, $match:pat) => { assert_matches!( poll_fn($poll_fn).await, $match ); }; ($poll_fn:expr, $match:pat if $cond:expr ) => { assert_matches!( poll_fn($poll_fn).await, $match if $cond ); } } #[tokio::test] async fn poll_full_request() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); Frame::headers(&b"header"[..]).encode_with_payload(&mut buf); Frame::Data(&b"body"[..]).encode_with_payload(&mut buf); Frame::headers(&b"trailer"[..]).encode_with_payload(&mut buf); recv.chunk(buf.freeze()); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!(|cx| stream.poll_next(cx), Ok(Some(Frame::Headers(_)))); assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if b.remaining() == 4 ); assert_poll_matches!(|cx| stream.poll_next(cx), Ok(Some(Frame::Headers(_)))); } #[tokio::test] async fn poll_next_incomplete_frame() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); Frame::headers(&b"header"[..]).encode_with_payload(&mut buf); let mut buf = buf.freeze(); recv.chunk(buf.split_to(buf.len() - 1)); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!( |cx| stream.poll_next(cx), Err(FrameStreamError::UnexpectedEnd) ); } #[tokio::test] #[should_panic( expected = "There is still data to read, please call poll_data() until it returns None" )] async fn poll_next_reamining_data() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); FrameType::DATA.encode(&mut buf); VarInt::from(4u32).encode(&mut buf); recv.chunk(buf.freeze()); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); // There is still data to consume, poll_next should panic let _ = poll_fn(|cx| stream.poll_next(cx)).await; } #[tokio::test] async fn poll_data_split() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); // Body is split into two bufs Frame::Data(Bytes::from("body")).encode_with_payload(&mut buf); let mut buf = buf.freeze(); recv.chunk(buf.split_to(buf.len() - 2)); recv.chunk(buf); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); // We get the total size of data about to be received assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); // Then we get parts of body, chunked as they arrived assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if b.remaining() == 2 ); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if b.remaining() == 2 ); } #[tokio::test] async fn poll_data_unexpected_end() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); // Truncated body FrameType::DATA.encode(&mut buf); VarInt::from(4u32).encode(&mut buf); buf.put_slice(&b"b"[..]); recv.chunk(buf.freeze()); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Err(FrameStreamError::UnexpectedEnd) ); } #[tokio::test] async fn poll_data_ignores_unknown_frames() { use crate::proto::varint::BufMutExt as _; let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); // grease a lil 
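        // Two reserved ("grease") frames are written ahead of the DATA frame;
        // the decoder is expected to skip both and still surface the 4-byte body.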
crate::proto::frame::FrameType::grease().encode(&mut buf); buf.write_var(0); // grease with some data crate::proto::frame::FrameType::grease().encode(&mut buf); buf.write_var(6); buf.put_slice(b"grease"); // Body Frame::Data(Bytes::from("body")).encode_with_payload(&mut buf); recv.chunk(buf.freeze()); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if &*b == b"body" ); } #[tokio::test] async fn poll_data_eos_but_buffered_data() { let mut recv = FakeRecv::default(); let mut buf = BytesMut::with_capacity(64); FrameType::DATA.encode(&mut buf); VarInt::from(4u32).encode(&mut buf); buf.put_slice(&b"bo"[..]); recv.chunk(buf.clone().freeze()); let mut stream: FrameStream<_, ()> = FrameStream::new(BufRecvStream::new(recv)); assert_poll_matches!( |cx| stream.poll_next(cx), Ok(Some(Frame::Data(PayloadLen(4)))) ); buf.truncate(0); buf.put_slice(&b"dy"[..]); stream.stream.buf_mut().push_bytes(&mut buf.freeze()); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if &*b == b"bo" ); assert_poll_matches!( |cx| to_bytes(stream.poll_data(cx)), Ok(Some(b)) if &*b == b"dy" ); } // Helpers #[derive(Default)] struct FakeRecv { chunks: VecDeque, } impl FakeRecv { fn chunk(&mut self, buf: Bytes) -> &mut Self { self.chunks.push_back(buf); self } } impl RecvStream for FakeRecv { type Buf = Bytes; type Error = FakeError; fn poll_data( &mut self, _: &mut Context<'_>, ) -> Poll, Self::Error>> { Poll::Ready(Ok(self.chunks.pop_front())) } fn stop_sending(&mut self, _: u64) { unimplemented!() } fn recv_id(&self) -> StreamId { unimplemented!() } } #[derive(Debug)] struct FakeError; impl quic::Error for FakeError { fn is_timeout(&self) -> bool { unimplemented!() } fn err_code(&self) -> Option { unimplemented!() } } impl std::error::Error for FakeError {} impl fmt::Display for FakeError { fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { unimplemented!() } } impl From for Arc { fn from(_: FakeError) -> Self { unimplemented!() } } fn to_bytes( x: Poll, FrameStreamError>>, ) -> Poll, FrameStreamError>> { x.map(|b| b.map(|b| b.map(|mut b| b.copy_to_bytes(b.remaining())))) } } h3-0.0.6/src/lib.rs000064400000000000000000000027171046102023000120530ustar 00000000000000//! 
HTTP/3 client and server #![deny(missing_docs, clippy::self_named_module_files)] #![allow(clippy::derive_partial_eq_without_eq)] pub mod client; mod config; pub mod error; pub mod ext; pub mod quic; pub mod server; pub use error::Error; mod buf; #[cfg(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes")] #[allow(missing_docs)] pub mod connection; #[cfg(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes")] #[allow(missing_docs)] pub mod frame; #[cfg(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes")] #[allow(missing_docs)] pub mod proto; #[cfg(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes")] #[allow(missing_docs)] pub mod stream; #[cfg(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes")] #[allow(missing_docs)] pub mod webtransport; #[cfg(not(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes"))] mod connection; #[cfg(not(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes"))] mod frame; #[cfg(not(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes"))] mod proto; #[cfg(not(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes"))] mod stream; #[cfg(not(feature = "i-implement-a-third-party-backend-and-opt-into-breaking-changes"))] mod webtransport; #[allow(dead_code)] mod qpack; #[cfg(test)] mod tests; #[cfg(test)] extern crate self as h3; h3-0.0.6/src/proto/coding.rs000064400000000000000000000027401046102023000137070ustar 00000000000000use bytes::{Buf, BufMut}; use super::varint::VarInt; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct UnexpectedEnd(pub usize); pub type Result = ::std::result::Result; // Trait for encoding / decoding helpers on basic types, such as `u16`, for // example: `buf.decode::()?`. // This enables to return `UnexpectedEnd` instead of panicking as the `Buf` // impls do when there is not enough bytes. 
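//
// Rough usage sketch (hypothetical buffer, error handling elided):
//
//     let mut buf: &[u8] = &[0x05];
//     let byte: u8 = buf.get()?;  // BufExt::get, backed by Decode::decode
//     // BufExt::get_var reads a QUIC variable-length integer the same way.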
pub trait Encode { fn encode(&self, buf: &mut B); } pub trait Decode: Sized { fn decode(buf: &mut B) -> Result; } impl Encode for u8 { fn encode(&self, buf: &mut B) { buf.put_u8(*self); } } impl Decode for u8 { fn decode(buf: &mut B) -> Result { if buf.remaining() < 1 { return Err(UnexpectedEnd(1)); } Ok(buf.get_u8()) } } pub trait BufExt { fn get(&mut self) -> Result; fn get_var(&mut self) -> Result; } impl BufExt for T { fn get(&mut self) -> Result { U::decode(self) } fn get_var(&mut self) -> Result { Ok(VarInt::decode(self)?.into_inner()) } } pub trait BufMutExt { fn write(&mut self, x: T); fn write_var(&mut self, x: u64); } impl BufMutExt for T { fn write(&mut self, x: U) { x.encode(self); } fn write_var(&mut self, x: u64) { VarInt::from_u64(x).unwrap().encode(self); } } h3-0.0.6/src/proto/frame.rs000064400000000000000000000575761046102023000135570ustar 00000000000000use bytes::{Buf, BufMut, Bytes}; use std::{ convert::TryInto, fmt::{self, Debug}, }; #[cfg(feature = "tracing")] use tracing::trace; use crate::webtransport::SessionId; use super::{ coding::{Decode, Encode}, push::{InvalidPushId, PushId}, stream::InvalidStreamId, varint::{BufExt, BufMutExt, UnexpectedEnd, VarInt}, }; #[derive(Debug, PartialEq)] pub enum FrameError { Malformed, UnsupportedFrame(u64), // Known frames that should generate an error UnknownFrame(u64), // Unknown frames that should be ignored InvalidFrameValue, Incomplete(usize), Settings(SettingsError), InvalidStreamId(InvalidStreamId), InvalidPushId(InvalidPushId), } impl std::error::Error for FrameError {} impl fmt::Display for FrameError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { FrameError::Malformed => write!(f, "frame is malformed"), FrameError::UnsupportedFrame(c) => write!(f, "frame 0x{:x} is not allowed h3", c), FrameError::UnknownFrame(c) => write!(f, "frame 0x{:x} ignored", c), FrameError::InvalidFrameValue => write!(f, "frame value is invalid"), FrameError::Incomplete(x) => write!(f, "internal error: frame incomplete {}", x), FrameError::Settings(x) => write!(f, "invalid settings: {}", x), FrameError::InvalidStreamId(x) => write!(f, "{}", x), FrameError::InvalidPushId(x) => write!(f, "{}", x), } } } pub enum Frame { Data(B), Headers(Bytes), CancelPush(PushId), Settings(Settings), PushPromise(PushPromise), Goaway(VarInt), MaxPushId(PushId), /// Describes the header for a webtransport stream. /// /// The payload is sent streaming until the stream is closed /// /// Unwrap the framed streamer and read the inner stream until the end. /// /// Conversely, when sending, send this frame and unwrap the stream WebTransportStream(SessionId), Grease, } /// Represents the available data len for a `Data` frame on a RecvStream /// /// Decoding received frames does not handle `Data` frames payload. Instead, receiving it /// and passing it to the user is left under the responsibility of `RequestStream`s. pub struct PayloadLen(pub usize); impl From for PayloadLen { fn from(len: usize) -> Self { PayloadLen(len) } } impl Frame { pub const MAX_ENCODED_SIZE: usize = VarInt::MAX_SIZE * 7; /// Decodes a Frame from the stream according to pub fn decode(buf: &mut T) -> Result { let remaining = buf.remaining(); let ty = FrameType::decode(buf).map_err(|_| FrameError::Incomplete(remaining + 1))?; // Webtransport streams need special handling as they have no length. 
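        // Their header carries only a session id; everything after it is raw
        // stream payload, so decoding returns immediately without reading a
        // length field.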
// // See: https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-4.2 if ty == FrameType::WEBTRANSPORT_BI_STREAM { #[cfg(feature = "tracing")] tracing::trace!("webtransport frame"); return Ok(Frame::WebTransportStream(SessionId::decode(buf)?)); } let len = buf .get_var() .map_err(|_| FrameError::Incomplete(remaining + 1))?; if ty == FrameType::DATA { return Ok(Frame::Data((len as usize).into())); } if buf.remaining() < len as usize { return Err(FrameError::Incomplete(2 + len as usize)); } let mut payload = buf.take(len as usize); #[cfg(feature = "tracing")] trace!("frame ty: {:?}", ty); let frame = match ty { FrameType::HEADERS => Ok(Frame::Headers(payload.copy_to_bytes(len as usize))), FrameType::SETTINGS => Ok(Frame::Settings(Settings::decode(&mut payload)?)), FrameType::CANCEL_PUSH => Ok(Frame::CancelPush(payload.get_var()?.try_into()?)), FrameType::PUSH_PROMISE => Ok(Frame::PushPromise(PushPromise::decode(&mut payload)?)), FrameType::GOAWAY => Ok(Frame::Goaway(VarInt::decode(&mut payload)?)), FrameType::MAX_PUSH_ID => Ok(Frame::MaxPushId(payload.get_var()?.try_into()?)), FrameType::H2_PRIORITY | FrameType::H2_PING | FrameType::H2_WINDOW_UPDATE | FrameType::H2_CONTINUATION => Err(FrameError::UnsupportedFrame(ty.0)), FrameType::WEBTRANSPORT_BI_STREAM | FrameType::DATA => unreachable!(), _ => { buf.advance(len as usize); Err(FrameError::UnknownFrame(ty.0)) } }; if let Ok(_frame) = &frame { #[cfg(feature = "tracing")] trace!( "got frame {:?}, len: {}, remaining: {}", _frame, len, buf.remaining() ); } frame } } impl Encode for Frame where B: Buf, { fn encode(&self, buf: &mut T) { match self { Frame::Data(b) => { FrameType::DATA.encode(buf); buf.write_var(b.remaining() as u64); } Frame::Headers(f) => { FrameType::HEADERS.encode(buf); buf.write_var(f.len() as u64); } Frame::Settings(f) => f.encode(buf), Frame::PushPromise(f) => f.encode(buf), Frame::CancelPush(id) => simple_frame_encode(FrameType::CANCEL_PUSH, (*id).into(), buf), Frame::Goaway(id) => simple_frame_encode(FrameType::GOAWAY, *id, buf), Frame::MaxPushId(id) => simple_frame_encode(FrameType::MAX_PUSH_ID, (*id).into(), buf), Frame::Grease => { FrameType::grease().encode(buf); buf.write_var(6); buf.put_slice(b"grease"); } Frame::WebTransportStream(id) => { FrameType::WEBTRANSPORT_BI_STREAM.encode(buf); id.encode(buf); // rest of the data is sent streaming } } } } impl Frame where B: Buf, { pub fn payload(&self) -> Option<&dyn Buf> { match self { Frame::Data(f) => Some(f), Frame::Headers(f) => Some(f), Frame::PushPromise(f) => Some(&f.encoded), _ => None, } } pub fn payload_mut(&mut self) -> Option<&mut dyn Buf> { match self { Frame::Data(f) => Some(f), Frame::Headers(f) => Some(f), Frame::PushPromise(f) => Some(&mut f.encoded), _ => None, } } #[cfg(test)] pub fn encode_with_payload(&mut self, buf: &mut T) { self.encode(buf); match self { Frame::Data(b) => { while b.has_remaining() { let pos = { let chunk = b.chunk(); buf.put_slice(chunk); chunk.len() }; b.advance(pos) } } Frame::Headers(b) => buf.put_slice(b), _ => (), } } } impl fmt::Debug for Frame { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Frame::Data(len) => write!(f, "Data: {} bytes", len.0), Frame::Headers(frame) => write!(f, "Headers({} entries)", frame.len()), Frame::Settings(_) => write!(f, "Settings"), Frame::CancelPush(id) => write!(f, "CancelPush({})", id), Frame::PushPromise(frame) => write!(f, "PushPromise({})", frame.id), Frame::Goaway(id) => write!(f, "GoAway({})", id), Frame::MaxPushId(id) => write!(f, 
"MaxPushId({})", id), Frame::Grease => write!(f, "Grease()"), Frame::WebTransportStream(session) => write!(f, "WebTransportStream({:?})", session), } } } impl fmt::Debug for Frame where B: Buf, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Frame::Data(data) => write!(f, "Data: {} bytes", data.remaining()), Frame::Headers(frame) => write!(f, "Headers({} entries)", frame.len()), Frame::Settings(_) => write!(f, "Settings"), Frame::CancelPush(id) => write!(f, "CancelPush({})", id), Frame::PushPromise(frame) => write!(f, "PushPromise({})", frame.id), Frame::Goaway(id) => write!(f, "GoAway({})", id), Frame::MaxPushId(id) => write!(f, "MaxPushId({})", id), Frame::Grease => write!(f, "Grease()"), Frame::WebTransportStream(_) => write!(f, "WebTransportStream()"), } } } /// Compare two frames ignoring data /// /// Only useful for `encode() -> Frame` then `decode() -> Frame` unit tests. #[cfg(test)] impl PartialEq> for Frame { fn eq(&self, other: &Frame) -> bool { match self { Frame::Data(_) => matches!(other, Frame::Data(_)), Frame::Settings(x) => matches!(other, Frame::Settings(y) if x == y), Frame::Headers(x) => matches!(other, Frame::Headers(y) if x == y), Frame::CancelPush(x) => matches!(other, Frame::CancelPush(y) if x == y), Frame::PushPromise(x) => matches!(other, Frame::PushPromise(y) if x == y), Frame::Goaway(x) => matches!(other, Frame::Goaway(y) if x == y), Frame::MaxPushId(x) => matches!(other, Frame::MaxPushId(y) if x == y), Frame::Grease => matches!(other, Frame::Grease), Frame::WebTransportStream(x) => { matches!(other, Frame::WebTransportStream(y) if x == y) } } } } #[cfg(test)] impl Frame { pub fn headers>(block: T) -> Self { Frame::Headers(block.into()) } } macro_rules! frame_types { {$($name:ident = $val:expr,)*} => { impl FrameType { $(pub const $name: FrameType = FrameType($val);)* } } } frame_types! 
{ DATA = 0x0, HEADERS = 0x1, H2_PRIORITY = 0x2, CANCEL_PUSH = 0x3, SETTINGS = 0x4, PUSH_PROMISE = 0x5, H2_PING = 0x6, GOAWAY = 0x7, H2_WINDOW_UPDATE = 0x8, H2_CONTINUATION = 0x9, MAX_PUSH_ID = 0xD, // Reserved frame types WEBTRANSPORT_BI_STREAM = 0x41, } impl FrameType { /// returns a FrameType type with random number of the 0x1f * N + 0x21 /// format within the range of the Varint implementation pub fn grease() -> Self { FrameType(fastrand::u64(0..0x210842108421083) * 0x1f + 0x21) } } #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct FrameType(u64); impl FrameType { fn decode(buf: &mut B) -> Result { Ok(FrameType(buf.get_var()?)) } pub fn encode(&self, buf: &mut B) { buf.write_var(self.0); } #[cfg(test)] pub(crate) const RESERVED: FrameType = FrameType(0x1f * 1337 + 0x21); } pub(crate) trait FrameHeader { fn len(&self) -> usize; const TYPE: FrameType; fn encode_header(&self, buf: &mut T) { Self::TYPE.encode(buf); buf.write_var(self.len() as u64); } } #[derive(Debug, PartialEq)] pub struct PushPromise { id: u64, encoded: Bytes, } impl FrameHeader for PushPromise { const TYPE: FrameType = FrameType::PUSH_PROMISE; fn encode_header(&self, buf: &mut T) { Self::TYPE.encode(buf); buf.write_var(self.len() as u64); buf.write_var(self.id); } fn len(&self) -> usize { VarInt::from_u64(self.id) .expect("PushPromise id varint overflow") .size() + self.encoded.as_ref().len() } } impl PushPromise { fn decode(buf: &mut B) -> Result { Ok(PushPromise { id: buf.get_var()?, encoded: buf.copy_to_bytes(buf.remaining()), }) } fn encode(&self, buf: &mut B) { self.encode_header(buf); buf.put(self.encoded.clone()); } } fn simple_frame_encode(ty: FrameType, id: VarInt, buf: &mut B) { ty.encode(buf); buf.write_var(id.size() as u64); id.encode(buf); } #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub struct SettingId(pub u64); impl SettingId { const NONE: SettingId = SettingId(0); /// returns a SettingId type with random number of the 0x1f * N + 0x21 /// format within the range of the Varint implementation pub fn grease() -> Self { SettingId(fastrand::u64(0..0x210842108421083) * 0x1f + 0x21) } fn is_supported(self) -> bool { matches!( self, SettingId::MAX_HEADER_LIST_SIZE | SettingId::QPACK_MAX_TABLE_CAPACITY | SettingId::QPACK_MAX_BLOCKED_STREAMS | SettingId::ENABLE_CONNECT_PROTOCOL | SettingId::ENABLE_WEBTRANSPORT | SettingId::WEBTRANSPORT_MAX_SESSIONS | SettingId::H3_DATAGRAM, ) } /// Returns if a Settings Identifier is forbidden fn is_forbidden(&self) -> bool { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //# Setting identifiers that were defined in [HTTP/2] where there is no //# corresponding HTTP/3 setting have also been reserved //# (Section 11.2.2). These reserved settings MUST NOT be sent, and //# their receipt MUST be treated as a connection error of type //# H3_SETTINGS_ERROR. matches!( self, SettingId(0x00) | SettingId(0x02) | SettingId(0x03) | SettingId(0x04) | SettingId(0x05) ) } fn decode(buf: &mut B) -> Result { Ok(SettingId(buf.get_var()?)) } fn encode(&self, buf: &mut B) { buf.write_var(self.0); } } macro_rules! setting_identifiers { {$($name:ident = $val:expr,)*} => { impl SettingId { $(pub const $name: SettingId = SettingId($val);)* } } } setting_identifiers! 
{ QPACK_MAX_TABLE_CAPACITY = 0x1, QPACK_MAX_BLOCKED_STREAMS = 0x7, MAX_HEADER_LIST_SIZE = 0x6, // https://datatracker.ietf.org/doc/html/rfc9220#section-5 ENABLE_CONNECT_PROTOCOL = 0x8, // https://datatracker.ietf.org/doc/html/rfc9297#name-http-3-setting H3_DATAGRAM = 0x33, // https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-8.2 ENABLE_WEBTRANSPORT = 0x2B603742, // https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-8.2 H3_SETTING_ENABLE_DATAGRAM_CHROME_SPECIFIC= 0xFFD277, WEBTRANSPORT_MAX_SESSIONS = 0x2b603743, } const SETTINGS_LEN: usize = 8; #[derive(Debug, PartialEq)] pub struct Settings { entries: [(SettingId, u64); SETTINGS_LEN], len: usize, } impl Default for Settings { fn default() -> Self { Self { entries: [(SettingId::NONE, 0); SETTINGS_LEN], len: 0, } } } impl FrameHeader for Settings { const TYPE: FrameType = FrameType::SETTINGS; fn len(&self) -> usize { self.entries[..self.len].iter().fold(0, |len, (id, val)| { len + VarInt::from_u64(id.0).unwrap().size() + VarInt::from_u64(*val).unwrap().size() }) } } impl Settings { pub const MAX_ENCODED_SIZE: usize = SETTINGS_LEN * 2 * VarInt::MAX_SIZE; pub fn insert(&mut self, id: SettingId, value: u64) -> Result<(), SettingsError> { if self.len >= self.entries.len() { return Err(SettingsError::Exceeded); } //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4 //# The same setting identifier MUST NOT occur more than once in the //# SETTINGS frame. if self.entries[..self.len].iter().any(|(i, _)| *i == id) { return Err(SettingsError::Repeated(id)); } self.entries[self.len] = (id, value); self.len += 1; Ok(()) } pub fn get(&self, id: SettingId) -> Option { for (entry_id, value) in self.entries.iter() { if id == *entry_id { return Some(*value); } } None } pub(crate) fn encode(&self, buf: &mut T) { self.encode_header(buf); for (id, val) in self.entries[..self.len].iter() { id.encode(buf); buf.write_var(*val); } } pub(super) fn decode(buf: &mut T) -> Result { let mut settings = Settings::default(); while buf.has_remaining() { if buf.remaining() < 2 { // remains less than 2 * minimum-size varint return Err(SettingsError::Malformed); } let identifier = SettingId::decode(buf).map_err(|_| SettingsError::Malformed)?; let value = buf.get_var().map_err(|_| SettingsError::Malformed)?; if identifier.is_forbidden() { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //# Setting identifiers that were defined in [HTTP/2] where there is no //# corresponding HTTP/3 setting have also been reserved //# (Section 11.2.2). These reserved settings MUST NOT be sent, and //# their receipt MUST be treated as a connection error of type //# H3_SETTINGS_ERROR. return Err(SettingsError::InvalidSettingId(identifier.0)); } if identifier.is_supported() { //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4.1 //# Setting identifiers that were defined in [HTTP/2] where there is no //# corresponding HTTP/3 setting have also been reserved //# (Section 11.2.2). These reserved settings MUST NOT be sent, and //# their receipt MUST be treated as a connection error of type //# H3_SETTINGS_ERROR. 
settings.insert(identifier, value)?; } else { #[cfg(feature = "tracing")] tracing::debug!("Unsupported setting: {:#x?}", identifier); } } Ok(settings) } } #[derive(Debug, PartialEq)] pub enum SettingsError { Exceeded, Malformed, Repeated(SettingId), InvalidSettingId(u64), InvalidSettingValue(SettingId, u64), } impl std::error::Error for SettingsError {} impl fmt::Display for SettingsError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SettingsError::Exceeded => write!( f, "max settings number exceeded, check for duplicate entries" ), SettingsError::Malformed => write!(f, "malformed settings frame"), SettingsError::Repeated(id) => write!(f, "got setting 0x{:x} twice", id.0), SettingsError::InvalidSettingId(id) => write!(f, "setting id 0x{:x} is invalid", id), SettingsError::InvalidSettingValue(id, val) => { write!(f, "setting 0x{:x} has invalid value {}", id.0, val) } } } } impl From for FrameError { fn from(e: SettingsError) -> Self { Self::Settings(e) } } impl From for FrameError { fn from(e: UnexpectedEnd) -> Self { FrameError::Incomplete(e.0) } } impl From for FrameError { fn from(e: InvalidStreamId) -> Self { FrameError::InvalidStreamId(e) } } impl From for FrameError { fn from(e: InvalidPushId) -> Self { FrameError::InvalidPushId(e) } } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use std::io::Cursor; #[test] fn unknown_frame_type() { let mut buf = Cursor::new(&[22, 4, 0, 255, 128, 0, 3, 1, 2]); assert_matches!(Frame::decode(&mut buf), Err(FrameError::UnknownFrame(22))); assert_matches!(Frame::decode(&mut buf), Ok(Frame::CancelPush(PushId(2)))); } #[test] fn len_unexpected_end() { let mut buf = Cursor::new(&[0, 255]); let decoded = Frame::decode(&mut buf); assert_matches!(decoded, Err(FrameError::Incomplete(3))); } #[test] fn type_unexpected_end() { let mut buf = Cursor::new(&[255]); let decoded = Frame::decode(&mut buf); assert_matches!(decoded, Err(FrameError::Incomplete(2))); } #[test] fn buffer_too_short() { let mut buf = Cursor::new(&[4, 4, 0, 255, 128]); let decoded = Frame::decode(&mut buf); assert_matches!(decoded, Err(FrameError::Incomplete(6))); } fn codec_frame_check(mut frame: Frame, wire: &[u8], check_frame: Frame) { let mut buf = Vec::new(); frame.encode_with_payload(&mut buf); assert_eq!(&buf, &wire); let mut read = Cursor::new(&buf); let decoded = Frame::decode(&mut read).unwrap(); assert_eq!(check_frame, decoded); } #[test] fn settings_frame() { codec_frame_check( Frame::Settings(Settings { entries: [ (SettingId::MAX_HEADER_LIST_SIZE, 0xfad1), (SettingId::QPACK_MAX_TABLE_CAPACITY, 0xfad2), (SettingId::QPACK_MAX_BLOCKED_STREAMS, 0xfad3), (SettingId(95), 0), (SettingId::NONE, 0), (SettingId::NONE, 0), (SettingId::NONE, 0), (SettingId::NONE, 0), ], len: 4, }), &[ 4, 18, 6, 128, 0, 250, 209, 1, 128, 0, 250, 210, 7, 128, 0, 250, 211, 64, 95, 0, ], Frame::Settings(Settings { entries: [ (SettingId::MAX_HEADER_LIST_SIZE, 0xfad1), (SettingId::QPACK_MAX_TABLE_CAPACITY, 0xfad2), (SettingId::QPACK_MAX_BLOCKED_STREAMS, 0xfad3), // check without the Grease setting because this is ignored (SettingId(0), 0), (SettingId::NONE, 0), (SettingId::NONE, 0), (SettingId::NONE, 0), (SettingId::NONE, 0), ], len: 3, }), ); } #[test] fn settings_frame_emtpy() { codec_frame_check( Frame::Settings(Settings::default()), &[4, 0], Frame::Settings(Settings::default()), ); } #[test] fn data_frame() { codec_frame_check( Frame::Data(Bytes::from("1234567")), &[0, 7, 49, 50, 51, 52, 53, 54, 55], Frame::Data(Bytes::from("1234567")), ); } #[test] fn 
simple_frames() { codec_frame_check( Frame::CancelPush(PushId(2)), &[3, 1, 2], Frame::CancelPush(PushId(2)), ); codec_frame_check( Frame::Goaway(VarInt(2)), &[7, 1, 2], Frame::Goaway(VarInt(2)), ); codec_frame_check( Frame::MaxPushId(PushId(2)), &[13, 1, 2], Frame::MaxPushId(PushId(2)), ); } #[test] fn headers_frames() { codec_frame_check( Frame::headers("TODO QPACK"), &[1, 10, 84, 79, 68, 79, 32, 81, 80, 65, 67, 75], Frame::headers("TODO QPACK"), ); codec_frame_check( Frame::PushPromise(PushPromise { id: 134, encoded: Bytes::from("TODO QPACK"), }), &[5, 12, 64, 134, 84, 79, 68, 79, 32, 81, 80, 65, 67, 75], Frame::PushPromise(PushPromise { id: 134, encoded: Bytes::from("TODO QPACK"), }), ); } #[test] fn reserved_frame() { let mut raw = vec![]; VarInt::from_u32(0x21 + 2 * 0x1f).encode(&mut raw); raw.extend(&[6, 0, 255, 128, 0, 250, 218]); let mut buf = Cursor::new(&raw); let decoded = Frame::decode(&mut buf); assert_matches!(decoded, Err(FrameError::UnknownFrame(95))); } } h3-0.0.6/src/proto/headers.rs000064400000000000000000000517021046102023000140610ustar 00000000000000use std::{ convert::TryFrom, fmt, iter::{IntoIterator, Iterator}, str::FromStr, }; use http::{ header::{self, HeaderName, HeaderValue}, uri::{self, Authority, Parts, PathAndQuery, Scheme, Uri}, Extensions, HeaderMap, Method, StatusCode, }; use crate::{ext::Protocol, qpack::HeaderField}; #[derive(Debug)] #[cfg_attr(test, derive(PartialEq, Clone))] pub struct Header { pseudo: Pseudo, fields: HeaderMap, } #[allow(clippy::len_without_is_empty)] impl Header { /// Creates a new `Header` frame data suitable for sending a request pub fn request( method: Method, uri: Uri, fields: HeaderMap, ext: Extensions, ) -> Result { match (uri.authority(), fields.get("host")) { (None, None) => Err(HeaderError::MissingAuthority), (Some(a), Some(h)) if a.as_str() != h => Err(HeaderError::ContradictedAuthority), _ => Ok(Self { pseudo: Pseudo::request(method, uri, ext), fields, }), } } pub fn response(status: StatusCode, fields: HeaderMap) -> Self { Self { pseudo: Pseudo::response(status), fields, } } pub fn trailer(fields: HeaderMap) -> Self { Self { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3 //# Pseudo-header fields MUST NOT appear in trailer //# sections. pseudo: Pseudo::default(), fields, } } pub fn into_request_parts( self, ) -> Result<(Method, Uri, Option, HeaderMap), HeaderError> { let mut uri = Uri::builder(); if let Some(path) = self.pseudo.path { uri = uri.path_and_query(path.as_str().as_bytes()); } if let Some(scheme) = self.pseudo.scheme { uri = uri.scheme(scheme.as_str().as_bytes()); } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //# If the :scheme pseudo-header field identifies a scheme that has a //# mandatory authority component (including "http" and "https"), the //# request MUST contain either an :authority pseudo-header field or a //# Host header field. //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=TODO //# If the scheme does not have a mandatory authority component and none //# is provided in the request target, the request MUST NOT contain the //# :authority pseudo-header or Host header fields. match (self.pseudo.authority, self.fields.get("host")) { (None, None) => return Err(HeaderError::MissingAuthority), (Some(a), None) => uri = uri.authority(a.as_str().as_bytes()), (None, Some(h)) => uri = uri.authority(h.as_bytes()), //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //# If both fields are present, they MUST contain the same value. 
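// The match arms below implement that requirement when rebuilding the request
// URI: a lone `:authority`, a lone `Host`, or an agreeing pair all supply the
// URI's authority, while a disagreeing pair is rejected. Roughly, with
// hypothetical values:
//
//     :authority = example.com, host = example.com  -> authority "example.com"
//     :authority = example.com, host = other.com    -> Err(ContradictedAuthority)
//     neither field present                         -> Err(MissingAuthority)
//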
(Some(a), Some(h)) if a.as_str() != h => { return Err(HeaderError::ContradictedAuthority) } (Some(_), Some(h)) => uri = uri.authority(h.as_bytes()), } Ok(( self.pseudo.method.ok_or(HeaderError::MissingMethod)?, // When empty host field is built into an uri it fails //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //# If these fields are present, they MUST NOT be //# empty. uri.build().map_err(HeaderError::InvalidRequest)?, self.pseudo.protocol, self.fields, )) } pub fn into_response_parts(self) -> Result<(StatusCode, HeaderMap), HeaderError> { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.2 //= type=implication //# For responses, a single ":status" pseudo-header field is defined that //# carries the HTTP status code; see Section 15 of [HTTP]. This pseudo- //# header field MUST be included in all responses; otherwise, the //# response is malformed (see Section 4.1.2). Ok(( self.pseudo.status.ok_or(HeaderError::MissingStatus)?, self.fields, )) } pub fn into_fields(self) -> HeaderMap { self.fields } pub fn len(&self) -> usize { self.pseudo.len() + self.fields.len() } pub fn size(&self) -> usize { self.pseudo.len() + self.fields.len() } #[cfg(test)] pub(crate) fn authory_mut(&mut self) -> &mut Option { &mut self.pseudo.authority } } impl IntoIterator for Header { type Item = HeaderField; type IntoIter = HeaderIter; fn into_iter(self) -> Self::IntoIter { HeaderIter { pseudo: Some(self.pseudo), last_header_name: None, fields: self.fields.into_iter(), } } } pub struct HeaderIter { pseudo: Option, last_header_name: Option, fields: header::IntoIter, } impl Iterator for HeaderIter { type Item = HeaderField; fn next(&mut self) -> Option { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3 //# All pseudo-header fields MUST appear in the header section before //# regular header fields. if let Some(ref mut pseudo) = self.pseudo { if let Some(method) = pseudo.method.take() { return Some((":method", method.as_str()).into()); } if let Some(scheme) = pseudo.scheme.take() { return Some((":scheme", scheme.as_str().as_bytes()).into()); } if let Some(authority) = pseudo.authority.take() { return Some((":authority", authority.as_str().as_bytes()).into()); } if let Some(path) = pseudo.path.take() { return Some((":path", path.as_str().as_bytes()).into()); } if let Some(status) = pseudo.status.take() { return Some((":status", status.as_str()).into()); } if let Some(protocol) = pseudo.protocol.take() { return Some((":protocol", protocol.as_str().as_bytes()).into()); } } self.pseudo = None; for (new_header_name, header_value) in self.fields.by_ref() { if let Some(new) = new_header_name { self.last_header_name = Some(new); } if let (Some(ref n), v) = (&self.last_header_name, header_value) { return Some((n.as_str(), v.as_bytes()).into()); } } None } } impl TryFrom> for Header { type Error = HeaderError; fn try_from(headers: Vec) -> Result { let mut fields = HeaderMap::with_capacity(headers.len()); let mut pseudo = Pseudo::default(); for field in headers.into_iter() { let (name, value) = field.into_inner(); match Field::parse(name, value)? 
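// Each decoded QPACK field is classified by the arms that follow:
// pseudo-header fields fill the `Pseudo` struct (and bump its length counter),
// while everything else is appended to the regular `HeaderMap`, preserving
// duplicates. Sketch of the split for a hypothetical header list:
//
//     [":method" = "GET", ":path" = "/", "accept" = "*/*"]
//         -> pseudo { method: GET, path: "/", len: 2 }
//         -> fields { "accept": "*/*" }
//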
{ Field::Method(m) => { pseudo.method = Some(m); pseudo.len += 1; } Field::Scheme(s) => { pseudo.scheme = Some(s); pseudo.len += 1; } Field::Authority(a) => { pseudo.authority = Some(a); pseudo.len += 1; } Field::Path(p) => { pseudo.path = Some(p); pseudo.len += 1; } Field::Status(s) => { pseudo.status = Some(s); pseudo.len += 1; } Field::Header((n, v)) => { fields.append(n, v); } Field::Protocol(p) => { pseudo.protocol = Some(p); pseudo.len += 1; } } } Ok(Header { pseudo, fields }) } } enum Field { Method(Method), Scheme(Scheme), Authority(Authority), Path(PathAndQuery), Status(StatusCode), Protocol(Protocol), Header((HeaderName, HeaderValue)), } impl Field { fn parse(name: N, value: V) -> Result where N: AsRef<[u8]>, V: AsRef<[u8]>, { let name = name.as_ref(); if name.is_empty() { return Err(HeaderError::InvalidHeaderName("name is empty".into())); } //= https://www.rfc-editor.org/rfc/rfc9114#section-10.3 //# Requests or responses containing invalid field names MUST be treated //# as malformed. //= https://www.rfc-editor.org/rfc/rfc9114#section-10.3 //# Any request or response that contains a //# character not permitted in a field value MUST be treated as //# malformed. //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2 //= type=implication //# A request or //# response containing uppercase characters in field names MUST be //# treated as malformed. if name[0] != b':' { return Ok(Field::Header(( HeaderName::from_lowercase(name).map_err(|_| HeaderError::invalid_name(name))?, HeaderValue::from_bytes(value.as_ref()) .map_err(|_| HeaderError::invalid_value(name, value))?, ))); } Ok(match name { b":scheme" => Field::Scheme(try_value(name, value)?), //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //# If these fields are present, they MUST NOT be //# empty. b":authority" => Field::Authority(try_value(name, value)?), b":path" => Field::Path(try_value(name, value)?), b":method" => Field::Method( Method::from_bytes(value.as_ref()) .map_err(|_| HeaderError::invalid_value(name, value))?, ), b":status" => Field::Status( StatusCode::from_bytes(value.as_ref()) .map_err(|_| HeaderError::invalid_value(name, value))?, ), b":protocol" => Field::Protocol(try_value(name, value)?), _ => return Err(HeaderError::invalid_name(name)), }) } } fn try_value(name: N, value: V) -> Result where N: AsRef<[u8]>, V: AsRef<[u8]>, R: FromStr, { let (name, value) = (name.as_ref(), value.as_ref()); let s = std::str::from_utf8(value).map_err(|_| HeaderError::invalid_value(name, value))?; R::from_str(s).map_err(|_| HeaderError::invalid_value(name, value)) } /// Pseudo-header fields have the same purpose as data from the first line of HTTP/1.X, /// but are conveyed along with other headers. For example ':method' and ':path' in a /// request, and ':status' in a response. They must be placed before all other fields, /// start with ':', and be lowercase. /// See RFC7540 section 8.1.2.1. for more details. #[derive(Debug, Default)] #[cfg_attr(test, derive(PartialEq, Clone))] struct Pseudo { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3 //= type=implication //# Endpoints MUST NOT //# generate pseudo-header fields other than those defined in this //# document. // Request method: Option, scheme: Option, authority: Option, path: Option, // Response status: Option, protocol: Option, len: usize, } #[allow(clippy::len_without_is_empty)] impl Pseudo { fn request(method: Method, uri: Uri, ext: Extensions) -> Self { let Parts { scheme, authority, path_and_query, .. 
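// `Pseudo::request` takes the request URI apart and applies the defaults seen
// just below: a request without a path gets "/", an empty path is likewise
// replaced unless the method is OPTIONS, and a missing scheme defaults to
// "https". Illustrative outcome for a hypothetical URI:
//
//     "https://example.com"  ->  :scheme "https", :authority "example.com", :path "/"
//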
} = uri::Parts::from(uri); //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=implication //# This pseudo-header field MUST NOT be empty for "http" or "https" //# URIs; "http" or "https" URIs that do not contain a path component //# MUST include a value of / (ASCII 0x2f). let path = path_and_query.map_or_else( || PathAndQuery::from_static("/"), |path| { if path.path().is_empty() && method != Method::OPTIONS { PathAndQuery::from_static("/") } else { path } }, ); // If the method is connect, the `:protocol` pseudo-header MAY be defined // // See: [https://www.rfc-editor.org/rfc/rfc8441#section-4] let protocol = if method == Method::CONNECT { ext.get::().copied() } else { None }; let len = 3 + authority.is_some() as usize + protocol.is_some() as usize; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3 //= type=implication //# Pseudo-header fields defined for requests MUST NOT appear //# in responses; pseudo-header fields defined for responses MUST NOT //# appear in requests. //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=implication //# All HTTP/3 requests MUST include exactly one value for the :method, //# :scheme, and :path pseudo-header fields, unless the request is a //# CONNECT request; see Section 4.4. Self { method: Some(method), scheme: scheme.or(Some(Scheme::HTTPS)), authority, path: Some(path), status: None, protocol, len, } } fn response(status: StatusCode) -> Self { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3 //= type=implication //# Pseudo-header fields defined for requests MUST NOT appear //# in responses; pseudo-header fields defined for responses MUST NOT //# appear in requests. Pseudo { method: None, scheme: None, authority: None, path: None, status: Some(status), len: 1, protocol: None, } } fn len(&self) -> usize { self.len } } #[derive(Debug)] pub enum HeaderError { InvalidHeaderName(String), InvalidHeaderValue(String), InvalidRequest(http::Error), MissingMethod, MissingStatus, MissingAuthority, ContradictedAuthority, } impl HeaderError { fn invalid_name(name: N) -> Self where N: AsRef<[u8]>, { HeaderError::InvalidHeaderName(format!("{:?}", name.as_ref())) } fn invalid_value(name: N, value: V) -> Self where N: AsRef<[u8]>, V: AsRef<[u8]>, { HeaderError::InvalidHeaderValue(format!( "{:?} {:?}", String::from_utf8_lossy(name.as_ref()), value.as_ref() )) } } impl std::error::Error for HeaderError {} impl fmt::Display for HeaderError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { HeaderError::InvalidHeaderName(h) => write!(f, "invalid header name: {}", h), HeaderError::InvalidHeaderValue(v) => write!(f, "invalid header value: {}", v), HeaderError::InvalidRequest(r) => write!(f, "invalid request: {}", r), HeaderError::MissingMethod => write!(f, "missing method in request headers"), HeaderError::MissingStatus => write!(f, "missing status in response headers"), HeaderError::MissingAuthority => write!(f, "missing authority"), HeaderError::ContradictedAuthority => { write!(f, "uri and authority field are in contradiction") } } } } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; #[test] fn request_has_no_authority_nor_host() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If the :scheme pseudo-header field identifies a scheme that has a //# mandatory authority component (including "http" and "https"), the //# request MUST contain either an :authority pseudo-header field or a //# Host header field. 
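// The tests below exercise the decode-side checks in `into_request_parts`; the
// same rules are enforced on the encode side by `Header::request` before a
// request is ever serialised. For instance (illustrative, not crate code):
//
//     // no :authority in the URI and no Host header field
//     let hdr = Header::request(
//         Method::GET,
//         Uri::from_static("/"),
//         HeaderMap::new(),
//         Extensions::default(),
//     );
//     assert!(matches!(hdr, Err(HeaderError::MissingAuthority)));
//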
let headers = Header::try_from(vec![(b":method", Method::GET.as_str()).into()]).unwrap(); assert!(headers.pseudo.authority.is_none()); assert_matches!( headers.into_request_parts(), Err(HeaderError::MissingAuthority) ); } #[test] fn request_has_empty_authority() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If these fields are present, they MUST NOT be //# empty. assert_matches!( Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b":authority", b"").into(), ]), Err(HeaderError::InvalidHeaderValue(_)) ); } #[test] fn request_has_empty_host() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If these fields are present, they MUST NOT be //# empty. let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b"host", b"").into(), ]) .unwrap(); assert_matches!( headers.into_request_parts(), Err(HeaderError::InvalidRequest(_)) ); } #[test] fn request_has_authority() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If the :scheme pseudo-header field identifies a scheme that has a //# mandatory authority component (including "http" and "https"), the //# request MUST contain either an :authority pseudo-header field or a //# Host header field. let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b":authority", b"test.com").into(), ]) .unwrap(); assert_matches!(headers.into_request_parts(), Ok(_)); } #[test] fn request_has_host() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If the :scheme pseudo-header field identifies a scheme that has a //# mandatory authority component (including "http" and "https"), the //# request MUST contain either an :authority pseudo-header field or a //# Host header field. let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b"host", b"test.com").into(), ]) .unwrap(); assert!(headers.pseudo.authority.is_none()); assert_matches!(headers.into_request_parts(), Ok(_)); } #[test] fn request_has_same_host_and_authority() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If both fields are present, they MUST contain the same value. let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b":authority", b"test.com").into(), (b"host", b"test.com").into(), ]) .unwrap(); assert_matches!(headers.into_request_parts(), Ok(_)); } #[test] fn request_has_different_host_and_authority() { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.3.1 //= type=test //# If both fields are present, they MUST contain the same value. 
let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b":authority", b"authority.com").into(), (b"host", b"host.com").into(), ]) .unwrap(); assert_matches!( headers.into_request_parts(), Err(HeaderError::ContradictedAuthority) ); } #[test] fn preserves_duplicate_headers() { let headers = Header::try_from(vec![ (b":method", Method::GET.as_str()).into(), (b":authority", b"test.com").into(), (b"set-cookie", b"foo=foo").into(), (b"set-cookie", b"bar=bar").into(), (b"other-header", b"other-header-value").into(), ]) .unwrap(); assert_eq!( headers .clone() .into_iter() .filter(|h| h.name.as_ref() == b"set-cookie") .collect::>(), vec![ HeaderField { name: std::borrow::Cow::Borrowed(b"set-cookie"), value: std::borrow::Cow::Borrowed(b"foo=foo") }, HeaderField { name: std::borrow::Cow::Borrowed(b"set-cookie"), value: std::borrow::Cow::Borrowed(b"bar=bar") } ] ); assert_eq!( headers .into_iter() .filter(|h| h.name.as_ref() == b"other-header") .collect::>(), vec![HeaderField { name: std::borrow::Cow::Borrowed(b"other-header"), value: std::borrow::Cow::Borrowed(b"other-header-value") },] ); } } h3-0.0.6/src/proto/mod.rs000064400000000000000000000002061046102023000132160ustar 00000000000000pub mod coding; #[allow(dead_code)] pub mod frame; #[allow(dead_code)] pub mod headers; pub mod push; pub mod stream; pub mod varint; h3-0.0.6/src/proto/push.rs000064400000000000000000000017271046102023000134270ustar 00000000000000use std::convert::TryFrom; use std::fmt::{self, Display}; use super::varint::VarInt; #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct PushId(pub(crate) u64); #[derive(Debug, PartialEq)] pub struct InvalidPushId(u64); impl TryFrom for PushId { type Error = InvalidPushId; fn try_from(v: u64) -> Result { match VarInt::try_from(v) { Ok(id) => Ok(id.into()), Err(_) => Err(InvalidPushId(v)), } } } impl Display for InvalidPushId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "invalid push id: {:x}", self.0) } } impl From for PushId { fn from(v: VarInt) -> Self { Self(v.0) } } impl From for VarInt { fn from(v: PushId) -> Self { Self(v.0) } } impl fmt::Display for PushId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "push {}", self.0) } } h3-0.0.6/src/proto/stream.rs000064400000000000000000000122551046102023000137410ustar 00000000000000use bytes::{Buf, BufMut}; use std::{ convert::TryFrom, fmt::{self, Display}, ops::Add, }; use crate::webtransport::SessionId; use super::{ coding::{BufExt, BufMutExt, Decode, Encode, UnexpectedEnd}, varint::VarInt, }; #[derive(Debug, PartialEq, Eq, Clone)] pub struct StreamType(u64); macro_rules! stream_types { {$($name:ident = $val:expr,)*} => { impl StreamType { $(pub const $name: StreamType = StreamType($val);)* } } } stream_types! 
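// The values listed below are the stream-type varint written at the start of a
// stream to announce its purpose: the RFC 9114 Section 6.2 unidirectional types
// (control, push, QPACK encoder and decoder) plus the WebTransport extension
// types. Illustrative opening of the control stream (not crate code):
//
//     let mut buf = Vec::new();
//     StreamType::CONTROL.encode(&mut buf); // single byte 0x00
//     // ...followed immediately by the SETTINGS frame, which must be the
//     // first frame on the control stream.
//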
{ CONTROL = 0x00, PUSH = 0x01, ENCODER = 0x02, DECODER = 0x03, WEBTRANSPORT_BIDI = 0x41, WEBTRANSPORT_UNI = 0x54, } impl StreamType { pub const MAX_ENCODED_SIZE: usize = VarInt::MAX_SIZE; pub fn value(&self) -> u64 { self.0 } /// returns a StreamType type with random number of the 0x1f * N + 0x21 /// format within the range of the Varint implementation pub fn grease() -> Self { StreamType(fastrand::u64(0..0x210842108421083) * 0x1f + 0x21) } } impl Decode for StreamType { fn decode(buf: &mut B) -> Result { Ok(StreamType(buf.get_var()?)) } } impl Encode for StreamType { fn encode(&self, buf: &mut W) { buf.write_var(self.0); } } impl fmt::Display for StreamType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { &StreamType::CONTROL => write!(f, "Control"), &StreamType::ENCODER => write!(f, "Encoder"), &StreamType::DECODER => write!(f, "Decoder"), &StreamType::WEBTRANSPORT_UNI => write!(f, "WebTransportUni"), x => write!(f, "StreamType({})", x.0), } } } /// Identifier for a stream #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct StreamId(#[cfg(not(test))] u64, #[cfg(test)] pub(crate) u64); impl fmt::Display for StreamId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let initiator = match self.initiator() { Side::Client => "client", Side::Server => "server", }; let dir = match self.dir() { Dir::Uni => "uni", Dir::Bi => "bi", }; write!( f, "{} {}directional stream {}", initiator, dir, self.index() ) } } impl StreamId { pub(crate) const FIRST_REQUEST: Self = Self::new(0, Dir::Bi, Side::Client); /// Is this a client-initiated request? pub fn is_request(&self) -> bool { self.dir() == Dir::Bi && self.initiator() == Side::Client } /// Is this a server push? pub fn is_push(&self) -> bool { self.dir() == Dir::Uni && self.initiator() == Side::Server } /// Which side of a connection initiated the stream pub(crate) fn initiator(self) -> Side { if self.0 & 0x1 == 0 { Side::Client } else { Side::Server } } /// Create a new StreamId const fn new(index: u64, dir: Dir, initiator: Side) -> Self { StreamId((index) << 2 | (dir as u64) << 1 | initiator as u64) } /// Distinguishes streams of the same initiator and directionality pub fn index(self) -> u64 { self.0 >> 2 } /// Which directions data flows in fn dir(self) -> Dir { if self.0 & 0x2 == 0 { Dir::Bi } else { Dir::Uni } } pub(crate) fn into_inner(self) -> u64 { self.0 } } impl TryFrom for StreamId { type Error = InvalidStreamId; fn try_from(v: u64) -> Result { if v > VarInt::MAX.0 { return Err(InvalidStreamId(v)); } Ok(Self(v)) } } impl From for StreamId { fn from(v: VarInt) -> Self { Self(v.0) } } impl From for VarInt { fn from(v: StreamId) -> Self { Self(v.0) } } /// Invalid StreamId, for example because it's too large #[derive(Debug, PartialEq)] pub struct InvalidStreamId(pub(crate) u64); impl Display for InvalidStreamId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "invalid stream id: {:x}", self.0) } } impl Encode for StreamId { fn encode(&self, buf: &mut B) { VarInt::from_u64(self.0).unwrap().encode(buf); } } impl Add for StreamId { type Output = StreamId; #[allow(clippy::suspicious_arithmetic_impl)] fn add(self, rhs: usize) -> Self::Output { let index = u64::min( u64::saturating_add(self.index(), rhs as u64), VarInt::MAX.0 >> 2, ); Self::new(index, self.dir(), self.initiator()) } } impl From for StreamId { fn from(value: SessionId) -> Self { Self(value.into_inner()) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Side { /// The initiator of a 
connection Client = 0, /// The acceptor of a connection Server = 1, } /// Whether a stream communicates data in both directions or only from the initiator #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum Dir { /// Data flows in both directions Bi = 0, /// Data flows only from the stream's initiator Uni = 1, } h3-0.0.6/src/proto/varint.rs000064400000000000000000000121571046102023000137520ustar 00000000000000use std::{convert::TryInto, fmt, ops::Div}; use bytes::{Buf, BufMut}; pub use super::coding::UnexpectedEnd; /// An integer less than 2^62 /// /// Values of this type are suitable for encoding as QUIC variable-length integer. // It would be neat if we could express to Rust that the top two bits are available for use as enum // discriminants #[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct VarInt(pub(crate) u64); impl Div for VarInt { type Output = Self; fn div(self, rhs: u64) -> Self::Output { Self(self.0 / rhs) } } impl VarInt { /// The largest representable value pub const MAX: VarInt = VarInt((1 << 62) - 1); /// The largest encoded value length pub const MAX_SIZE: usize = 8; /// Construct a `VarInt` infallibly pub const fn from_u32(x: u32) -> Self { VarInt(x as u64) } /// Succeeds iff `x` < 2^62 pub fn from_u64(x: u64) -> Result { if x < 2u64.pow(62) { Ok(VarInt(x)) } else { Err(VarIntBoundsExceeded(x)) } } /// Create a VarInt without ensuring it's in range /// /// # Safety /// /// `x` must be less than 2^62. pub const unsafe fn from_u64_unchecked(x: u64) -> Self { VarInt(x) } /// Extract the integer value pub const fn into_inner(self) -> u64 { self.0 } /// Compute the number of bytes needed to encode this value pub fn size(self) -> usize { let x = self.0; if x < 2u64.pow(6) { 1 } else if x < 2u64.pow(14) { 2 } else if x < 2u64.pow(30) { 4 } else if x < 2u64.pow(62) { 8 } else { unreachable!("malformed VarInt"); } } /// Length of an encoded value from its first byte pub fn encoded_size(first: u8) -> usize { 2usize.pow((first >> 6) as u32) } pub fn decode(r: &mut B) -> Result { if !r.has_remaining() { return Err(UnexpectedEnd(0)); } let mut buf = [0; 8]; buf[0] = r.get_u8(); let tag = buf[0] >> 6; buf[0] &= 0b0011_1111; let x = match tag { 0b00 => u64::from(buf[0]), 0b01 => { if r.remaining() < 1 { return Err(UnexpectedEnd(1)); } r.copy_to_slice(&mut buf[1..2]); u64::from(u16::from_be_bytes(buf[..2].try_into().unwrap())) } 0b10 => { if r.remaining() < 3 { return Err(UnexpectedEnd(2)); } r.copy_to_slice(&mut buf[1..4]); u64::from(u32::from_be_bytes(buf[..4].try_into().unwrap())) } 0b11 => { if r.remaining() < 7 { return Err(UnexpectedEnd(3)); } r.copy_to_slice(&mut buf[1..8]); u64::from_be_bytes(buf) } _ => unreachable!(), }; Ok(VarInt(x)) } pub fn encode(&self, w: &mut B) { let x = self.0; if x < 2u64.pow(6) { w.put_u8(x as u8); } else if x < 2u64.pow(14) { w.put_u16(0b01 << 14 | x as u16); } else if x < 2u64.pow(30) { w.put_u32(0b10 << 30 | x as u32); } else if x < 2u64.pow(62) { w.put_u64(0b11 << 62 | x); } else { unreachable!("malformed VarInt") } } } impl From for u64 { fn from(x: VarInt) -> u64 { x.0 } } impl From for VarInt { fn from(x: u8) -> Self { VarInt(x.into()) } } impl From for VarInt { fn from(x: u16) -> Self { VarInt(x.into()) } } impl From for VarInt { fn from(x: u32) -> Self { VarInt(x.into()) } } impl std::convert::TryFrom for VarInt { type Error = VarIntBoundsExceeded; /// Succeeds iff `x` < 2^62 fn try_from(x: u64) -> Result { VarInt::from_u64(x) } } impl std::convert::TryFrom for VarInt { type Error = VarIntBoundsExceeded; /// 
Succeeds iff `x` < 2^62 fn try_from(x: usize) -> Result { VarInt::try_from(x as u64) } } impl fmt::Debug for VarInt { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for VarInt { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } pub trait BufExt { fn get_var(&mut self) -> Result; } impl BufExt for T { fn get_var(&mut self) -> Result { Ok(VarInt::decode(self)?.into_inner()) } } pub trait BufMutExt { fn write_var(&mut self, x: u64); } impl BufMutExt for T { fn write_var(&mut self, x: u64) { VarInt::from_u64(x).unwrap().encode(self); } } /// Error returned when constructing a `VarInt` from a value >= 2^62 #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct VarIntBoundsExceeded(pub(crate) u64); h3-0.0.6/src/qpack/block.rs000064400000000000000000000371041046102023000134740ustar 00000000000000use bytes::{Buf, BufMut}; use super::{parse_error::ParseError, prefix_int, prefix_string}; // 4.5. Field Line Representations // Single header field line. These representations reference the static table or // the dynamic table in a particular state, but do not modify that state. pub enum HeaderBlockField { // 4.5.2. Indexed Field Line // Entry in the static table, or in the dynamic table with an absolute index // less than the value of the Base. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 1 | T | Index (6+) | // +---+---+-----------------------+ Indexed, // 4.5.3. Indexed Field Line With Post-Base Index // Entry in the dynamic table with an absolute index greater than or equal // to the value of the Base. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | 0 | 1 | Index (4+) | // +---+---+---+---+---------------+ IndexedWithPostBase, // 4.5.4. Literal Field Line With Name Reference // Entry in the dynamic table with an absolute index greater than or equal // to the value of the Base. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 1 | N | T |Name Index (4+)| // +---+---+---+---+---------------+ // | H | Value Length (7+) | // +---+---------------------------+ // | Value String (Length bytes) | // +-------------------------------+ LiteralWithNameRef, // 4.5.5. Literal Field Line With Post-Base Name Reference // The field name matches a name of an entry in the static table, or in the // dynamic table with an absolute index less than the value of the Base. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | 0 | 0 | N |NameIdx(3+)| // +---+---+---+---+---+-----------+ // | H | Value Length (7+) | // +---+---------------------------+ // | Value String (Length bytes) | // +-------------------------------+ LiteralWithPostBaseNameRef, // 4.5.6. Literal Field Line With Literal Name // Field name and field value are encoded as string literals. 
// 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | 1 | N | H |NameLen(3+)| // +---+---+---+---+---+-----------+ // | Name String (Length bytes) | // +---+---------------------------+ // | H | Value Length (7+) | // +---+---------------------------+ // | Value String (Length bytes) | // +-------------------------------+ Literal, Unknown, } impl HeaderBlockField { // Check how the next field is encoded according its first byte pub fn decode(first: u8) -> Self { if first & 0b1000_0000 != 0 { HeaderBlockField::Indexed } else if first & 0b1111_0000 == 0b0001_0000 { HeaderBlockField::IndexedWithPostBase } else if first & 0b1100_0000 == 0b0100_0000 { HeaderBlockField::LiteralWithNameRef } else if first & 0b1111_0000 == 0 { HeaderBlockField::LiteralWithPostBaseNameRef } else if first & 0b1110_0000 == 0b0010_0000 { HeaderBlockField::Literal } else { HeaderBlockField::Unknown } } } // 4.5.1. Encoded Field Section Prefix #[derive(Debug, PartialEq)] pub struct HeaderPrefix { encoded_insert_count: usize, sign_negative: bool, delta_base: usize, } impl HeaderPrefix { pub fn new(required: usize, base: usize, total_inserted: usize, max_table_size: usize) -> Self { if max_table_size == 0 { return Self { encoded_insert_count: 0, sign_negative: false, delta_base: 0, }; } if required == 0 { return Self { encoded_insert_count: 0, delta_base: 0, sign_negative: false, }; } assert!(required <= total_inserted); let (sign_negative, delta_base) = if required > base { (true, required - base - 1) } else { (false, base - required) }; let max_entries = max_table_size / 32; Self { encoded_insert_count: required % (2 * max_entries) + 1, sign_negative, delta_base, } } pub fn get( self, total_inserted: usize, max_table_size: usize, ) -> Result<(usize, usize), ParseError> { if max_table_size == 0 { return Ok((0, 0)); } // 4.5.1.1. Required Insert Count let required = if self.encoded_insert_count == 0 { 0 } else { let mut insert_count = self.encoded_insert_count - 1; let max_entries = max_table_size / 32; let mut wrapped = total_inserted % (2 * max_entries); if wrapped >= insert_count + max_entries { insert_count += 2 * max_entries; } else if wrapped + max_entries < insert_count { wrapped += 2 * max_entries; } insert_count + total_inserted - wrapped }; let base = if required == 0 { 0 } else if !self.sign_negative { required + self.delta_base } else { if self.delta_base + 1 > required { return Err(ParseError::InvalidBase( required as isize - self.delta_base as isize - 1, )); } required - self.delta_base - 1 }; Ok((required, base)) } // 4.5.1. Encoded Field Section Prefix // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | Required Insert Count (8+) | // +---+---------------------------+ // | S | Delta Base (7+) | // +---+---------------------------+ // | Encoded Field Lines ... 
// +-------------------------------+ pub fn decode(buf: &mut R) -> Result { let (_, encoded_insert_count) = prefix_int::decode(8, buf)?; let (sign_negative, delta_base) = prefix_int::decode(7, buf)?; if encoded_insert_count > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } if delta_base > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(Self { encoded_insert_count: encoded_insert_count as usize, delta_base: delta_base as usize, sign_negative: sign_negative == 1, }) } pub fn encode(&self, buf: &mut W) { let sign_bit = if self.sign_negative { 1 } else { 0 }; prefix_int::encode(8, 0, self.encoded_insert_count as u64, buf); prefix_int::encode(7, sign_bit, self.delta_base as u64, buf); } } #[derive(Debug, PartialEq)] pub enum Indexed { Static(usize), Dynamic(usize), } impl Indexed { pub fn decode(buf: &mut R) -> Result { match prefix_int::decode(6, buf)? { (0b11, i) => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(Indexed::Static(i as usize)) } (0b10, i) => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(Indexed::Dynamic(i as usize)) } (f, _) => Err(ParseError::InvalidPrefix(f)), } } pub fn encode(&self, buf: &mut W) { match self { Indexed::Static(i) => prefix_int::encode(6, 0b11, *i as u64, buf), Indexed::Dynamic(i) => prefix_int::encode(6, 0b10, *i as u64, buf), } } } #[derive(Debug, PartialEq)] pub struct IndexedWithPostBase(pub usize); impl IndexedWithPostBase { pub fn decode(buf: &mut R) -> Result { match prefix_int::decode(4, buf)? { (0b0001, i) => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(IndexedWithPostBase(i as usize)) } (f, _) => Err(ParseError::InvalidPrefix(f)), } } pub fn encode(&self, buf: &mut W) { prefix_int::encode(4, 0b0001, self.0 as u64, buf) } } #[derive(Debug, PartialEq)] pub enum LiteralWithNameRef { Static { index: usize, value: Vec }, Dynamic { index: usize, value: Vec }, } impl LiteralWithNameRef { pub fn new_static>>(index: usize, value: T) -> Self { LiteralWithNameRef::Static { index, value: value.into(), } } pub fn new_dynamic>>(index: usize, value: T) -> Self { LiteralWithNameRef::Dynamic { index, value: value.into(), } } pub fn decode(buf: &mut R) -> Result { match prefix_int::decode(4, buf)? 
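// The 4-bit prefix examined below encodes `0 1 N T`, where N is the
// "never index" flag and T selects the static (T = 1) or dynamic (T = 0) table
// for the name reference, matching the field layout sketched above.
// Illustrative round trip, mirroring the unit tests at the end of this file:
//
//     let field = LiteralWithNameRef::new_static(42, "foo");
//     let mut buf = vec![];
//     field.encode(&mut buf).unwrap();
//     let mut read = std::io::Cursor::new(&buf);
//     assert_eq!(LiteralWithNameRef::decode(&mut read), Ok(field));
//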
{ (f, i) if f & 0b0101 == 0b0101 => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(LiteralWithNameRef::new_static( i as usize, prefix_string::decode(8, buf)?, )) } (f, i) if f & 0b0101 == 0b0100 => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(LiteralWithNameRef::new_dynamic( i as usize, prefix_string::decode(8, buf)?, )) } (f, _) => Err(ParseError::InvalidPrefix(f)), } } pub fn encode(&self, buf: &mut W) -> Result<(), prefix_string::Error> { match self { LiteralWithNameRef::Static { index, value } => { prefix_int::encode(4, 0b0101, *index as u64, buf); prefix_string::encode(8, 0, value, buf)?; } LiteralWithNameRef::Dynamic { index, value } => { prefix_int::encode(4, 0b0100, *index as u64, buf); prefix_string::encode(8, 0, value, buf)?; } } Ok(()) } } #[derive(Debug, PartialEq)] pub struct LiteralWithPostBaseNameRef { pub index: usize, pub value: Vec, } impl LiteralWithPostBaseNameRef { pub fn new>>(index: usize, value: T) -> Self { LiteralWithPostBaseNameRef { index, value: value.into(), } } pub fn decode(buf: &mut R) -> Result { match prefix_int::decode(3, buf)? { (f, i) if f & 0b1111_0000 == 0 => { if i > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } Ok(LiteralWithPostBaseNameRef::new( i as usize, prefix_string::decode(8, buf)?, )) } (f, _) => Err(ParseError::InvalidPrefix(f)), } } pub fn encode(&self, buf: &mut W) -> Result<(), prefix_string::Error> { prefix_int::encode(3, 0b0000, self.index as u64, buf); prefix_string::encode(8, 0, &self.value, buf)?; Ok(()) } } #[derive(Debug, PartialEq)] pub struct Literal { pub name: Vec, pub value: Vec, } impl Literal { pub fn new>>(name: T, value: T) -> Self { Literal { name: name.into(), value: value.into(), } } pub fn decode(buf: &mut R) -> Result { if buf.remaining() < 1 { return Err(ParseError::Integer(prefix_int::Error::UnexpectedEnd)); } else if buf.chunk()[0] & 0b1110_0000 != 0b0010_0000 { return Err(ParseError::InvalidPrefix(buf.chunk()[0])); } Ok(Literal::new( prefix_string::decode(4, buf)?, prefix_string::decode(8, buf)?, )) } pub fn encode(&self, buf: &mut W) -> Result<(), prefix_string::Error> { prefix_string::encode(4, 0b0010, &self.name, buf)?; prefix_string::encode(8, 0, &self.value, buf)?; Ok(()) } } #[cfg(test)] mod test { use super::*; use std::convert::TryInto; use std::io::Cursor; const TABLE_SIZE: usize = 4096; #[test] fn indexed_static() { let field = Indexed::Static(42); let mut buf = vec![]; field.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(Indexed::decode(&mut read), Ok(field)); } #[test] fn indexed_dynamic() { let field = Indexed::Dynamic(42); let mut buf = vec![]; field.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(Indexed::decode(&mut read), Ok(field)); } #[test] fn indexed_with_postbase() { let field = IndexedWithPostBase(42); let mut buf = vec![]; field.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(IndexedWithPostBase::decode(&mut read), Ok(field)); } #[test] fn literal_with_name_ref() { let field = LiteralWithNameRef::new_static(42, "foo"); let mut buf = vec![]; field.encode(&mut buf).unwrap(); let mut read = Cursor::new(&buf); assert_eq!(LiteralWithNameRef::decode(&mut read), Ok(field)); } #[test] fn literal_with_post_base_name_ref() { let field = LiteralWithPostBaseNameRef::new(42, "foo"); let mut buf = vec![]; field.encode(&mut buf).unwrap(); let mut read = 
Cursor::new(&buf); assert_eq!(LiteralWithPostBaseNameRef::decode(&mut read), Ok(field)); } #[test] fn literal() { let field = Literal::new("foo", "bar"); let mut buf = vec![]; field.encode(&mut buf).unwrap(); let mut read = Cursor::new(&buf); assert_eq!(Literal::decode(&mut read), Ok(field)); } #[test] fn header_prefix() { let prefix = HeaderPrefix::new(10, 5, 12, TABLE_SIZE); let mut buf = vec![]; prefix.encode(&mut buf); let mut read = Cursor::new(&buf); let decoded = HeaderPrefix::decode(&mut read); assert_eq!(decoded, Ok(prefix)); assert_eq!(decoded.unwrap().get(13, 3332).unwrap(), (10, 5)); } #[test] fn header_prefix_table_size_0() { HeaderPrefix::new(10, 5, 12, 0).get(1, 0).unwrap(); } #[test] fn base_index_too_small() { let mut buf = vec![]; let encoded_largest_ref: u64 = ((2 % (2 * TABLE_SIZE / 32)) + 1).try_into().unwrap(); prefix_int::encode(8, 0, encoded_largest_ref, &mut buf); prefix_int::encode(7, 1, 2, &mut buf); // base index negative = 0 let mut read = Cursor::new(&buf); assert_eq!( HeaderPrefix::decode(&mut read).unwrap().get(2, TABLE_SIZE), Err(ParseError::InvalidBase(-1)) ); } } h3-0.0.6/src/qpack/decoder.rs000064400000000000000000000615101046102023000140050ustar 00000000000000use bytes::{Buf, BufMut}; use std::{convert::TryInto, fmt, io::Cursor, num::TryFromIntError}; #[cfg(feature = "tracing")] use tracing::trace; use super::{ dynamic::{DynamicTable, DynamicTableDecoder, Error as DynamicTableError}, field::HeaderField, static_::{Error as StaticError, StaticTable}, vas, }; use super::{ block::{ HeaderBlockField, HeaderPrefix, Indexed, IndexedWithPostBase, Literal, LiteralWithNameRef, LiteralWithPostBaseNameRef, }, parse_error::ParseError, stream::{ Duplicate, DynamicTableSizeUpdate, EncoderInstruction, HeaderAck, InsertCountIncrement, InsertWithNameRef, InsertWithoutNameRef, StreamCancel, }, }; use super::{prefix_int, prefix_string}; #[derive(Debug, PartialEq)] pub enum Error { InvalidInteger(prefix_int::Error), InvalidString(prefix_string::Error), InvalidIndex(vas::Error), DynamicTable(DynamicTableError), InvalidStaticIndex(usize), UnknownPrefix(u8), MissingRefs(usize), BadBaseIndex(isize), UnexpectedEnd, HeaderTooLong(u64), BufSize(TryFromIntError), } impl std::error::Error for Error {} impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result { match self { Error::InvalidInteger(e) => write!(f, "invalid integer: {}", e), Error::InvalidString(e) => write!(f, "invalid string: {:?}", e), Error::InvalidIndex(e) => write!(f, "invalid dynamic index: {:?}", e), Error::DynamicTable(e) => write!(f, "dynamic table error: {:?}", e), Error::InvalidStaticIndex(i) => write!(f, "unknown static index: {}", i), Error::UnknownPrefix(p) => write!(f, "unknown instruction code: 0x{}", p), Error::MissingRefs(n) => write!(f, "missing {} refs to decode bloc", n), Error::BadBaseIndex(i) => write!(f, "out of bounds base index: {}", i), Error::UnexpectedEnd => write!(f, "unexpected end"), Error::HeaderTooLong(_) => write!(f, "header too long"), Error::BufSize(_) => write!(f, "number in buffer wrong size"), } } } pub fn ack_header(stream_id: u64, decoder: &mut W) { HeaderAck(stream_id).encode(decoder); } pub fn stream_canceled(stream_id: u64, decoder: &mut W) { StreamCancel(stream_id).encode(decoder); } #[derive(PartialEq, Debug)] pub struct Decoded { /// The decoded fields pub fields: Vec, /// Whether one or more encoded fields were referencing the dynamic table pub dyn_ref: bool, /// Decoded size, calculated as stated in "4.1.1.3. 
Header Size Constraints" pub mem_size: u64, } pub struct Decoder { table: DynamicTable, } impl Decoder { // Decode field lines received on Request of Push stream. // https://www.rfc-editor.org/rfc/rfc9204.html#name-field-line-representations pub fn decode_header(&self, buf: &mut T) -> Result { let (required_ref, base) = HeaderPrefix::decode(buf)? .get(self.table.total_inserted(), self.table.max_mem_size())?; if required_ref > self.table.total_inserted() { return Err(Error::MissingRefs(required_ref)); } let decoder_table = self.table.decoder(base); let mut mem_size = 0; let mut fields = Vec::new(); while buf.has_remaining() { let field = Self::parse_header_field(&decoder_table, buf)?; mem_size += field.mem_size() as u64; fields.push(field); } Ok(Decoded { fields, mem_size, dyn_ref: required_ref > 0, }) } // The receiving side of encoder stream pub fn on_encoder_recv( &mut self, read: &mut R, write: &mut W, ) -> Result { let inserted_on_start = self.table.total_inserted(); while let Some(instruction) = self.parse_instruction(read)? { #[cfg(feature = "tracing")] trace!("instruction {:?}", instruction); match instruction { Instruction::Insert(field) => self.table.put(field)?, Instruction::TableSizeUpdate(size) => { self.table.set_max_size(size)?; } } } if self.table.total_inserted() != inserted_on_start { InsertCountIncrement((self.table.total_inserted() - inserted_on_start).try_into()?) .encode(write); } Ok(self.table.total_inserted()) } fn parse_instruction(&self, read: &mut R) -> Result, Error> { if read.remaining() < 1 { return Ok(None); } let mut buf = Cursor::new(read.chunk()); let first = buf.chunk()[0]; let instruction = match EncoderInstruction::decode(first) { EncoderInstruction::Unknown => return Err(Error::UnknownPrefix(first)), EncoderInstruction::DynamicTableSizeUpdate => { DynamicTableSizeUpdate::decode(&mut buf)?.map(|x| Instruction::TableSizeUpdate(x.0)) } EncoderInstruction::InsertWithoutNameRef => InsertWithoutNameRef::decode(&mut buf)? .map(|x| Instruction::Insert(HeaderField::new(x.name, x.value))), EncoderInstruction::Duplicate => match Duplicate::decode(&mut buf)? { Some(Duplicate(index)) => { Some(Instruction::Insert(self.table.get_relative(index)?.clone())) } None => None, }, EncoderInstruction::InsertWithNameRef => match InsertWithNameRef::decode(&mut buf)? { Some(InsertWithNameRef::Static { index, value }) => Some(Instruction::Insert( StaticTable::get(index)?.with_value(value), )), Some(InsertWithNameRef::Dynamic { index, value }) => Some(Instruction::Insert( self.table.get_relative(index)?.with_value(value), )), None => None, }, }; if instruction.is_some() { let pos = buf.position(); read.advance(pos as usize); } Ok(instruction) } fn parse_header_field( table: &DynamicTableDecoder, buf: &mut R, ) -> Result { let first = buf.chunk()[0]; let field = match HeaderBlockField::decode(first) { HeaderBlockField::Indexed => match Indexed::decode(buf)? { Indexed::Static(index) => StaticTable::get(index)?.clone(), Indexed::Dynamic(index) => table.get_relative(index)?.clone(), }, HeaderBlockField::IndexedWithPostBase => { let index = IndexedWithPostBase::decode(buf)?.0; table.get_postbase(index)?.clone() } HeaderBlockField::LiteralWithNameRef => match LiteralWithNameRef::decode(buf)? 
{ LiteralWithNameRef::Static { index, value } => { StaticTable::get(index)?.with_value(value) } LiteralWithNameRef::Dynamic { index, value } => { table.get_relative(index)?.with_value(value) } }, HeaderBlockField::LiteralWithPostBaseNameRef => { let literal = LiteralWithPostBaseNameRef::decode(buf)?; table.get_postbase(literal.index)?.with_value(literal.value) } HeaderBlockField::Literal => { let literal = Literal::decode(buf)?; HeaderField::new(literal.name, literal.value) } _ => return Err(Error::UnknownPrefix(first)), }; Ok(field) } } // Decode field lines received on Request or Push stream. // https://www.rfc-editor.org/rfc/rfc9204.html#name-field-line-representations pub fn decode_stateless(buf: &mut T, max_size: u64) -> Result { let (required_ref, _base) = HeaderPrefix::decode(buf)?.get(0, 0)?; if required_ref > 0 { return Err(Error::MissingRefs(required_ref)); } let mut mem_size = 0; let mut fields = Vec::new(); while buf.has_remaining() { let field = match HeaderBlockField::decode(buf.chunk()[0]) { HeaderBlockField::IndexedWithPostBase => return Err(Error::MissingRefs(0)), HeaderBlockField::LiteralWithPostBaseNameRef => return Err(Error::MissingRefs(0)), HeaderBlockField::Indexed => match Indexed::decode(buf)? { Indexed::Static(index) => StaticTable::get(index)?.clone(), Indexed::Dynamic(_) => return Err(Error::MissingRefs(0)), }, HeaderBlockField::LiteralWithNameRef => match LiteralWithNameRef::decode(buf)? { LiteralWithNameRef::Dynamic { .. } => return Err(Error::MissingRefs(0)), LiteralWithNameRef::Static { index, value } => { StaticTable::get(index)?.with_value(value) } }, HeaderBlockField::Literal => { let literal = Literal::decode(buf)?; HeaderField::new(literal.name, literal.value) } _ => return Err(Error::UnknownPrefix(buf.chunk()[0])), }; mem_size += field.mem_size() as u64; // Cancel decoding if the header is considered too big if mem_size > max_size { return Err(Error::HeaderTooLong(mem_size)); } fields.push(field); } Ok(Decoded { fields, mem_size, dyn_ref: false, }) } #[cfg(test)] impl From for Decoder { fn from(table: DynamicTable) -> Self { Self { table } } } #[derive(PartialEq)] enum Instruction { Insert(HeaderField), TableSizeUpdate(usize), } impl fmt::Debug for Instruction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Instruction::Insert(h) => write!(f, "Instruction::Insert {{ {} }}", h), Instruction::TableSizeUpdate(n) => { write!(f, "Instruction::TableSizeUpdate {{ {} }}", n) } } } } impl From for Error { fn from(e: prefix_int::Error) -> Self { match e { prefix_int::Error::UnexpectedEnd => Error::UnexpectedEnd, e => Error::InvalidInteger(e), } } } impl From for Error { fn from(e: prefix_string::Error) -> Self { match e { prefix_string::Error::UnexpectedEnd => Error::UnexpectedEnd, e => Error::InvalidString(e), } } } impl From for Error { fn from(e: vas::Error) -> Self { Error::InvalidIndex(e) } } impl From for Error { fn from(e: StaticError) -> Self { match e { StaticError::Unknown(i) => Error::InvalidStaticIndex(i), } } } impl From for Error { fn from(e: DynamicTableError) -> Self { Error::DynamicTable(e) } } impl From for Error { fn from(e: ParseError) -> Self { match e { ParseError::Integer(x) => Error::InvalidInteger(x), ParseError::String(x) => Error::InvalidString(x), ParseError::InvalidPrefix(p) => Error::UnknownPrefix(p), ParseError::InvalidBase(b) => Error::BadBaseIndex(b), } } } impl From for Error { fn from(error: TryFromIntError) -> Self { Error::BufSize(error) } } #[cfg(test)] mod tests { use super::*; use 
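// The first test below relies on the usual field-size accounting (name length
// + value length + 32 bytes of per-entry overhead): "trailer" (7) + "value" (5)
// + 32 = 44, so with `max_size = 2` decoding stops at the first field and
// reports `HeaderTooLong(44)`.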
crate::qpack::tests::helpers::{build_table_with_size, TABLE_SIZE}; #[test] fn test_header_too_long() { let mut trailers = http::HeaderMap::new(); trailers.insert("trailer", "value".parse().unwrap()); trailers.insert("trailer2", "value2".parse().unwrap()); let mut buf = bytes::BytesMut::new(); let _ = crate::qpack::encode_stateless( &mut buf, crate::proto::headers::Header::trailer(trailers), ); let result = decode_stateless(&mut buf, 2); assert_eq!(result, Err(Error::HeaderTooLong(44))); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-insert-with-name-reference * 4.3.2. Insert With Name Reference */ #[test] fn test_insert_field_with_name_ref_into_dynamic_table() { let mut buf = vec![]; InsertWithNameRef::new_static(1, "serial value") .encode(&mut buf) .unwrap(); let mut decoder = Decoder::from(build_table_with_size(0)); let mut enc = Cursor::new(&buf); let mut dec = vec![]; assert!(decoder.on_encoder_recv(&mut enc, &mut dec).is_ok()); assert_eq!( decoder.table.decoder(1).get_relative(0), Ok(&StaticTable::get(1).unwrap().with_value("serial value")) ); let mut dec_cursor = Cursor::new(&dec); assert_eq!( InsertCountIncrement::decode(&mut dec_cursor), Ok(Some(InsertCountIncrement(1))) ); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-insert-with-name-reference * 4.3.2. Insert With Name Reference */ #[test] fn test_insert_field_with_wrong_name_index_from_static_table() { let mut buf = vec![]; InsertWithNameRef::new_static(3000, "") .encode(&mut buf) .unwrap(); let mut enc = Cursor::new(&buf); let mut decoder = Decoder::from(build_table_with_size(0)); let res = decoder.on_encoder_recv(&mut enc, &mut vec![]); assert_eq!(res, Err(Error::InvalidStaticIndex(3000))); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-insert-with-name-referencehtml * 4.3.2. Insert With Name Reference */ #[test] fn test_insert_field_with_wrong_name_index_from_dynamic_table() { let mut buf = vec![]; InsertWithNameRef::new_dynamic(3000, "") .encode(&mut buf) .unwrap(); let mut enc = Cursor::new(&buf); let mut dec = vec![]; let mut decoder = Decoder::from(build_table_with_size(0)); let res = decoder.on_encoder_recv(&mut enc, &mut dec); assert_eq!( res, Err(Error::DynamicTable(DynamicTableError::BadRelativeIndex( 3000 ))) ); assert!(dec.is_empty()); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-insert-with-literal-name * 4.3.3. Insert with Literal Name */ #[test] fn test_insert_field_without_name_ref() { let mut buf = vec![]; InsertWithoutNameRef::new("key", "value") .encode(&mut buf) .unwrap(); let mut decoder = Decoder::from(build_table_with_size(0)); let mut enc = Cursor::new(&buf); let mut dec = vec![]; assert!(decoder.on_encoder_recv(&mut enc, &mut dec).is_ok()); assert_eq!( decoder.table.decoder(1).get_relative(0), Ok(&HeaderField::new("key", "value")) ); let mut dec_cursor = Cursor::new(&dec); assert_eq!( InsertCountIncrement::decode(&mut dec_cursor), Ok(Some(InsertCountIncrement(1))) ); } fn insert_fields(table: &mut DynamicTable, fields: Vec) { for field in fields { table.put(field).unwrap(); } } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-duplicate * 4.3.4. 
Duplicate */ #[test] fn test_duplicate_field() { // let mut table = build_table_with_size(0); let mut table = build_table_with_size(0); insert_fields( &mut table, vec![HeaderField::new("", ""), HeaderField::new("", "")], ); let mut decoder = Decoder::from(table); let mut buf = vec![]; Duplicate(1).encode(&mut buf); let mut enc = Cursor::new(&buf); let mut dec = vec![]; let res = decoder.on_encoder_recv(&mut enc, &mut dec); assert_eq!(res, Ok(3)); let mut dec_cursor = Cursor::new(&dec); assert_eq!( InsertCountIncrement::decode(&mut dec_cursor), Ok(Some(InsertCountIncrement(1))) ); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-set-dynamic-table-capacity * 4.3.1. Set Dynamic Table Capacity */ #[test] fn test_dynamic_table_size_update() { let mut buf = vec![]; DynamicTableSizeUpdate(25).encode(&mut buf); let mut enc = Cursor::new(&buf); let mut dec = vec![]; let mut decoder = Decoder::from(build_table_with_size(0)); let res = decoder.on_encoder_recv(&mut enc, &mut dec); assert_eq!(res, Ok(0)); let actual_max_size = decoder.table.max_mem_size(); assert_eq!(actual_max_size, 25); assert!(dec.is_empty()); } #[test] fn enc_recv_buf_too_short() { let decoder = Decoder::from(build_table_with_size(0)); let mut buf = vec![]; { let mut enc = Cursor::new(&buf); assert_eq!(decoder.parse_instruction(&mut enc), Ok(None)); } buf.push(0b1000_0000); let mut enc = Cursor::new(&buf); assert_eq!(decoder.parse_instruction(&mut enc), Ok(None)); } #[test] fn enc_recv_accepts_truncated_messages() { let mut buf = vec![]; InsertWithoutNameRef::new("keyfoobarbaz", "value") .encode(&mut buf) .unwrap(); let mut decoder = Decoder::from(build_table_with_size(0)); // cut in middle of the first int let mut enc = Cursor::new(&buf[..2]); let mut dec = vec![]; assert!(decoder.on_encoder_recv(&mut enc, &mut dec).is_ok()); assert_eq!(enc.position(), 0); // cut the last byte of the 2nd string let mut enc = Cursor::new(&buf[..buf.len() - 1]); let mut dec = vec![]; assert!(decoder.on_encoder_recv(&mut enc, &mut dec).is_ok()); assert_eq!(enc.position(), 0); InsertWithoutNameRef::new("keyfoobarbaz2", "value") .encode(&mut buf) .unwrap(); // the first valid field is inserted and buf is left at the first byte of incomplete string let mut enc = Cursor::new(&buf[..buf.len() - 1]); let mut dec = vec![]; assert!(decoder.on_encoder_recv(&mut enc, &mut dec).is_ok()); assert_eq!(enc.position(), 15); let mut dec_cursor = Cursor::new(&dec); assert_eq!( InsertCountIncrement::decode(&mut dec_cursor), Ok(Some(InsertCountIncrement(1))) ); } #[test] fn largest_ref_too_big() { let decoder = Decoder::from(build_table_with_size(0)); let mut buf = vec![]; HeaderPrefix::new(8, 8, 10, TABLE_SIZE).encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(decoder.decode_header(&mut read), Err(Error::MissingRefs(8))); } fn field(n: usize) -> HeaderField { HeaderField::new(format!("foo{}", n), "bar") } // Largest Reference // Base Index = 2 // | // foo2 foo1 // +-----+-----+ // | 2 | 1 | Absolute Index // +-----+-----+ // | 0 | 1 | Relative Index // --+---+-----+ #[test] fn decode_indexed_header_field() { let mut buf = vec![]; HeaderPrefix::new(2, 2, 2, TABLE_SIZE).encode(&mut buf); Indexed::Dynamic(0).encode(&mut buf); Indexed::Dynamic(1).encode(&mut buf); Indexed::Static(18).encode(&mut buf); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(2)); let Decoded { fields, dyn_ref, .. 
} = decoder.decode_header(&mut read).unwrap(); assert!(dyn_ref); assert_eq!( fields, &[field(2), field(1), StaticTable::get(18).unwrap().clone()] ) } // Largest Reference // Base Index = 2 // | // foo4 foo3 foo2 foo1 // +---+-----+-----+-----+ // | 4 | 3 | 2 | 1 | Absolute Index // +---+-----+-----+-----+ // | 0 | 1 | Relative Index // +-----+-----+---+-----+ // | 1 | 0 | Post-Base Index // +---+-----+ #[test] fn decode_post_base_indexed() { let mut buf = vec![]; HeaderPrefix::new(4, 2, 4, TABLE_SIZE).encode(&mut buf); Indexed::Dynamic(0).encode(&mut buf); IndexedWithPostBase(0).encode(&mut buf); IndexedWithPostBase(1).encode(&mut buf); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(4)); let Decoded { fields, dyn_ref, .. } = decoder.decode_header(&mut read).unwrap(); assert!(dyn_ref); assert_eq!(fields, &[field(2), field(3), field(4)]) } #[test] fn decode_name_ref_header_field() { let mut buf = vec![]; HeaderPrefix::new(2, 2, 4, TABLE_SIZE).encode(&mut buf); LiteralWithNameRef::new_dynamic(1, "new bar1") .encode(&mut buf) .unwrap(); LiteralWithNameRef::new_static(18, "PUT") .encode(&mut buf) .unwrap(); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(4)); let Decoded { fields, dyn_ref, .. } = decoder.decode_header(&mut read).unwrap(); assert!(dyn_ref); assert_eq!( fields, &[ field(1).with_value("new bar1"), StaticTable::get(18).unwrap().with_value("PUT") ] ) } #[test] fn decode_post_base_name_ref_header_field() { let mut buf = vec![]; HeaderPrefix::new(2, 2, 4, TABLE_SIZE).encode(&mut buf); LiteralWithPostBaseNameRef::new(0, "new bar3") .encode(&mut buf) .unwrap(); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(4)); let Decoded { fields, .. } = decoder.decode_header(&mut read).unwrap(); assert_eq!(fields, &[field(3).with_value("new bar3")]); } #[test] fn decode_without_name_ref_header_field() { let mut buf = vec![]; HeaderPrefix::new(0, 0, 0, TABLE_SIZE).encode(&mut buf); Literal::new("foo", "bar").encode(&mut buf).unwrap(); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(0)); let Decoded { fields, .. } = decoder.decode_header(&mut read).unwrap(); assert_eq!( fields, &[HeaderField::new(b"foo".to_vec(), b"bar".to_vec())] ); } // Largest Reference = 4 // | Base Index = 0 // | | // foo4 foo3 foo2 foo1 // +---+-----+-----+-----+ // | 4 | 3 | 2 | 1 | Absolute Index // +---+-----+-----+-----+ // Relative Index // +---+-----+-----+-----+ // | 2 | 2 | 1 | 0 | Post-Base Index // +---+-----+-----+-----+ #[test] fn decode_single_pass_encoded() { let mut buf = vec![]; HeaderPrefix::new(4, 0, 4, TABLE_SIZE).encode(&mut buf); IndexedWithPostBase(0).encode(&mut buf); IndexedWithPostBase(1).encode(&mut buf); IndexedWithPostBase(2).encode(&mut buf); IndexedWithPostBase(3).encode(&mut buf); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(4)); let Decoded { fields, .. 
} = decoder.decode_header(&mut read).unwrap(); assert_eq!(fields, &[field(1), field(2), field(3), field(4)]); } #[test] fn largest_ref_greater_than_max_entries() { let max_entries = TABLE_SIZE / 32; // some fields evicted let table = build_table_with_size(max_entries + 10); let mut buf = vec![]; // Pre-base relative reference HeaderPrefix::new( max_entries + 5, max_entries + 5, max_entries + 10, TABLE_SIZE, ) .encode(&mut buf); Indexed::Dynamic(10).encode(&mut buf); let mut read = Cursor::new(&buf); let decoder = Decoder::from(build_table_with_size(max_entries + 10)); let Decoded { fields, .. } = decoder.decode_header(&mut read).expect("decode"); assert_eq!(fields, &[field(max_entries - 5)]); let mut buf = vec![]; // Post-base reference HeaderPrefix::new( max_entries + 10, max_entries + 5, max_entries + 10, TABLE_SIZE, ) .encode(&mut buf); IndexedWithPostBase(0).encode(&mut buf); IndexedWithPostBase(4).encode(&mut buf); let mut read = Cursor::new(&buf); let decoder = Decoder::from(table); let Decoded { fields, .. } = decoder.decode_header(&mut read).unwrap(); assert_eq!(fields, &[field(max_entries + 6), field(max_entries + 10)]); } } h3-0.0.6/src/qpack/dynamic.rs000064400000000000000000001272271046102023000140340ustar 00000000000000use std::{ borrow::Cow, collections::{btree_map::Entry as BTEntry, hash_map::Entry, BTreeMap, HashMap, VecDeque}, }; use super::{field::HeaderField, static_::StaticTable}; use crate::qpack::vas::{self, VirtualAddressSpace}; /** * https://www.rfc-editor.org/rfc/rfc9204.html#maximum-dynamic-table-capacity */ const SETTINGS_MAX_TABLE_CAPACITY_MAX: usize = 1_073_741_823; // 2^30 -1 const SETTINGS_MAX_BLOCKED_STREAMS_MAX: usize = 65_535; // 2^16 - 1 #[derive(Debug, PartialEq)] pub enum Error { BadRelativeIndex(usize), BadPostbaseIndex(usize), BadIndex(usize), MaxTableSizeReached, MaximumTableSizeTooLarge, MaxBlockedStreamsTooLarge, UnknownStreamId(u64), NoTrackingData, InvalidTrackingCount, } pub struct DynamicTableDecoder<'a> { table: &'a DynamicTable, base: usize, } impl<'a> DynamicTableDecoder<'a> { pub(super) fn get_relative(&self, index: usize) -> Result<&HeaderField, Error> { let real_index = self.table.vas.relative_base(self.base, index)?; self.table .fields .get(real_index) .ok_or(Error::BadIndex(real_index)) } pub(super) fn get_postbase(&self, index: usize) -> Result<&HeaderField, Error> { let real_index = self.table.vas.post_base(self.base, index)?; self.table .fields .get(real_index) .ok_or(Error::BadIndex(real_index)) } } pub struct DynamicTableEncoder<'a> { table: &'a mut DynamicTable, base: usize, commited: bool, stream_id: u64, block_refs: HashMap, } impl<'a> Drop for DynamicTableEncoder<'a> { fn drop(&mut self) { if !self.commited { // TODO maybe possible to replace and not clone here? // HOW Err should be handled? 
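// If the encoder is dropped without commit(), e.g. because the header block was
// never sent, the references recorded in `block_refs` are released again so the
// corresponding dynamic table entries become evictable.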
self.table .track_cancel(self.block_refs.iter().map(|(x, y)| (*x, *y))) .ok(); } } } impl<'a> DynamicTableEncoder<'a> { pub(super) fn max_size(&self) -> usize { self.table.max_size } pub(super) fn base(&self) -> usize { self.base } pub(super) fn total_inserted(&self) -> usize { self.table.total_inserted() } pub(super) fn commit(&mut self, largest_ref: usize) { self.table .track_block(self.stream_id, self.block_refs.clone()); self.table.register_blocked(largest_ref); self.commited = true; } pub(super) fn find(&mut self, field: &HeaderField) -> DynamicLookupResult { self.lookup_result(self.table.field_map.get(field).cloned()) } fn lookup_result(&mut self, absolute: Option) -> DynamicLookupResult { match absolute { Some(absolute) if absolute <= self.base => { self.track_ref(absolute); DynamicLookupResult::Relative { index: self.base - absolute, absolute, } } Some(absolute) if absolute > self.base => { self.track_ref(absolute); DynamicLookupResult::PostBase { index: absolute - self.base - 1, absolute, } } _ => DynamicLookupResult::NotFound, } } pub(super) fn insert(&mut self, field: &HeaderField) -> Result { if self.table.blocked_count >= self.table.blocked_max { return Ok(DynamicInsertionResult::NotInserted( self.find_name(&field.name), )); } let index = match self.table.insert(field.clone()) { Ok(Some(index)) => index, Err(Error::MaxTableSizeReached) | Ok(None) => { return Ok(DynamicInsertionResult::NotInserted( self.find_name(&field.name), )); } Err(e) => return Err(e), }; self.track_ref(index); let field_index = match self.table.field_map.entry(field.clone()) { Entry::Occupied(mut e) => { let ref_index = e.insert(index); self.table .name_map .entry(field.name.clone()) .and_modify(|i| *i = index); Some(( ref_index, DynamicInsertionResult::Duplicated { relative: index - ref_index - 1, postbase: index - self.base - 1, absolute: index, }, )) } Entry::Vacant(e) => { e.insert(index); None } }; if let Some((ref_index, result)) = field_index { self.track_ref(ref_index); return Ok(result); } if let Some(static_idx) = StaticTable::find_name(&field.name) { return Ok(DynamicInsertionResult::InsertedWithStaticNameRef { postbase: index - self.base - 1, index: static_idx, absolute: index, }); } let result = match self.table.name_map.entry(field.name.clone()) { Entry::Occupied(mut e) => { let ref_index = e.insert(index); self.track_ref(ref_index); DynamicInsertionResult::InsertedWithNameRef { postbase: index - self.base - 1, relative: index - ref_index - 1, absolute: index, } } Entry::Vacant(e) => { e.insert(index); DynamicInsertionResult::Inserted { postbase: index - self.base - 1, absolute: index, } } }; Ok(result) } fn find_name(&mut self, name: &[u8]) -> DynamicLookupResult { if let Some(index) = StaticTable::find_name(name) { return DynamicLookupResult::Static(index); } self.lookup_result(self.table.name_map.get(name).cloned()) } fn track_ref(&mut self, reference: usize) { self.block_refs .entry(reference) .and_modify(|c| *c += 1) .or_insert(1); self.table.track_ref(reference); } } #[derive(Debug, PartialEq)] pub enum DynamicLookupResult { Static(usize), Relative { index: usize, absolute: usize }, PostBase { index: usize, absolute: usize }, NotFound, } #[derive(Debug, PartialEq)] pub enum DynamicInsertionResult { Inserted { postbase: usize, absolute: usize, }, Duplicated { relative: usize, postbase: usize, absolute: usize, }, InsertedWithNameRef { postbase: usize, relative: usize, absolute: usize, }, InsertedWithStaticNameRef { postbase: usize, index: usize, absolute: usize, }, 
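// Nothing was inserted: the table was full, the field was too large, or the
// blocked-streams limit was reached. The embedded lookup result tells the
// caller how the field can still be referenced (or encoded as a literal).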
NotInserted(DynamicLookupResult), } #[derive(Default)] pub struct DynamicTable { fields: VecDeque, curr_size: usize, max_size: usize, vas: VirtualAddressSpace, field_map: HashMap, name_map: HashMap, usize>, track_map: BTreeMap, track_blocks: HashMap>>, largest_known_received: usize, blocked_max: usize, blocked_count: usize, blocked_streams: BTreeMap, // } impl DynamicTable { pub fn new() -> DynamicTable { DynamicTable::default() } pub fn decoder(&self, base: usize) -> DynamicTableDecoder { DynamicTableDecoder { table: self, base } } pub fn encoder(&mut self, stream_id: u64) -> DynamicTableEncoder { for (idx, field) in self.fields.iter().enumerate() { self.name_map .insert(field.name.clone(), self.vas.index(idx).unwrap()); self.field_map .insert(field.clone(), self.vas.index(idx).unwrap()); } DynamicTableEncoder { base: self.vas.largest_ref(), table: self, block_refs: HashMap::new(), commited: false, stream_id, } } pub fn set_max_blocked(&mut self, max: usize) -> Result<(), Error> { // TODO handle existing data if max >= SETTINGS_MAX_BLOCKED_STREAMS_MAX { return Err(Error::MaxBlockedStreamsTooLarge); } self.blocked_max = max; Ok(()) } pub fn set_max_size(&mut self, size: usize) -> Result<(), Error> { if size > SETTINGS_MAX_TABLE_CAPACITY_MAX { return Err(Error::MaximumTableSizeTooLarge); } if size >= self.max_size { self.max_size = size; return Ok(()); } let required = self.max_size - size; if let Some(to_evict) = self.can_free(required)? { self.evict(to_evict)?; } self.max_size = size; Ok(()) } pub(super) fn put(&mut self, field: HeaderField) -> Result<(), Error> { let index = match self.insert(field.clone())? { Some(index) => index, None => return Ok(()), }; self.field_map .entry(field.clone()) .and_modify(|e| *e = index) .or_insert(index); if StaticTable::find_name(&field.name).is_some() { return Ok(()); } self.name_map .entry(field.name.clone()) .and_modify(|e| *e = index) .or_insert(index); Ok(()) } pub(super) fn get_relative(&self, index: usize) -> Result<&HeaderField, Error> { let real_index = self.vas.relative(index)?; self.fields .get(real_index) .ok_or(Error::BadIndex(real_index)) } pub(super) fn total_inserted(&self) -> usize { self.vas.total_inserted() } pub(super) fn untrack_block(&mut self, stream_id: u64) -> Result<(), Error> { let mut entry = self.track_blocks.entry(stream_id); let block = match entry { Entry::Occupied(ref mut blocks) if blocks.get().len() > 1 => { blocks.get_mut().pop_front() } Entry::Occupied(blocks) => blocks.remove().pop_front(), Entry::Vacant { .. } => return Err(Error::UnknownStreamId(stream_id)), }; if let Some(b) = block { self.track_cancel(b.iter().map(|(x, y)| (*x, *y)))?; } Ok(()) } fn insert(&mut self, field: HeaderField) -> Result, Error> { if self.max_size == 0 { return Ok(None); } match self.can_free(field.mem_size())? 
{ None => return Ok(None), Some(to_evict) => { self.evict(to_evict)?; } } self.curr_size += field.mem_size(); self.fields.push_back(field); let absolute = self.vas.add(); Ok(Some(absolute)) } fn evict(&mut self, to_evict: usize) -> Result<(), Error> { for _ in 0..to_evict { let field = self.fields.pop_front().ok_or(Error::MaxTableSizeReached)?; //TODO better type self.curr_size -= field.mem_size(); self.vas.drop(); if let Entry::Occupied(e) = self.name_map.entry(field.name.clone()) { if self.vas.evicted(*e.get()) { e.remove(); } } if let Entry::Occupied(e) = self.field_map.entry(field) { if self.vas.evicted(*e.get()) { e.remove(); } } } Ok(()) } fn can_free(&mut self, required: usize) -> Result, Error> { if required > self.max_size { return Err(Error::MaxTableSizeReached); } if self.max_size - self.curr_size >= required { return Ok(Some(0)); } let lower_bound = self.max_size - required; let mut hypothetic_mem_size = self.curr_size; let mut evictable = 0; for (idx, to_evict) in self.fields.iter().enumerate() { if hypothetic_mem_size <= lower_bound { break; } if self.is_tracked(self.vas.index(idx).unwrap()) { // TODO handle out of bounds error break; } evictable += 1; hypothetic_mem_size -= to_evict.mem_size(); } if required <= self.max_size - hypothetic_mem_size { Ok(Some(evictable)) } else { Ok(None) } } fn track_ref(&mut self, reference: usize) { self.track_map .entry(reference) .and_modify(|c| *c += 1) .or_insert(1); } fn is_tracked(&self, reference: usize) -> bool { matches!(self.track_map.get(&reference), Some(count) if *count > 0) } fn track_block(&mut self, stream_id: u64, refs: HashMap) { match self.track_blocks.entry(stream_id) { Entry::Occupied(mut e) => { e.get_mut().push_back(refs); } Entry::Vacant(e) => { let mut blocks = VecDeque::with_capacity(2); blocks.push_back(refs); e.insert(blocks); } } } fn track_cancel(&mut self, refs: T) -> Result<(), Error> where T: IntoIterator, { for (reference, count) in refs { match self.track_map.entry(reference) { BTEntry::Occupied(mut e) => { use std::cmp::Ordering; match e.get().cmp(&count) { Ordering::Less => { return Err(Error::InvalidTrackingCount); } Ordering::Equal => { e.remove(); // TODO just pu 0 ? 
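// Once the reference count drops to zero the entry is removed entirely,
// so is_tracked() no longer protects the referenced entry from eviction.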
} _ => *e.get_mut() -= count, } } BTEntry::Vacant(_) => return Err(Error::InvalidTrackingCount), } } Ok(()) } fn register_blocked(&mut self, largest: usize) { if largest <= self.largest_known_received { return; } self.blocked_count += 1; match self.blocked_streams.entry(largest) { BTEntry::Occupied(mut e) => { let entry = e.get_mut(); *entry += 1; } BTEntry::Vacant(e) => { e.insert(1); } } } pub fn update_largest_received(&mut self, increment: usize) { self.largest_known_received += increment; if self.blocked_count == 0 { return; } let blocked = self .blocked_streams .split_off(&(self.largest_known_received + 1)); let acked = std::mem::replace(&mut self.blocked_streams, blocked); if !acked.is_empty() { let total_acked = acked.iter().fold(0usize, |t, (_, v)| t + v); self.blocked_count -= total_acked; } } pub(super) fn max_mem_size(&self) -> usize { self.max_size } } impl From for Error { fn from(e: vas::Error) -> Self { match e { vas::Error::RelativeIndex(e) => Error::BadRelativeIndex(e), vas::Error::PostbaseIndex(e) => Error::BadPostbaseIndex(e), vas::Error::Index(e) => Error::BadIndex(e), } } } #[cfg(test)] mod tests { #![allow(clippy::identity_op)] use super::*; use crate::qpack::{static_::StaticTable, tests::helpers::build_table}; const STREAM_ID: u64 = 0x4; // Test on table size /** * https://tools.ietf.org/html/rfc7541#section-4.1 * "The size of the dynamic table is the sum of the size of its entries." */ #[test] fn test_table_size_is_sum_of_its_entries() { let mut table = build_table(); let fields: [(&'static str, &'static str); 2] = [ ("Name", "Value"), ("Another-Name", ""), // no value ]; let table_size = 4 + 5 + 12 + 0 + /* ESTIMATED_OVERHEAD_BYTES */ 32 * 2; for pair in fields.iter() { let field = HeaderField::new(pair.0, pair.1); table.insert(field).unwrap(); } assert_eq!(table.curr_size, table_size); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-maximum-dynamic-table-capac * "To bound the memory requirements of the decoder, the decoder * limits the maximum value the encoder is permitted to set for the * dynamic table capacity. In HTTP/3, this limit is determined by * the value of SETTINGS_QPACK_MAX_TABLE_CAPACITY sent by the * decoder; see Section 5." */ #[test] fn test_try_set_too_large_maximum_table_size() { let mut table = build_table(); let invalid_size = SETTINGS_MAX_TABLE_CAPACITY_MAX + 10; let res_change = table.set_max_size(invalid_size); assert_eq!(res_change, Err(Error::MaximumTableSizeTooLarge)); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-dynamic-table-capacity-and- * "This mechanism can be used to completely clear entries from the * dynamic table by setting a maximum size of 0, which can subsequently * be restored." */ #[test] fn test_maximum_table_size_can_reach_zero() { let mut table = build_table(); let res_change = table.set_max_size(0); assert!(res_change.is_ok()); assert_eq!(table.max_mem_size(), 0); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-maximum-dynamic-table-capac * "To bound the memory requirements of the decoder, the decoder * limits the maximum value the encoder is permitted to set for the * dynamic table capacity. In HTTP/3, this limit is determined by * the value of SETTINGS_QPACK_MAX_TABLE_CAPACITY sent by the * decoder; see Section 5." 
*/ #[test] fn test_maximum_table_size_can_reach_maximum() { let mut table = build_table(); let res_change = table.set_max_size(SETTINGS_MAX_TABLE_CAPACITY_MAX); assert!(res_change.is_ok()); assert_eq!(table.max_mem_size(), SETTINGS_MAX_TABLE_CAPACITY_MAX); } // Test duplicated fields /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-dynamic-table * "The dynamic table can contain duplicate entries (i.e., entries with * the same name and same value). Therefore, duplicate entries MUST NOT * be treated as an error by a decoder." */ #[test] fn test_table_supports_duplicated_entries() { let mut table = build_table(); table.insert(HeaderField::new("Name", "Value")).unwrap(); table.insert(HeaderField::new("Name", "Value")).unwrap(); assert_eq!(table.fields.len(), 2); } // Test adding fields /** functional test */ #[test] fn test_add_field_fitting_free_space() { let mut table = build_table(); table.insert(HeaderField::new("Name", "Value")).unwrap(); assert_eq!(table.fields.len(), 1); } /** functional test */ #[test] fn test_add_field_reduce_free_space() { let mut table = build_table(); let field = HeaderField::new("Name", "Value"); table.insert(field.clone()).unwrap(); assert_eq!(table.curr_size, field.mem_size()); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-dynamic-table-capacity-and- * "Before a new entry is added to the dynamic table, entries are evicted * from the end of the dynamic table until the size of the dynamic table * is less than or equal to (maximum size - new entry size) or until the * table is empty." */ #[test] fn test_add_field_drop_older_fields_to_have_enough_space() { let mut table = build_table(); table.insert(HeaderField::new("Name-A", "Value-A")).unwrap(); table.insert(HeaderField::new("Name-B", "Value-B")).unwrap(); let perfect_size = table.curr_size; assert!(table.set_max_size(perfect_size).is_ok()); let field = HeaderField::new("Name-Large", "Value-Large"); table.insert(field).unwrap(); assert_eq!(table.fields.len(), 1); assert_eq!( table.fields.front(), Some(&HeaderField::new("Name-Large", "Value-Large")) ); } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-dynamic-table-capacity-and- * "It is an error if the encoder attempts to add an entry that is * larger than the dynamic table capacity; the decoder MUST treat * this as a connection error of type QPACK_ENCODER_STREAM_ERROR." */ #[test] fn test_try_add_field_larger_than_maximum_size() { let mut table = build_table(); table.insert(HeaderField::new("Name-A", "Value-A")).unwrap(); let perfect_size = table.curr_size; assert!(table.set_max_size(perfect_size).is_ok()); let field = HeaderField::new("Name-Large", "Value-Large"); assert_eq!(table.insert(field), Err(Error::MaxTableSizeReached)); } fn insert_fields(table: &mut DynamicTable, fields: Vec) { for field in fields { table.insert(field).unwrap(); } } /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-dynamic-table-capacity-and- * "This mechanism can be used to completely clear entries from the * dynamic table by setting a maximum size of 0, which can subsequently * be restored." 
*/ #[test] fn test_set_maximum_table_size_to_zero_clear_entries() { let mut table = build_table(); insert_fields( &mut table, vec![ HeaderField::new("Name", "Value"), HeaderField::new("Name", "Value"), ], ); assert_eq!(table.fields.len(), 2); table.set_max_size(0).unwrap(); assert_eq!(table.fields.len(), 0); } /** functional test */ #[test] fn test_eviction_is_fifo() { let mut table = build_table(); insert_fields( &mut table, vec![ HeaderField::new("Name-A", "Value-A"), HeaderField::new("Name-B", "Value-B"), ], ); let perfect_size = table.curr_size; assert!(table.set_max_size(perfect_size).is_ok()); insert_fields(&mut table, vec![HeaderField::new("Name-C", "Value-C")]); assert_eq!( table.fields.front(), Some(&HeaderField::new("Name-B", "Value-B")) ); assert_eq!( table.fields.get(1), Some(&HeaderField::new("Name-C", "Value-C")) ); assert_eq!(table.fields.get(2), None); } #[test] fn encoder_build() { let mut table = build_table(); let field_a = HeaderField::new("Name-A", "Value-A"); let field_b = HeaderField::new("Name-B", "Value-B"); insert_fields(&mut table, vec![field_a.clone(), field_b.clone()]); let encoder = table.encoder(STREAM_ID); assert_eq!(encoder.base, 2); assert_eq!(encoder.table.name_map.len(), 2); assert_eq!(encoder.table.field_map.len(), 2); assert_eq!(encoder.table.name_map.get(&field_a.name).copied(), Some(1)); assert_eq!(encoder.table.name_map.get(&field_b.name).copied(), Some(2)); assert_eq!(encoder.table.field_map.get(&field_a).copied(), Some(1)); assert_eq!(encoder.table.field_map.get(&field_b).copied(), Some(2)); } #[test] fn encoder_find_relative() { let mut table = build_table(); let field_a = HeaderField::new("Name-A", "Value-A"); let field_b = HeaderField::new("Name-B", "Value-B"); insert_fields(&mut table, vec![field_a.clone(), field_b.clone()]); let mut encoder = table.encoder(STREAM_ID); assert_eq!( encoder.find(&field_a), DynamicLookupResult::Relative { index: 1, absolute: 1 } ); assert_eq!( encoder.find(&field_b), DynamicLookupResult::Relative { index: 0, absolute: 2 } ); assert_eq!( encoder.find(&HeaderField::new("Name-C", "Value-C")), DynamicLookupResult::NotFound ); assert_eq!( encoder.find_name(&field_a.name), DynamicLookupResult::Relative { index: 1, absolute: 1 } ); assert_eq!( encoder.find_name(&field_b.name), DynamicLookupResult::Relative { index: 0, absolute: 2 } ); assert_eq!( encoder.find_name(&b"Name-C"[..]), DynamicLookupResult::NotFound ); } #[test] fn encoder_insert() { let mut table = build_table(); let field_a = HeaderField::new("Name-A", "Value-A"); let field_b = HeaderField::new("Name-B", "Value-B"); insert_fields(&mut table, vec![field_a.clone(), field_b.clone()]); let mut encoder = table.encoder(STREAM_ID); assert_eq!( encoder.insert(&field_a), Ok(DynamicInsertionResult::Duplicated { postbase: 0, relative: 1, absolute: 3 }) ); assert_eq!( encoder.insert(&field_b.with_value("New Value-B")), Ok(DynamicInsertionResult::InsertedWithNameRef { postbase: 1, relative: 1, absolute: 4, }) ); assert_eq!( encoder.insert(&field_b.with_value("Newer Value-B")), Ok(DynamicInsertionResult::InsertedWithNameRef { postbase: 2, relative: 0, absolute: 5, }) ); let field_c = HeaderField::new("Name-C", "Value-C"); assert_eq!( encoder.insert(&field_c), Ok(DynamicInsertionResult::Inserted { postbase: 3, absolute: 6, }) ); assert_eq!(encoder.table.fields.len(), 6); assert_eq!( encoder.table.fields, &[ field_a.clone(), field_b.clone(), field_a.clone(), field_b.with_value("New Value-B"), field_b.with_value("Newer Value-B"), field_c ] ); 
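// name_map always points at the most recent insertion for a given name, while
// field_map is keyed on the exact (name, value) pair, so the original
// "Value-B" entry keeps its old absolute index below.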
assert_eq!(encoder.table.name_map.get(&field_a.name).copied(), Some(3)); assert_eq!(encoder.table.name_map.get(&field_b.name).copied(), Some(5)); assert_eq!(encoder.table.field_map.get(&field_a).copied(), Some(3)); assert_eq!(encoder.table.field_map.get(&field_b).copied(), Some(2)); assert_eq!( encoder .table .field_map .get(&field_b.with_value("New Value-B")) .copied(), Some(4) ); assert_eq!( encoder .table .field_map .get(&field_b.with_value("Newer Value-B")) .copied(), Some(5) ); } #[test] fn encode_insert_in_empty() { let mut table = build_table(); let field_a = HeaderField::new("Name-A", "Value-A"); let mut encoder = table.encoder(STREAM_ID); assert_eq!( encoder.insert(&field_a), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 1, }) ); assert_eq!(encoder.table.fields.len(), 1); assert_eq!(encoder.table.fields, &[field_a.clone()]); assert_eq!(encoder.table.name_map.get(&field_a.name).copied(), Some(1)); assert_eq!(encoder.table.field_map.get(&field_a).copied(), Some(1)); } #[test] fn insert_static() { let mut table = build_table(); let field = HeaderField::new(":method", "Value-A"); table.insert(field.clone()).unwrap(); assert_eq!(StaticTable::find_name(&field.name), Some(15)); let mut encoder = table.encoder(STREAM_ID); assert_eq!( encoder.insert(&field), Ok(DynamicInsertionResult::Duplicated { relative: 0, postbase: 0, absolute: 2 }) ); assert_eq!( encoder.insert(&field.with_value("Value-B")), Ok(DynamicInsertionResult::InsertedWithStaticNameRef { postbase: 1, index: 15, absolute: 3 }) ); assert_eq!( encoder.insert(&HeaderField::new(":path", "/baz")), Ok(DynamicInsertionResult::InsertedWithStaticNameRef { postbase: 2, index: 1, absolute: 4, }) ); assert_eq!(encoder.table.fields.len(), 4); } #[test] fn cannot_insert_field_greater_than_total_size() { let mut table = build_table(); table.set_max_size(33).unwrap(); let mut encoder = table.encoder(4); assert_eq!( encoder.insert(&HeaderField::new("foo", "bar")), Ok(DynamicInsertionResult::NotInserted( DynamicLookupResult::NotFound )) ); } #[test] fn encoder_maps_are_cleaned_on_eviction() { let mut table = build_table(); table.set_max_size(64).unwrap(); { let mut encoder = table.encoder(4); assert_eq!( encoder.insert(&HeaderField::new("foo", "bar")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 1 }) ); encoder.commit(1); } table.untrack_block(4).unwrap(); { let mut encoder = table.encoder(4); assert_eq!( encoder.insert(&HeaderField::new("foo2", "bar")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 2 }) ); assert_eq!( encoder.find(&HeaderField::new("foo", "bar")), DynamicLookupResult::NotFound ); assert_eq!(encoder.find_name(b"foo"), DynamicLookupResult::NotFound); encoder.commit(2); } } #[test] fn encoder_can_evict_unreferenced() { let mut table = build_table(); table.set_max_size(63).unwrap(); table.insert(HeaderField::new("foo", "bar")).unwrap(); assert_eq!(table.fields.len(), 1); assert_eq!( table.encoder(4).insert(&HeaderField::new("baz", "quxx")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 2, }) ); assert_eq!(table.fields.len(), 1); } #[test] fn encoder_insertion_tracks_ref() { let mut table = build_table(); let mut encoder = table.encoder(4); assert_eq!( encoder.insert(&HeaderField::new("baz", "quxx")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 1, }) ); assert_eq!(encoder.table.track_map.get(&1).copied(), Some(1)); assert_eq!(encoder.block_refs.get(&1).copied(), Some(1)); } #[test] fn encoder_insertion_refs_commited() { let mut table = build_table(); let 
stream_id = 42; { let mut encoder = table.encoder(stream_id); for idx in 1..4 { encoder .insert(&HeaderField::new(format!("foo{}", idx), "quxx")) .unwrap(); } assert_eq!(encoder.block_refs.len(), 3); encoder.commit(2); } for idx in 1..4 { assert!(table.is_tracked(idx)); assert_eq!(table.track_map.get(&1), Some(&1)); } let track_blocks = table.track_blocks; let block = track_blocks.get(&stream_id).unwrap().front().unwrap(); assert_eq!(block.get(&1), Some(&1)); assert_eq!(block.get(&2), Some(&1)); assert_eq!(block.get(&3), Some(&1)); } #[test] fn encoder_insertion_refs_not_commited() { let mut table = build_table(); table.track_blocks = HashMap::new(); let stream_id = 42; { let mut encoder = table.encoder(stream_id); for idx in 1..4 { encoder .insert(&HeaderField::new(format!("foo{}", idx), "quxx")) .unwrap(); } assert_eq!(encoder.block_refs.len(), 3); } // dropped without ::commit() assert_eq!(table.track_map.len(), 0); assert_eq!(table.track_blocks.len(), 0); } #[test] fn encoder_insertion_with_ref_tracks_both() { let mut table = build_table(); table.insert(HeaderField::new("foo", "bar")).unwrap(); table.track_blocks = HashMap::new(); let stream_id = 42; let mut encoder = table.encoder(stream_id); assert_eq!( encoder.insert(&HeaderField::new("foo", "quxx")), Ok(DynamicInsertionResult::InsertedWithNameRef { postbase: 0, relative: 0, absolute: 2, }) ); assert_eq!(encoder.table.track_map.get(&1), Some(&1)); assert_eq!(encoder.table.track_map.get(&2), Some(&1)); assert_eq!(encoder.block_refs.get(&1), Some(&1)); assert_eq!(encoder.block_refs.get(&2), Some(&1)); } #[test] fn encoder_ref_count_are_incremented() { let mut table = build_table(); table.insert(HeaderField::new("foo", "bar")).unwrap(); table.track_blocks = HashMap::new(); table.track_ref(1); let stream_id = 42; { let mut encoder = table.encoder(stream_id); encoder.track_ref(1); encoder.track_ref(2); encoder.track_ref(2); assert_eq!(encoder.table.track_map.get(&1), Some(&2)); assert_eq!(encoder.table.track_map.get(&2), Some(&2)); assert_eq!(encoder.block_refs.get(&1), Some(&1)); assert_eq!(encoder.block_refs.get(&2), Some(&2)); } // check ref count is correctly decremented after uncommited drop() assert_eq!(table.track_map.get(&1), Some(&1)); assert_eq!(table.track_map.get(&2), None); } #[test] fn encoder_does_not_evict_referenced() { let mut table = build_table(); table.set_max_size(95).unwrap(); table.insert(HeaderField::new("foo", "bar")).unwrap(); let stream_id = 42; let mut encoder = table.encoder(stream_id); assert_eq!( encoder.insert(&HeaderField::new("foo", "quxx")), Ok(DynamicInsertionResult::InsertedWithNameRef { postbase: 0, relative: 0, absolute: 2, }) ); assert!(encoder.table.is_tracked(1)); assert_eq!( encoder.insert(&HeaderField::new("foo", "baz")), Ok(DynamicInsertionResult::NotInserted( DynamicLookupResult::PostBase { index: 0, absolute: 2, } )) ); assert_eq!(encoder.table.fields.len(), 2); } fn tracked_table(stream_id: u64) -> DynamicTable { let mut table = build_table(); table.track_blocks = HashMap::new(); { let mut encoder = table.encoder(stream_id); for idx in 1..4 { encoder .insert(&HeaderField::new(format!("foo{}", idx), "quxx")) .unwrap(); } assert_eq!(encoder.block_refs.len(), 3); encoder.commit(3); } table } #[test] fn untrack_block() { let mut table = tracked_table(42); assert_eq!(table.track_map.len(), 3); assert_eq!(table.track_blocks.len(), 1); table.untrack_block(42).unwrap(); assert_eq!(table.track_map.len(), 0); assert_eq!(table.track_blocks.len(), 0); } #[test] fn untrack_block_not_in_map() { let mut 
table = tracked_table(42); table.track_map.remove(&2); assert_eq!(table.untrack_block(42), Err(Error::InvalidTrackingCount)); } #[test] fn untrack_block_wrong_count() { let mut table = tracked_table(42); table.track_blocks.entry(42).and_modify(|x| { x.get_mut(0).unwrap().entry(2).and_modify(|c| *c += 1); }); assert_eq!(table.untrack_block(42), Err(Error::InvalidTrackingCount)); } #[test] fn untrack_bloc_wrong_stream() { let mut table = tracked_table(41); assert_eq!(table.untrack_block(42), Err(Error::UnknownStreamId(42))); } #[test] fn untrack_trailers() { const STREAM_ID: u64 = 42; let mut table = tracked_table(STREAM_ID); { // encode trailers let mut encoder = table.encoder(STREAM_ID); for idx in 4..=9 { encoder .insert(&HeaderField::new(format!("foo{}", idx), "quxx")) .unwrap(); } assert_eq!(encoder.block_refs.len(), 6); encoder.commit(6); } assert_eq!(table.untrack_block(STREAM_ID), Ok(())); assert!(!table.is_tracked(3)); assert!(table.is_tracked(5)); assert_eq!(table.untrack_block(STREAM_ID), Ok(())); assert!(!table.is_tracked(6)); assert_eq!( table.untrack_block(STREAM_ID), Err(Error::UnknownStreamId(STREAM_ID)) ); } #[test] fn put_updates_maps() { let mut table = tracked_table(42); assert_eq!(table.name_map.len(), 3); assert_eq!(table.field_map.len(), 3); table.put(HeaderField::new("foo", "bar")).unwrap(); assert_eq!(table.name_map.len(), 4); assert_eq!(table.field_map.len(), 4); let field = HeaderField::new("foo1", "quxx"); table.put(field.clone()).unwrap(); assert_eq!(table.name_map.len(), 4); assert_eq!(table.field_map.len(), 4); assert_eq!(table.name_map.get(&b"foo1"[..]), Some(&5usize)); assert_eq!(table.field_map.get(&field), Some(&5usize)); } #[test] fn blocked_stream_registered() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); assert_eq!(table.blocked_count, 1); assert_eq!(table.blocked_streams.get(&3), Some(&1usize)) } #[test] fn blocked_stream_not_registered() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); table .encoder(44) .insert(&HeaderField::new("foo", "bar")) .unwrap(); // encoder dropped without commit assert_eq!(table.blocked_count, 1); assert_eq!(table.blocked_streams.get(&5), None); } #[test] fn blocked_stream_register_accumulate() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); { let mut encoder = table.encoder(44); assert_eq!( encoder.find(&HeaderField::new("foo3", "quxx")), DynamicLookupResult::Relative { index: 0, absolute: 3, } ); // the encoder inserts a reference to foo3 in a block (absolte index = 3) encoder.commit(3); } assert_eq!(table.blocked_count, 2); assert_eq!(table.blocked_streams.get(&3), Some(&2)); } #[test] fn blocked_stream_register_put_smaller() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); { let mut encoder = table.encoder(44); encoder.commit(2); } assert_eq!(table.blocked_count, 2); assert_eq!(table.blocked_streams.get(&2), Some(&1)); } #[test] fn blocked_stream_register_put_larger() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); { let mut encoder = table.encoder(44); encoder.commit(5); } assert_eq!(table.blocked_count, 2); assert_eq!(table.blocked_streams.get(&5), Some(&1)); } #[test] fn unblock_stream_smaller() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); { let mut encoder = table.encoder(44); encoder.commit(2); } assert_eq!(table.blocked_count, 2); assert_eq!(table.blocked_streams.get(&2), Some(&1)); table.update_largest_received(2); assert_eq!(table.blocked_count, 1); 
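// Acknowledging insert count 2 unblocks the stream waiting on absolute index 2;
// the stream still waiting on index 3 remains counted as blocked.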
assert_eq!(table.blocked_streams.get(&2), None); assert_eq!(table.blocked_streams.get(&3), Some(&1)); } #[test] fn unblock_stream_larger() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); table.encoder(44).commit(2); table.encoder(46).commit(5); assert_eq!(table.blocked_count, 3); assert_eq!(table.blocked_streams.get(&2), Some(&1)); assert_eq!(table.blocked_streams.get(&3), Some(&1)); table.update_largest_received(5); assert_eq!(table.blocked_count, 0); assert_eq!(table.blocked_streams.len(), 0); } #[test] fn unblock_stream_decrement() { let mut table = tracked_table(42); table.set_max_blocked(100).unwrap(); table.encoder(44).commit(3); assert_eq!(table.blocked_count, 2); assert_eq!(table.blocked_streams.get(&3), Some(&2)); table.update_largest_received(5); assert_eq!(table.blocked_count, 0); assert_eq!(table.blocked_streams.len(), 0); } #[test] fn no_insert_when_max_blocked_0() { let mut table = tracked_table(42); table.set_max_blocked(0).unwrap(); assert_eq!( table.encoder(44).insert(&HeaderField::new("foo", "bar")), Ok(DynamicInsertionResult::NotInserted( DynamicLookupResult::NotFound )) ); } #[test] fn no_insert_after_max_blocked_reached() { let mut table = tracked_table(42); table.set_max_blocked(2).unwrap(); { let mut encoder = table.encoder(44); assert_eq!( encoder.insert(&HeaderField::new("foo", "bar")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 4 }) ); encoder.commit(4); } assert_eq!(table.blocked_count, 2); let mut encoder = table.encoder(46); assert_eq!( encoder.insert(&HeaderField::new("foo99", "bar")), Ok(DynamicInsertionResult::NotInserted( DynamicLookupResult::NotFound )) ); } #[test] fn insert_again_after_encoder_ack() { let mut table = tracked_table(42); table.set_max_blocked(1).unwrap(); assert_eq!(table.blocked_count, 1); { let mut encoder = table.encoder(44); assert_eq!( encoder.insert(&HeaderField::new("foo99", "bar")), Ok(DynamicInsertionResult::NotInserted( DynamicLookupResult::NotFound )) ); encoder.commit(0); } table.update_largest_received(3); assert_eq!(table.blocked_count, 0); let mut encoder = table.encoder(46); assert_eq!( encoder.insert(&HeaderField::new("foo", "bar")), Ok(DynamicInsertionResult::Inserted { postbase: 0, absolute: 4 }) ); } } h3-0.0.6/src/qpack/encoder.rs000064400000000000000000000472571046102023000140330ustar 00000000000000use std::{cmp, io::Cursor}; use bytes::{Buf, BufMut}; use super::{ block::{ HeaderPrefix, Indexed, IndexedWithPostBase, Literal, LiteralWithNameRef, LiteralWithPostBaseNameRef, }, dynamic::{ DynamicInsertionResult, DynamicLookupResult, DynamicTable, DynamicTableEncoder, Error as DynamicTableError, }, parse_error::ParseError, prefix_int::Error as IntError, prefix_string::Error as StringError, static_::StaticTable, stream::{ DecoderInstruction, Duplicate, DynamicTableSizeUpdate, HeaderAck, InsertCountIncrement, InsertWithNameRef, InsertWithoutNameRef, StreamCancel, }, HeaderField, }; #[derive(Debug, PartialEq)] pub enum Error { Insertion(DynamicTableError), InvalidString(StringError), InvalidInteger(IntError), UnknownDecoderInstruction(u8), } impl std::error::Error for Error {} impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Insertion(e) => write!(f, "dynamic table insertion: {:?}", e), Error::InvalidString(e) => write!(f, "could not parse string: {}", e), Error::InvalidInteger(e) => write!(f, "could not parse integer: {}", e), Error::UnknownDecoderInstruction(e) => { write!(f, "got unkown decoder 
instruction: {}", e) } } } } pub struct Encoder { table: DynamicTable, } impl Encoder { pub fn encode( &mut self, stream_id: u64, block: &mut W, encoder_buf: &mut W, fields: T, ) -> Result where W: BufMut, T: IntoIterator, H: AsRef, { let mut required_ref = 0; let mut block_buf = Vec::new(); let mut encoder = self.table.encoder(stream_id); for field in fields { if let Some(reference) = Self::encode_field(&mut encoder, &mut block_buf, encoder_buf, field.as_ref())? { required_ref = cmp::max(required_ref, reference); } } HeaderPrefix::new( required_ref, encoder.base(), encoder.total_inserted(), encoder.max_size(), ) .encode(block); block.put(block_buf.as_slice()); encoder.commit(required_ref); Ok(required_ref) } pub fn on_decoder_recv(&mut self, read: &mut R) -> Result<(), Error> { while let Some(instruction) = Action::parse(read)? { match instruction { Action::Untrack(stream_id) => self.table.untrack_block(stream_id)?, Action::StreamCancel(stream_id) => { // Untrack block twice, as this stream might have a trailer in addition to // the header. Failures are ignored as blocks might have been acked before // cancellation. if self.table.untrack_block(stream_id).is_ok() { let _ = self.table.untrack_block(stream_id); } } Action::ReceivedRefIncrement(increment) => { self.table.update_largest_received(increment) } } } Ok(()) } fn encode_field( table: &mut DynamicTableEncoder, block: &mut Vec, encoder: &mut W, field: &HeaderField, ) -> Result, Error> { if let Some(index) = StaticTable::find(field) { Indexed::Static(index).encode(block); return Ok(None); } if let DynamicLookupResult::Relative { index, absolute } = table.find(field) { Indexed::Dynamic(index).encode(block); return Ok(Some(absolute)); } let reference = match table.insert(field)? { DynamicInsertionResult::Duplicated { relative, postbase, absolute, } => { Duplicate(relative).encode(encoder); IndexedWithPostBase(postbase).encode(block); Some(absolute) } DynamicInsertionResult::Inserted { postbase, absolute } => { InsertWithoutNameRef::new(field.name.clone(), field.value.clone()) .encode(encoder)?; IndexedWithPostBase(postbase).encode(block); Some(absolute) } DynamicInsertionResult::InsertedWithStaticNameRef { postbase, index, absolute, } => { InsertWithNameRef::new_static(index, field.value.clone()).encode(encoder)?; IndexedWithPostBase(postbase).encode(block); Some(absolute) } DynamicInsertionResult::InsertedWithNameRef { postbase, relative, absolute, } => { InsertWithNameRef::new_dynamic(relative, field.value.clone()).encode(encoder)?; IndexedWithPostBase(postbase).encode(block); Some(absolute) } DynamicInsertionResult::NotInserted(lookup_result) => match lookup_result { DynamicLookupResult::Static(index) => { LiteralWithNameRef::new_static(index, field.value.clone()).encode(block)?; None } DynamicLookupResult::Relative { index, absolute } => { LiteralWithNameRef::new_dynamic(index, field.value.clone()).encode(block)?; Some(absolute) } DynamicLookupResult::PostBase { index, absolute } => { LiteralWithPostBaseNameRef::new(index, field.value.clone()).encode(block)?; Some(absolute) } DynamicLookupResult::NotFound => { Literal::new(field.name.clone(), field.value.clone()).encode(block)?; None } }, }; Ok(reference) } } impl Default for Encoder { fn default() -> Self { Self { table: DynamicTable::new(), } } } pub fn encode_stateless(block: &mut W, fields: T) -> Result where W: BufMut, T: IntoIterator, H: AsRef, { let mut size = 0; HeaderPrefix::new(0, 0, 0, 0).encode(block); for field in fields { let field = field.as_ref(); if let Some(index) = 
StaticTable::find(field) { Indexed::Static(index).encode(block); } else if let Some(index) = StaticTable::find_name(&field.name) { LiteralWithNameRef::new_static(index, field.value.clone()).encode(block)?; } else { Literal::new(field.name.clone(), field.value.clone()).encode(block)?; } size += field.mem_size() as u64; } Ok(size) } #[cfg(test)] impl From for Encoder { fn from(table: DynamicTable) -> Encoder { Encoder { table } } } // Action to apply to the encoder table, given an instruction received from the decoder. #[derive(Debug, PartialEq)] enum Action { ReceivedRefIncrement(usize), Untrack(u64), StreamCancel(u64), } impl Action { fn parse(read: &mut R) -> Result, Error> { if read.remaining() < 1 { return Ok(None); } let mut buf = Cursor::new(read.chunk()); let first = buf.chunk()[0]; let instruction = match DecoderInstruction::decode(first) { DecoderInstruction::Unknown => return Err(Error::UnknownDecoderInstruction(first)), DecoderInstruction::InsertCountIncrement => InsertCountIncrement::decode(&mut buf)? .map(|x| Action::ReceivedRefIncrement(x.0 as usize)), DecoderInstruction::HeaderAck => { HeaderAck::decode(&mut buf)?.map(|x| Action::Untrack(x.0)) } DecoderInstruction::StreamCancel => { StreamCancel::decode(&mut buf)?.map(|x| Action::StreamCancel(x.0)) } }; if instruction.is_some() { let pos = buf.position(); read.advance(pos as usize); } Ok(instruction) } } pub fn set_dynamic_table_size( table: &mut DynamicTable, encoder: &mut W, size: usize, ) -> Result<(), Error> { table.set_max_size(size)?; DynamicTableSizeUpdate(size).encode(encoder); Ok(()) } impl From for Error { fn from(e: DynamicTableError) -> Self { Error::Insertion(e) } } impl From for Error { fn from(e: StringError) -> Self { Error::InvalidString(e) } } impl From for Error { fn from(e: ParseError) -> Self { match e { ParseError::Integer(x) => Error::InvalidInteger(x), ParseError::String(x) => Error::InvalidString(x), ParseError::InvalidPrefix(x) => Error::UnknownDecoderInstruction(x), _ => unreachable!(), } } } #[cfg(test)] mod tests { use super::*; use crate::qpack::tests::helpers::{build_table, TABLE_SIZE}; #[allow(clippy::type_complexity)] fn check_encode_field( init_fields: &[HeaderField], field: &[HeaderField], check: &dyn Fn(&mut Cursor<&mut Vec>, &mut Cursor<&mut Vec>), ) { let mut table = build_table(); table.set_max_size(TABLE_SIZE).unwrap(); check_encode_field_table(&mut table, init_fields, field, 1, check); } #[allow(clippy::type_complexity)] fn check_encode_field_table( table: &mut DynamicTable, init_fields: &[HeaderField], field: &[HeaderField], stream_id: u64, check: &dyn Fn(&mut Cursor<&mut Vec>, &mut Cursor<&mut Vec>), ) { for field in init_fields { table.put(field.clone()).unwrap(); } let mut encoder = Vec::new(); let mut block = Vec::new(); let mut enc_table = table.encoder(stream_id); for field in field { Encoder::encode_field(&mut enc_table, &mut block, &mut encoder, field).unwrap(); } enc_table.commit(field.len()); let mut read_block = Cursor::new(&mut block); let mut read_encoder = Cursor::new(&mut encoder); check(&mut read_block, &mut read_encoder); } #[test] fn encode_static() { let field = HeaderField::new(":method", "GET"); check_encode_field(&[], &[field], &|mut b, e| { assert_eq!(Indexed::decode(&mut b), Ok(Indexed::Static(17))); assert_eq!(e.get_ref().len(), 0); }); } #[test] fn encode_static_nameref() { let field = HeaderField::new("location", "/bar"); check_encode_field(&[], &[field], &|mut b, mut e| { assert_eq!( IndexedWithPostBase::decode(&mut b), Ok(IndexedWithPostBase(0)) ); 
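// "location" only matches the static table by name (index 12), so the value is
// inserted on the encoder stream via a static name reference and the header
// block refers to the new entry with a post-base index.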
assert_eq!( InsertWithNameRef::decode(&mut e), Ok(Some(InsertWithNameRef::new_static(12, "/bar"))) ); }); } #[test] fn encode_static_nameref_indexed_in_dynamic() { let field = HeaderField::new("location", "/bar"); check_encode_field(&[field.clone()], &[field], &|mut b, e| { assert_eq!(Indexed::decode(&mut b), Ok(Indexed::Dynamic(0))); assert_eq!(e.get_ref().len(), 0); }); } #[test] fn encode_dynamic_insert() { let field = HeaderField::new("foo", "bar"); check_encode_field(&[], &[field], &|mut b, mut e| { assert_eq!( IndexedWithPostBase::decode(&mut b), Ok(IndexedWithPostBase(0)) ); assert_eq!( InsertWithoutNameRef::decode(&mut e), Ok(Some(InsertWithoutNameRef::new("foo", "bar"))) ); }); } #[test] fn encode_dynamic_insert_nameref() { let field = HeaderField::new("foo", "bar"); check_encode_field( &[field.clone(), HeaderField::new("baz", "bar")], &[field.with_value("quxx")], &|mut b, mut e| { assert_eq!( IndexedWithPostBase::decode(&mut b), Ok(IndexedWithPostBase(0)) ); assert_eq!( InsertWithNameRef::decode(&mut e), Ok(Some(InsertWithNameRef::new_dynamic(1, "quxx"))) ); }, ); } #[test] fn encode_literal() { let mut table = build_table(); table.set_max_size(0).unwrap(); let field = HeaderField::new("foo", "bar"); check_encode_field_table(&mut table, &[], &[field], 1, &|mut b, e| { assert_eq!(Literal::decode(&mut b), Ok(Literal::new("foo", "bar"))); assert_eq!(e.get_ref().len(), 0); }); } #[test] fn encode_literal_nameref() { let mut table = build_table(); table.set_max_size(63).unwrap(); let field = HeaderField::new("foo", "bar"); check_encode_field_table(&mut table, &[], &[field.clone()], 1, &|mut b, _| { assert_eq!( IndexedWithPostBase::decode(&mut b), Ok(IndexedWithPostBase(0)) ); }); check_encode_field_table( &mut table, &[field.clone()], &[field.with_value("quxx")], 2, &|mut b, e| { assert_eq!( LiteralWithNameRef::decode(&mut b), Ok(LiteralWithNameRef::new_dynamic(0, "quxx")) ); assert_eq!(e.get_ref().len(), 0); }, ); } #[test] fn encode_literal_postbase_nameref() { let mut table = build_table(); table.set_max_size(63).unwrap(); let field = HeaderField::new("foo", "bar"); check_encode_field_table( &mut table, &[], &[field.clone(), field.with_value("quxx")], 1, &|mut b, mut e| { assert_eq!( IndexedWithPostBase::decode(&mut b), Ok(IndexedWithPostBase(0)) ); assert_eq!( LiteralWithPostBaseNameRef::decode(&mut b), Ok(LiteralWithPostBaseNameRef::new(0, "quxx")) ); assert_eq!( InsertWithoutNameRef::decode(&mut e), Ok(Some(InsertWithoutNameRef::new("foo", "bar"))) ); }, ); } #[test] fn encode_with_header_block() { let mut table = build_table(); for idx in 1..5 { table .put(HeaderField::new( format!("foo{}", idx), format!("bar{}", idx), )) .unwrap(); } let mut encoder_buf = Vec::new(); let mut block = Vec::new(); let mut encoder = Encoder::from(table); let fields = vec![ HeaderField::new(":method", "GET"), HeaderField::new("foo1", "bar1"), HeaderField::new("foo3", "new bar3"), HeaderField::new(":method", "staticnameref"), HeaderField::new("newfoo", "newbar"), ] .into_iter(); assert_eq!( encoder.encode(1, &mut block, &mut encoder_buf, fields), Ok(7) ); let mut read_block = Cursor::new(&mut block); let mut read_encoder = Cursor::new(&mut encoder_buf); assert_eq!( InsertWithNameRef::decode(&mut read_encoder), Ok(Some(InsertWithNameRef::new_dynamic(1, "new bar3"))) ); assert_eq!( InsertWithNameRef::decode(&mut read_encoder), Ok(Some(InsertWithNameRef::new_static( StaticTable::find_name(&b":method"[..]).unwrap(), "staticnameref" ))) ); assert_eq!( InsertWithoutNameRef::decode(&mut read_encoder), 
Ok(Some(InsertWithoutNameRef::new("newfoo", "newbar"))) ); assert_eq!( HeaderPrefix::decode(&mut read_block) .unwrap() .get(7, TABLE_SIZE), Ok((7, 4)) ); assert_eq!(Indexed::decode(&mut read_block), Ok(Indexed::Static(17))); assert_eq!(Indexed::decode(&mut read_block), Ok(Indexed::Dynamic(3))); assert_eq!( IndexedWithPostBase::decode(&mut read_block), Ok(IndexedWithPostBase(0)) ); assert_eq!( IndexedWithPostBase::decode(&mut read_block), Ok(IndexedWithPostBase(1)) ); assert_eq!( IndexedWithPostBase::decode(&mut read_block), Ok(IndexedWithPostBase(2)) ); assert_eq!(read_block.get_ref().len() as u64, read_block.position()); } #[test] fn decoder_block_ack() { let mut table = build_table(); let field = HeaderField::new("foo", "bar"); check_encode_field_table( &mut table, &[], &[field.clone(), field.with_value("quxx")], 2, &|_, _| {}, ); let mut buf = vec![]; let mut encoder = Encoder::from(table); HeaderAck(2).encode(&mut buf); let mut cur = Cursor::new(&buf); assert_eq!(Action::parse(&mut cur), Ok(Some(Action::Untrack(2)))); let mut cur = Cursor::new(&buf); assert_eq!(encoder.on_decoder_recv(&mut cur), Ok(()),); let mut cur = Cursor::new(&buf); assert_eq!( encoder.on_decoder_recv(&mut cur), Err(Error::Insertion(DynamicTableError::UnknownStreamId(2))) ); } #[test] fn decoder_stream_cacnceled() { let mut table = build_table(); let field = HeaderField::new("foo", "bar"); check_encode_field_table( &mut table, &[], &[field.clone(), field.with_value("quxx")], 2, &|_, _| {}, ); let mut buf = vec![]; StreamCancel(2).encode(&mut buf); let mut cur = Cursor::new(&buf); assert_eq!(Action::parse(&mut cur), Ok(Some(Action::StreamCancel(2)))); } #[test] fn decoder_accept_truncated() { let mut buf = vec![]; StreamCancel(2321).encode(&mut buf); let mut cur = Cursor::new(&buf[..2]); // trucated prefix_int assert_eq!(Action::parse(&mut cur), Ok(None)); let mut cur = Cursor::new(&buf); assert_eq!( Action::parse(&mut cur), Ok(Some(Action::StreamCancel(2321))) ); } #[test] fn decoder_unknown_stream() { let mut table = build_table(); check_encode_field_table( &mut table, &[], &[HeaderField::new("foo", "bar")], 2, &|_, _| {}, ); let mut encoder = Encoder::from(table); let mut buf = vec![]; HeaderAck(4).encode(&mut buf); let mut cur = Cursor::new(&buf); assert_eq!( encoder.on_decoder_recv(&mut cur), Err(Error::Insertion(DynamicTableError::UnknownStreamId(4))) ); } #[test] fn insert_count() { let mut buf = vec![]; InsertCountIncrement(4).encode(&mut buf); let mut cur = Cursor::new(&buf); assert_eq!( Action::parse(&mut cur), Ok(Some(Action::ReceivedRefIncrement(4))) ); let mut encoder = Encoder { table: build_table(), }; let mut cur = Cursor::new(&buf); assert_eq!(encoder.on_decoder_recv(&mut cur), Ok(())); } } h3-0.0.6/src/qpack/field.rs000064400000000000000000000060001046102023000134540ustar 00000000000000use std::{ borrow::Cow, fmt::{Display, Formatter}, }; /** * https://tools.ietf.org/html/rfc7541 * 4.1. 
Calculating Table Size */ pub const ESTIMATED_OVERHEAD_BYTES: usize = 32; #[derive(Debug, PartialEq, Clone, Hash, Eq)] pub struct HeaderField { pub name: Cow<'static, [u8]>, pub value: Cow<'static, [u8]>, } impl HeaderField { pub fn new(name: T, value: S) -> HeaderField where T: Into>, S: Into>, { HeaderField { name: Cow::Owned(name.into()), value: Cow::Owned(value.into()), } } pub fn mem_size(&self) -> usize { self.name.len() + self.value.len() + ESTIMATED_OVERHEAD_BYTES } pub fn with_value(&self, value: T) -> Self where T: Into>, { Self { name: self.name.clone(), value: Cow::Owned(value.into()), } } pub fn into_inner(self) -> (Cow<'static, [u8]>, Cow<'static, [u8]>) { (self.name, self.value) } } impl AsRef for HeaderField { fn as_ref(&self) -> &Self { self } } impl Display for HeaderField { fn fmt(&self, f: &mut Formatter) -> Result<(), std::fmt::Error> { write!( f, "\"{}\": \"{}\"", String::from_utf8_lossy(&self.name), String::from_utf8_lossy(&self.value) )?; Ok(()) } } impl From for String { fn from(field: HeaderField) -> String { format!( "{}\t{}", String::from_utf8_lossy(&field.name), String::from_utf8_lossy(&field.value) ) } } impl From<(N, V)> for HeaderField where N: AsRef<[u8]>, V: AsRef<[u8]>, { fn from(header: (N, V)) -> Self { let (name, value) = header; Self { // FIXME: could avoid allocation if HeaderField had a lifetime name: Cow::Owned(Vec::from(name.as_ref())), value: Cow::Owned(Vec::from(value.as_ref())), } } } #[cfg(test)] mod tests { use super::*; /** * https://tools.ietf.org/html/rfc7541#section-4.1 * "The size of an entry is the sum of its name's length in octets (as * defined in Section 5.2), its value's length in octets, and 32." * "The size of an entry is calculated using the length of its name and * value without any Huffman encoding applied." 
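* For instance, a field with name "Name" (4 octets) and value "Value" (5 octets)
* occupies 4 + 5 + 32 = 41 octets, which is exactly what the test below checks.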
*/ #[test] fn test_field_size_is_offset_by_32() { let field = HeaderField { name: Cow::Borrowed(b"Name"), value: Cow::Borrowed(b"Value"), }; assert_eq!(field.mem_size(), 4 + 5 + 32); } #[test] fn with_value() { let field = HeaderField { name: Cow::Borrowed(b"Name"), value: Cow::Borrowed(b"Value"), }; assert_eq!( field.with_value("New value"), HeaderField { name: Cow::Borrowed(b"Name"), value: Cow::Borrowed(b"New value"), } ); } } h3-0.0.6/src/qpack/mod.rs000064400000000000000000000013411046102023000131530ustar 00000000000000pub use self::{ decoder::{decode_stateless, Decoded, Error as DecoderError}, encoder::{encode_stateless, Error as EncoderError}, field::HeaderField, }; mod block; mod dynamic; mod field; mod parse_error; mod static_; mod stream; mod vas; mod decoder; mod encoder; mod prefix_int; mod prefix_string; #[cfg(test)] mod tests; #[derive(Debug)] pub enum Error { Encoder(EncoderError), Decoder(DecoderError), } impl std::error::Error for Error {} impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::Encoder(e) => write!(f, "Encoder {}", e), Error::Decoder(e) => write!(f, "Decoder {}", e), } } } h3-0.0.6/src/qpack/parse_error.rs000064400000000000000000000007271046102023000147260ustar 00000000000000use super::{prefix_int, prefix_string}; #[derive(Debug, PartialEq)] pub enum ParseError { Integer(prefix_int::Error), String(prefix_string::Error), InvalidPrefix(u8), InvalidBase(isize), } impl From for ParseError { fn from(e: prefix_int::Error) -> Self { ParseError::Integer(e) } } impl From for ParseError { fn from(e: prefix_string::Error) -> Self { ParseError::String(e) } } h3-0.0.6/src/qpack/prefix_int.rs000064400000000000000000000077761046102023000145650ustar 00000000000000use std::fmt; use bytes::{Buf, BufMut}; use crate::proto::coding::{self, BufExt, BufMutExt}; #[derive(Debug, PartialEq)] pub enum Error { Overflow, UnexpectedEnd, } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result { match self { Error::Overflow => write!(f, "value overflow"), Error::UnexpectedEnd => write!(f, "unexpected end"), } } } pub fn decode(size: u8, buf: &mut B) -> Result<(u8, u64), Error> { assert!(size <= 8); let mut first = buf.get::()?; // NOTE: following casts to u8 intend to trim the most significant bits, they are used as a // workaround for shiftoverflow errors when size == 8. let flags = ((first as usize) >> size) as u8; let mask = 0xFF >> (8 - size); first &= mask; // if first < 2usize.pow(size) - 1 if first < mask { return Ok((flags, first as u64)); } let mut value = mask as u64; let mut power = 0usize; loop { let byte = buf.get::()? as u64; value += (byte & 127) << power; power += 7; if byte & 128 == 0 { break; } if power >= MAX_POWER { return Err(Error::Overflow); } } Ok((flags, value)) } pub fn encode(size: u8, flags: u8, value: u64, buf: &mut B) { assert!(size <= 8); // NOTE: following casts to u8 intend to trim the most significant bits, they are used as a // workaround for shiftoverflow errors when size == 8. 
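// Worked example, mirroring the `codec_5_bits` test below: size = 5, flags = 0b010,
// value = 1337. The 5-bit prefix saturates at 31, giving 0b0101_1111; the remainder
// 1306 is then emitted in 7-bit groups, least significant first: 154, then 10.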
let mask = !(0xFF << size) as u8; let flags = ((flags as usize) << size) as u8; // if value < 2usize.pow(size) - 1 if value < (mask as u64) { buf.write(flags | value as u8); return; } buf.write(mask | flags); let mut remaining = value - mask as u64; while remaining >= 128 { let rest = (remaining % 128) as u8; buf.write(rest + 128); remaining /= 128; } buf.write(remaining as u8); } #[cfg(target_pointer_width = "64")] const MAX_POWER: usize = 10 * 7; #[cfg(target_pointer_width = "32")] const MAX_POWER: usize = 5 * 7; impl From for Error { fn from(_: coding::UnexpectedEnd) -> Self { Error::UnexpectedEnd } } #[cfg(test)] mod test { use std::io::Cursor; fn check_codec(size: u8, flags: u8, value: u64, data: &[u8]) { let mut buf = Vec::new(); super::encode(size, flags, value, &mut buf); assert_eq!(buf, data); let mut read = Cursor::new(&buf); assert_eq!((flags, value), super::decode(size, &mut read).unwrap()); } #[test] fn codec_5_bits() { check_codec(5, 0b101, 10, &[0b1010_1010]); check_codec(5, 0b101, 0, &[0b1010_0000]); check_codec(5, 0b010, 1337, &[0b0101_1111, 154, 10]); check_codec(5, 0b010, 31, &[0b0101_1111, 0]); check_codec( 5, 0b010, u64::max_value(), &[95, 224, 255, 255, 255, 255, 255, 255, 255, 255, 1], ); } #[test] fn codec_8_bits() { check_codec(8, 0, 42, &[0b0010_1010]); check_codec(8, 0, 424_242, &[255, 179, 240, 25]); check_codec( 8, 0, u64::max_value(), &[255, 128, 254, 255, 255, 255, 255, 255, 255, 255, 1], ); } #[test] #[should_panic] fn size_too_big_value() { let mut buf = vec![]; super::encode(9, 1, 1, &mut buf); } #[test] #[should_panic] fn size_too_big_of_size() { let buf = vec![]; let mut read = Cursor::new(&buf); super::decode(9, &mut read).unwrap(); } #[cfg(target_pointer_width = "64")] #[test] fn overflow() { let buf = vec![255, 128, 254, 255, 255, 255, 255, 255, 255, 255, 255, 1]; let mut read = Cursor::new(&buf); assert!(super::decode(8, &mut read).is_err()); } #[test] fn number_never_ends_with_0x80() { check_codec(4, 0b0001, 143, &[31, 128, 1]); } } h3-0.0.6/src/qpack/prefix_string/bitwin.rs000064400000000000000000000010561046102023000165560ustar 00000000000000#[derive(Debug, Default, PartialEq, Clone)] pub struct BitWindow { pub byte: u32, pub bit: u32, pub count: u32, } impl BitWindow { pub fn new() -> Self { Self::default() } pub fn forwards(&mut self, step: u32) { self.bit += self.count; self.byte += self.bit / 8; self.bit %= 8; self.count = step; } pub fn opposite_bit_window(&self) -> BitWindow { BitWindow { byte: self.byte, bit: self.bit, count: 8 - (self.bit % 8), } } } h3-0.0.6/src/qpack/prefix_string/decode.rs000064400000000000000000002010471046102023000165070ustar 00000000000000use super::BitWindow; #[derive(Debug, PartialEq)] pub enum Error { MissingBits(BitWindow), Unhandled(BitWindow, usize), } #[derive(Clone, Debug)] enum DecodeValue { Partial(&'static HuffmanDecoder), Sym(u8), } #[derive(Clone, Debug)] struct HuffmanDecoder { lookup: u32, table: &'static [DecodeValue], } impl HuffmanDecoder { fn check_eof(&self, bit_pos: &mut BitWindow, input: &[u8]) -> Result, Error> { use std::cmp::Ordering; match ((bit_pos.byte + 1) as usize).cmp(&input.len()) { // Position is out-of-range Ordering::Greater => { return Ok(None); } // Position is on the last byte Ordering::Equal => { let side = bit_pos.opposite_bit_window(); let rest = match read_bits(input, side.byte, side.bit, side.count) { Ok(x) => x, Err(()) => { return Err(Error::MissingBits(side)); } }; let eof_filler = ((2u16 << (side.count - 1)) - 1) as u8; if rest & eof_filler == eof_filler { return 
Ok(None); } } Ordering::Less => {} } Err(Error::MissingBits(bit_pos.clone())) } fn fetch_value(&self, bit_pos: &mut BitWindow, input: &[u8]) -> Result, Error> { match read_bits(input, bit_pos.byte, bit_pos.bit, bit_pos.count) { Ok(value) => Ok(Some(value as u32)), Err(()) => self.check_eof(bit_pos, input), } } fn decode_next(&self, bit_pos: &mut BitWindow, input: &[u8]) -> Result, Error> { bit_pos.forwards(self.lookup); let value = match self.fetch_value(bit_pos, input) { Ok(Some(value)) => value as usize, Ok(None) => return Ok(None), Err(err) => return Err(err), }; let at_value = match (self.table).get(value) { Some(x) => x, None => return Err(Error::Unhandled(bit_pos.clone(), value)), }; match at_value { DecodeValue::Sym(x) => Ok(Some(*x)), DecodeValue::Partial(d) => d.decode_next(bit_pos, input), } } } /// Read `len` bits from the `src` slice at the specified position /// /// Never read more than 8 bits at a time. `bit_offset` may be larger than 8. fn read_bits(src: &[u8], mut byte_offset: u32, mut bit_offset: u32, len: u32) -> Result { if len == 0 || len > 8 || src.len() as u32 * 8 < (byte_offset * 8) + bit_offset + len { return Err(()); } // Deal with `bit_offset` > 8 byte_offset += bit_offset / 8; bit_offset -= (bit_offset / 8) * 8; Ok(if bit_offset + len <= 8 { // Read all the bits from a single byte (src[byte_offset as usize] << bit_offset) >> (8 - len) } else { // The range of bits spans over 2 bytes let mut result = (src[byte_offset as usize] as u16) << 8; result |= src[byte_offset as usize + 1] as u16; ((result << bit_offset) >> (16 - len)) as u8 }) } macro_rules! bits_decode { // general way ( lookup: $count:expr, [ $($sym:expr,)* $(=> $sub:ident,)* ] ) => { HuffmanDecoder { lookup: $count, table: &[ $( DecodeValue::Sym($sym as u8), )* $( DecodeValue::Partial(&$sub), )* ] } }; // 2-final ( $first:expr, $second:expr ) => { HuffmanDecoder { lookup: 1, table: &[ DecodeValue::Sym($first as u8), DecodeValue::Sym($second as u8), ] } }; // 4-final ( $first:expr, $second:expr, $third:expr, $fourth:expr ) => { HuffmanDecoder { lookup: 2, table: &[ DecodeValue::Sym($first as u8), DecodeValue::Sym($second as u8), DecodeValue::Sym($third as u8), DecodeValue::Sym($fourth as u8), ] } }; // 2-final-partial ( $first:expr, => $second:ident ) => { HuffmanDecoder { lookup: 1, table: &[ DecodeValue::Sym($first as u8), DecodeValue::Partial(&$second), ] } }; // 2-partial ( => $first:ident, => $second:ident ) => { HuffmanDecoder { lookup: 1, table: &[ DecodeValue::Partial(&$first), DecodeValue::Partial(&$second), ] } }; // 4-partial ( => $first:ident, => $second:ident, => $third:ident, => $fourth:ident ) => { HuffmanDecoder { lookup: 2, table: &[ DecodeValue::Partial(&$first), DecodeValue::Partial(&$second), DecodeValue::Partial(&$third), DecodeValue::Partial(&$fourth), ] } }; [ $( $name:ident => ( $($value:tt)* ), )* ] => { $( const $name: HuffmanDecoder = bits_decode!( $( $value )* ); )* }; } #[rustfmt::skip] bits_decode![ HPACK_STRING => ( lookup: 5, [ '0', '1', '2', 'a', 'c', 'e', 'i', 'o', 's', 't', => END0_01010, => END0_01011, => END0_01100, => END0_01101, => END0_01110, => END0_01111, => END0_10000, => END0_10001, => END0_10010, => END0_10011, => END0_10100, => END0_10101, => END0_10110, => END0_10111, => END0_11000, => END0_11001, => END0_11010, => END0_11011, => END0_11100, => END0_11101, => END0_11110, => END0_11111, ]), END0_01010 => ( 32, '%'), END0_01011 => ('-', '.'), END0_01100 => ('/', '3'), END0_01101 => ('4', '5'), END0_01110 => ('6', '7'), END0_01111 => ('8', '9'), END0_10000 => 
('=', 'A'), END0_10001 => ('_', 'b'), END0_10010 => ('d', 'f'), END0_10011 => ('g', 'h'), END0_10100 => ('l', 'm'), END0_10101 => ('n', 'p'), END0_10110 => ('r', 'u'), END0_10111 => (':', 'B', 'C', 'D'), END0_11000 => ('E', 'F', 'G', 'H'), END0_11001 => ('I', 'J', 'K', 'L'), END0_11010 => ('M', 'N', 'O', 'P'), END0_11011 => ('Q', 'R', 'S', 'T'), END0_11100 => ('U', 'V', 'W', 'Y'), END0_11101 => ('j', 'k', 'q', 'v'), END0_11110 => ('w', 'x', 'y', 'z'), END0_11111 => (=> END5_00, => END5_01, => END5_10, => END5_11), END5_00 => ('&', '*'), END5_01 => (',', 59), END5_10 => ('X', 'Z'), END5_11 => (=> END7_0, => END7_1), END7_0 => ('!', '"', '(', ')'), END7_1 => (=> END8_0, => END8_1), END8_0 => ('?', => END9A_1), END9A_1 => ('\'', '+'), END8_1 => (lookup: 2, ['|', => END9B_01, => END9B_10, => END9B_11,]), END9B_01 => ('#', '>'), END9B_10 => (0, '$', '@', '['), END9B_11 => (lookup: 2, [']', '~', => END13_10, => END13_11,]), END13_10 => ('^', '}'), END13_11 => (=> END14_0, => END14_1), END14_0 => ('<', '`'), END14_1 => ('{', => END15_1), END15_1 => (lookup: 4, [ '\\', 195, 208, => END19_0011, => END19_0100, => END19_0101, => END19_0110, => END19_0111, => END19_1000, => END19_1001, => END19_1010, => END19_1011, => END19_1100, => END19_1101, => END19_1110, => END19_1111, ]), END19_0011 => (128, 130), END19_0100 => (131, 162), END19_0101 => (184, 194), END19_0110 => (224, 226), END19_0111 => (153, 161, 167, 172), END19_1000 => (176, 177, 179, 209), END19_1001 => (216, 217, 227, 229), END19_1010 => (lookup: 2, [230, => END19_1010_01, => END19_1010_10, => END19_1010_11,]), END19_1010_01 => (129, 132), END19_1010_10 => (133, 134), END19_1010_11 => (136, 146), END19_1011 => (lookup: 3, [154, 156, 160, 163, 164, 169, 170, 173,]), END19_1100 => (lookup: 3, [178, 181, 185, 186, 187, 189, 190, 196,]), END19_1101 => (lookup: 3, [198, 228, 232, 233, => END23A_100, => END23A_101, => END23A_110, => END23A_111,]), END23A_100 => ( 1, 135), END23A_101 => (137, 138), END23A_110 => (139, 140), END23A_111 => (141, 143), END19_1110 => (lookup: 4, [147, 149, 150, 151, 152, 155, 157, 158, 165, 166, 168, 174, 175, 180, 182, 183,]), END19_1111 => (lookup: 4, [188, 191, 197, 231, 239, => END23B_0101, => END23B_0110, => END23B_0111, => END23B_1000, => END23B_1001, => END23B_1010, => END23B_1011, => END23B_1100, => END23B_1101, => END23B_1110, => END23B_1111,]), END23B_0101 => ( 9, 142), END23B_0110 => (144, 145), END23B_0111 => (148, 159), END23B_1000 => (171, 206), END23B_1001 => (215, 225), END23B_1010 => (236, 237), END23B_1011 => (199, 207, 234, 235), END23B_1100 => (lookup: 3, [192, 193, 200, 201, 202, 205, 210, 213,]), END23B_1101 => (lookup: 3, [218, 219, 238, 240, 242, 243, 255, => END27A_111,]), END27A_111 => (203, 204), END23B_1110 => (lookup: 4, [211, 212, 214, 221, 222, 223, 241, 244, 245, 246, 247, 248, 250, 251, 252, 253,]), END23B_1111 => (lookup: 4, [ 254, => END27B_0001, => END27B_0010, => END27B_0011, => END27B_0100, => END27B_0101, => END27B_0110, => END27B_0111, => END27B_1000, => END27B_1001, => END27B_1010, => END27B_1011, => END27B_1100, => END27B_1101, => END27B_1110, => END27B_1111,]), END27B_0001 => (2, 3), END27B_0010 => (4, 5), END27B_0011 => (6, 7), END27B_0100 => (8, 11), END27B_0101 => (12, 14), END27B_0110 => (15, 16), END27B_0111 => (17, 18), END27B_1000 => (19, 20), END27B_1001 => (21, 23), END27B_1010 => (24, 25), END27B_1011 => (26, 27), END27B_1100 => (28, 29), END27B_1101 => (30, 31), END27B_1110 => (127, 220), END27B_1111 => (lookup: 1, [249, => END31_1,]), END31_1 => (lookup: 2, [10, 
13, 22, => EOF,]), EOF => (lookup: 8, []), ]; pub struct DecodeIter<'a> { bit_pos: BitWindow, content: &'a Vec, } impl<'a> Iterator for DecodeIter<'a> { type Item = Result; fn next(&mut self) -> Option { match HPACK_STRING.decode_next(&mut self.bit_pos, self.content) { Ok(Some(x)) => Some(Ok(x)), Err(err) => Some(Err(err)), Ok(None) => None, } } } pub trait HpackStringDecode { fn hpack_decode(&self) -> DecodeIter; } impl HpackStringDecode for Vec { fn hpack_decode(&self) -> DecodeIter { DecodeIter { bit_pos: BitWindow::new(), content: self, } } } #[cfg(test)] mod tests { #![allow(clippy::identity_op)] use super::*; #[test] fn test_read_bits() { // Basic case (within one byte, aligned with start) assert_eq!(read_bits(&[0b1010_1010], 0, 0, 5), Ok(0b1_0101)); // Within one byte, aligned with end of byte assert_eq!(read_bits(&[0b1010_1010], 0, 3, 5), Ok(0b1010)); // Within one byte, unaligned with either side assert_eq!(read_bits(&[0b1010_1010], 0, 3, 3), Ok(0b10)); // `len` == 0 assert_eq!(read_bits(&[0b1010_1010], 0, 0, 0), Err(())); // `len` > 8 assert_eq!(read_bits(&[0b1010_1010], 0, 0, 9), Err(())); // `bit_offset` > 7 assert_eq!( read_bits(&[0b1010_1010, 0b1010_1010], 0, 8, 8), Ok(0b1010_1010) ); // Read spanning two bytes assert_eq!( read_bits(&[0b1010_1010, 0b1010_1010], 0, 4, 8), Ok(0b1010_1010) ); // Read with non-zero `byte_offset` assert_eq!( read_bits(&[0b1010_1010, 0b1010_1010], 1, 0, 5), Ok(0b1_0101) ); // Read with `bit_offset` > 7, unaligned with either side assert_eq!( read_bits(&[0b1010_1010, 0b1010_1010], 0, 10, 5), Ok(0b1_0101) ); // Read with `bit_offset` > 7 past end of input slice assert_eq!(read_bits(&[0b1010_1010, 0b1010_1010], 0, 16, 5), Err(())); } macro_rules! decoding { [ $( $code:expr => $( $byte:expr ),* ; )* ] => { $( { let bytes = vec![$( $byte ),*]; let res: Result, Error> = bytes.hpack_decode().collect(); assert_eq!(res, Ok(vec![$code]), "fail to decode {}", $code); } )* } } /** * https://tools.ietf.org/html/rfc7541 * Appendix B. Huffman Code */ #[test] #[allow(clippy::cognitive_complexity)] fn test_decode_single_value() { decoding![ 48 => (0b0_0000 << 3) | /* padding */ 0b111; // '0' 49 => (0b0_0001 << 3) | /* padding */ 0b111; // '1' 50 => (0b0_0010 << 3) | /* padding */ 0b111; // '2' 97 => (0b0_0011 << 3) | /* padding */ 0b111; // 'a' 99 => (0b0_0100 << 3) | /* padding */ 0b111; // 'c' 101 => (0b0_0101 << 3) | /* padding */ 0b111; // 'e' 105 => (0b0_0110 << 3) | /* padding */ 0b111; // 'i' 111 => (0b0_0111 << 3) | /* padding */ 0b111; // 'o' 115 => (0b0_1000 << 3) | /* padding */ 0b111; // 's' 116 => (0b0_1001 << 3) | /* padding */ 0b111; // 't' 32 => (0b01_0100 << 2) | /* padding */ 0b11; 37 => (0b01_0101 << 2) | /* padding */ 0b11; // '%' 45 => (0b01_0110 << 2) | /* padding */ 0b11; // '-' 46 => (0b01_0111 << 2) | /* padding */ 0b11; // '.' 
47 => (0b01_1000 << 2) | /* padding */ 0b11; // '/' 51 => (0b01_1001 << 2) | /* padding */ 0b11; // '3' 52 => (0b01_1010 << 2) | /* padding */ 0b11; // '4' 53 => (0b01_1011 << 2) | /* padding */ 0b11; // '5' 54 => (0b01_1100 << 2) | /* padding */ 0b11; // '6' 55 => (0b01_1101 << 2) | /* padding */ 0b11; // '7' 56 => (0b01_1110 << 2) | /* padding */ 0b11; // '8' 57 => (0b01_1111 << 2) | /* padding */ 0b11; // '9' 61 => (0b10_0000 << 2) | /* padding */ 0b11; // '=' 65 => (0b10_0001 << 2) | /* padding */ 0b11; // 'A' 95 => (0b10_0010 << 2) | /* padding */ 0b11; // '_' 98 => (0b10_0011 << 2) | /* padding */ 0b11; // 'b' 100 => (0b10_0100 << 2) | /* padding */ 0b11; // 'd' 102 => (0b10_0101 << 2) | /* padding */ 0b11; // 'f' 103 => (0b10_0110 << 2) | /* padding */ 0b11; // 'g' 104 => (0b10_0111 << 2) | /* padding */ 0b11; // 'h' 108 => (0b10_1000 << 2) | /* padding */ 0b11; // 'l' 109 => (0b10_1001 << 2) | /* padding */ 0b11; // 'm' 110 => (0b10_1010 << 2) | /* padding */ 0b11; // 'n' 112 => (0b10_1011 << 2) | /* padding */ 0b11; // 'p' 114 => (0b10_1100 << 2) | /* padding */ 0b11; // 'r' 117 => (0b10_1101 << 2) | /* padding */ 0b11; // 'u' 58 => (0b101_1100 << 1) | /* padding */ 0b1; // ':' 66 => (0b101_1101 << 1) | /* padding */ 0b1; // 'B' 67 => (0b101_1110 << 1) | /* padding */ 0b1; // 'C' 68 => (0b101_1111 << 1) | /* padding */ 0b1; // 'D' 69 => (0b110_0000 << 1) | /* padding */ 0b1; // 'E' 70 => (0b110_0001 << 1) | /* padding */ 0b1; // 'F' 71 => (0b110_0010 << 1) | /* padding */ 0b1; // 'G' 72 => (0b110_0011 << 1) | /* padding */ 0b1; // 'H' 73 => (0b110_0100 << 1) | /* padding */ 0b1; // 'I' 74 => (0b110_0101 << 1) | /* padding */ 0b1; // 'J' 75 => (0b110_0110 << 1) | /* padding */ 0b1; // 'K' 76 => (0b110_0111 << 1) | /* padding */ 0b1; // 'L' 77 => (0b110_1000 << 1) | /* padding */ 0b1; // 'M' 78 => (0b110_1001 << 1) | /* padding */ 0b1; // 'N' 79 => (0b110_1010 << 1) | /* padding */ 0b1; // 'O' 80 => (0b110_1011 << 1) | /* padding */ 0b1; // 'P' 81 => (0b110_1100 << 1) | /* padding */ 0b1; // 'Q' 82 => (0b110_1101 << 1) | /* padding */ 0b1; // 'R' 83 => (0b110_1110 << 1) | /* padding */ 0b1; // 'S' 84 => (0b110_1111 << 1) | /* padding */ 0b1; // 'T' 85 => (0b111_0000 << 1) | /* padding */ 0b1; // 'U' 86 => (0b111_0001 << 1) | /* padding */ 0b1; // 'V' 87 => (0b111_0010 << 1) | /* padding */ 0b1; // 'W' 89 => (0b111_0011 << 1) | /* padding */ 0b1; // 'Y' 106 => (0b111_0100 << 1) | /* padding */ 0b1; // 'j' 107 => (0b111_0101 << 1) | /* padding */ 0b1; // 'k' 113 => (0b111_0110 << 1) | /* padding */ 0b1; // 'q' 118 => (0b111_0111 << 1) | /* padding */ 0b1; // 'v' 119 => (0b111_1000 << 1) | /* padding */ 0b1; // 'w' 120 => (0b111_1001 << 1) | /* padding */ 0b1; // 'x' 121 => (0b111_1010 << 1) | /* padding */ 0b1; // 'y' 122 => (0b111_1011 << 1) | /* padding */ 0b1; // 'z' 38 => 0b1111_1000, /* padding */ 0b1111_1111; // '&' 42 => 0b1111_1001, /* padding */ 0b1111_1111; // '*' 44 => 0b1111_1010, /* padding */ 0b1111_1111; // ',' 59 => 0b1111_1011, /* padding */ 0b1111_1111; 88 => 0b1111_1100, /* padding */ 0b1111_1111; // 'X' 90 => 0b1111_1101, /* padding */ 0b1111_1111; // 'Z' 33 => 0b1111_1110, (0b00 << 6) | /* padding */ 0b11_1111; // '!' 34 => 0b1111_1110, (0b01 << 6) | /* padding */ 0b11_1111; // '"' 40 => 0b1111_1110, (0b10 << 6) | /* padding */ 0b11_1111; // '(' 41 => 0b1111_1110, (0b11 << 6) | /* padding */ 0b11_1111; // ')' 63 => 0b1111_1111, (0b00 << 6) | /* padding */ 0b11_1111; // '?' 
39 => 0b1111_1111, (0b010 << 5) | /* padding */ 0b11111; // ''' 43 => 0b1111_1111, (0b011 << 5) | /* padding */ 0b11111; // '+' 124 => 0b1111_1111, (0b100 << 5) | /* padding */ 0b11111; // '|' 35 => 0b1111_1111, (0b1010 << 4) | /* padding */ 0b1111; // '#' 62 => 0b1111_1111, (0b1011 << 4) | /* padding */ 0b1111; // '>' 0 => 0b1111_1111, (0b11000 << 3) | /* padding */ 0b111; 36 => 0b1111_1111, (0b11001 << 3) | /* padding */ 0b111; // '$' 64 => 0b1111_1111, (0b11010 << 3) | /* padding */ 0b111; // '@' 91 => 0b1111_1111, (0b11011 << 3) | /* padding */ 0b111; // '[' 93 => 0b1111_1111, (0b11100 << 3) | /* padding */ 0b111; // ']' 126 => 0b1111_1111, (0b11101 << 3) | /* padding */ 0b111; // '~' 94 => 0b1111_1111, (0b11_1100 << 2) | /* padding */ 0b11; // '^' 125 => 0b1111_1111, (0b11_1101 << 2) | /* padding */ 0b11; // '}' 60 => 0b1111_1111, (0b111_1100 << 1) | /* padding */ 0b1; // '<' 96 => 0b1111_1111, (0b111_1101 << 1) | /* padding */ 0b1; // '`' 123 => 0b1111_1111, (0b111_1110 << 1) | /* padding */ 0b1; // '{' 92 => 0b1111_1111, 0b1111_1110, (0b000 << 5) | /* padding */ 0b11111; // '\' 195 => 0b1111_1111, 0b1111_1110, (0b001 << 5) | /* padding */ 0b11111; 208 => 0b1111_1111, 0b1111_1110, (0b010 << 5) | /* padding */ 0b11111; 128 => 0b1111_1111, 0b1111_1110, (0b0110 << 4) | /* padding */ 0b1111; 130 => 0b1111_1111, 0b1111_1110, (0b0111 << 4) | /* padding */ 0b1111; 131 => 0b1111_1111, 0b1111_1110, (0b1000 << 4) | /* padding */ 0b1111; 162 => 0b1111_1111, 0b1111_1110, (0b1001 << 4) | /* padding */ 0b1111; 184 => 0b1111_1111, 0b1111_1110, (0b1010 << 4) | /* padding */ 0b1111; 194 => 0b1111_1111, 0b1111_1110, (0b1011 << 4) | /* padding */ 0b1111; 224 => 0b1111_1111, 0b1111_1110, (0b1100 << 4) | /* padding */ 0b1111; 226 => 0b1111_1111, 0b1111_1110, (0b1101 << 4) | /* padding */ 0b1111; 153 => 0b1111_1111, 0b1111_1110, (0b11100 << 3) | /* padding */ 0b111; 161 => 0b1111_1111, 0b1111_1110, (0b11101 << 3) | /* padding */ 0b111; 167 => 0b1111_1111, 0b1111_1110, (0b11110 << 3) | /* padding */ 0b111; 172 => 0b1111_1111, 0b1111_1110, (0b11111 << 3) | /* padding */ 0b111; 176 => 0b1111_1111, 0b1111_1111, (0b00000 << 3) | /* padding */ 0b111; 177 => 0b1111_1111, 0b1111_1111, (0b00001 << 3) | /* padding */ 0b111; 179 => 0b1111_1111, 0b1111_1111, (0b00010 << 3) | /* padding */ 0b111; 209 => 0b1111_1111, 0b1111_1111, (0b00011 << 3) | /* padding */ 0b111; 216 => 0b1111_1111, 0b1111_1111, (0b00100 << 3) | /* padding */ 0b111; 217 => 0b1111_1111, 0b1111_1111, (0b00101 << 3) | /* padding */ 0b111; 227 => 0b1111_1111, 0b1111_1111, (0b00110 << 3) | /* padding */ 0b111; 229 => 0b1111_1111, 0b1111_1111, (0b00111 << 3) | /* padding */ 0b111; 230 => 0b1111_1111, 0b1111_1111, (0b01000 << 3) | /* padding */ 0b111; 129 => 0b1111_1111, 0b1111_1111, (0b01_0010 << 2) | /* padding */ 0b11; 132 => 0b1111_1111, 0b1111_1111, (0b01_0011 << 2) | /* padding */ 0b11; 133 => 0b1111_1111, 0b1111_1111, (0b01_0100 << 2) | /* padding */ 0b11; 134 => 0b1111_1111, 0b1111_1111, (0b01_0101 << 2) | /* padding */ 0b11; 136 => 0b1111_1111, 0b1111_1111, (0b01_0110 << 2) | /* padding */ 0b11; 146 => 0b1111_1111, 0b1111_1111, (0b01_0111 << 2) | /* padding */ 0b11; 154 => 0b1111_1111, 0b1111_1111, (0b01_1000 << 2) | /* padding */ 0b11; 156 => 0b1111_1111, 0b1111_1111, (0b01_1001 << 2) | /* padding */ 0b11; 160 => 0b1111_1111, 0b1111_1111, (0b01_1010 << 2) | /* padding */ 0b11; 163 => 0b1111_1111, 0b1111_1111, (0b01_1011 << 2) | /* padding */ 0b11; 164 => 0b1111_1111, 0b1111_1111, (0b01_1100 << 2) | /* padding */ 0b11; 169 => 0b1111_1111, 
0b1111_1111, (0b01_1101 << 2) | /* padding */ 0b11; 170 => 0b1111_1111, 0b1111_1111, (0b01_1110 << 2) | /* padding */ 0b11; 173 => 0b1111_1111, 0b1111_1111, (0b01_1111 << 2) | /* padding */ 0b11; 178 => 0b1111_1111, 0b1111_1111, (0b10_0000 << 2) | /* padding */ 0b11; 181 => 0b1111_1111, 0b1111_1111, (0b10_0001 << 2) | /* padding */ 0b11; 185 => 0b1111_1111, 0b1111_1111, (0b10_0010 << 2) | /* padding */ 0b11; 186 => 0b1111_1111, 0b1111_1111, (0b10_0011 << 2) | /* padding */ 0b11; 187 => 0b1111_1111, 0b1111_1111, (0b10_0100 << 2) | /* padding */ 0b11; 189 => 0b1111_1111, 0b1111_1111, (0b10_0101 << 2) | /* padding */ 0b11; 190 => 0b1111_1111, 0b1111_1111, (0b10_0110 << 2) | /* padding */ 0b11; 196 => 0b1111_1111, 0b1111_1111, (0b10_0111 << 2) | /* padding */ 0b11; 198 => 0b1111_1111, 0b1111_1111, (0b10_1000 << 2) | /* padding */ 0b11; 228 => 0b1111_1111, 0b1111_1111, (0b10_1001 << 2) | /* padding */ 0b11; 232 => 0b1111_1111, 0b1111_1111, (0b10_1010 << 2) | /* padding */ 0b11; 233 => 0b1111_1111, 0b1111_1111, (0b10_1011 << 2) | /* padding */ 0b11; 1 => 0b1111_1111, 0b1111_1111, (0b101_1000 << 1) | /* padding */ 0b1; 135 => 0b1111_1111, 0b1111_1111, (0b101_1001 << 1) | /* padding */ 0b1; 137 => 0b1111_1111, 0b1111_1111, (0b101_1010 << 1) | /* padding */ 0b1; 138 => 0b1111_1111, 0b1111_1111, (0b101_1011 << 1) | /* padding */ 0b1; 139 => 0b1111_1111, 0b1111_1111, (0b101_1100 << 1) | /* padding */ 0b1; 140 => 0b1111_1111, 0b1111_1111, (0b101_1101 << 1) | /* padding */ 0b1; 141 => 0b1111_1111, 0b1111_1111, (0b101_1110 << 1) | /* padding */ 0b1; 143 => 0b1111_1111, 0b1111_1111, (0b101_1111 << 1) | /* padding */ 0b1; 147 => 0b1111_1111, 0b1111_1111, (0b110_0000 << 1) | /* padding */ 0b1; 149 => 0b1111_1111, 0b1111_1111, (0b110_0001 << 1) | /* padding */ 0b1; 150 => 0b1111_1111, 0b1111_1111, (0b110_0010 << 1) | /* padding */ 0b1; 151 => 0b1111_1111, 0b1111_1111, (0b110_0011 << 1) | /* padding */ 0b1; 152 => 0b1111_1111, 0b1111_1111, (0b110_0100 << 1) | /* padding */ 0b1; 155 => 0b1111_1111, 0b1111_1111, (0b110_0101 << 1) | /* padding */ 0b1; 157 => 0b1111_1111, 0b1111_1111, (0b110_0110 << 1) | /* padding */ 0b1; 158 => 0b1111_1111, 0b1111_1111, (0b110_0111 << 1) | /* padding */ 0b1; 165 => 0b1111_1111, 0b1111_1111, (0b110_1000 << 1) | /* padding */ 0b1; 166 => 0b1111_1111, 0b1111_1111, (0b110_1001 << 1) | /* padding */ 0b1; 168 => 0b1111_1111, 0b1111_1111, (0b110_1010 << 1) | /* padding */ 0b1; 174 => 0b1111_1111, 0b1111_1111, (0b110_1011 << 1) | /* padding */ 0b1; 175 => 0b1111_1111, 0b1111_1111, (0b110_1100 << 1) | /* padding */ 0b1; 180 => 0b1111_1111, 0b1111_1111, (0b110_1101 << 1) | /* padding */ 0b1; 182 => 0b1111_1111, 0b1111_1111, (0b110_1110 << 1) | /* padding */ 0b1; 183 => 0b1111_1111, 0b1111_1111, (0b110_1111 << 1) | /* padding */ 0b1; 188 => 0b1111_1111, 0b1111_1111, (0b111_0000 << 1) | /* padding */ 0b1; 191 => 0b1111_1111, 0b1111_1111, (0b111_0001 << 1) | /* padding */ 0b1; 197 => 0b1111_1111, 0b1111_1111, (0b111_0010 << 1) | /* padding */ 0b1; 231 => 0b1111_1111, 0b1111_1111, (0b111_0011 << 1) | /* padding */ 0b1; 239 => 0b1111_1111, 0b1111_1111, (0b111_0100 << 1) | /* padding */ 0b1; 9 => 0b1111_1111, 0b1111_1111, 0b1110_1010, /* padding */ 0b1111_1111; 142 => 0b1111_1111, 0b1111_1111, 0b1110_1011, /* padding */ 0b1111_1111; 144 => 0b1111_1111, 0b1111_1111, 0b1110_1100, /* padding */ 0b1111_1111; 145 => 0b1111_1111, 0b1111_1111, 0b1110_1101, /* padding */ 0b1111_1111; 148 => 0b1111_1111, 0b1111_1111, 0b1110_1110, /* padding */ 0b1111_1111; 159 => 0b1111_1111, 0b1111_1111, 0b1110_1111, 
/* padding */ 0b1111_1111; 171 => 0b1111_1111, 0b1111_1111, 0b1111_0000, /* padding */ 0b1111_1111; 206 => 0b1111_1111, 0b1111_1111, 0b1111_0001, /* padding */ 0b1111_1111; 215 => 0b1111_1111, 0b1111_1111, 0b1111_0010, /* padding */ 0b1111_1111; 225 => 0b1111_1111, 0b1111_1111, 0b1111_0011, /* padding */ 0b1111_1111; 236 => 0b1111_1111, 0b1111_1111, 0b1111_0100, /* padding */ 0b1111_1111; 237 => 0b1111_1111, 0b1111_1111, 0b1111_0101, /* padding */ 0b1111_1111; 199 => 0b1111_1111, 0b1111_1111, 0b1111_0110, (0b0 << 7) | /* padding */ 0b111_1111; 207 => 0b1111_1111, 0b1111_1111, 0b1111_0110, (0b1 << 7) | /* padding */ 0b111_1111; 234 => 0b1111_1111, 0b1111_1111, 0b1111_0111, (0b0 << 7) | /* padding */ 0b111_1111; 235 => 0b1111_1111, 0b1111_1111, 0b1111_0111, (0b1 << 7) | /* padding */ 0b111_1111; 192 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b00 << 6) | /* padding */ 0b11_1111; 193 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b01 << 6) | /* padding */ 0b11_1111; 200 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b10 << 6) | /* padding */ 0b11_1111; 201 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b11 << 6) | /* padding */ 0b11_1111; 202 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b00 << 6) | /* padding */ 0b11_1111; 205 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b01 << 6) | /* padding */ 0b11_1111; 210 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b10 << 6) | /* padding */ 0b11_1111; 213 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b11 << 6) | /* padding */ 0b11_1111; 218 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b00 << 6) | /* padding */ 0b11_1111; 219 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b01 << 6) | /* padding */ 0b11_1111; 238 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b10 << 6) | /* padding */ 0b11_1111; 240 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b11 << 6) | /* padding */ 0b11_1111; 242 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b00 << 6) | /* padding */ 0b11_1111; 243 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b01 << 6) | /* padding */ 0b11_1111; 255 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b10 << 6) | /* padding */ 0b11_1111; 203 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b110 << 5) | /* padding */ 0b11111; 204 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b111 << 5) | /* padding */ 0b11111; 211 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b000 << 5) | /* padding */ 0b11111; 212 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b001 << 5) | /* padding */ 0b11111; 214 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b010 << 5) | /* padding */ 0b11111; 221 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b011 << 5) | /* padding */ 0b11111; 222 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b100 << 5) | /* padding */ 0b11111; 223 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b101 << 5) | /* padding */ 0b11111; 241 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b110 << 5) | /* padding */ 0b11111; 244 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b111 << 5) | /* padding */ 0b11111; 245 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b000 << 5) | /* padding */ 0b11111; 246 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b001 << 5) | /* padding */ 0b11111; 247 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b010 << 5) | /* padding */ 0b11111; 248 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b011 << 5) | /* padding */ 0b11111; 250 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b100 << 5) | /* padding */ 0b11111; 251 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b101 << 5) | /* padding */ 0b11111; 252 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b110 << 5) | /* padding */ 0b11111; 253 => 0b1111_1111, 0b1111_1111, 0b1111_1101, 
(0b111 << 5) | /* padding */ 0b11111; 254 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b000 << 5) | /* padding */ 0b11111; 2 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0010 << 4) | /* padding */ 0b1111; 3 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0011 << 4) | /* padding */ 0b1111; 4 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0100 << 4) | /* padding */ 0b1111; 5 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0101 << 4) | /* padding */ 0b1111; 6 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0110 << 4) | /* padding */ 0b1111; 7 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0111 << 4) | /* padding */ 0b1111; 8 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1000 << 4) | /* padding */ 0b1111; 11 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1001 << 4) | /* padding */ 0b1111; 12 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1010 << 4) | /* padding */ 0b1111; 14 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1011 << 4) | /* padding */ 0b1111; 15 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1100 << 4) | /* padding */ 0b1111; 16 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1101 << 4) | /* padding */ 0b1111; 17 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1110 << 4) | /* padding */ 0b1111; 18 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1111 << 4) | /* padding */ 0b1111; 19 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000 << 4) | /* padding */ 0b1111; 20 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0001 << 4) | /* padding */ 0b1111; 21 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0010 << 4) | /* padding */ 0b1111; 23 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0011 << 4) | /* padding */ 0b1111; 24 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0100 << 4) | /* padding */ 0b1111; 25 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0101 << 4) | /* padding */ 0b1111; 26 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0110 << 4) | /* padding */ 0b1111; 27 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0111 << 4) | /* padding */ 0b1111; 28 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1000 << 4) | /* padding */ 0b1111; 29 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1001 << 4) | /* padding */ 0b1111; 30 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1010 << 4) | /* padding */ 0b1111; 31 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1011 << 4) | /* padding */ 0b1111; 127 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1100 << 4) | /* padding */ 0b1111; 220 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1101 << 4) | /* padding */ 0b1111; 249 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1110 << 4) | /* padding */ 0b1111; 10 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b11_1100 << 2) | /* padding */ 0b11; 13 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b11_1101 << 2) | /* padding */ 0b11; 22 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b11_1110 << 2) | /* padding */ 0b11; // 256 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b11_1111 << 2) | /* padding */ 0b11; ]; } /** * https://tools.ietf.org/html/rfc7541 * Appendix B. 
Huffman Code */ #[test] fn test_decode_all_code_joined() { let bytes = vec![ // 0 |11111111|11000 0b1111_1111, (0b11000 << 3) // 1 |11111111|11111111|1011000 + 0b111, 0b1111_1111, 0b1111_1101, (0b1000 << 4) // 2 |11111111|11111111|11111110|0010 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_0010, // 3 |11111111|11111111|11111110|0011 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0011 << 4) // 4 |11111111|11111111|11111110|0100 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_0100, // 5 |11111111|11111111|11111110|0101 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0101 << 4) // 6 |11111111|11111111|11111110|0110 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_0110, // 7 |11111111|11111111|11111110|0111 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0111 << 4) // 8 |11111111|11111111|11111110|1000 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_1000, // 9 |11111111|11111111|11101010 0b1111_1111, 0b1111_1111, 0b1110_1010, // 10 |11111111|11111111|11111111|111100 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b11_1100 << 2) // 11 |11111111|11111111|11111110|1001 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b01 << 6) // 12 |11111111|11111111|11111110|1010 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b10_1010 << 2) // 13 |11111111|11111111|11111111|111101 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b1101 << 4) // 14 |11111111|11111111|11111110|1011 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_1011, // 15 |11111111|11111111|11111110|1100 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1100 << 4) // 16 |11111111|11111111|11111110|1101 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_1101, // 17 |11111111|11111111|11111110|1110 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1110 << 4) // 18 |11111111|11111111|11111110|1111 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1110_1111, // 19 |11111111|11111111|11111111|0000 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000 << 4) // 20 |11111111|11111111|11111111|0001 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1111_0001, // 21 |11111111|11111111|11111111|0010 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0010 << 4) // 22 |11111111|11111111|11111111|111110 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b10 << 6) // 23 |11111111|11111111|11111111|0011 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b11_0011 << 2) // 24 |11111111|11111111|11111111|0100 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b00 << 6) // 25 |11111111|11111111|11111111|0101 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b11_0101 << 2) // 26 |11111111|11111111|11111111|0110 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b10 << 6) // 27 |11111111|11111111|11111111|0111 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b11_0111 << 2) // 28 |11111111|11111111|11111111|1000 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b00 << 6) // 29 |11111111|11111111|11111111|1001 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b11_1001 << 2) // 30 |11111111|11111111|11111111|1010 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b10 << 6) // 31 |11111111|11111111|11111111|1011 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b11_1011 << 2) // 32 |010100 + 0b01, (0b0100 << 4) // 33 -!- |11111110|00 + 0b1111, (0b11_1000 << 2) // 34 -;- |11111110|01 + 0b11, 0b1111_1001, // 35 -#- |11111111|1010 0b1111_1111, (0b1010 << 4) // 36 -$- |11111111|11001 + 0b1111, 0b1111_1100, (0b1 << 7) // 37 -%- |010101 + (0b01_0101 << 1) // 38 -&- |11111000 + 0b1, (0b111_1000 << 1) // 39 -'- |11111111|010 + 0b1, 0b1111_1110, (0b10 << 6) // 40 -(- |11111110|10 + 0b11_1111, (0b1010 << 4) // 41 -)- |11111110|11 + 0b1111, (0b11_1011 << 2) // 42 -*- |11111001 + 0b11, (0b11_1001 << 2) // 43 -+- |11111111|011 + 0b11, 0b1111_1101, 
(0b1 << 7) // 44 -,- |11111010 + 0b111_1101, (0b0 << 7) // 45 --- |010110 + (0b01_0110 << 1) // 46 -.- |010111 + 0b0, (0b10111 << 3) // 47 -/- |011000 + 0b011, (0b000 << 5) // 48 -0- |00000 + 0b00000, // 49 -1- |00001 (0b00001 << 3) // 50 -2- |00010 + 0b000, (0b10 << 6) // 51 -3- |011001 + 0b01_1001, // 52 -4- |011010 (0b01_1010 << 2) // 53 -5- |011011 + 0b01, (0b1011 << 4) // 54 -6- |011100 + 0b0111, (0b00 << 6) // 55 -7- |011101 + 0b01_1101, // 56 -8- |011110 (0b01_1110 << 2) // 57 -9- |011111 + 0b01, (0b1111 << 4) // 58 -:- |1011100 + 0b1011, (0b100 << 5) // 59 |11111011 + 0b11111, (0b011 << 5) // 60 -<- |11111111|1111100 + 0b11111, 0b1111_1111, (0b00 << 6) // 61 -=- |100000 + 0b10_0000, // 62 ->- |11111111|1011 0b1111_1111, (0b1011 << 4) // 63 -?- |11111111|00 + 0b1111, (0b11_1100 << 2) // 64 -@- |11111111|11010 + 0b11, 0b1111_1111, (0b010 << 5) // 65 -A- |100001 + 0b10000, (0b1 << 7) // 66 -B- |1011101 + 0b101_1101, // 67 -C- |1011110 (0b101_1110 << 1) // 68 -D- |1011111 + 0b1, (0b01_1111 << 2) // 69 -E- |1100000 + 0b11, (0b00000 << 3) // 70 -F- |1100001 + 0b110, (0b0001 << 4) // 71 -G- |1100010 + 0b1100, (0b010 << 5) // 72 -H- |1100011 + 0b11000, (0b11 << 6) // 73 -I- |1100100 + 0b11_0010, (0b0 << 7) // 74 -J- |1100101 + 0b110_0101, // 75 -K- |1100110 (0b110_0110 << 1) // 76 -L- |1100111 + 0b1, (0b10_0111 << 2) // 77 -M- |1101000 + 0b11, (0b01000 << 3) // 78 -N- |1101001 + 0b110, (0b1001 << 4) // 79 -O- |1101010 + 0b1101, (0b010 << 5) // 80 -P- |1101011 + 0b11010, (0b11 << 6) // 81 -Q- |1101100 + 0b11_0110, (0b0 << 7) // 82 -R- |1101101 + 0b110_1101, // 83 -S- |1101110 (0b110_1110 << 1) // 84 -T- |1101111 + 0b1, (0b10_1111 << 2) // 85 -U- |1110000 + 0b11, (0b10000 << 3) // 86 -V- |1110001 + 0b111, (0b0001 << 4) // 87 -W- |1110010 + 0b1110, (0b010 << 5) // 88 -X- |11111100 + 0b11111, (0b100 << 5) // 89 -Y- |1110011 + 0b11100, (0b11 << 6) // 90 -Z- |11111101 + 0b11_1111, (0b01 << 6) // 91 -[- |11111111|11011 + 0b11_1111, (0b111_1011 << 1) // 92 -\- |11111111|11111110|000 + 0b1, 0b1111_1111, 0b1111_1100, (0b00 << 6) // 93 -]- |11111111|11100 + 0b11_1111, (0b111_1100 << 1) // 94 -^- |11111111|111100 + 0b1, 0b1111_1111, (0b11100 << 3) // 95 -_- |100010 + 0b100, (0b010 << 5) // 96 -`- |11111111|1111101 + 0b11111, 0b1111_1111, (0b01 << 6) // 97 -a- |00011 + (0b00011 << 1) // 98 -b- |100011 + 0b1, (0b00011 << 3) // 99 -c- |00100 + 0b001, (0b00 << 6) // 100 -d- |100100 + 0b10_0100, // 101 -e- |00101 (0b00101 << 3) // 102 -f- |100101 + 0b100, (0b101 << 5) // 103 -g- |100110 + 0b10011, (0b0 << 7) // 104 -h- |100111 + (0b10_0111 << 1) // 105 -i- |00110 + 0b0, (0b0110 << 4) // 106 -j- |1110100 + 0b1110, (0b100 << 5) // 107 -k- |1110101 + 0b11101, (0b01 << 6) // 108 -l- |101000 + 0b10_1000, // 109 -m- |101001 (0b10_1001 << 2) // 110 -n- |101010 + 0b10, (0b1010 << 4) // 111 -o- |00111 + 0b0011, (0b1 << 7) // 112 -p- |101011 + (0b10_1011 << 1) // 113 -q- |1110110 + 0b1, (0b11_0110 << 2) // 114 -r- |101100 + 0b10, (0b1100 << 4) // 115 -s- |01000 + 0b0100, (0b0 << 7) // 116 -t- |01001 + (0b01001 << 2) // 117 -u- |101101 + 0b10, (0b1101 << 4) // 118 -v- |1110111 + 0b1110, (0b111 << 5) // 119 -w- |1111000 + 0b11110, (0b00 << 6) // 120 -x- |1111001 + 0b11_1100, (0b1 << 7) // 121 -y- |1111010 + 0b111_1010, // 122 -z- |1111011 (0b111_1011 << 1) // 123 -{- |11111111|1111110 + 0b1, 0b1111_1111, (0b11_1110 << 2) // 124 -|- |11111111|100 + 0b11, 0b1111_1110, (0b0 << 7) // 125 -}- |11111111|111101 + 0b111_1111, (0b111_1101 << 1) // 126 -~- |11111111|11101 + 0b1, 0b1111_1111, (0b1101 << 4) // 127 
|11111111|11111111|11111111|1100 + 0b1111, 0b1111_1111, 0b1111_1111, 0b1111_1100, // 128 |11111111|11111110|0110 0b1111_1111, 0b1111_1110, (0b0110 << 4) // 129 |11111111|11111111|010010 + 0b1111, 0b1111_1111, 0b1111_0100, (0b10 << 6) // 130 |11111111|11111110|0111 + 0b11_1111, 0b1111_1111, (0b10_0111 << 2) // 131 |11111111|11111110|1000 + 0b11, 0b1111_1111, 0b1111_1010, (0b00 << 6) // 132 |11111111|11111111|010011 + 0b11_1111, 0b1111_1111, 0b1101_0011, // 133 |11111111|11111111|010100 0b1111_1111, 0b1111_1111, (0b01_0100 << 2) // 134 |11111111|11111111|010101 + 0b11, 0b1111_1111, 0b1111_1101, (0b0101 << 4) // 135 |11111111|11111111|1011001 + 0b1111, 0b1111_1111, 0b1111_1011, (0b001 << 5) // 136 |11111111|11111111|010110 + 0b11111, 0b1111_1111, 0b1110_1011, (0b0 << 7) // 137 |11111111|11111111|1011010 + 0b111_1111, 0b1111_1111, 0b1101_1010, // 138 |11111111|11111111|1011011 0b1111_1111, 0b1111_1111, (0b101_1011 << 1) // 139 |11111111|11111111|1011100 + 0b1, 0b1111_1111, 0b1111_1111, (0b01_1100 << 2) // 140 |11111111|11111111|1011101 + 0b11, 0b1111_1111, 0b1111_1110, (0b11101 << 3) // 141 |11111111|11111111|1011110 + 0b111, 0b1111_1111, 0b1111_1101, (0b1110 << 4) // 142 |11111111|11111111|11101011 + 0b1111, 0b1111_1111, 0b1111_1110, (0b1011 << 4) // 143 |11111111|11111111|1011111 + 0b1111, 0b1111_1111, 0b1111_1011, (0b111 << 5) // 144 |11111111|11111111|11101100 + 0b11111, 0b1111_1111, 0b1111_1101, (0b100 << 5) // 145 |11111111|11111111|11101101 + 0b11111, 0b1111_1111, 0b1111_1101, (0b101 << 5) // 146 |11111111|11111111|010111 + 0b11111, 0b1111_1111, 0b1110_1011, (0b1 << 7) // 147 |11111111|11111111|1100000 + 0b111_1111, 0b1111_1111, 0b1110_0000, // 148 |11111111|11111111|11101110 0b1111_1111, 0b1111_1111, 0b1110_1110, // 149 |11111111|11111111|1100001 0b1111_1111, 0b1111_1111, (0b110_0001 << 1) // 150 |11111111|11111111|1100010 + 0b1, 0b1111_1111, 0b1111_1111, (0b10_0010 << 2) // 151 |11111111|11111111|1100011 + 0b11, 0b1111_1111, 0b1111_1111, (0b00011 << 3) // 152 |11111111|11111111|1100100 + 0b111, 0b1111_1111, 0b1111_1110, (0b0100 << 4) // 153 |11111111|11111110|11100 + 0b1111, 0b1111_1111, 0b1110_1110, (0b0 << 7) // 154 |11111111|11111111|011000 + 0b111_1111, 0b1111_1111, (0b101_1000 << 1) // 155 |11111111|11111111|1100101 + 0b1, 0b1111_1111, 0b1111_1111, (0b10_0101 << 2) // 156 |11111111|11111111|011001 + 0b11, 0b1111_1111, 0b1111_1101, (0b1001 << 4) // 157 |11111111|11111111|1100110 + 0b1111, 0b1111_1111, 0b1111_1100, (0b110 << 5) // 158 |11111111|11111111|1100111 + 0b11111, 0b1111_1111, 0b1111_1001, (0b11 << 6) // 159 |11111111|11111111|11101111 + 0b11_1111, 0b1111_1111, 0b1111_1011, (0b11 << 6) // 160 |11111111|11111111|011010 + 0b11_1111, 0b1111_1111, 0b1101_1010, // 161 |11111111|11111110|11101 0b1111_1111, 0b1111_1110, (0b11101 << 3) // 162 |11111111|11111110|1001 + 0b111, 0b1111_1111, 0b1111_0100, (0b1 << 7) // 163 |11111111|11111111|011011 + 0b111_1111, 0b1111_1111, (0b101_1011 << 1) // 164 |11111111|11111111|011100 + 0b1, 0b1111_1111, 0b1111_1110, (0b11100 << 3) // 165 |11111111|11111111|1101000 + 0b111, 0b1111_1111, 0b1111_1110, (0b1000 << 4) // 166 |11111111|11111111|1101001 + 0b1111, 0b1111_1111, 0b1111_1101, (0b001 << 5) // 167 |11111111|11111110|11110 + 0b11111, 0b1111_1111, 0b1101_1110, // 168 |11111111|11111111|1101010 0b1111_1111, 0b1111_1111, (0b110_1010 << 1) // 169 |11111111|11111111|011101 + 0b1, 0b1111_1111, 0b1111_1110, (0b11101 << 3) // 170 |11111111|11111111|011110 + 0b111, 0b1111_1111, 0b1111_1011, (0b110 << 5) // 171 |11111111|11111111|11110000 + 0b11111, 
0b1111_1111, 0b1111_1110, (0b000 << 5) // 172 |11111111|11111110|11111 + 0b11111, 0b1111_1111, 0b1101_1111, // 173 |11111111|11111111|011111 0b1111_1111, 0b1111_1111, (0b01_1111 << 2) // 174 |11111111|11111111|1101011 + 0b11, 0b1111_1111, 0b1111_1111, (0b01011 << 3) // 175 |11111111|11111111|1101100 + 0b111, 0b1111_1111, 0b1111_1110, (0b1100 << 4) // 176 |11111111|11111111|00000 + 0b1111, 0b1111_1111, 0b1111_0000, (0b0 << 7) // 177 |11111111|11111111|00001 + 0b111_1111, 0b1111_1111, (0b10_0001 << 2) // 178 |11111111|11111111|100000 + 0b11, 0b1111_1111, 0b1111_1110, (0b0000 << 4) // 179 |11111111|11111111|00010 + 0b1111, 0b1111_1111, 0b1111_0001, (0b0 << 7) // 180 |11111111|11111111|1101101 + 0b111_1111, 0b1111_1111, 0b1110_1101, // 181 |11111111|11111111|100001 0b1111_1111, 0b1111_1111, (0b10_0001 << 2) // 182 |11111111|11111111|1101110 + 0b11, 0b1111_1111, 0b1111_1111, (0b01110 << 3) // 183 |11111111|11111111|1101111 + 0b111, 0b1111_1111, 0b1111_1110, (0b1111 << 4) // 184 |11111111|11111110|1010 + 0b1111, 0b1111_1111, 0b1110_1010, // 185 |11111111|11111111|100010 0b1111_1111, 0b1111_1111, (0b10_0010 << 2) // 186 |11111111|11111111|100011 + 0b11, 0b1111_1111, 0b1111_1110, (0b0011 << 4) // 187 |11111111|11111111|100100 + 0b1111, 0b1111_1111, 0b1111_1001, (0b00 << 6) // 188 |11111111|11111111|1110000 + 0b11_1111, 0b1111_1111, 0b1111_1000, (0b0 << 7) // 189 |11111111|11111111|100101 + 0b111_1111, 0b1111_1111, (0b110_0101 << 1) // 190 |11111111|11111111|100110 + 0b1, 0b1111_1111, 0b1111_1111, (0b00110 << 3) // 191 |11111111|11111111|1110001 + 0b111, 0b1111_1111, 0b1111_1111, (0b0001 << 4) // 192 |11111111|11111111|11111000|00 + 0b1111, 0b1111_1111, 0b1111_1111, (0b10_0000 << 2) // 193 |11111111|11111111|11111000|01 + 0b11, 0b1111_1111, 0b1111_1111, 0b1110_0001, // 194 |11111111|11111110|1011 0b1111_1111, 0b1111_1110, (0b1011 << 4) // 195 |11111111|11111110|001 + 0b1111, 0b1111_1111, (0b111_0001 << 1) // 196 |11111111|11111111|100111 + 0b1, 0b1111_1111, 0b1111_1111, (0b00111 << 3) // 197 |11111111|11111111|1110010 + 0b111, 0b1111_1111, 0b1111_1111, (0b0010 << 4) // 198 |11111111|11111111|101000 + 0b1111, 0b1111_1111, 0b1111_1010, (0b00 << 6) // 199 |11111111|11111111|11110110|0 + 0b11_1111, 0b1111_1111, 0b1111_1101, (0b100 << 5) // 200 |11111111|11111111|11111000|10 + 0b11111, 0b1111_1111, 0b1111_1111, (0b00010 << 3) // 201 |11111111|11111111|11111000|11 + 0b111, 0b1111_1111, 0b1111_1111, (0b110_0011 << 1) // 202 |11111111|11111111|11111001|00 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_0010, (0b0 << 7) // 203 |11111111|11111111|11111011|110 + 0b111_1111, 0b1111_1111, 0b1111_1101, (0b1110 << 4) // 204 |11111111|11111111|11111011|111 + 0b1111, 0b1111_1111, 0b1111_1111, (0b101_1111 << 1) // 205 |11111111|11111111|11111001|01 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_0010, (0b1 << 7) // 206 |11111111|11111111|11110001 + 0b111_1111, 0b1111_1111, 0b1111_1000, (0b1 << 7) // 207 |11111111|11111111|11110110|1 + 0b111_1111, 0b1111_1111, 0b1111_1011, (0b01 << 6) // 208 |11111111|11111110|010 + 0b11_1111, 0b1111_1111, (0b10010 << 3) // 209 |11111111|11111111|00011 + 0b111, 0b1111_1111, 0b1111_1000, (0b11 << 6) // 210 |11111111|11111111|11111001|10 + 0b11_1111, 0b1111_1111, 0b1111_1110, (0b0110 << 4) // 211 |11111111|11111111|11111100|000 + 0b1111, 0b1111_1111, 0b1111_1111, (0b110_0000 << 1) // 212 |11111111|11111111|11111100|001 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b01 << 6) // 213 |11111111|11111111|11111001|11 + 0b11_1111, 0b1111_1111, 0b1111_1110, (0b0111 << 4) // 214 
|11111111|11111111|11111100|010 + 0b1111, 0b1111_1111, 0b1111_1111, (0b110_0010 << 1) // 215 |11111111|11111111|11110010 + 0b1, 0b1111_1111, 0b1111_1111, (0b111_0010 << 1) // 216 |11111111|11111111|00100 + 0b1, 0b1111_1111, 0b1111_1110, (0b0100 << 4) // 217 |11111111|11111111|00101 + 0b1111, 0b1111_1111, 0b1111_0010, (0b1 << 7) // 218 |11111111|11111111|11111010|00 + 0b111_1111, 0b1111_1111, 0b1111_1101, (0b000 << 5) // 219 |11111111|11111111|11111010|01 + 0b11111, 0b1111_1111, 0b1111_1111, (0b01001 << 3) // 220 |11111111|11111111|11111111|1101 + 0b111, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b1 << 7) // 221 |11111111|11111111|11111100|011 + 0b111_1111, 0b1111_1111, 0b1111_1110, (0b0011 << 4) // 222 |11111111|11111111|11111100|100 + 0b1111, 0b1111_1111, 0b1111_1111, (0b110_0100 << 1) // 223 |11111111|11111111|11111100|101 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b01 << 6) // 224 |11111111|11111110|1100 + 0b11_1111, 0b1111_1111, (0b10_1100 << 2) // 225 |11111111|11111111|11110011 + 0b11, 0b1111_1111, 0b1111_1111, (0b11_0011 << 2) // 226 |11111111|11111110|1101 + 0b11, 0b1111_1111, 0b1111_1011, (0b01 << 6) // 227 |11111111|11111111|00110 + 0b11_1111, 0b1111_1111, (0b110_0110 << 1) // 228 |11111111|11111111|101001 + 0b1, 0b1111_1111, 0b1111_1111, (0b01001 << 3) // 229 |11111111|11111111|00111 + 0b111, 0b1111_1111, 0b1111_1001, (0b11 << 6) // 230 |11111111|11111111|01000 + 0b11_1111, 0b1111_1111, (0b110_1000 << 1) // 231 |11111111|11111111|1110011 + 0b1, 0b1111_1111, 0b1111_1111, (0b11_0011 << 2) // 232 |11111111|11111111|101010 + 0b11, 0b1111_1111, 0b1111_1110, (0b1010 << 4) // 233 |11111111|11111111|101011 + 0b1111, 0b1111_1111, 0b1111_1010, (0b11 << 6) // 234 |11111111|11111111|11110111|0 + 0b11_1111, 0b1111_1111, 0b1111_1101, (0b110 << 5) // 235 |11111111|11111111|11110111|1 + 0b11111, 0b1111_1111, 0b1111_1110, (0b1111 << 4) // 236 |11111111|11111111|11110100 + 0b1111, 0b1111_1111, 0b1111_1111, (0b0100 << 4) // 237 |11111111|11111111|11110101 + 0b1111, 0b1111_1111, 0b1111_1111, (0b0101 << 4) // 238 |11111111|11111111|11111010|10 + 0b1111, 0b1111_1111, 0b1111_1111, (0b10_1010 << 2) // 239 |11111111|11111111|1110100 + 0b11, 0b1111_1111, 0b1111_1111, (0b10100 << 3) // 240 |11111111|11111111|11111010|11 + 0b111, 0b1111_1111, 0b1111_1111, (0b110_1011 << 1) // 241 |11111111|11111111|11111100|110 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b10 << 6) // 242 |11111111|11111111|11111011|00 + 0b11_1111, 0b1111_1111, 0b1111_1110, (0b1100 << 4) // 243 |11111111|11111111|11111011|01 + 0b1111, 0b1111_1111, 0b1111_1111, (0b10_1101 << 2) // 244 |11111111|11111111|11111100|111 + 0b11, 0b1111_1111, 0b1111_1111, 0b1111_0011, (0b1 << 7) // 245 |11111111|11111111|11111101|000 + 0b111_1111, 0b1111_1111, 0b1111_1110, (0b1000 << 4) // 246 |11111111|11111111|11111101|001 + 0b1111, 0b1111_1111, 0b1111_1111, (0b110_1001 << 1) // 247 |11111111|11111111|11111101|010 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b10 << 6) // 248 |11111111|11111111|11111101|011 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b01011 << 3) // 249 |11111111|11111111|11111111|1110 + 0b111, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0 << 7) // 250 |11111111|11111111|11111101|100 + 0b111_1111, 0b1111_1111, 0b1111_1110, (0b1100 << 4) // 251 |11111111|11111111|11111101|101 + 0b1111, 0b1111_1111, 0b1111_1111, (0b110_1101 << 1) // 252 |11111111|11111111|11111101|110 + 0b1, 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b10 << 6) // 253 |11111111|11111111|11111101|111 + 0b11_1111, 0b1111_1111, 0b1111_1111, (0b01111 << 3) // 254 
|11111111|11111111|11111110|000 + 0b111, 0b1111_1111, 0b1111_1111, 0b1111_0000, // 255 |11111111|11111111|11111011|10 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b10 << 6) // 256 eof |11111111|11111111|11111111|111111 + 0b11_1111, 0b1111_1111, 0b1111_1111, 0b1111_1111, ]; let expected = (0u8..=255).collect(); let res: Result, Error> = bytes.hpack_decode().collect(); assert_eq!(res, Ok(expected)); } } h3-0.0.6/src/qpack/prefix_string/encode.rs000064400000000000000000002305171046102023000165250ustar 00000000000000use super::BitWindow; #[derive(Debug, PartialEq)] pub struct Error { buffer_pos: BitWindow, len: usize, capacity: usize, text: String, } #[derive(Clone, Debug)] struct EncodeValue { buffer: &'static [u8], bit_count: u32, } #[derive(Clone, Debug)] struct HuffmanEncoder { buffer_pos: BitWindow, buffer: Vec, } impl HuffmanEncoder { fn new() -> HuffmanEncoder { HuffmanEncoder { buffer_pos: BitWindow::new(), buffer: Vec::new(), } } fn ensure_free_space(&mut self, bit_count: u32) { let mut end_range = self.buffer_pos.clone(); end_range.forwards(bit_count); end_range.forwards(0); // buffer still has enough space to work on if self.buffer.len() > end_range.byte as usize { return; } // optimisation to grow capacity before pushing data if self.buffer.capacity() <= end_range.byte as usize { self.buffer.reserve(((7 * end_range.byte) / 4) as usize); } let forward = end_range.byte as usize - self.buffer.len() + if end_range.bit > 0 { 1 } else { 0 }; for _ in 0..forward { // push filler value that will end huffman decoding if not // modified self.buffer.push(255); } } fn put(&mut self, code: u8) -> Result<(), Error> { let encode_value = &HPACK_STRING[code as usize]; self.ensure_free_space(encode_value.bit_count); let mut rest = encode_value.bit_count; for i in 0..encode_value.buffer.len() { let part = encode_value.buffer[i]; self.buffer_pos.forwards(if rest < 8 { rest } else { 8 }); rest -= self.buffer_pos.count; write_bits(&mut self.buffer, &self.buffer_pos, part) } Ok(()) } fn ends(self) -> Result, Error> { Ok(self.buffer) } } /// Write bits from `value` to the `out` slice /// /// Write the least significant `pos.count` bits from `value` to the position specified by /// `(pos.byte, pos.bit)`. Writes may span multiple bytes. `out` is expected to be long enough /// to write these bits; this is ensured by `HuffmanEncoder::ensure_free_space()`, which is /// always called prior to calling this function. /// /// The bits to be written to are expected to be set to 1 when calling this function. Similarly, /// this function maintains the invariant that unused bits in the output bytes are set to 1. 
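/// For example: with `out == [0b1111_1111]`, writing `value == 0b101` at
/// `byte == 0, bit == 2, count == 3` leaves `out == [0b1110_1111]`: the three
/// freshly written bits are `101`, and every other bit keeps its all-ones padding.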
fn write_bits(out: &mut [u8], pos: &BitWindow, value: u8) { debug_assert!(pos.bit < 8); debug_assert!(pos.count <= 8); debug_assert!(pos.count > 0); if (pos.bit + pos.count) <= 8 { // Bits to be written to fit in a single byte debug_assert_eq!(out[pos.byte as usize] | PAD_LEFT[pos.bit as usize], 255); let pad_left = out[pos.byte as usize] | PAD_RIGHT[(8 - pos.bit) as usize]; let shifted = value << (8 - pos.bit - pos.count) | PAD_LEFT[pos.bit as usize]; let pad_right = PAD_RIGHT[(8 - pos.count - pos.bit) as usize]; out[pos.byte as usize] = (pad_left & shifted) | pad_right; } else { // Bits to be written to span two bytes debug_assert_eq!(out[pos.byte as usize] | PAD_LEFT[pos.bit as usize], 255); let split = 8 - pos.bit; let pad_left = out[pos.byte as usize] | PAD_RIGHT[split as usize]; let shifted = (value >> (pos.count - split)) | PAD_LEFT[pos.bit as usize]; out[pos.byte as usize] = pad_left & shifted; let rem = 8 - (pos.count - split); out[(pos.byte + 1) as usize] = (value << rem) | PAD_RIGHT[rem as usize]; } } const PAD_RIGHT: [u8; 9] = [0, 1, 3, 7, 15, 31, 63, 127, 255]; const PAD_LEFT: [u8; 9] = [0, 128, 192, 224, 240, 248, 252, 254, 255]; macro_rules! bits_encode { [ $( ( $len:expr => [ $( $byte:expr ),* ] ), )* ] => { [ $( EncodeValue{ buffer: &[ $( $byte as u8 ),* ], bit_count: $len } , )* ] } } const HPACK_STRING: [EncodeValue; 256] = bits_encode![ ( 13 => [0b1111_1111, 0b0001_1000]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1000]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0010]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0011]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0100]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0101]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0110]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0111]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1000]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1010]), ( 30 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0011_1100]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1001]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1010]), ( 30 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0011_1101]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1011]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1100]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1101]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1110]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_1111]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0000]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0001]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0010]), ( 30 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0011_1110]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0011]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0100]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0101]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0110]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_0111]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1000]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1001]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1010]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1011]), ( 6 => [0b0001_0100]), ( 10 => [0b1111_1110, 0b0000_0000]), // '!' 
( 10 => [0b1111_1110, 0b0000_0001]), // ';' ( 12 => [0b1111_1111, 0b0000_1010]), // '#' ( 13 => [0b1111_1111, 0b0001_1001]), // '$' ( 6 => [0b0001_0101]), // '%' ( 8 => [0b1111_1000]), // '&' ( 11 => [0b1111_1111, 0b0000_0010]), // ''' ( 10 => [0b1111_1110, 0b0000_0010]), // '(' ( 10 => [0b1111_1110, 0b0000_0011]), // ')' ( 8 => [0b1111_1001]), // '*' ( 11 => [0b1111_1111, 0b0000_0011]), // '+' ( 8 => [0b1111_1010]), // ',' ( 6 => [0b0001_0110]), // '-' ( 6 => [0b0001_0111]), // '.' ( 6 => [0b0001_1000]), // '/' ( 5 => [0b0000_0000]), // '0' ( 5 => [0b0000_0001]), // '1' ( 5 => [0b0000_0010]), // '2' ( 6 => [0b0001_1001]), // '3' ( 6 => [0b0001_1010]), // '4' ( 6 => [0b0001_1011]), // '5' ( 6 => [0b0001_1100]), // '6' ( 6 => [0b0001_1101]), // '7' ( 6 => [0b0001_1110]), // '8' ( 6 => [0b0001_1111]), // '9' ( 7 => [0b0101_1100]), // ':' ( 8 => [0b1111_1011]), ( 15 => [0b1111_1111, 0b0111_1100]), // '<' ( 6 => [0b0010_0000]), // '=' ( 12 => [0b1111_1111, 0b0000_1011]), // '>' ( 10 => [0b1111_1111, 0b0000_0000]), // '?' ( 13 => [0b1111_1111, 0b0001_1010]), // '@' ( 6 => [0b0010_0001]), // 'A' ( 7 => [0b0101_1101]), // 'B' ( 7 => [0b0101_1110]), // 'C' ( 7 => [0b0101_1111]), // 'D' ( 7 => [0b0110_0000]), // 'E' ( 7 => [0b0110_0001]), // 'F' ( 7 => [0b0110_0010]), // 'G' ( 7 => [0b0110_0011]), // 'H' ( 7 => [0b0110_0100]), // 'I' ( 7 => [0b0110_0101]), // 'J' ( 7 => [0b0110_0110]), // 'K' ( 7 => [0b0110_0111]), // 'L' ( 7 => [0b0110_1000]), // 'M' ( 7 => [0b0110_1001]), // 'N' ( 7 => [0b0110_1010]), // 'O' ( 7 => [0b0110_1011]), // 'P' ( 7 => [0b0110_1100]), // 'Q' ( 7 => [0b0110_1101]), // 'R' ( 7 => [0b0110_1110]), // 'S' ( 7 => [0b0110_1111]), // 'T' ( 7 => [0b0111_0000]), // 'U' ( 7 => [0b0111_0001]), // 'V' ( 7 => [0b0111_0010]), // 'W' ( 8 => [0b1111_1100]), // 'X' ( 7 => [0b0111_0011]), // 'Y' ( 8 => [0b1111_1101]), // 'Z' ( 13 => [0b1111_1111, 0b0001_1011]), // '[' ( 19 => [0b1111_1111, 0b1111_1110, 0b0000_0000]), // '\' ( 13 => [0b1111_1111, 0b0001_1100]), // ']' ( 14 => [0b1111_1111, 0b0011_1100]), // '^' ( 6 => [0b0010_0010]), // '_' ( 15 => [0b1111_1111, 0b0111_1101]), // '`' ( 5 => [0b0000_0011]), // 'a' ( 6 => [0b0010_0011]), // 'b' ( 5 => [0b0000_0100]), // 'c' ( 6 => [0b0010_0100]), // 'd' ( 5 => [0b0000_0101]), // 'e' ( 6 => [0b0010_0101]), // 'f' ( 6 => [0b0010_0110]), // 'g' ( 6 => [0b0010_0111]), // 'h' ( 5 => [0b0000_0110]), // 'i' ( 7 => [0b0111_0100]), // 'j' ( 7 => [0b0111_0101]), // 'k' ( 6 => [0b0010_1000]), // 'l' ( 6 => [0b0010_1001]), // 'm' ( 6 => [0b0010_1010]), // 'n' ( 5 => [0b0000_0111]), // 'o' ( 6 => [0b0010_1011]), // 'p' ( 7 => [0b0111_0110]), // 'q' ( 6 => [0b0010_1100]), // 'r' ( 5 => [0b0000_1000]), // 's' ( 5 => [0b0000_1001]), // 't' ( 6 => [0b0010_1101]), // 'u' ( 7 => [0b0111_0111]), // 'v' ( 7 => [0b0111_1000]), // 'w' ( 7 => [0b0111_1001]), // 'x' ( 7 => [0b0111_1010]), // 'y' ( 7 => [0b0111_1011]), // 'z' ( 15 => [0b1111_1111, 0b0111_1110]), // '{' ( 11 => [0b1111_1111, 0b0000_0100]), // '|' ( 14 => [0b1111_1111, 0b0011_1101]), // '}' ( 13 => [0b1111_1111, 0b0001_1101]), // '~' ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1100]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_0110]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_0010]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_0111]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1000]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_0011]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_0100]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_0101]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1001]), ( 22 => 
[0b1111_1111, 0b1111_1111, 0b0001_0110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1010]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1011]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1100]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1101]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1110]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1011]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0101_1111]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1100]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1101]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_0111]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0000]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0001]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0010]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0011]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0100]), ( 21 => [0b1111_1111, 0b1111_1110, 0b0001_1100]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1000]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0101]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1001]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_0111]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1110_1111]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1010]), ( 21 => [0b1111_1111, 0b1111_1110, 0b0001_1101]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1001]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1011]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1100]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1000]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1001]), ( 21 => [0b1111_1111, 0b1111_1110, 0b0001_1110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1010]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1101]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1110]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0000]), ( 21 => [0b1111_1111, 0b1111_1110, 0b0001_1111]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0001_1111]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1011]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1100]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0000]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0001]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0000]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0010]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1101]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0001]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0110_1111]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1010]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0010]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0011]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0100]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0111_0000]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0101]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0110]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0111_0001]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1000, 0b0000_0000]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1000, 0b0000_0001]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1011]), ( 19 => [0b1111_1111, 0b1111_1110, 0b0000_0001]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_0111]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0111_0010]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_1000]), ( 25 => [0b1111_1111, 0b1111_1111, 0b1111_0110, 0b0000_0000]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1000, 0b0000_0010]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1000, 0b0000_0011]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1001, 0b0000_0000]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1011, 0b0000_0110]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1011, 0b0000_0111]), ( 26 => 
[0b1111_1111, 0b1111_1111, 0b1111_1001, 0b0000_0001]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0001]), ( 25 => [0b1111_1111, 0b1111_1111, 0b1111_0110, 0b0000_0001]), ( 19 => [0b1111_1111, 0b1111_1110, 0b0000_0010]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0011]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1001, 0b0000_0010]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0000]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0001]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1001, 0b0000_0011]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0010]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0010]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0100]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0101]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1010, 0b0000_0000]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1010, 0b0000_0001]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1101]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0011]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0100]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0101]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1100]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0011]), ( 20 => [0b1111_1111, 0b1111_1110, 0b0000_1101]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0110]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_1001]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_0111]), ( 21 => [0b1111_1111, 0b1111_1111, 0b0000_1000]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0111_0011]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_1010]), ( 22 => [0b1111_1111, 0b1111_1111, 0b0010_1011]), ( 25 => [0b1111_1111, 0b1111_1111, 0b1111_0111, 0b0000_0000]), ( 25 => [0b1111_1111, 0b1111_1111, 0b1111_0111, 0b0000_0001]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0100]), ( 24 => [0b1111_1111, 0b1111_1111, 0b1111_0101]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1010, 0b0000_0010]), ( 23 => [0b1111_1111, 0b1111_1111, 0b0111_0100]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1010, 0b0000_0011]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0110]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1011, 0b0000_0000]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1011, 0b0000_0001]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1100, 0b0000_0111]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0000]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0001]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0010]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0011]), ( 28 => [0b1111_1111, 0b1111_1111, 0b1111_1111, 0b0000_1110]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0100]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0101]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0110]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1101, 0b0000_0111]), ( 27 => [0b1111_1111, 0b1111_1111, 0b1111_1110, 0b0000_0000]), ( 26 => [0b1111_1111, 0b1111_1111, 0b1111_1011, 0b0000_0010]), ]; pub trait HpackStringEncode { fn hpack_encode(&self) -> Result, Error>; } impl HpackStringEncode for Vec { fn hpack_encode(&self) -> Result, Error> { let mut encoder = HuffmanEncoder::new(); for code in self { encoder.put(*code)?; } encoder.ends() } } #[cfg(test)] mod tests { #![allow(clippy::identity_op)] use super::*; #[test] fn test_set_bits() { let mut buf = [0b1111_1111; 16]; // Write a full 8 bits into a single byte let mut pos = BitWindow { count: 8, ..Default::default() }; write_bits(&mut buf, &pos, 0b1_0101); assert_eq!(&buf[..1], &[0b1_0101]); pos.byte += 1; // 7-bit 
byte-spanning writes at each possible bit offset pos.count = 7; for _ in 0..8 { write_bits(&mut buf, &pos, 0b101_0101); pos.forwards(7); } assert_eq!( &buf[1..8], &[ 0b1010_1011, 0b0101_0110, 0b1010_1101, 0b0101_1010, 0b1011_0101, 0b0110_1010, 0b1101_0101 ] ); // Single-write partial bits, aligned with byte start pos.count = 5; write_bits(&mut buf, &pos, 0b1_0101); assert_eq!(&buf[8..9], &[0b1010_1111]); } macro_rules! encoding { [ $( $code:expr => $( $byte:expr ),* ; )* ] => { $( { let bytes = vec![$( $byte ),*]; let res = vec![ $code ].hpack_encode(); assert_eq!(res, Ok(bytes), "fail to encode {}", $code); } )* } } /** * https://tools.ietf.org/html/rfc7541 * Appendix B. Huffman Code */ #[test] #[allow(clippy::cognitive_complexity)] fn test_encode_single_value() { encoding![ 48 => (0b0000_0000 << 3) | /* padding */ 0b0000_0111; // '0' 49 => (0b0000_0001 << 3) | /* padding */ 0b0000_0111; // '1' 50 => (0b0000_0010 << 3) | /* padding */ 0b0000_0111; // '2' 97 => (0b0000_0011 << 3) | /* padding */ 0b0000_0111; // 'a' 99 => (0b0000_0100 << 3) | /* padding */ 0b0000_0111; // 'c' 101 => (0b0000_0101 << 3) | /* padding */ 0b0000_0111; // 'e' 105 => (0b0000_0110 << 3) | /* padding */ 0b0000_0111; // 'i' 111 => (0b0000_0111 << 3) | /* padding */ 0b0000_0111; // 'o' 115 => (0b0000_1000 << 3) | /* padding */ 0b0000_0111; // 's' 116 => (0b0000_1001 << 3) | /* padding */ 0b0000_0111; // 't' 32 => (0b0001_0100 << 2) | /* padding */ 0b0000_0011; 37 => (0b0001_0101 << 2) | /* padding */ 0b0000_0011; // '%' 45 => (0b0001_0110 << 2) | /* padding */ 0b0000_0011; // '-' 46 => (0b0001_0111 << 2) | /* padding */ 0b0000_0011; // '.' 47 => (0b0001_1000 << 2) | /* padding */ 0b0000_0011; // '/' 51 => (0b0001_1001 << 2) | /* padding */ 0b0000_0011; // '3' 52 => (0b0001_1010 << 2) | /* padding */ 0b0000_0011; // '4' 53 => (0b0001_1011 << 2) | /* padding */ 0b0000_0011; // '5' 54 => (0b0001_1100 << 2) | /* padding */ 0b0000_0011; // '6' 55 => (0b0001_1101 << 2) | /* padding */ 0b0000_0011; // '7' 56 => (0b0001_1110 << 2) | /* padding */ 0b0000_0011; // '8' 57 => (0b0001_1111 << 2) | /* padding */ 0b0000_0011; // '9' 61 => (0b0010_0000 << 2) | /* padding */ 0b0000_0011; // '=' 65 => (0b0010_0001 << 2) | /* padding */ 0b0000_0011; // 'A' 95 => (0b0010_0010 << 2) | /* padding */ 0b0000_0011; // '_' 98 => (0b0010_0011 << 2) | /* padding */ 0b0000_0011; // 'b' 100 => (0b0010_0100 << 2) | /* padding */ 0b0000_0011; // 'd' 102 => (0b0010_0101 << 2) | /* padding */ 0b0000_0011; // 'f' 103 => (0b0010_0110 << 2) | /* padding */ 0b0000_0011; // 'g' 104 => (0b0010_0111 << 2) | /* padding */ 0b0000_0011; // 'h' 108 => (0b0010_1000 << 2) | /* padding */ 0b0000_0011; // 'l' 109 => (0b0010_1001 << 2) | /* padding */ 0b0000_0011; // 'm' 110 => (0b0010_1010 << 2) | /* padding */ 0b0000_0011; // 'n' 112 => (0b0010_1011 << 2) | /* padding */ 0b0000_0011; // 'p' 114 => (0b0010_1100 << 2) | /* padding */ 0b0000_0011; // 'r' 117 => (0b0010_1101 << 2) | /* padding */ 0b0000_0011; // 'u' 58 => (0b0101_1100 << 1) | /* padding */ 0b0000_0001; // ':' 66 => (0b0101_1101 << 1) | /* padding */ 0b0000_0001; // 'B' 67 => (0b0101_1110 << 1) | /* padding */ 0b0000_0001; // 'C' 68 => (0b0101_1111 << 1) | /* padding */ 0b0000_0001; // 'D' 69 => (0b0110_0000 << 1) | /* padding */ 0b0000_0001; // 'E' 70 => (0b0110_0001 << 1) | /* padding */ 0b0000_0001; // 'F' 71 => (0b0110_0010 << 1) | /* padding */ 0b0000_0001; // 'G' 72 => (0b0110_0011 << 1) | /* padding */ 0b0000_0001; // 'H' 73 => (0b0110_0100 << 1) | /* padding */ 0b0000_0001; // 'I' 74 => 
(0b0110_0101 << 1) | /* padding */ 0b0000_0001; // 'J' 75 => (0b0110_0110 << 1) | /* padding */ 0b0000_0001; // 'K' 76 => (0b0110_0111 << 1) | /* padding */ 0b0000_0001; // 'L' 77 => (0b0110_1000 << 1) | /* padding */ 0b0000_0001; // 'M' 78 => (0b0110_1001 << 1) | /* padding */ 0b0000_0001; // 'N' 79 => (0b0110_1010 << 1) | /* padding */ 0b0000_0001; // 'O' 80 => (0b0110_1011 << 1) | /* padding */ 0b0000_0001; // 'P' 81 => (0b0110_1100 << 1) | /* padding */ 0b0000_0001; // 'Q' 82 => (0b0110_1101 << 1) | /* padding */ 0b0000_0001; // 'R' 83 => (0b0110_1110 << 1) | /* padding */ 0b0000_0001; // 'S' 84 => (0b0110_1111 << 1) | /* padding */ 0b0000_0001; // 'T' 85 => (0b0111_0000 << 1) | /* padding */ 0b0000_0001; // 'U' 86 => (0b0111_0001 << 1) | /* padding */ 0b0000_0001; // 'V' 87 => (0b0111_0010 << 1) | /* padding */ 0b0000_0001; // 'W' 89 => (0b0111_0011 << 1) | /* padding */ 0b0000_0001; // 'Y' 106 => (0b0111_0100 << 1) | /* padding */ 0b0000_0001; // 'j' 107 => (0b0111_0101 << 1) | /* padding */ 0b0000_0001; // 'k' 113 => (0b0111_0110 << 1) | /* padding */ 0b0000_0001; // 'q' 118 => (0b0111_0111 << 1) | /* padding */ 0b0000_0001; // 'v' 119 => (0b0111_1000 << 1) | /* padding */ 0b0000_0001; // 'w' 120 => (0b0111_1001 << 1) | /* padding */ 0b0000_0001; // 'x' 121 => (0b0111_1010 << 1) | /* padding */ 0b0000_0001; // 'y' 122 => (0b0111_1011 << 1) | /* padding */ 0b0000_0001; // 'z' 38 => 0b1111_1000; // '&' 42 => 0b1111_1001; // '*' 44 => 0b1111_1010; // ',' 59 => 0b1111_1011; 88 => 0b1111_1100; // 'X' 90 => 0b1111_1101; // 'Z' 33 => 0b1111_1110, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; // '!' 34 => 0b1111_1110, (0b0000_0001 << 6) | /* padding */ 0b0011_1111; // '"' 40 => 0b1111_1110, (0b0000_0010 << 6) | /* padding */ 0b0011_1111; // '(' 41 => 0b1111_1110, (0b0000_0011 << 6) | /* padding */ 0b0011_1111; // ')' 63 => 0b1111_1111, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; // '?' 
39 => 0b1111_1111, (0b0000_0010 << 5) | /* padding */ 0b0001_1111; // ''' 43 => 0b1111_1111, (0b0000_0011 << 5) | /* padding */ 0b0001_1111; // '+' 124 => 0b1111_1111, (0b0000_0100 << 5) | /* padding */ 0b0001_1111; // '|' 35 => 0b1111_1111, (0b0000_1010 << 4) | /* padding */ 0b0000_1111; // '#' 62 => 0b1111_1111, (0b0000_1011 << 4) | /* padding */ 0b0000_1111; // '>' 0 => 0b1111_1111, (0b0001_1000 << 3) | /* padding */ 0b0000_0111; 36 => 0b1111_1111, (0b0001_1001 << 3) | /* padding */ 0b0000_0111; // '$' 64 => 0b1111_1111, (0b0001_1010 << 3) | /* padding */ 0b0000_0111; // '@' 91 => 0b1111_1111, (0b0001_1011 << 3) | /* padding */ 0b0000_0111; // '[' 93 => 0b1111_1111, (0b0001_1100 << 3) | /* padding */ 0b0000_0111; // ']' 126 => 0b1111_1111, (0b0001_1101 << 3) | /* padding */ 0b0000_0111; // '~' 94 => 0b1111_1111, (0b0011_1100 << 2) | /* padding */ 0b0000_0011; // '^' 125 => 0b1111_1111, (0b0011_1101 << 2) | /* padding */ 0b0000_0011; // '}' 60 => 0b1111_1111, (0b0111_1100 << 1) | /* padding */ 0b0000_0001; // '<' 96 => 0b1111_1111, (0b0111_1101 << 1) | /* padding */ 0b0000_0001; // '`' 123 => 0b1111_1111, (0b0111_1110 << 1) | /* padding */ 0b0000_0001; // '{' 92 => 0b1111_1111, 0b1111_1110, (0b0000_0000 << 5) | /* padding */ 0b0001_1111; // '\' 195 => 0b1111_1111, 0b1111_1110, (0b0000_0001 << 5) | /* padding */ 0b0001_1111; 208 => 0b1111_1111, 0b1111_1110, (0b0000_0010 << 5) | /* padding */ 0b0001_1111; 128 => 0b1111_1111, 0b1111_1110, (0b0000_0110 << 4) | /* padding */ 0b0000_1111; 130 => 0b1111_1111, 0b1111_1110, (0b0000_0111 << 4) | /* padding */ 0b0000_1111; 131 => 0b1111_1111, 0b1111_1110, (0b0000_1000 << 4) | /* padding */ 0b0000_1111; 162 => 0b1111_1111, 0b1111_1110, (0b0000_1001 << 4) | /* padding */ 0b0000_1111; 184 => 0b1111_1111, 0b1111_1110, (0b0000_1010 << 4) | /* padding */ 0b0000_1111; 194 => 0b1111_1111, 0b1111_1110, (0b0000_1011 << 4) | /* padding */ 0b0000_1111; 224 => 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) | /* padding */ 0b0000_1111; 226 => 0b1111_1111, 0b1111_1110, (0b0000_1101 << 4) | /* padding */ 0b0000_1111; 153 => 0b1111_1111, 0b1111_1110, (0b0001_1100 << 3) | /* padding */ 0b0000_0111; 161 => 0b1111_1111, 0b1111_1110, (0b0001_1101 << 3) | /* padding */ 0b0000_0111; 167 => 0b1111_1111, 0b1111_1110, (0b0001_1110 << 3) | /* padding */ 0b0000_0111; 172 => 0b1111_1111, 0b1111_1110, (0b0001_1111 << 3) | /* padding */ 0b0000_0111; 176 => 0b1111_1111, 0b1111_1111, (0b0000_0000 << 3) | /* padding */ 0b0000_0111; 177 => 0b1111_1111, 0b1111_1111, (0b0000_0001 << 3) | /* padding */ 0b0000_0111; 179 => 0b1111_1111, 0b1111_1111, (0b0000_0010 << 3) | /* padding */ 0b0000_0111; 209 => 0b1111_1111, 0b1111_1111, (0b0000_0011 << 3) | /* padding */ 0b0000_0111; 216 => 0b1111_1111, 0b1111_1111, (0b0000_0100 << 3) | /* padding */ 0b0000_0111; 217 => 0b1111_1111, 0b1111_1111, (0b0000_0101 << 3) | /* padding */ 0b0000_0111; 227 => 0b1111_1111, 0b1111_1111, (0b0000_0110 << 3) | /* padding */ 0b0000_0111; 229 => 0b1111_1111, 0b1111_1111, (0b0000_0111 << 3) | /* padding */ 0b0000_0111; 230 => 0b1111_1111, 0b1111_1111, (0b0000_1000 << 3) | /* padding */ 0b0000_0111; 129 => 0b1111_1111, 0b1111_1111, (0b0001_0010 << 2) | /* padding */ 0b0000_0011; 132 => 0b1111_1111, 0b1111_1111, (0b0001_0011 << 2) | /* padding */ 0b0000_0011; 133 => 0b1111_1111, 0b1111_1111, (0b0001_0100 << 2) | /* padding */ 0b0000_0011; 134 => 0b1111_1111, 0b1111_1111, (0b0001_0101 << 2) | /* padding */ 0b0000_0011; 136 => 0b1111_1111, 0b1111_1111, (0b0001_0110 << 2) | /* padding */ 0b0000_0011; 146 => 0b1111_1111, 
0b1111_1111, (0b0001_0111 << 2) | /* padding */ 0b0000_0011; 154 => 0b1111_1111, 0b1111_1111, (0b0001_1000 << 2) | /* padding */ 0b0000_0011; 156 => 0b1111_1111, 0b1111_1111, (0b0001_1001 << 2) | /* padding */ 0b0000_0011; 160 => 0b1111_1111, 0b1111_1111, (0b0001_1010 << 2) | /* padding */ 0b0000_0011; 163 => 0b1111_1111, 0b1111_1111, (0b0001_1011 << 2) | /* padding */ 0b0000_0011; 164 => 0b1111_1111, 0b1111_1111, (0b0001_1100 << 2) | /* padding */ 0b0000_0011; 169 => 0b1111_1111, 0b1111_1111, (0b0001_1101 << 2) | /* padding */ 0b0000_0011; 170 => 0b1111_1111, 0b1111_1111, (0b0001_1110 << 2) | /* padding */ 0b0000_0011; 173 => 0b1111_1111, 0b1111_1111, (0b0001_1111 << 2) | /* padding */ 0b0000_0011; 178 => 0b1111_1111, 0b1111_1111, (0b0010_0000 << 2) | /* padding */ 0b0000_0011; 181 => 0b1111_1111, 0b1111_1111, (0b0010_0001 << 2) | /* padding */ 0b0000_0011; 185 => 0b1111_1111, 0b1111_1111, (0b0010_0010 << 2) | /* padding */ 0b0000_0011; 186 => 0b1111_1111, 0b1111_1111, (0b0010_0011 << 2) | /* padding */ 0b0000_0011; 187 => 0b1111_1111, 0b1111_1111, (0b0010_0100 << 2) | /* padding */ 0b0000_0011; 189 => 0b1111_1111, 0b1111_1111, (0b0010_0101 << 2) | /* padding */ 0b0000_0011; 190 => 0b1111_1111, 0b1111_1111, (0b0010_0110 << 2) | /* padding */ 0b0000_0011; 196 => 0b1111_1111, 0b1111_1111, (0b0010_0111 << 2) | /* padding */ 0b0000_0011; 198 => 0b1111_1111, 0b1111_1111, (0b0010_1000 << 2) | /* padding */ 0b0000_0011; 228 => 0b1111_1111, 0b1111_1111, (0b0010_1001 << 2) | /* padding */ 0b0000_0011; 232 => 0b1111_1111, 0b1111_1111, (0b0010_1010 << 2) | /* padding */ 0b0000_0011; 233 => 0b1111_1111, 0b1111_1111, (0b0010_1011 << 2) | /* padding */ 0b0000_0011; 1 => 0b1111_1111, 0b1111_1111, (0b0101_1000 << 1) | /* padding */ 0b0000_0001; 135 => 0b1111_1111, 0b1111_1111, (0b0101_1001 << 1) | /* padding */ 0b0000_0001; 137 => 0b1111_1111, 0b1111_1111, (0b0101_1010 << 1) | /* padding */ 0b0000_0001; 138 => 0b1111_1111, 0b1111_1111, (0b0101_1011 << 1) | /* padding */ 0b0000_0001; 139 => 0b1111_1111, 0b1111_1111, (0b0101_1100 << 1) | /* padding */ 0b0000_0001; 140 => 0b1111_1111, 0b1111_1111, (0b0101_1101 << 1) | /* padding */ 0b0000_0001; 141 => 0b1111_1111, 0b1111_1111, (0b0101_1110 << 1) | /* padding */ 0b0000_0001; 143 => 0b1111_1111, 0b1111_1111, (0b0101_1111 << 1) | /* padding */ 0b0000_0001; 147 => 0b1111_1111, 0b1111_1111, (0b0110_0000 << 1) | /* padding */ 0b0000_0001; 149 => 0b1111_1111, 0b1111_1111, (0b0110_0001 << 1) | /* padding */ 0b0000_0001; 150 => 0b1111_1111, 0b1111_1111, (0b0110_0010 << 1) | /* padding */ 0b0000_0001; 151 => 0b1111_1111, 0b1111_1111, (0b0110_0011 << 1) | /* padding */ 0b0000_0001; 152 => 0b1111_1111, 0b1111_1111, (0b0110_0100 << 1) | /* padding */ 0b0000_0001; 155 => 0b1111_1111, 0b1111_1111, (0b0110_0101 << 1) | /* padding */ 0b0000_0001; 157 => 0b1111_1111, 0b1111_1111, (0b0110_0110 << 1) | /* padding */ 0b0000_0001; 158 => 0b1111_1111, 0b1111_1111, (0b0110_0111 << 1) | /* padding */ 0b0000_0001; 165 => 0b1111_1111, 0b1111_1111, (0b0110_1000 << 1) | /* padding */ 0b0000_0001; 166 => 0b1111_1111, 0b1111_1111, (0b0110_1001 << 1) | /* padding */ 0b0000_0001; 168 => 0b1111_1111, 0b1111_1111, (0b0110_1010 << 1) | /* padding */ 0b0000_0001; 174 => 0b1111_1111, 0b1111_1111, (0b0110_1011 << 1) | /* padding */ 0b0000_0001; 175 => 0b1111_1111, 0b1111_1111, (0b0110_1100 << 1) | /* padding */ 0b0000_0001; 180 => 0b1111_1111, 0b1111_1111, (0b0110_1101 << 1) | /* padding */ 0b0000_0001; 182 => 0b1111_1111, 0b1111_1111, (0b0110_1110 << 1) | /* padding */ 0b0000_0001; 183 => 
0b1111_1111, 0b1111_1111, (0b0110_1111 << 1) | /* padding */ 0b0000_0001; 188 => 0b1111_1111, 0b1111_1111, (0b0111_0000 << 1) | /* padding */ 0b0000_0001; 191 => 0b1111_1111, 0b1111_1111, (0b0111_0001 << 1) | /* padding */ 0b0000_0001; 197 => 0b1111_1111, 0b1111_1111, (0b0111_0010 << 1) | /* padding */ 0b0000_0001; 231 => 0b1111_1111, 0b1111_1111, (0b0111_0011 << 1) | /* padding */ 0b0000_0001; 239 => 0b1111_1111, 0b1111_1111, (0b0111_0100 << 1) | /* padding */ 0b0000_0001; 9 => 0b1111_1111, 0b1111_1111, 0b1110_1010; 142 => 0b1111_1111, 0b1111_1111, 0b1110_1011; 144 => 0b1111_1111, 0b1111_1111, 0b1110_1100; 145 => 0b1111_1111, 0b1111_1111, 0b1110_1101; 148 => 0b1111_1111, 0b1111_1111, 0b1110_1110; 159 => 0b1111_1111, 0b1111_1111, 0b1110_1111; 171 => 0b1111_1111, 0b1111_1111, 0b1111_0000; 206 => 0b1111_1111, 0b1111_1111, 0b1111_0001; 215 => 0b1111_1111, 0b1111_1111, 0b1111_0010; 225 => 0b1111_1111, 0b1111_1111, 0b1111_0011; 236 => 0b1111_1111, 0b1111_1111, 0b1111_0100; 237 => 0b1111_1111, 0b1111_1111, 0b1111_0101; 199 => 0b1111_1111, 0b1111_1111, 0b1111_0110, (0b0000_0000 << 7) | /* padding */ 0b0111_1111; 207 => 0b1111_1111, 0b1111_1111, 0b1111_0110, (0b0000_0001 << 7) | /* padding */ 0b0111_1111; 234 => 0b1111_1111, 0b1111_1111, 0b1111_0111, (0b0000_0000 << 7) | /* padding */ 0b0111_1111; 235 => 0b1111_1111, 0b1111_1111, 0b1111_0111, (0b0000_0001 << 7) | /* padding */ 0b0111_1111; 192 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; 193 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0001 << 6) | /* padding */ 0b0011_1111; 200 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0010 << 6) | /* padding */ 0b0011_1111; 201 => 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0011 << 6) | /* padding */ 0b0011_1111; 202 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; 205 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0001 << 6) | /* padding */ 0b0011_1111; 210 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0010 << 6) | /* padding */ 0b0011_1111; 213 => 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0011 << 6) | /* padding */ 0b0011_1111; 218 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; 219 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0001 << 6) | /* padding */ 0b0011_1111; 238 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0010 << 6) | /* padding */ 0b0011_1111; 240 => 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0011 << 6) | /* padding */ 0b0011_1111; 242 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0000 << 6) | /* padding */ 0b0011_1111; 243 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0001 << 6) | /* padding */ 0b0011_1111; 255 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0010 << 6) | /* padding */ 0b0011_1111; 203 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0110 << 5) | /* padding */ 0b0001_1111; 204 => 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0111 << 5) | /* padding */ 0b0001_1111; 211 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0000 << 5) | /* padding */ 0b0001_1111; 212 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0001 << 5) | /* padding */ 0b0001_1111; 214 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0010 << 5) | /* padding */ 0b0001_1111; 221 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0011 << 5) | /* padding */ 0b0001_1111; 222 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0100 << 5) | /* padding */ 0b0001_1111; 223 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0101 << 5) | /* padding */ 
0b0001_1111; 241 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0110 << 5) | /* padding */ 0b0001_1111; 244 => 0b1111_1111, 0b1111_1111, 0b1111_1100, (0b0000_0111 << 5) | /* padding */ 0b0001_1111; 245 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0000 << 5) | /* padding */ 0b0001_1111; 246 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0001 << 5) | /* padding */ 0b0001_1111; 247 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0010 << 5) | /* padding */ 0b0001_1111; 248 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0011 << 5) | /* padding */ 0b0001_1111; 250 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0100 << 5) | /* padding */ 0b0001_1111; 251 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0101 << 5) | /* padding */ 0b0001_1111; 252 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0110 << 5) | /* padding */ 0b0001_1111; 253 => 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0111 << 5) | /* padding */ 0b0001_1111; 254 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0000 << 5) | /* padding */ 0b0001_1111; 2 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0010 << 4) | /* padding */ 0b0000_1111; 3 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0011 << 4) | /* padding */ 0b0000_1111; 4 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0100 << 4) | /* padding */ 0b0000_1111; 5 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0101 << 4) | /* padding */ 0b0000_1111; 6 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0110 << 4) | /* padding */ 0b0000_1111; 7 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0111 << 4) | /* padding */ 0b0000_1111; 8 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1000 << 4) | /* padding */ 0b0000_1111; 11 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1001 << 4) | /* padding */ 0b0000_1111; 12 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1010 << 4) | /* padding */ 0b0000_1111; 14 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1011 << 4) | /* padding */ 0b0000_1111; 15 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) | /* padding */ 0b0000_1111; 16 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1101 << 4) | /* padding */ 0b0000_1111; 17 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1110 << 4) | /* padding */ 0b0000_1111; 18 => 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1111 << 4) | /* padding */ 0b0000_1111; 19 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0000 << 4) | /* padding */ 0b0000_1111; 20 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0001 << 4) | /* padding */ 0b0000_1111; 21 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0010 << 4) | /* padding */ 0b0000_1111; 23 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0011 << 4) | /* padding */ 0b0000_1111; 24 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0100 << 4) | /* padding */ 0b0000_1111; 25 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0101 << 4) | /* padding */ 0b0000_1111; 26 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0110 << 4) | /* padding */ 0b0000_1111; 27 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0111 << 4) | /* padding */ 0b0000_1111; 28 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1000 << 4) | /* padding */ 0b0000_1111; 29 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1001 << 4) | /* padding */ 0b0000_1111; 30 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1010 << 4) | /* padding */ 0b0000_1111; 31 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1011 << 4) | /* padding */ 0b0000_1111; 127 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1100 << 4) | /* padding */ 0b0000_1111; 
220 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1101 << 4) | /* padding */ 0b0000_1111; 249 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1110 << 4) | /* padding */ 0b0000_1111; 10 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0011_1100 << 2) | /* padding */ 0b0000_0011; 13 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0011_1101 << 2) | /* padding */ 0b0000_0011; 22 => 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0011_1110 << 2) | /* padding */ 0b0000_0011; ]; } /** * https://tools.ietf.org/html/rfc7541 * Appendix B. Huffman Code */ #[test] fn test_encode_all_code_joined() { let bytes = vec![ // 0 |11111111|11000 0b1111_1111, (0b0001_1000 << 3) // 1 |11111111|11111111|1011000 + 0b0000_0111, 0b1111_1111, 0b1111_1101, (0b0000_1000 << 4) // 2 |11111111|11111111|11111110|0010 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_0010, // 3 |11111111|11111111|11111110|0011 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0011 << 4) // 4 |11111111|11111111|11111110|0100 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_0100, // 5 |11111111|11111111|11111110|0101 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0101 << 4) // 6 |11111111|11111111|11111110|0110 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_0110, // 7 |11111111|11111111|11111110|0111 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0111 << 4) // 8 |11111111|11111111|11111110|1000 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_1000, // 9 |11111111|11111111|11101010 0b1111_1111, 0b1111_1111, 0b1110_1010, // 10 |11111111|11111111|11111111|111100 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0011_1100 << 2) // 11 |11111111|11111111|11111110|1001 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0001 << 6) // 12 |11111111|11111111|11111110|1010 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0010_1010 << 2) // 13 |11111111|11111111|11111111|111101 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_1101 << 4) // 14 |11111111|11111111|11111110|1011 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_1011, // 15 |11111111|11111111|11111110|1100 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) // 16 |11111111|11111111|11111110|1101 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_1101, // 17 |11111111|11111111|11111110|1110 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1110 << 4) // 18 |11111111|11111111|11111110|1111 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1110_1111, // 19 |11111111|11111111|11111111|0000 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0000 << 4) // 20 |11111111|11111111|11111111|0001 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1111_0001, // 21 |11111111|11111111|11111111|0010 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0010 << 4) // 22 |11111111|11111111|11111111|111110 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0010 << 6) // 23 |11111111|11111111|11111111|0011 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0011_0011 << 2) // 24 |11111111|11111111|11111111|0100 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0000 << 6) // 25 |11111111|11111111|11111111|0101 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0011_0101 << 2) // 26 |11111111|11111111|11111111|0110 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0010 << 6) // 27 |11111111|11111111|11111111|0111 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0011_0111 << 2) // 28 |11111111|11111111|11111111|1000 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0000 << 6) // 29 |11111111|11111111|11111111|1001 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0011_1001 << 2) // 30 |11111111|11111111|11111111|1010 + 
0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0010 << 6) // 31 |11111111|11111111|11111111|1011 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0011_1011 << 2) // 32 |010100 + 0b0000_0001, (0b0000_0100 << 4) // 33 -!- |11111110|00 + 0b0000_1111, (0b0011_1000 << 2) // 34 -;- |11111110|01 + 0b0000_0011, 0b1111_1001, // 35 -#- |11111111|1010 0b1111_1111, (0b0000_1010 << 4) // 36 -$- |11111111|11001 + 0b0000_1111, 0b1111_1100, (0b0000_0001 << 7) // 37 -%- |010101 + (0b0001_0101 << 1) // 38 -&- |11111000 + 0b0000_0001, (0b0111_1000 << 1) // 39 -'- |11111111|010 + 0b0000_0001, 0b1111_1110, (0b0000_0010 << 6) // 40 -(- |11111110|10 + 0b0011_1111, (0b0000_1010 << 4) // 41 -)- |11111110|11 + 0b0000_1111, (0b0011_1011 << 2) // 42 -*- |11111001 + 0b0000_0011, (0b0011_1001 << 2) // 43 -+- |11111111|011 + 0b0000_0011, 0b1111_1101, (0b0000_0001 << 7) // 44 -,- |11111010 + 0b0111_1101, (0b0000_0000 << 7) // 45 --- |010110 + (0b0001_0110 << 1) // 46 -.- |010111 + 0b0000_0000, (0b0001_0111 << 3) // 47 -/- |011000 + 0b0000_0011, (0b0000_0000 << 5) // 48 -0- |00000 + 0b0000_0000, // 49 -1- |00001 (0b0000_0001 << 3) // 50 -2- |00010 + 0b0000_0000, (0b0000_0010 << 6) // 51 -3- |011001 + 0b0001_1001, // 52 -4- |011010 (0b0001_1010 << 2) // 53 -5- |011011 + 0b0000_0001, (0b0000_1011 << 4) // 54 -6- |011100 + 0b0000_0111, (0b0000_0000 << 6) // 55 -7- |011101 + 0b0001_1101, // 56 -8- |011110 (0b0001_1110 << 2) // 57 -9- |011111 + 0b0000_0001, (0b0000_1111 << 4) // 58 -:- |1011100 + 0b0000_1011, (0b0000_0100 << 5) // 59 |11111011 + 0b0001_1111, (0b0000_0011 << 5) // 60 -<- |11111111|1111100 + 0b0001_1111, 0b1111_1111, (0b0000_0000 << 6) // 61 -=- |100000 + 0b0010_0000, // 62 ->- |11111111|1011 0b1111_1111, (0b0000_1011 << 4) // 63 -?- |11111111|00 + 0b0000_1111, (0b0011_1100 << 2) // 64 -@- |11111111|11010 + 0b0000_0011, 0b1111_1111, (0b0000_0010 << 5) // 65 -A- |100001 + 0b0001_0000, (0b0000_0001 << 7) // 66 -B- |1011101 + 0b0101_1101, // 67 -C- |1011110 (0b0101_1110 << 1) // 68 -D- |1011111 + 0b0000_0001, (0b0001_1111 << 2) // 69 -E- |1100000 + 0b0000_0011, (0b0000_0000 << 3) // 70 -F- |1100001 + 0b0000_0110, (0b0000_0001 << 4) // 71 -G- |1100010 + 0b0000_1100, (0b0000_0010 << 5) // 72 -H- |1100011 + 0b0001_1000, (0b0000_0011 << 6) // 73 -I- |1100100 + 0b0011_0010, (0b0000_0000 << 7) // 74 -J- |1100101 + 0b0110_0101, // 75 -K- |1100110 (0b0110_0110 << 1) // 76 -L- |1100111 + 0b0000_0001, (0b0010_0111 << 2) // 77 -M- |1101000 + 0b0000_0011, (0b0000_1000 << 3) // 78 -N- |1101001 + 0b0000_0110, (0b0000_1001 << 4) // 79 -O- |1101010 + 0b0000_1101, (0b0000_0010 << 5) // 80 -P- |1101011 + 0b0001_1010, (0b0000_0011 << 6) // 81 -Q- |1101100 + 0b0011_0110, (0b0000_0000 << 7) // 82 -R- |1101101 + 0b0110_1101, // 83 -S- |1101110 (0b0110_1110 << 1) // 84 -T- |1101111 + 0b0000_0001, (0b0010_1111 << 2) // 85 -U- |1110000 + 0b0000_0011, (0b0001_0000 << 3) // 86 -V- |1110001 + 0b0000_0111, (0b0000_0001 << 4) // 87 -W- |1110010 + 0b0000_1110, (0b0000_0010 << 5) // 88 -X- |11111100 + 0b0001_1111, (0b0000_0100 << 5) // 89 -Y- |1110011 + 0b0001_1100, (0b0000_0011 << 6) // 90 -Z- |11111101 + 0b0011_1111, (0b0000_0001 << 6) // 91 -[- |11111111|11011 + 0b0011_1111, (0b0111_1011 << 1) // 92 -\- |11111111|11111110|000 + 0b0000_0001, 0b1111_1111, 0b1111_1100, (0b0000_0000 << 6) // 93 -]- |11111111|11100 + 0b0011_1111, (0b0111_1100 << 1) // 94 -^- |11111111|111100 + 0b0000_0001, 0b1111_1111, (0b0001_1100 << 3) // 95 -_- |100010 + 0b0000_0100, (0b0000_0010 << 5) // 96 -`- |11111111|1111101 + 0b0001_1111, 0b1111_1111, (0b0000_0001 
<< 6) // 97 -a- |00011 + (0b0000_0011 << 1) // 98 -b- |100011 + 0b0000_0001, (0b0000_0011 << 3) // 99 -c- |00100 + 0b0000_0001, (0b0000_0000 << 6) // 100 -d- |100100 + 0b0010_0100, // 101 -e- |00101 (0b0000_0101 << 3) // 102 -f- |100101 + 0b0000_0100, (0b0000_0101 << 5) // 103 -g- |100110 + 0b0001_0011, (0b0000_0000 << 7) // 104 -h- |100111 + (0b0010_0111 << 1) // 105 -i- |00110 + 0b0000_0000, (0b0000_0110 << 4) // 106 -j- |1110100 + 0b0000_1110, (0b0000_0100 << 5) // 107 -k- |1110101 + 0b0001_1101, (0b0000_0001 << 6) // 108 -l- |101000 + 0b0010_1000, // 109 -m- |101001 (0b0010_1001 << 2) // 110 -n- |101010 + 0b0000_0010, (0b0000_1010 << 4) // 111 -o- |00111 + 0b0000_0011, (0b0000_0001 << 7) // 112 -p- |101011 + (0b0010_1011 << 1) // 113 -q- |1110110 + 0b0000_0001, (0b0011_0110 << 2) // 114 -r- |101100 + 0b0000_0010, (0b0000_1100 << 4) // 115 -s- |01000 + 0b0000_0100, (0b0000_0000 << 7) // 116 -t- |01001 + (0b0000_1001 << 2) // 117 -u- |101101 + 0b0000_0010, (0b0000_1101 << 4) // 118 -v- |1110111 + 0b0000_1110, (0b0000_0111 << 5) // 119 -w- |1111000 + 0b0001_1110, (0b0000_0000 << 6) // 120 -x- |1111001 + 0b0011_1100, (0b0000_0001 << 7) // 121 -y- |1111010 + 0b0111_1010, // 122 -z- |1111011 (0b0111_1011 << 1) // 123 -{- |11111111|1111110 + 0b0000_0001, 0b1111_1111, (0b0011_1110 << 2) // 124 -|- |11111111|100 + 0b0000_0011, 0b1111_1110, (0b0000_0000 << 7) // 125 -}- |11111111|111101 + 0b0111_1111, (0b0111_1101 << 1) // 126 -~- |11111111|11101 + 0b0000_0001, 0b1111_1111, (0b0000_1101 << 4) // 127 |11111111|11111111|11111111|1100 + 0b0000_1111, 0b1111_1111, 0b1111_1111, 0b1111_1100, // 128 |11111111|11111110|0110 0b1111_1111, 0b1111_1110, (0b0000_0110 << 4) // 129 |11111111|11111111|010010 + 0b0000_1111, 0b1111_1111, 0b1111_0100, (0b0000_0010 << 6) // 130 |11111111|11111110|0111 + 0b0011_1111, 0b1111_1111, (0b0010_0111 << 2) // 131 |11111111|11111110|1000 + 0b0000_0011, 0b1111_1111, 0b1111_1010, (0b0000_0000 << 6) // 132 |11111111|11111111|010011 + 0b0011_1111, 0b1111_1111, 0b1101_0011, // 133 |11111111|11111111|010100 0b1111_1111, 0b1111_1111, (0b0001_0100 << 2) // 134 |11111111|11111111|010101 + 0b0000_0011, 0b1111_1111, 0b1111_1101, (0b0000_0101 << 4) // 135 |11111111|11111111|1011001 + 0b0000_1111, 0b1111_1111, 0b1111_1011, (0b0000_0001 << 5) // 136 |11111111|11111111|010110 + 0b0001_1111, 0b1111_1111, 0b1110_1011, (0b0000_0000 << 7) // 137 |11111111|11111111|1011010 + 0b0111_1111, 0b1111_1111, 0b1101_1010, // 138 |11111111|11111111|1011011 0b1111_1111, 0b1111_1111, (0b0101_1011 << 1) // 139 |11111111|11111111|1011100 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0001_1100 << 2) // 140 |11111111|11111111|1011101 + 0b0000_0011, 0b1111_1111, 0b1111_1110, (0b0001_1101 << 3) // 141 |11111111|11111111|1011110 + 0b0000_0111, 0b1111_1111, 0b1111_1101, (0b0000_1110 << 4) // 142 |11111111|11111111|11101011 + 0b0000_1111, 0b1111_1111, 0b1111_1110, (0b0000_1011 << 4) // 143 |11111111|11111111|1011111 + 0b0000_1111, 0b1111_1111, 0b1111_1011, (0b0000_0111 << 5) // 144 |11111111|11111111|11101100 + 0b0001_1111, 0b1111_1111, 0b1111_1101, (0b0000_0100 << 5) // 145 |11111111|11111111|11101101 + 0b0001_1111, 0b1111_1111, 0b1111_1101, (0b0000_0101 << 5) // 146 |11111111|11111111|010111 + 0b0001_1111, 0b1111_1111, 0b1110_1011, (0b0000_0001 << 7) // 147 |11111111|11111111|1100000 + 0b0111_1111, 0b1111_1111, 0b1110_0000, // 148 |11111111|11111111|11101110 0b1111_1111, 0b1111_1111, 0b1110_1110, // 149 |11111111|11111111|1100001 0b1111_1111, 0b1111_1111, (0b0110_0001 << 1) // 150 |11111111|11111111|1100010 + 
0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0010_0010 << 2) // 151 |11111111|11111111|1100011 + 0b0000_0011, 0b1111_1111, 0b1111_1111, (0b0000_0011 << 3) // 152 |11111111|11111111|1100100 + 0b0000_0111, 0b1111_1111, 0b1111_1110, (0b0000_0100 << 4) // 153 |11111111|11111110|11100 + 0b0000_1111, 0b1111_1111, 0b1110_1110, (0b0000_0000 << 7) // 154 |11111111|11111111|011000 + 0b0111_1111, 0b1111_1111, (0b0101_1000 << 1) // 155 |11111111|11111111|1100101 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0010_0101 << 2) // 156 |11111111|11111111|011001 + 0b0000_0011, 0b1111_1111, 0b1111_1101, (0b0000_1001 << 4) // 157 |11111111|11111111|1100110 + 0b0000_1111, 0b1111_1111, 0b1111_1100, (0b0000_0110 << 5) // 158 |11111111|11111111|1100111 + 0b0001_1111, 0b1111_1111, 0b1111_1001, (0b0000_0011 << 6) // 159 |11111111|11111111|11101111 + 0b0011_1111, 0b1111_1111, 0b1111_1011, (0b0000_0011 << 6) // 160 |11111111|11111111|011010 + 0b0011_1111, 0b1111_1111, 0b1101_1010, // 161 |11111111|11111110|11101 0b1111_1111, 0b1111_1110, (0b0001_1101 << 3) // 162 |11111111|11111110|1001 + 0b0000_0111, 0b1111_1111, 0b1111_0100, (0b0000_0001 << 7) // 163 |11111111|11111111|011011 + 0b0111_1111, 0b1111_1111, (0b0101_1011 << 1) // 164 |11111111|11111111|011100 + 0b0000_0001, 0b1111_1111, 0b1111_1110, (0b0001_1100 << 3) // 165 |11111111|11111111|1101000 + 0b0000_0111, 0b1111_1111, 0b1111_1110, (0b0000_1000 << 4) // 166 |11111111|11111111|1101001 + 0b0000_1111, 0b1111_1111, 0b1111_1101, (0b0000_0001 << 5) // 167 |11111111|11111110|11110 + 0b0001_1111, 0b1111_1111, 0b1101_1110, // 168 |11111111|11111111|1101010 0b1111_1111, 0b1111_1111, (0b0110_1010 << 1) // 169 |11111111|11111111|011101 + 0b0000_0001, 0b1111_1111, 0b1111_1110, (0b0001_1101 << 3) // 170 |11111111|11111111|011110 + 0b0000_0111, 0b1111_1111, 0b1111_1011, (0b0000_0110 << 5) // 171 |11111111|11111111|11110000 + 0b0001_1111, 0b1111_1111, 0b1111_1110, (0b0000_0000 << 5) // 172 |11111111|11111110|11111 + 0b0001_1111, 0b1111_1111, 0b1101_1111, // 173 |11111111|11111111|011111 0b1111_1111, 0b1111_1111, (0b0001_1111 << 2) // 174 |11111111|11111111|1101011 + 0b0000_0011, 0b1111_1111, 0b1111_1111, (0b0000_1011 << 3) // 175 |11111111|11111111|1101100 + 0b0000_0111, 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) // 176 |11111111|11111111|00000 + 0b0000_1111, 0b1111_1111, 0b1111_0000, (0b0000_0000 << 7) // 177 |11111111|11111111|00001 + 0b0111_1111, 0b1111_1111, (0b0010_0001 << 2) // 178 |11111111|11111111|100000 + 0b0000_0011, 0b1111_1111, 0b1111_1110, (0b0000_0000 << 4) // 179 |11111111|11111111|00010 + 0b0000_1111, 0b1111_1111, 0b1111_0001, (0b0000_0000 << 7) // 180 |11111111|11111111|1101101 + 0b0111_1111, 0b1111_1111, 0b1110_1101, // 181 |11111111|11111111|100001 0b1111_1111, 0b1111_1111, (0b0010_0001 << 2) // 182 |11111111|11111111|1101110 + 0b0000_0011, 0b1111_1111, 0b1111_1111, (0b0000_1110 << 3) // 183 |11111111|11111111|1101111 + 0b0000_0111, 0b1111_1111, 0b1111_1110, (0b0000_1111 << 4) // 184 |11111111|11111110|1010 + 0b0000_1111, 0b1111_1111, 0b1110_1010, // 185 |11111111|11111111|100010 0b1111_1111, 0b1111_1111, (0b0010_0010 << 2) // 186 |11111111|11111111|100011 + 0b0000_0011, 0b1111_1111, 0b1111_1110, (0b0000_0011 << 4) // 187 |11111111|11111111|100100 + 0b0000_1111, 0b1111_1111, 0b1111_1001, (0b0000_0000 << 6) // 188 |11111111|11111111|1110000 + 0b0011_1111, 0b1111_1111, 0b1111_1000, (0b0000_0000 << 7) // 189 |11111111|11111111|100101 + 0b0111_1111, 0b1111_1111, (0b0110_0101 << 1) // 190 |11111111|11111111|100110 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 
(0b0000_0110 << 3) // 191 |11111111|11111111|1110001 + 0b0000_0111, 0b1111_1111, 0b1111_1111, (0b0000_0001 << 4) // 192 |11111111|11111111|11111000|00 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0010_0000 << 2) // 193 |11111111|11111111|11111000|01 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1110_0001, // 194 |11111111|11111110|1011 0b1111_1111, 0b1111_1110, (0b0000_1011 << 4) // 195 |11111111|11111110|001 + 0b0000_1111, 0b1111_1111, (0b0111_0001 << 1) // 196 |11111111|11111111|100111 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0000_0111 << 3) // 197 |11111111|11111111|1110010 + 0b0000_0111, 0b1111_1111, 0b1111_1111, (0b0000_0010 << 4) // 198 |11111111|11111111|101000 + 0b0000_1111, 0b1111_1111, 0b1111_1010, (0b0000_0000 << 6) // 199 |11111111|11111111|11110110|0 + 0b0011_1111, 0b1111_1111, 0b1111_1101, (0b0000_0100 << 5) // 200 |11111111|11111111|11111000|10 + 0b0001_1111, 0b1111_1111, 0b1111_1111, (0b0000_0010 << 3) // 201 |11111111|11111111|11111000|11 + 0b0000_0111, 0b1111_1111, 0b1111_1111, (0b0110_0011 << 1) // 202 |11111111|11111111|11111001|00 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_0010, (0b0000_0000 << 7) // 203 |11111111|11111111|11111011|110 + 0b0111_1111, 0b1111_1111, 0b1111_1101, (0b0000_1110 << 4) // 204 |11111111|11111111|11111011|111 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0101_1111 << 1) // 205 |11111111|11111111|11111001|01 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_0010, (0b0000_0001 << 7) // 206 |11111111|11111111|11110001 + 0b0111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0001 << 7) // 207 |11111111|11111111|11110110|1 + 0b0111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0001 << 6) // 208 |11111111|11111110|010 + 0b0011_1111, 0b1111_1111, (0b0001_0010 << 3) // 209 |11111111|11111111|00011 + 0b0000_0111, 0b1111_1111, 0b1111_1000, (0b0000_0011 << 6) // 210 |11111111|11111111|11111001|10 + 0b0011_1111, 0b1111_1111, 0b1111_1110, (0b0000_0110 << 4) // 211 |11111111|11111111|11111100|000 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0110_0000 << 1) // 212 |11111111|11111111|11111100|001 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_1000, (0b0000_0001 << 6) // 213 |11111111|11111111|11111001|11 + 0b0011_1111, 0b1111_1111, 0b1111_1110, (0b0000_0111 << 4) // 214 |11111111|11111111|11111100|010 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0110_0010 << 1) // 215 |11111111|11111111|11110010 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0111_0010 << 1) // 216 |11111111|11111111|00100 + 0b0000_0001, 0b1111_1111, 0b1111_1110, (0b0000_0100 << 4) // 217 |11111111|11111111|00101 + 0b0000_1111, 0b1111_1111, 0b1111_0010, (0b0000_0001 << 7) // 218 |11111111|11111111|11111010|00 + 0b0111_1111, 0b1111_1111, 0b1111_1101, (0b0000_0000 << 5) // 219 |11111111|11111111|11111010|01 + 0b0001_1111, 0b1111_1111, 0b1111_1111, (0b0000_1001 << 3) // 220 |11111111|11111111|11111111|1101 + 0b0000_0111, 0b1111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0001 << 7) // 221 |11111111|11111111|11111100|011 + 0b0111_1111, 0b1111_1111, 0b1111_1110, (0b0000_0011 << 4) // 222 |11111111|11111111|11111100|100 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0110_0100 << 1) // 223 |11111111|11111111|11111100|101 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0001 << 6) // 224 |11111111|11111110|1100 + 0b0011_1111, 0b1111_1111, (0b0010_1100 << 2) // 225 |11111111|11111111|11110011 + 0b0000_0011, 0b1111_1111, 0b1111_1111, (0b0011_0011 << 2) // 226 |11111111|11111110|1101 + 0b0000_0011, 0b1111_1111, 0b1111_1011, (0b0000_0001 << 6) // 227 |11111111|11111111|00110 + 0b0011_1111, 0b1111_1111, (0b0110_0110 << 
1) // 228 |11111111|11111111|101001 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0000_1001 << 3) // 229 |11111111|11111111|00111 + 0b0000_0111, 0b1111_1111, 0b1111_1001, (0b0000_0011 << 6) // 230 |11111111|11111111|01000 + 0b0011_1111, 0b1111_1111, (0b0110_1000 << 1) // 231 |11111111|11111111|1110011 + 0b0000_0001, 0b1111_1111, 0b1111_1111, (0b0011_0011 << 2) // 232 |11111111|11111111|101010 + 0b0000_0011, 0b1111_1111, 0b1111_1110, (0b0000_1010 << 4) // 233 |11111111|11111111|101011 + 0b0000_1111, 0b1111_1111, 0b1111_1010, (0b0000_0011 << 6) // 234 |11111111|11111111|11110111|0 + 0b0011_1111, 0b1111_1111, 0b1111_1101, (0b0000_0110 << 5) // 235 |11111111|11111111|11110111|1 + 0b0001_1111, 0b1111_1111, 0b1111_1110, (0b0000_1111 << 4) // 236 |11111111|11111111|11110100 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0000_0100 << 4) // 237 |11111111|11111111|11110101 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0000_0101 << 4) // 238 |11111111|11111111|11111010|10 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0010_1010 << 2) // 239 |11111111|11111111|1110100 + 0b0000_0011, 0b1111_1111, 0b1111_1111, (0b0001_0100 << 3) // 240 |11111111|11111111|11111010|11 + 0b0000_0111, 0b1111_1111, 0b1111_1111, (0b0110_1011 << 1) // 241 |11111111|11111111|11111100|110 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_1001, (0b0000_0010 << 6) // 242 |11111111|11111111|11111011|00 + 0b0011_1111, 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) // 243 |11111111|11111111|11111011|01 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0010_1101 << 2) // 244 |11111111|11111111|11111100|111 + 0b0000_0011, 0b1111_1111, 0b1111_1111, 0b1111_0011, (0b0000_0001 << 7) // 245 |11111111|11111111|11111101|000 + 0b0111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1000 << 4) // 246 |11111111|11111111|11111101|001 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0110_1001 << 1) // 247 |11111111|11111111|11111101|010 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_1010, (0b0000_0010 << 6) // 248 |11111111|11111111|11111101|011 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0000_1011 << 3) // 249 |11111111|11111111|11111111|1110 + 0b0000_0111, 0b1111_1111, 0b1111_1111, 0b1111_1111, (0b0000_0000 << 7) // 250 |11111111|11111111|11111101|100 + 0b0111_1111, 0b1111_1111, 0b1111_1110, (0b0000_1100 << 4) // 251 |11111111|11111111|11111101|101 + 0b0000_1111, 0b1111_1111, 0b1111_1111, (0b0110_1101 << 1) // 252 |11111111|11111111|11111101|110 + 0b0000_0001, 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0010 << 6) // 253 |11111111|11111111|11111101|111 + 0b0011_1111, 0b1111_1111, 0b1111_1111, (0b0000_1111 << 3) // 254 |11111111|11111111|11111110|000 + 0b0000_0111, 0b1111_1111, 0b1111_1111, 0b1111_0000, // 255 |11111111|11111111|11111011|10 0b1111_1111, 0b1111_1111, 0b1111_1011, (0b0000_0010 << 6) // end filler + 0b0011_1111, ]; let values: Vec = (0..=255).collect(); let res = values.hpack_encode(); assert_eq!(res, Ok(bytes)); } use super::super::HpackStringDecode; #[test] fn byte_count_exact_when_bit_count_multiple_of_8() { let encoded = vec![ 0x8c, 0x2d, 0x4b, 0x70, 0xdd, 0xf4, 0x5a, 0xbe, 0xfb, 0x40, 0x05, 0xdb, ]; let mut res = Vec::new(); for byte in encoded.hpack_decode() { res.push(byte.unwrap()); } let reencoded = res.hpack_encode(); assert_eq!(reencoded.unwrap().last(), Some(&0xdb)); } #[test] fn byte_() { let encoded = vec![ 0x55, 0x92, 0xbe, 0xff, 0x48, 0x36, 0xcb, 0x86, 0x37, 0x3d, 0x68, 0xca, 0xc9, 0x61, 0xce, 0xde, 0xdc, 0xe5, 0xfc, ]; let mut res = Vec::new(); for byte in encoded.hpack_decode() { res.push(byte.unwrap()); } let reencoded = res.hpack_encode(); 
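// Round-trip regression check: after decoding and re-encoding, the output should
// still end with the original final byte (0xfc) rather than gaining a spurious
// trailing filler byte.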
assert_eq!(reencoded.unwrap().last(), Some(&0xfc)); } }
h3-0.0.6/src/qpack/prefix_string/mod.rs000064400000000000000000000105161046102023000160420ustar 00000000000000mod bitwin;
mod decode;
mod encode;

use std::convert::TryInto;
use std::fmt;
use std::num::TryFromIntError;

use bytes::{Buf, BufMut};

pub use self::bitwin::BitWindow;
pub use self::{
    decode::{Error as HuffmanDecodingError, HpackStringDecode},
    encode::{Error as HuffmanEncodingError, HpackStringEncode},
};

use crate::proto::coding::BufMutExt;
use crate::qpack::prefix_int::{self, Error as IntegerError};

#[derive(Debug, PartialEq)]
pub enum Error {
    UnexpectedEnd,
    Integer(IntegerError),
    HuffmanDecoding(HuffmanDecodingError),
    HuffmanEncoding(HuffmanEncodingError),
    BufSize(TryFromIntError),
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::UnexpectedEnd => write!(f, "unexpected end"),
            Error::Integer(e) => write!(f, "could not parse integer: {}", e),
            Error::HuffmanDecoding(e) => write!(f, "Huffman decode failed: {:?}", e),
            Error::HuffmanEncoding(e) => write!(f, "Huffman encode failed: {:?}", e),
            Error::BufSize(_) => write!(f, "number in buffer wrong size"),
        }
    }
}

// Decode a prefixed string literal: the lowest returned flag bit is the Huffman
// flag ("H"); the prefix integer gives the payload length in octets.
pub fn decode<B: Buf>(size: u8, buf: &mut B) -> Result<Vec<u8>, Error> {
    let (flags, len) = prefix_int::decode(size - 1, buf)?;
    let len: usize = len.try_into()?;
    if buf.remaining() < len {
        return Err(Error::UnexpectedEnd);
    }
    let payload = buf.copy_to_bytes(len);
    let value = if flags & 1 == 0 {
        payload.into_iter().collect()
    } else {
        let mut decoded = Vec::new();
        for byte in payload.into_iter().collect::<Vec<u8>>().hpack_decode() {
            decoded.push(byte?);
        }
        decoded
    };
    Ok(value)
}

// Encode a prefixed string literal: the payload is always Huffman-encoded here,
// so the "H" flag bit is forced to 1 before the prefix-integer length.
pub fn encode<B: BufMut>(size: u8, flags: u8, value: &[u8], buf: &mut B) -> Result<(), Error> {
    let encoded = Vec::from(value).hpack_encode()?;
    prefix_int::encode(size - 1, flags << 1 | 1, encoded.len().try_into()?, buf);
    for byte in encoded {
        buf.write(byte);
    }
    Ok(())
}

impl From<HuffmanEncodingError> for Error {
    fn from(error: HuffmanEncodingError) -> Self {
        Error::HuffmanEncoding(error)
    }
}

impl From<IntegerError> for Error {
    fn from(error: IntegerError) -> Self {
        match error {
            IntegerError::UnexpectedEnd => Error::UnexpectedEnd,
            e => Error::Integer(e),
        }
    }
}

impl From<HuffmanDecodingError> for Error {
    fn from(error: HuffmanDecodingError) -> Self {
        Error::HuffmanDecoding(error)
    }
}

impl From<TryFromIntError> for Error {
    fn from(error: TryFromIntError) -> Self {
        Error::BufSize(error)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::io::Cursor;

    #[test]
    fn codec_6() {
        let mut buf = Vec::new();
        encode(6, 0b01, b"name without ref", &mut buf).unwrap();
        let mut read = Cursor::new(&buf);
        assert_eq!(
            &buf,
            &[0b0110_1100, 168, 116, 149, 79, 6, 76, 231, 181, 42, 88, 89, 127]
        );
        assert_eq!(decode(6, &mut read).unwrap(), b"name without ref");
    }

    #[test]
    fn codec_8() {
        let mut buf = Vec::new();
        encode(8, 0b01, b"name with ref", &mut buf).unwrap();
        let mut read = Cursor::new(&buf);
        assert_eq!(
            &buf,
            &[0b1000_1010, 168, 116, 149, 79, 6, 76, 234, 88, 89, 127]
        );
        assert_eq!(decode(8, &mut read).unwrap(), b"name with ref");
    }

    #[test]
    fn codec_8_empty() {
        let mut buf = Vec::new();
        encode(8, 0b01, b"", &mut buf).unwrap();
        let mut read = Cursor::new(&buf);
        assert_eq!(&buf, &[0b1000_0000]);
        assert_eq!(decode(8, &mut read).unwrap(), b"");
    }

    #[test]
    fn decode_non_huffman() {
        let buf = vec![0b0100_0011, b'b', b'a', b'r'];
        let mut read = Cursor::new(&buf);
        assert_eq!(decode(6, &mut read).unwrap(), b"bar");
    }

    #[test]
    fn decode_too_short() {
        let buf = vec![0b0100_0011, b'b', b'a'];
        let mut read = Cursor::new(&buf);
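        // The 5-bit length prefix above announces 3 payload octets, but only b"ba"
        // follows, so `decode` is expected to fail with `UnexpectedEnd` instead of
        // reading past the end of the buffer.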
assert_matches!(decode(6, &mut read), Err(Error::UnexpectedEnd)); } } h3-0.0.6/src/qpack/static_.rs000064400000000000000000000327011046102023000140260ustar 00000000000000use std::borrow::Cow; use super::field::HeaderField; #[derive(Debug, PartialEq)] pub enum Error { Unknown(usize), } pub struct StaticTable {} impl StaticTable { pub fn get(index: usize) -> Result<&'static HeaderField, Error> { match PREDEFINED_HEADERS.get(index) { Some(f) => Ok(f), None => Err(Error::Unknown(index)), } } pub fn find(field: &HeaderField) -> Option { match (&field.name[..], &field.value[..]) { (b":authority", b"") => Some(0), (b":path", b"/") => Some(1), (b"age", b"0") => Some(2), (b"content-disposition", b"") => Some(3), (b"content-length", b"0") => Some(4), (b"cookie", b"") => Some(5), (b"date", b"") => Some(6), (b"etag", b"") => Some(7), (b"if-modified-since", b"") => Some(8), (b"if-none-match", b"") => Some(9), (b"last-modified", b"") => Some(10), (b"link", b"") => Some(11), (b"location", b"") => Some(12), (b"referer", b"") => Some(13), (b"set-cookie", b"") => Some(14), (b":method", b"CONNECT") => Some(15), (b":method", b"DELETE") => Some(16), (b":method", b"GET") => Some(17), (b":method", b"HEAD") => Some(18), (b":method", b"OPTIONS") => Some(19), (b":method", b"POST") => Some(20), (b":method", b"PUT") => Some(21), (b":scheme", b"http") => Some(22), (b":scheme", b"https") => Some(23), (b":status", b"103") => Some(24), (b":status", b"200") => Some(25), (b":status", b"304") => Some(26), (b":status", b"404") => Some(27), (b":status", b"503") => Some(28), (b"accept", b"*/*") => Some(29), (b"accept", b"application/dns-message") => Some(30), (b"accept-encoding", b"gzip, deflate, br") => Some(31), (b"accept-ranges", b"bytes") => Some(32), (b"access-control-allow-headers", b"cache-control") => Some(33), (b"access-control-allow-headers", b"content-type") => Some(34), (b"access-control-allow-origin", b"*") => Some(35), (b"cache-control", b"max-age=0") => Some(36), (b"cache-control", b"max-age=2592000") => Some(37), (b"cache-control", b"max-age=604800") => Some(38), (b"cache-control", b"no-cache") => Some(39), (b"cache-control", b"no-store") => Some(40), (b"cache-control", b"public, max-age=31536000") => Some(41), (b"content-encoding", b"br") => Some(42), (b"content-encoding", b"gzip") => Some(43), (b"content-type", b"application/dns-message") => Some(44), (b"content-type", b"application/javascript") => Some(45), (b"content-type", b"application/json") => Some(46), (b"content-type", b"application/x-www-form-urlencoded") => Some(47), (b"content-type", b"image/gif") => Some(48), (b"content-type", b"image/jpeg") => Some(49), (b"content-type", b"image/png") => Some(50), (b"content-type", b"text/css") => Some(51), (b"content-type", b"text/html; charset=utf-8") => Some(52), (b"content-type", b"text/plain") => Some(53), (b"content-type", b"text/plain;charset=utf-8") => Some(54), (b"range", b"bytes=0-") => Some(55), (b"strict-transport-security", b"max-age=31536000") => Some(56), (b"strict-transport-security", b"max-age=31536000; includesubdomains") => Some(57), (b"strict-transport-security", b"max-age=31536000; includesubdomains; preload") => { Some(58) } (b"vary", b"accept-encoding") => Some(59), (b"vary", b"origin") => Some(60), (b"x-content-type-options", b"nosniff") => Some(61), (b"x-xss-protection", b"1; mode=block") => Some(62), (b":status", b"100") => Some(63), (b":status", b"204") => Some(64), (b":status", b"206") => Some(65), (b":status", b"302") => Some(66), (b":status", b"400") => Some(67), (b":status", b"403") 
=> Some(68), (b":status", b"421") => Some(69), (b":status", b"425") => Some(70), (b":status", b"500") => Some(71), (b"accept-language", b"") => Some(72), (b"access-control-allow-credentials", b"FALSE") => Some(73), (b"access-control-allow-credentials", b"TRUE") => Some(74), (b"access-control-allow-headers", b"*") => Some(75), (b"access-control-allow-methods", b"get") => Some(76), (b"access-control-allow-methods", b"get, post, options") => Some(77), (b"access-control-allow-methods", b"options") => Some(78), (b"access-control-expose-headers", b"content-length") => Some(79), (b"access-control-request-headers", b"content-type") => Some(80), (b"access-control-request-method", b"get") => Some(81), (b"access-control-request-method", b"post") => Some(82), (b"alt-svc", b"clear") => Some(83), (b"authorization", b"") => Some(84), ( b"content-security-policy", b"script-src 'none'; object-src 'none'; base-uri 'none'", ) => Some(85), (b"early-data", b"1") => Some(86), (b"expect-ct", b"") => Some(87), (b"forwarded", b"") => Some(88), (b"if-range", b"") => Some(89), (b"origin", b"") => Some(90), (b"purpose", b"prefetch") => Some(91), (b"server", b"") => Some(92), (b"timing-allow-origin", b"*") => Some(93), (b"upgrade-insecure-requests", b"1") => Some(94), (b"user-agent", b"") => Some(95), (b"x-forwarded-for", b"") => Some(96), (b"x-frame-options", b"deny") => Some(97), (b"x-frame-options", b"sameorigin") => Some(98), _ => None, } } pub fn find_name(name: &[u8]) -> Option { match name { b":authority" => Some(0), b":path" => Some(1), b"age" => Some(2), b"content-disposition" => Some(3), b"content-length" => Some(4), b"cookie" => Some(5), b"date" => Some(6), b"etag" => Some(7), b"if-modified-since" => Some(8), b"if-none-match" => Some(9), b"last-modified" => Some(10), b"link" => Some(11), b"location" => Some(12), b"referer" => Some(13), b"set-cookie" => Some(14), b":method" => Some(15), b":scheme" => Some(22), b":status" => Some(24), b"accept" => Some(29), b"accept-encoding" => Some(31), b"accept-ranges" => Some(32), b"access-control-allow-headers" => Some(33), b"access-control-allow-origin" => Some(35), b"cache-control" => Some(36), b"content-encoding" => Some(42), b"content-type" => Some(44), b"range" => Some(55), b"strict-transport-security" => Some(56), b"vary" => Some(59), b"x-content-type-options" => Some(61), b"x-xss-protection" => Some(62), b"accept-language" => Some(72), b"access-control-allow-credentials" => Some(73), b"access-control-allow-methods" => Some(76), b"access-control-expose-headers" => Some(79), b"access-control-request-headers" => Some(80), b"access-control-request-method" => Some(81), b"alt-svc" => Some(83), b"authorization" => Some(84), b"content-security-policy" => Some(85), b"early-data" => Some(86), b"expect-ct" => Some(87), b"forwarded" => Some(88), b"if-range" => Some(89), b"origin" => Some(90), b"purpose" => Some(91), b"server" => Some(92), b"timing-allow-origin" => Some(93), b"upgrade-insecure-requests" => Some(94), b"user-agent" => Some(95), b"x-forwarded-for" => Some(96), b"x-frame-options" => Some(97), _ => None, } } } macro_rules! 
decl_fields { [ $( ($key:expr, $value:expr) ),* ] => { [ $( HeaderField { name: Cow::Borrowed($key), value: Cow::Borrowed($value) }, )* ] } } const PREDEFINED_HEADERS: [HeaderField; 99] = decl_fields![ (b":authority", b""), (b":path", b"/"), (b"age", b"0"), (b"content-disposition", b""), (b"content-length", b"0"), (b"cookie", b""), (b"date", b""), (b"etag", b""), (b"if-modified-since", b""), (b"if-none-match", b""), (b"last-modified", b""), (b"link", b""), (b"location", b""), (b"referer", b""), (b"set-cookie", b""), (b":method", b"CONNECT"), (b":method", b"DELETE"), (b":method", b"GET"), (b":method", b"HEAD"), (b":method", b"OPTIONS"), (b":method", b"POST"), (b":method", b"PUT"), (b":scheme", b"http"), (b":scheme", b"https"), (b":status", b"103"), (b":status", b"200"), (b":status", b"304"), (b":status", b"404"), (b":status", b"503"), (b"accept", b"*/*"), (b"accept", b"application/dns-message"), (b"accept-encoding", b"gzip, deflate, br"), (b"accept-ranges", b"bytes"), (b"access-control-allow-headers", b"cache-control"), (b"access-control-allow-headers", b"content-type"), (b"access-control-allow-origin", b"*"), (b"cache-control", b"max-age=0"), (b"cache-control", b"max-age=2592000"), (b"cache-control", b"max-age=604800"), (b"cache-control", b"no-cache"), (b"cache-control", b"no-store"), (b"cache-control", b"public, max-age=31536000"), (b"content-encoding", b"br"), (b"content-encoding", b"gzip"), (b"content-type", b"application/dns-message"), (b"content-type", b"application/javascript"), (b"content-type", b"application/json"), (b"content-type", b"application/x-www-form-urlencoded"), (b"content-type", b"image/gif"), (b"content-type", b"image/jpeg"), (b"content-type", b"image/png"), (b"content-type", b"text/css"), (b"content-type", b"text/html; charset=utf-8"), (b"content-type", b"text/plain"), (b"content-type", b"text/plain;charset=utf-8"), (b"range", b"bytes=0-"), (b"strict-transport-security", b"max-age=31536000"), ( b"strict-transport-security", b"max-age=31536000; includesubdomains" ), ( b"strict-transport-security", b"max-age=31536000; includesubdomains; preload" ), (b"vary", b"accept-encoding"), (b"vary", b"origin"), (b"x-content-type-options", b"nosniff"), (b"x-xss-protection", b"1; mode=block"), (b":status", b"100"), (b":status", b"204"), (b":status", b"206"), (b":status", b"302"), (b":status", b"400"), (b":status", b"403"), (b":status", b"421"), (b":status", b"425"), (b":status", b"500"), (b"accept-language", b""), (b"access-control-allow-credentials", b"FALSE"), (b"access-control-allow-credentials", b"TRUE"), (b"access-control-allow-headers", b"*"), (b"access-control-allow-methods", b"get"), (b"access-control-allow-methods", b"get, post, options"), (b"access-control-allow-methods", b"options"), (b"access-control-expose-headers", b"content-length"), (b"access-control-request-headers", b"content-type"), (b"access-control-request-method", b"get"), (b"access-control-request-method", b"post"), (b"alt-svc", b"clear"), (b"authorization", b""), ( b"content-security-policy", b"script-src 'none'; object-src 'none'; base-uri 'none'" ), (b"early-data", b"1"), (b"expect-ct", b""), (b"forwarded", b""), (b"if-range", b""), (b"origin", b""), (b"purpose", b"prefetch"), (b"server", b""), (b"timing-allow-origin", b"*"), (b"upgrade-insecure-requests", b"1"), (b"user-agent", b""), (b"x-forwarded-for", b""), (b"x-frame-options", b"deny"), (b"x-frame-options", b"sameorigin") ]; #[cfg(test)] mod tests { use super::*; /** * https://www.rfc-editor.org/rfc/rfc9204.html#name-static-table * 3.1. 
Static Table * [...] * Note the QPACK static table is indexed from 0, whereas the HPACK * static table is indexed from 1. */ #[test] fn test_static_table_index_is_0_based() { assert_eq!(StaticTable::get(0), Ok(&HeaderField::new(":authority", ""))); } #[test] fn test_static_table_is_full() { assert_eq!(PREDEFINED_HEADERS.len(), 99); } #[test] fn test_static_table_can_get_field() { assert_eq!( StaticTable::get(98), Ok(&HeaderField::new("x-frame-options", "sameorigin")) ); } #[test] fn invalid_index() { assert_eq!(StaticTable::get(99), Err(Error::Unknown(99))); } #[test] fn find_by_name() { assert_eq!(StaticTable::find_name(b"last-modified"), Some(10usize)); assert_eq!(StaticTable::find_name(b"does-not-exist"), None); } #[test] fn find() { assert_eq!( StaticTable::find(&HeaderField::new(":method", "GET")), Some(17usize) ); assert_eq!(StaticTable::find(&HeaderField::new("foo", "bar")), None); } } h3-0.0.6/src/qpack/stream.rs000064400000000000000000000316731046102023000137020ustar 00000000000000use bytes::{Buf, BufMut}; use std::convert::TryInto; use super::{ parse_error::ParseError, prefix_int::{self, Error as IntError}, prefix_string::{self, Error as StringError}, }; // 4.3. Encoder Instructions pub enum EncoderInstruction { // 4.3.1. Set Dynamic Table Capacity // An encoder informs the decoder of a change to the dynamic table capacity. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | 1 | Capacity (5+) | // +---+---+---+-------------------+ DynamicTableSizeUpdate, // 4.3.2. Insert With Name Reference // An encoder adds an entry to the dynamic table where the field name // matches the field name of an entry stored in the static or the dynamic // table. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 1 | T | Name Index (6+) | // +---+---+-----------------------+ // | H | Value Length (7+) | // +---+---------------------------+ // | Value String (Length bytes) | // +-------------------------------+ InsertWithNameRef, // 4.3.3. Insert With Literal Name // An encoder adds an entry to the dynamic table where both the field name // and the field value are represented as string literals. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 1 | H | Name Length (5+) | // +---+---+---+-------------------+ // | Name String (Length bytes) | // +---+---------------------------+ // | H | Value Length (7+) | // +---+---------------------------+ // | Value String (Length bytes) | // +-------------------------------+ InsertWithoutNameRef, // 4.3.4. Duplicate // An encoder duplicates an existing entry in the dynamic table. 
// 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | 0 | Index (5+) | // +---+---+---+-------------------+ Duplicate, Unknown, } impl EncoderInstruction { pub fn decode(first: u8) -> Self { if first & 0b1000_0000 != 0 { EncoderInstruction::InsertWithNameRef } else if first & 0b0100_0000 == 0b0100_0000 { EncoderInstruction::InsertWithoutNameRef } else if first & 0b1110_0000 == 0 { EncoderInstruction::Duplicate } else if first & 0b0010_0000 == 0b0010_0000 { EncoderInstruction::DynamicTableSizeUpdate } else { EncoderInstruction::Unknown } } } #[derive(Debug, PartialEq)] pub enum InsertWithNameRef { Static { index: usize, value: Vec }, Dynamic { index: usize, value: Vec }, } impl InsertWithNameRef { pub fn new_static>>(index: usize, value: T) -> Self { InsertWithNameRef::Static { index, value: value.into(), } } pub fn new_dynamic>>(index: usize, value: T) -> Self { InsertWithNameRef::Dynamic { index, value: value.into(), } } pub fn decode(buf: &mut R) -> Result, ParseError> { let (flags, index) = match prefix_int::decode(6, buf) { Ok((f, x)) if f & 0b10 == 0b10 => (f, x), Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; let index: usize = index .try_into() .map_err(|_e| ParseError::Integer(crate::qpack::prefix_int::Error::Overflow))?; let value = match prefix_string::decode(8, buf) { Ok(x) => x, Err(StringError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; if flags & 0b01 == 0b01 { Ok(Some(InsertWithNameRef::new_static(index, value))) } else { Ok(Some(InsertWithNameRef::new_dynamic(index, value))) } } pub fn encode(&self, buf: &mut W) -> Result<(), prefix_string::Error> { match self { InsertWithNameRef::Static { index, value } => { prefix_int::encode(6, 0b11, *index as u64, buf); prefix_string::encode(8, 0, value, buf)?; } InsertWithNameRef::Dynamic { index, value } => { prefix_int::encode(6, 0b10, *index as u64, buf); prefix_string::encode(8, 0, value, buf)?; } } Ok(()) } } #[derive(Debug, PartialEq)] pub struct InsertWithoutNameRef { pub name: Vec, pub value: Vec, } impl InsertWithoutNameRef { pub fn new>>(name: T, value: T) -> Self { Self { name: name.into(), value: value.into(), } } pub fn decode(buf: &mut R) -> Result, ParseError> { let name = match prefix_string::decode(6, buf) { Ok(x) => x, Err(StringError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; let value = match prefix_string::decode(8, buf) { Ok(x) => x, Err(StringError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(Self::new(name, value))) } pub fn encode(&self, buf: &mut W) -> Result<(), prefix_string::Error> { prefix_string::encode(6, 0b01, &self.name, buf)?; prefix_string::encode(8, 0, &self.value, buf)?; Ok(()) } } #[derive(Debug, PartialEq)] pub struct Duplicate(pub usize); impl Duplicate { pub fn decode(buf: &mut R) -> Result, ParseError> { let index = match prefix_int::decode(5, buf) { Ok((0, x)) => { if x > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } x as usize } Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(Duplicate(index))) } pub fn encode(&self, buf: &mut W) { prefix_int::encode(5, 0, self.0 as u64, buf); } } #[derive(Debug, PartialEq)] pub struct DynamicTableSizeUpdate(pub usize); impl DynamicTableSizeUpdate { pub fn decode(buf: &mut R) -> Result, ParseError> { let size = 
match prefix_int::decode(5, buf) { Ok((0b001, x)) => { if x > (usize::MAX as u64) { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } x as usize } Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(DynamicTableSizeUpdate(size))) } pub fn encode(&self, buf: &mut W) { prefix_int::encode(5, 0b001, self.0 as u64, buf); } } // 4.4. Decoder Instructions // A decoder sends decoder instructions on the decoder stream to inform the encoder // about the processing of field sections and table updates to ensure consistency // of the dynamic table. #[derive(Debug, PartialEq)] pub enum DecoderInstruction { // 4.4.1. Section Acknowledgement // Acknowledge processing of an encoded field section whose declared Required // Insert Count is not zero. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 1 | Stream ID (7+) | // +---+---------------------------+ HeaderAck, // 4.4.2. Stream Cancellation // When a stream is reset or reading is abandoned. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 1 | Stream ID (6+) | // +---+---+-----------------------+ StreamCancel, // 4.4.3. Insert Count Increment // Increases the Known Received Count to the total number of dynamic table // insertions and duplications processed so far. // 0 1 2 3 4 5 6 7 // +---+---+---+---+---+---+---+---+ // | 0 | 0 | Increment (6+) | // +---+---+-----------------------+ InsertCountIncrement, Unknown, } impl DecoderInstruction { pub fn decode(first: u8) -> Self { if first & 0b1100_0000 == 0 { DecoderInstruction::InsertCountIncrement } else if first & 0b1000_0000 != 0 { DecoderInstruction::HeaderAck } else if first & 0b0100_0000 == 0b0100_0000 { DecoderInstruction::StreamCancel } else { DecoderInstruction::Unknown } } } #[derive(Debug, PartialEq)] pub struct InsertCountIncrement(pub u8); impl InsertCountIncrement { pub fn decode(buf: &mut R) -> Result, ParseError> { let insert_count = match prefix_int::decode(6, buf) { Ok((0b00, x)) => { if x > 64 { return Err(ParseError::Integer( crate::qpack::prefix_int::Error::Overflow, )); } x as u8 } Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(InsertCountIncrement(insert_count))) } pub fn encode(&self, buf: &mut W) { prefix_int::encode(6, 0b00, self.0 as u64, buf); } } #[derive(Debug, PartialEq)] pub struct HeaderAck(pub u64); impl HeaderAck { pub fn decode(buf: &mut R) -> Result, ParseError> { let stream_id = match prefix_int::decode(7, buf) { Ok((0b1, x)) => x, Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(HeaderAck(stream_id))) } pub fn encode(&self, buf: &mut W) { prefix_int::encode(7, 0b1, self.0, buf); } } #[derive(Debug, PartialEq)] pub struct StreamCancel(pub u64); impl StreamCancel { pub fn decode(buf: &mut R) -> Result, ParseError> { let stream_id = match prefix_int::decode(6, buf) { Ok((0b01, x)) => x, Ok((f, _)) => return Err(ParseError::InvalidPrefix(f)), Err(IntError::UnexpectedEnd) => return Ok(None), Err(e) => return Err(e.into()), }; Ok(Some(StreamCancel(stream_id))) } pub fn encode(&self, buf: &mut W) { prefix_int::encode(6, 0b01, self.0, buf); } } #[cfg(test)] mod test { use super::*; use std::io::Cursor; #[test] fn insert_with_name_ref() { let instruction = InsertWithNameRef::new_static(0, "value"); let mut buf = vec![]; 
instruction.encode(&mut buf).unwrap(); let mut read = Cursor::new(&buf); assert_eq!(InsertWithNameRef::decode(&mut read), Ok(Some(instruction))); } #[test] fn insert_without_name_ref() { let instruction = InsertWithoutNameRef::new("name", "value"); let mut buf = vec![]; instruction.encode(&mut buf).unwrap(); let mut read = Cursor::new(&buf); assert_eq!( InsertWithoutNameRef::decode(&mut read), Ok(Some(instruction)) ); } #[test] fn insert_duplicate() { let instruction = Duplicate(42); let mut buf = vec![]; instruction.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(Duplicate::decode(&mut read), Ok(Some(instruction))); } #[test] fn dynamic_table_size_update() { let instruction = DynamicTableSizeUpdate(42); let mut buf = vec![]; instruction.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!( DynamicTableSizeUpdate::decode(&mut read), Ok(Some(instruction)) ); } #[test] fn insert_count_increment() { let instruction = InsertCountIncrement(42); let mut buf = vec![]; instruction.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!( InsertCountIncrement::decode(&mut read), Ok(Some(instruction)) ); } #[test] fn header_ack() { let instruction = HeaderAck(42); let mut buf = vec![]; instruction.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(HeaderAck::decode(&mut read), Ok(Some(instruction))); } #[test] fn stream_cancel() { let instruction = StreamCancel(42); let mut buf = vec![]; instruction.encode(&mut buf); let mut read = Cursor::new(&buf); assert_eq!(StreamCancel::decode(&mut read), Ok(Some(instruction))); } } h3-0.0.6/src/qpack/tests.rs000064400000000000000000000110111046102023000135310ustar 00000000000000use crate::qpack::decoder::Decoder; use crate::qpack::encoder::Encoder; use crate::qpack::{dynamic::DynamicTable, Decoded, DecoderError, HeaderField}; use std::io::Cursor; pub mod helpers { use crate::qpack::{dynamic::DynamicTable, HeaderField}; pub const TABLE_SIZE: usize = 4096; pub fn build_table() -> DynamicTable { let mut table = DynamicTable::new(); table.set_max_size(TABLE_SIZE).unwrap(); table.set_max_blocked(100).unwrap(); table } pub fn build_table_with_size(n_field: usize) -> DynamicTable { let mut table = DynamicTable::new(); table.set_max_size(TABLE_SIZE).unwrap(); table.set_max_blocked(100).unwrap(); for i in 0..n_field { table .put(HeaderField::new(format!("foo{}", i + 1), "bar")) .unwrap(); } table } } #[test] fn codec_basic_get() { let mut encoder = Encoder::default(); let mut decoder = Decoder::from(DynamicTable::new()); let mut block_buf = vec![]; let mut enc_buf = vec![]; let mut dec_buf = vec![]; let header = vec![ HeaderField::new(":method", "GET"), HeaderField::new(":path", "/"), HeaderField::new("foo", "bar"), ]; encoder .encode(42, &mut block_buf, &mut enc_buf, header.clone()) .unwrap(); let mut enc_cur = Cursor::new(&mut enc_buf); decoder.on_encoder_recv(&mut enc_cur, &mut dec_buf).unwrap(); let mut block_cur = Cursor::new(&mut block_buf); let Decoded { fields, .. 
} = decoder.decode_header(&mut block_cur).unwrap(); assert_eq!(fields, header); let mut dec_cur = Cursor::new(&mut dec_buf); encoder.on_decoder_recv(&mut dec_cur).unwrap(); } const TABLE_SIZE: usize = 4096; #[test] fn blocked_header() { let mut enc_table = DynamicTable::new(); enc_table.set_max_size(TABLE_SIZE).unwrap(); enc_table.set_max_blocked(100).unwrap(); let mut encoder = Encoder::from(enc_table); let mut dec_table = DynamicTable::new(); dec_table.set_max_size(TABLE_SIZE).unwrap(); dec_table.set_max_blocked(100).unwrap(); let decoder = Decoder::from(dec_table); let mut block_buf = vec![]; let mut enc_buf = vec![]; encoder .encode( 42, &mut block_buf, &mut enc_buf, &[HeaderField::new("foo", "bar")], ) .unwrap(); let mut block_cur = Cursor::new(&mut block_buf); assert_eq!( decoder.decode_header(&mut block_cur), Err(DecoderError::MissingRefs(1)) ); } #[test] fn codec_table_size_0() { let mut enc_table = DynamicTable::new(); let mut dec_table = DynamicTable::new(); let mut block_buf = vec![]; let mut enc_buf = vec![]; let mut dec_buf = vec![]; let header = vec![ HeaderField::new(":method", "GET"), HeaderField::new(":path", "/"), HeaderField::new("foo", "bar"), ]; dec_table.set_max_size(0).unwrap(); enc_table.set_max_size(0).unwrap(); let mut encoder = Encoder::from(enc_table); let mut decoder = Decoder::from(dec_table); encoder .encode(42, &mut block_buf, &mut enc_buf, header.clone()) .unwrap(); let mut enc_cur = Cursor::new(&mut enc_buf); decoder.on_encoder_recv(&mut enc_cur, &mut dec_buf).unwrap(); let mut block_cur = Cursor::new(&mut block_buf); let Decoded { fields, .. } = decoder.decode_header(&mut block_cur).unwrap(); assert_eq!(fields, header); let mut dec_cur = Cursor::new(&mut dec_buf); encoder.on_decoder_recv(&mut dec_cur).unwrap(); } #[test] fn codec_table_full() { let mut enc_table = DynamicTable::new(); let mut dec_table = DynamicTable::new(); let mut block_buf = vec![]; let mut enc_buf = vec![]; let mut dec_buf = vec![]; let header = vec![ HeaderField::new("foo", "bar"), HeaderField::new("foo1", "bar1"), ]; dec_table.set_max_size(42).unwrap(); enc_table.set_max_size(42).unwrap(); let mut encoder = Encoder::from(enc_table); let mut decoder = Decoder::from(dec_table); encoder .encode(42, &mut block_buf, &mut enc_buf, header.clone()) .unwrap(); let mut enc_cur = Cursor::new(&mut enc_buf); let mut block_cur = Cursor::new(&mut block_buf); decoder.on_encoder_recv(&mut enc_cur, &mut dec_buf).unwrap(); let Decoded { fields, .. } = decoder.decode_header(&mut block_cur).unwrap(); assert_eq!(fields, header); let mut dec_cur = Cursor::new(&mut dec_buf); encoder.on_decoder_recv(&mut dec_cur).unwrap(); } h3-0.0.6/src/qpack/vas.rs000064400000000000000000000201661046102023000131730ustar 00000000000000/** * https://www.rfc-editor.org/rfc/rfc9204.html#name-absolute-indexing * https://www.rfc-editor.org/rfc/rfc9204.html#name-relative-indexing * https://www.rfc-editor.org/rfc/rfc9204.html#name-post-base-indexing */ /* * # Virtually infinite address space mapper. * * It can be described as an infinitive growable list, with a visibility * window that can only move in the direction of insertion. * * Origin Visible window * /\ /===========^===========\ * ++++-------+ - + - + - + - + - + - + * |||| | | | | | | | ==> Grow direction * ++++-------+ - + - + - + - + - + - + * \================v==================/ * Full Virtual Space * * * QPACK indexing is 1-based for absolute index, and 0-based for relative's. * Container (ex: list) indexing is 0-based. 
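 *
 * (Concretely, `add()` advances `inserted`, `drop()` advances `dropped`, and
 * `delta = inserted - dropped` is the number of entries currently addressable;
 * the window only ever slides forward.)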
* * * # Basics * * inserted: number of insertion * dropped : number of drop * delta : count of available elements * * abs: absolute index * rel: relative index * pos: real index in memory container * pst: post-base relative index (only with base index) * * first oldest latest * element insertion insertion * (not available available * available) | | * | | | * v v v * + - +------+ - + - + - + - + - + - + inserted: 21 * | a | | p | q | r | s | t | u | dropped: 15 * + - +------+ - + - + - + - + - + - + delta: 21 - 15: 6 * ^ ^ ^ * | | | * abs:- abs:16 abs:21 * rel:- rel:5 rel:0 * pos:- pos:0 pos:6 * * * # Base index * A base index can arbitrary shift the relative index. * The base index itself is an absolute index. * * base index: 17 * | * v * + - +------+ - + - + - + - + - + - + inserted: 21 * | a | | p | q | r | s | t | u | dropped: 15 * + - +------+ - + - + - + - + - + - + delta: 21 - 15: 6 * ^ ^ ^ ^ * | | | | * abs:- abs:16 abs:18 abs:21 * rel:- rel:2 rel:0 rel:- * pst:- pst:- pst:- pst:2 * pos:- pos:0 pos:2 pos:6 */ pub type RelativeIndex = usize; pub type AbsoluteIndex = usize; #[derive(Debug, PartialEq)] pub enum Error { RelativeIndex(usize), PostbaseIndex(usize), Index(usize), } #[derive(Debug, Default)] pub struct VirtualAddressSpace { inserted: usize, dropped: usize, delta: usize, } impl VirtualAddressSpace { pub fn add(&mut self) -> AbsoluteIndex { self.inserted += 1; self.delta += 1; self.inserted } pub fn drop(&mut self) { self.dropped += 1; self.delta -= 1; } pub fn relative(&self, index: RelativeIndex) -> Result { if self.inserted < index || self.delta == 0 || self.inserted - index <= self.dropped { Err(Error::RelativeIndex(index)) } else { Ok(self.inserted - self.dropped - index - 1) } } pub fn evicted(&self, index: AbsoluteIndex) -> bool { index != 0 && index <= self.dropped } pub fn relative_base(&self, base: usize, index: RelativeIndex) -> Result { if self.delta == 0 || index > base || base - index <= self.dropped { Err(Error::RelativeIndex(index)) } else { Ok(base - self.dropped - index - 1) } } pub fn post_base(&self, base: usize, index: RelativeIndex) -> Result { if self.delta == 0 || base + index >= self.inserted || base + index < self.dropped { Err(Error::PostbaseIndex(index)) } else { Ok(base + index - self.dropped) } } pub fn index(&self, index: usize) -> Result { if index >= self.delta { Err(Error::Index(index)) } else { Ok(index + self.dropped + 1) } } pub fn largest_ref(&self) -> usize { self.inserted - self.dropped } pub fn total_inserted(&self) -> usize { self.inserted } } #[cfg(test)] mod tests { use super::*; use proptest::proptest; #[test] fn test_no_relative_index_when_empty() { let vas = VirtualAddressSpace::default(); let res = vas.relative_base(0, 0); assert_eq!(res, Err(Error::RelativeIndex(0))); } #[test] fn test_relative_underflow_protected() { let mut vas = VirtualAddressSpace::default(); vas.add(); assert_eq!(vas.relative(2), Err(Error::RelativeIndex(2))); } proptest! 
{ #[test] fn test_first_insertion_without_drop( ref count in 1..2200usize ) { let mut vas = VirtualAddressSpace::default(); vas.add(); (1..*count).for_each(|_| { vas.add(); }); assert_eq!(vas.relative_base(*count, count - 1), Ok(0), "{:?}", vas); } #[test] fn test_first_insertion_with_drop( ref count in 2..2200usize ) { let mut vas = VirtualAddressSpace::default(); vas.add(); (1..*count).for_each(|_| { vas.add(); }); (0..*count - 1).for_each(|_| vas.drop()); assert_eq!(vas.relative_base(*count, count - 1), Err(Error::RelativeIndex(count - 1)), "{:?}", vas); } #[test] fn test_last_insertion_without_drop( ref count in 1..2200usize ) { let mut vas = VirtualAddressSpace::default(); (1..*count).for_each(|_| { vas.add(); }); vas.add(); assert_eq!(vas.relative_base(*count, 0), Ok(count -1), "{:?}", vas); } #[test] fn test_last_insertion_with_drop( ref count in 2..2200usize ) { let mut vas = VirtualAddressSpace::default(); (0..*count - 1).for_each(|_| { vas.add(); }); vas.add(); (0..*count - 1).for_each(|_| { vas.drop(); }); assert_eq!(vas.relative_base(*count, 0), Ok(0), "{:?}", vas); } } #[test] fn test_post_base_index() { /* * Base index: D * Target value: B * * VAS: ]GFEDCBA] * abs: 1234567 * rel: 3210--- * pst: ----012 * pos: 0123456 */ let mut vas = VirtualAddressSpace::default(); (0..7).for_each(|_| { vas.add(); }); assert_eq!(vas.post_base(4, 1), Ok(5)); } #[test] fn largest_ref() { let mut vas = VirtualAddressSpace::default(); (0..7).for_each(|_| { vas.add(); }); assert_eq!(vas.largest_ref(), 7); } #[test] fn relative() { let mut vas = VirtualAddressSpace::default(); (0..7).for_each(|_| { vas.add(); }); assert_eq!(vas.relative(0), Ok(6)); assert_eq!(vas.relative(1), Ok(5)); assert_eq!(vas.relative(6), Ok(0)); assert_eq!(vas.relative(7), Err(Error::RelativeIndex(7))); } #[test] fn absolute_from_real_index() { let mut vas = VirtualAddressSpace::default(); assert_eq!(vas.index(0), Err(Error::Index(0))); vas.add(); assert_eq!(vas.index(0), Ok(1)); vas.add(); vas.drop(); assert_eq!(vas.index(0), Ok(2)); vas.drop(); assert_eq!(vas.index(0), Err(Error::Index(0))); vas.add(); vas.add(); assert_eq!(vas.index(0), Ok(3)); assert_eq!(vas.index(1), Ok(4)); assert_eq!(vas.index(2), Err(Error::Index(2))); } #[test] fn evicted() { let mut vas = VirtualAddressSpace::default(); assert!(!vas.evicted(0)); assert!(!vas.evicted(1)); vas.add(); vas.add(); assert!(!vas.evicted(1)); vas.drop(); assert!(!vas.evicted(0)); assert!(vas.evicted(1)); assert!(!vas.evicted(2)); vas.drop(); assert!(vas.evicted(2)); } } h3-0.0.6/src/quic.rs000064400000000000000000000136611046102023000122460ustar 00000000000000//! QUIC Transport traits //! //! This module includes traits and types meant to allow being generic over any //! QUIC implementation. use std::task::{self, Poll}; use bytes::Buf; use crate::ext::Datagram; pub use crate::proto::stream::{InvalidStreamId, StreamId}; pub use crate::stream::WriteBuf; // Unresolved questions: // // - Should the `poll_` methods be `Pin<&mut Self>`? /// Trait that represent an error from the transport layer pub trait Error: std::error::Error + Send + Sync { /// Check if the current error is a transport timeout fn is_timeout(&self) -> bool; /// Get the QUIC error code from connection close or stream stop fn err_code(&self) -> Option; } impl<'a, E: Error + 'a> From for Box { fn from(err: E) -> Box { Box::new(err) } } /// Trait representing a QUIC connection. 
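///
/// A minimal sketch of how a driver might accept incoming request streams
/// through this trait. It is illustrative only: the transport `conn` is assumed
/// to come from a QUIC backend crate (for example `h3-quinn`), not from `h3`
/// itself.
///
/// ```ignore
/// use std::future::poll_fn;
///
/// async fn accept_bidi_streams<C, B>(mut conn: C)
/// where
///     C: h3::quic::Connection<B>,
///     B: bytes::Buf,
/// {
///     // `Ok(None)` means the connection is closing or closed, which ends the loop.
///     while let Ok(Some(_stream)) = poll_fn(|cx| conn.poll_accept_bidi(cx)).await {
///         // hand `_stream` off to request handling here
///     }
/// }
/// ```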
pub trait Connection: OpenStreams { /// The type produced by `poll_accept_recv()` type RecvStream: RecvStream; /// A producer of outgoing Unidirectional and Bidirectional streams. type OpenStreams: OpenStreams; /// Error type yielded by these trait methods type AcceptError: Into>; /// Accept an incoming unidirectional stream /// /// Returning `None` implies the connection is closing or closed. fn poll_accept_recv( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Self::AcceptError>>; /// Accept an incoming bidirectional stream /// /// Returning `None` implies the connection is closing or closed. fn poll_accept_bidi( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Self::AcceptError>>; /// Get an object to open outgoing streams. fn opener(&self) -> Self::OpenStreams; } /// Extends the `Connection` trait for sending datagrams /// /// See: pub trait SendDatagramExt { /// The error type that can occur when sending a datagram type Error: Into>; /// Send a datagram fn send_datagram(&mut self, data: Datagram) -> Result<(), Self::Error>; } /// Extends the `Connection` trait for receiving datagrams /// /// See: pub trait RecvDatagramExt { /// The type of `Buf` for *raw* datagrams (without the stream_id decoded) type Buf: Buf; /// The error type that can occur when receiving a datagram type Error: Into>; /// Poll the connection for incoming datagrams. fn poll_accept_datagram( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Self::Error>>; } /// Trait for opening outgoing streams pub trait OpenStreams { /// The type produced by `poll_open_bidi()` type BidiStream: SendStream + RecvStream; /// The type produced by `poll_open_send()` type SendStream: SendStream; /// Error type yielded by these trait methods type OpenError: Into>; /// Poll the connection to create a new bidirectional stream. fn poll_open_bidi( &mut self, cx: &mut task::Context<'_>, ) -> Poll>; /// Poll the connection to create a new unidirectional stream. fn poll_open_send( &mut self, cx: &mut task::Context<'_>, ) -> Poll>; /// Close the connection immediately fn close(&mut self, code: crate::error::Code, reason: &[u8]); } /// A trait describing the "send" actions of a QUIC stream. pub trait SendStream { /// The error type returned by fallible send methods. type Error: Into>; /// Polls if the stream can send more data. fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; /// Send more data on the stream. fn send_data>>(&mut self, data: T) -> Result<(), Self::Error>; /// Poll to finish the sending side of the stream. fn poll_finish(&mut self, cx: &mut task::Context<'_>) -> Poll>; /// Send a QUIC reset code. fn reset(&mut self, reset_code: u64); /// Get QUIC send stream id fn send_id(&self) -> StreamId; } /// Allows sending unframed pure bytes to a stream. Similar to [`AsyncWrite`](https://docs.rs/tokio/latest/tokio/io/trait.AsyncWrite.html) pub trait SendStreamUnframed: SendStream { /// Attempts to write data into the stream. /// /// Returns the number of bytes written. /// /// `buf` is advanced by the number of bytes written. fn poll_send( &mut self, cx: &mut task::Context<'_>, buf: &mut D, ) -> Poll>; } /// A trait describing the "receive" actions of a QUIC stream. pub trait RecvStream { /// The type of `Buf` for data received on this stream. type Buf: Buf; /// The error type that can occur when receiving data. type Error: Into>; /// Poll the stream for more data. /// /// When the receiving side will no longer receive more data (such as because /// the peer closed their sending side), this should return `None`. 
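    ///
    /// A rough sketch of draining a stream through this method (assumes an async
    /// context and a value `recv` implementing this trait):
    ///
    /// ```ignore
    /// use std::future::poll_fn;
    /// use bytes::Buf;
    ///
    /// while let Ok(Some(mut chunk)) = poll_fn(|cx| recv.poll_data(cx)).await {
    ///     // `chunk` is any `Buf`; copy it out or consume it in place.
    ///     let _bytes = chunk.copy_to_bytes(chunk.remaining());
    /// }
    /// // `Ok(None)` means the peer has finished its sending side.
    /// ```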
fn poll_data( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Self::Error>>; /// Send a `STOP_SENDING` QUIC code. fn stop_sending(&mut self, error_code: u64); /// Get QUIC send stream id fn recv_id(&self) -> StreamId; } /// Optional trait to allow "splitting" a bidirectional stream into two sides. pub trait BidiStream: SendStream + RecvStream { /// The type for the send half. type SendStream: SendStream; /// The type for the receive half. type RecvStream: RecvStream; /// Split this stream into two halves. fn split(self) -> (Self::SendStream, Self::RecvStream); } h3-0.0.6/src/server/builder.rs000064400000000000000000000104601046102023000142330ustar 00000000000000//! Builder of HTTP/3 server connections. //! //! Use this struct to create a new [`Connection`]. //! Settings for the [`Connection`] can be provided here. //! //! # Example //! //! ```rust //! fn doc(conn: C) //! where //! C: h3::quic::Connection, //! B: bytes::Buf, //! { //! let mut server_builder = h3::server::builder(); //! // Set the maximum header size //! server_builder.max_field_section_size(1000); //! // do not send grease types //! server_builder.send_grease(false); //! // Build the Connection //! let mut h3_conn = server_builder.build(conn); //! } //! ``` use std::{collections::HashSet, result::Result}; use bytes::Buf; use tokio::sync::mpsc; use crate::{ config::Config, connection::{ConnectionInner, SharedStateRef}, error::Error, quic::{self}, }; use super::connection::Connection; /// Create a builder of HTTP/3 server connections /// /// This function creates a [`Builder`] that carries settings that can /// be shared between server connections. pub fn builder() -> Builder { Builder::new() } /// Builder of HTTP/3 server connections. pub struct Builder { pub(crate) config: Config, } impl Builder { /// Creates a new [`Builder`] with default settings. pub(super) fn new() -> Self { Builder { config: Default::default(), } } #[cfg(test)] pub fn send_settings(&mut self, value: bool) -> &mut Self { self.config.send_settings = value; self } /// Set the maximum header size this client is willing to accept /// /// See [header size constraints] section of the specification for details. /// /// [header size constraints]: https://www.rfc-editor.org/rfc/rfc9114.html#name-header-size-constraints pub fn max_field_section_size(&mut self, value: u64) -> &mut Self { self.config.settings.max_field_section_size = value; self } /// Send grease values to the Client. /// See [setting](https://www.rfc-editor.org/rfc/rfc9114.html#settings-parameters), [frame](https://www.rfc-editor.org/rfc/rfc9114.html#frame-reserved) and [stream](https://www.rfc-editor.org/rfc/rfc9114.html#stream-grease) for more information. #[inline] pub fn send_grease(&mut self, value: bool) -> &mut Self { self.config.send_grease = value; self } /// Indicates to the peer that WebTransport is supported. /// /// See: [establishing a webtransport session](https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3/#section-3.1) /// /// /// **Server**: /// Supporting for webtransport also requires setting `enable_connect` `enable_datagram` /// and `max_webtransport_sessions`. 
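    ///
    /// A sketch of a WebTransport-capable server configuration with this builder
    /// (the particular values are illustrative, not defaults):
    ///
    /// ```ignore
    /// let mut builder = h3::server::builder();
    /// builder
    ///     .enable_webtransport(true)
    ///     .enable_connect(true)
    ///     .enable_datagram(true)
    ///     .max_webtransport_sessions(1)
    ///     .send_grease(true);
    /// ```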
#[inline] pub fn enable_webtransport(&mut self, value: bool) -> &mut Self { self.config.settings.enable_webtransport = value; self } /// Enables the CONNECT protocol pub fn enable_connect(&mut self, value: bool) -> &mut Self { self.config.settings.enable_extended_connect = value; self } /// Limits the maximum number of WebTransport sessions pub fn max_webtransport_sessions(&mut self, value: u64) -> &mut Self { self.config.settings.max_webtransport_sessions = value; self } /// Indicates that the client or server supports HTTP/3 datagrams /// /// See: pub fn enable_datagram(&mut self, value: bool) -> &mut Self { self.config.settings.enable_datagram = value; self } } impl Builder { /// Build an HTTP/3 connection from a QUIC connection /// /// This method creates a [`Connection`] instance with the settings in the [`Builder`]. pub async fn build(&self, conn: C) -> Result, Error> where C: quic::Connection, B: Buf, { let (sender, receiver) = mpsc::unbounded_channel(); Ok(Connection { inner: ConnectionInner::new(conn, SharedStateRef::default(), self.config).await?, max_field_section_size: self.config.settings.max_field_section_size, request_end_send: sender, request_end_recv: receiver, ongoing_streams: HashSet::new(), sent_closing: None, recv_closing: None, last_accepted_stream: None, }) } } h3-0.0.6/src/server/connection.rs000064400000000000000000000443541046102023000147550ustar 00000000000000//! HTTP/3 server connection //! //! The [`Connection`] struct manages a connection from the side of the HTTP/3 server use std::{ collections::HashSet, marker::PhantomData, option::Option, result::Result, sync::Arc, task::{Context, Poll}, }; use bytes::Buf; use futures_util::{ future::{self}, ready, }; use http::Request; use quic::RecvStream; use quic::StreamId; use tokio::sync::mpsc; use crate::{ connection::{self, ConnectionInner, ConnectionState, SharedStateRef}, error::{Code, Error, ErrorLevel}, ext::Datagram, frame::{FrameStream, FrameStreamError}, proto::{ frame::{Frame, PayloadLen}, push::PushId, }, qpack, quic::{self, RecvDatagramExt, SendDatagramExt, SendStream as _}, stream::BufRecvStream, }; use crate::server::request::ResolveRequest; #[cfg(feature = "tracing")] use tracing::{instrument, trace, warn}; use super::stream::{ReadDatagram, RequestStream}; /// Server connection driver /// /// The [`Connection`] struct manages a connection from the side of the HTTP/3 server /// /// Create a new Instance with [`Connection::new()`]. /// Accept incoming requests with [`Connection::accept()`]. /// And shutdown a connection with [`Connection::shutdown()`]. pub struct Connection where C: quic::Connection, B: Buf, { /// TODO: temporarily break encapsulation for `WebTransportSession` pub inner: ConnectionInner, pub(super) max_field_section_size: u64, // List of all incoming streams that are currently running. pub(super) ongoing_streams: HashSet, // Let the streams tell us when they are no longer running. pub(super) request_end_recv: mpsc::UnboundedReceiver, pub(super) request_end_send: mpsc::UnboundedSender, // Has a GOAWAY frame been sent? If so, this StreamId is the last we are willing to accept. pub(super) sent_closing: Option, // Has a GOAWAY frame been received? If so, this is PushId the last the remote will accept. pub(super) recv_closing: Option, // The id of the last stream received by this connection. 
pub(super) last_accepted_stream: Option, } impl ConnectionState for Connection where C: quic::Connection, B: Buf, { fn shared_state(&self) -> &SharedStateRef { &self.inner.shared } } impl Connection where C: quic::Connection, B: Buf, { /// Create a new HTTP/3 server connection with default settings /// /// Use a custom [`super::builder::Builder`] with [`super::builder::builder()`] to create a connection /// with different settings. /// Provide a Connection which implements [`quic::Connection`]. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn new(conn: C) -> Result { super::builder::builder().build(conn).await } /// Closes the connection with a code and a reason. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn close>(&mut self, code: Code, reason: T) -> Error { self.inner.close(code, reason) } } impl Connection where C: quic::Connection, B: Buf, { /// Accept an incoming request. /// /// It returns a tuple with a [`http::Request`] and an [`RequestStream`]. /// The [`http::Request`] is the received request from the client. /// The [`RequestStream`] can be used to send the response. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn accept( &mut self, ) -> Result, RequestStream)>, Error> { // Accept the incoming stream let mut stream = match future::poll_fn(|cx| self.poll_accept_request(cx)).await { Ok(Some(s)) => FrameStream::new(BufRecvStream::new(s)), Ok(None) => { // We always send a last GoAway frame to the client, so it knows which was the last // non-rejected request. self.shutdown(0).await?; return Ok(None); } Err(err) => { match err.inner.kind { crate::error::Kind::Closed => return Ok(None), crate::error::Kind::Application { code, reason, level: ErrorLevel::ConnectionError, } => { return Err(self.inner.close( code, reason.unwrap_or_else(|| String::into_boxed_str(String::from(""))), )) } _ => return Err(err), }; } }; let frame = future::poll_fn(|cx| stream.poll_next(cx)).await; let req = self.accept_with_frame(stream, frame)?; if let Some(req) = req { Ok(Some(req.resolve().await?)) } else { Ok(None) } } /// Accepts a http request where the first frame has already been read and decoded. /// /// /// This is needed as a bidirectional stream may be read as part of incoming webtransport /// bi-streams. If it turns out that the stream is *not* a `WEBTRANSPORT_STREAM` the request /// may still want to be handled and passed to the user. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn accept_with_frame( &mut self, mut stream: FrameStream, frame: Result>, FrameStreamError>, ) -> Result>, Error> { let mut encoded = match frame { Ok(Some(Frame::Headers(h))) => h, //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# If a client-initiated //# stream terminates without enough of the HTTP message to provide a //# complete response, the server SHOULD abort its response stream with //# the error code H3_REQUEST_INCOMPLETE. Ok(None) => { return Err(self.inner.close( Code::H3_REQUEST_INCOMPLETE, "request stream closed before headers", )); } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# Receipt of an invalid sequence of frames MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.5 //# A server MUST treat the //# receipt of a PUSH_PROMISE frame as a connection error of type //# H3_FRAME_UNEXPECTED. 
Ok(Some(_)) => { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1 //# Receipt of an invalid sequence of frames MUST be treated as a //# connection error of type H3_FRAME_UNEXPECTED. // Close if the first frame is not a header frame return Err(self.inner.close( Code::H3_FRAME_UNEXPECTED, "first request frame is not headers", )); } Err(e) => { let err: Error = e.into(); if err.is_closed() { return Ok(None); } match err.inner.kind { crate::error::Kind::Closed => return Ok(None), crate::error::Kind::Application { code, reason, level: ErrorLevel::ConnectionError, } => { return Err(self.inner.close( code, reason.unwrap_or_else(|| String::into_boxed_str(String::from(""))), )) } crate::error::Kind::Application { code, reason: _, level: ErrorLevel::StreamError, } => { stream.reset(code.into()); return Err(err); } _ => return Err(err), }; } }; let mut request_stream = RequestStream { request_end: Arc::new(RequestEnd { request_end: self.request_end_send.clone(), stream_id: stream.send_id(), }), inner: connection::RequestStream::new( stream, self.max_field_section_size, self.inner.shared.clone(), self.inner.send_grease_frame, ), }; let decoded = match qpack::decode_stateless(&mut encoded, self.max_field_section_size) { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. Err(qpack::DecoderError::HeaderTooLong(cancel_size)) => Err(cancel_size), Ok(decoded) => { // send the grease frame only once self.inner.send_grease_frame = false; Ok(decoded) } Err(e) => { let err: Error = e.into(); if err.is_closed() { return Ok(None); } match err.inner.kind { crate::error::Kind::Closed => return Ok(None), crate::error::Kind::Application { code, reason, level: ErrorLevel::ConnectionError, } => { return Err(self.inner.close( code, reason.unwrap_or_else(|| String::into_boxed_str(String::from(""))), )) } crate::error::Kind::Application { code, reason: _, level: ErrorLevel::StreamError, } => { request_stream.stop_stream(code); return Err(err); } _ => return Err(err), }; } }; Ok(Some(ResolveRequest::new( request_stream, decoded, self.max_field_section_size, ))) } /// Initiate a graceful shutdown, accepting `max_request` potentially still in-flight /// /// See [connection shutdown](https://www.rfc-editor.org/rfc/rfc9114.html#connection-shutdown) for more information. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn shutdown(&mut self, max_requests: usize) -> Result<(), Error> { let max_id = self .last_accepted_stream .map(|id| id + max_requests) .unwrap_or(StreamId::FIRST_REQUEST); self.inner.shutdown(&mut self.sent_closing, max_id).await } /// Accepts an incoming bidirectional stream. /// /// This could be either a *Request* or a *WebTransportBiStream*, the first frame's type /// decides. #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_accept_request( &mut self, cx: &mut Context<'_>, ) -> Poll, Error>> { let _ = self.poll_control(cx)?; let _ = self.poll_requests_completion(cx); loop { match self.inner.poll_accept_request(cx) { Poll::Ready(Err(x)) => break Poll::Ready(Err(x)), Poll::Ready(Ok(None)) => { if self.poll_requests_completion(cx).is_ready() { break Poll::Ready(Ok(None)); } else { // Wait for all the requests to be finished, request_end_recv will wake // us on each request completion. 
break Poll::Pending; } } Poll::Pending => { if self.recv_closing.is_some() && self.poll_requests_completion(cx).is_ready() { // The connection is now idle. break Poll::Ready(Ok(None)); } else { return Poll::Pending; } } Poll::Ready(Ok(Some(mut s))) => { // When the connection is in a graceful shutdown procedure, reject all // incoming requests not belonging to the grace interval. It's possible that // some acceptable request streams arrive after rejected requests. if let Some(max_id) = self.sent_closing { if s.send_id() > max_id { s.stop_sending(Code::H3_REQUEST_REJECTED.value()); s.reset(Code::H3_REQUEST_REJECTED.value()); if self.poll_requests_completion(cx).is_ready() { break Poll::Ready(Ok(None)); } continue; } } self.last_accepted_stream = Some(s.send_id()); self.ongoing_streams.insert(s.send_id()); break Poll::Ready(Ok(Some(s))); } }; } } #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub(crate) fn poll_control(&mut self, cx: &mut Context<'_>) -> Poll> { while (self.poll_next_control(cx)?).is_ready() {} Poll::Pending } #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub(crate) fn poll_next_control( &mut self, cx: &mut Context<'_>, ) -> Poll, Error>> { let frame = ready!(self.inner.poll_control(cx))?; match &frame { Frame::Settings(_setting) => { #[cfg(feature = "tracing")] trace!("Got settings > {:?}", _setting); () } &Frame::Goaway(id) => self.inner.process_goaway(&mut self.recv_closing, id)?, _frame @ Frame::MaxPushId(_) | _frame @ Frame::CancelPush(_) => { #[cfg(feature = "tracing")] warn!("Control frame ignored {:?}", _frame); //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.3 //= type=TODO //# If a server receives a CANCEL_PUSH frame for a push //# ID that has not yet been mentioned by a PUSH_PROMISE frame, this MUST //# be treated as a connection error of type H3_ID_ERROR. //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.7 //= type=TODO //# A MAX_PUSH_ID frame cannot reduce the maximum push //# ID; receipt of a MAX_PUSH_ID frame that contains a smaller value than //# previously received MUST be treated as a connection error of type //# H3_ID_ERROR. } //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.5 //# A server MUST treat the //# receipt of a PUSH_PROMISE frame as a connection error of type //# H3_FRAME_UNEXPECTED. frame => { return Poll::Ready(Err(Code::H3_FRAME_UNEXPECTED.with_reason( format!("on server control stream: {:?}", frame), ErrorLevel::ConnectionError, ))) } } Poll::Ready(Ok(frame)) } #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] fn poll_requests_completion(&mut self, cx: &mut Context<'_>) -> Poll<()> { loop { match self.request_end_recv.poll_recv(cx) { // The channel is closed Poll::Ready(None) => return Poll::Ready(()), // A request has completed Poll::Ready(Some(id)) => { self.ongoing_streams.remove(&id); } Poll::Pending => { if self.ongoing_streams.is_empty() { // Tell the caller there is not more ongoing requests. // Still, the completion of future requests will wake us. 
return Poll::Ready(()); } else { return Poll::Pending; } } } } } } impl Connection where C: quic::Connection + SendDatagramExt, B: Buf, { /// Sends a datagram #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn send_datagram(&mut self, stream_id: StreamId, data: B) -> Result<(), Error> { self.inner .conn .send_datagram(Datagram::new(stream_id, data))?; #[cfg(feature = "tracing")] tracing::info!("Sent datagram"); Ok(()) } } impl Connection where C: quic::Connection + RecvDatagramExt, B: Buf, { /// Reads an incoming datagram #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn read_datagram(&mut self) -> ReadDatagram { ReadDatagram { conn: self, _marker: PhantomData, } } } impl Drop for Connection where C: quic::Connection, B: Buf, { #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] fn drop(&mut self) { self.inner.close(Code::H3_NO_ERROR, ""); } } //= https://www.rfc-editor.org/rfc/rfc9114#section-6.1 //= type=TODO //# In order to //# permit these streams to open, an HTTP/3 server SHOULD configure non- //# zero minimum values for the number of permitted streams and the //# initial stream flow-control window. //= https://www.rfc-editor.org/rfc/rfc9114#section-6.1 //= type=TODO //# So as to not unnecessarily limit //# parallelism, at least 100 request streams SHOULD be permitted at a //# time. pub(super) struct RequestEnd { pub(super) request_end: mpsc::UnboundedSender, pub(super) stream_id: StreamId, } h3-0.0.6/src/server/mod.rs000064400000000000000000000042431046102023000133660ustar 00000000000000//! This module provides methods to create a http/3 Server. //! //! It allows to accept incoming requests, and send responses. //! //! # Examples //! //! ## Simple example //! ```rust //! async fn doc(conn: C) //! where //! C: h3::quic::Connection, //! >::BidiStream: Send + 'static //! { //! let mut server_builder = h3::server::builder(); //! // Build the Connection //! let mut h3_conn = server_builder.build(conn).await.unwrap(); //! loop { //! // Accept incoming requests //! match h3_conn.accept().await { //! Ok(Some((req, mut stream))) => { //! // spawn a new task to handle the request //! tokio::spawn(async move { //! // build a http response //! let response = http::Response::builder().status(http::StatusCode::OK).body(()).unwrap(); //! // send the response to the wire //! stream.send_response(response).await.unwrap(); //! // send some date //! stream.send_data(bytes::Bytes::from("test")).await.unwrap(); //! // finnish the stream //! stream.finish().await.unwrap(); //! }); //! } //! Ok(None) => { //! // break if no Request is accepted //! break; //! } //! Err(err) => { //! match err.get_error_level() { //! // break on connection errors //! h3::error::ErrorLevel::ConnectionError => break, //! // continue on stream errors //! h3::error::ErrorLevel::StreamError => continue, //! } //! } //! } //! } //! } //! ``` //! //! ## File server //! 
A ready-to-use example of a file server is available [here](https://github.com/hyperium/h3/blob/master/examples/server.rs) mod builder; mod connection; mod request; mod stream; pub use builder::builder; pub use builder::Builder; pub use connection::Connection; pub use stream::ReadDatagram; pub use stream::RequestStream; h3-0.0.6/src/server/request.rs000064400000000000000000000071261046102023000143020ustar 00000000000000use std::convert::TryFrom; use bytes::Buf; use http::{Request, StatusCode}; #[cfg(feature = "tracing")] use tracing::instrument; use crate::{error::Code, proto::headers::Header, qpack, quic, Error}; use super::stream::RequestStream; pub struct ResolveRequest, B: Buf> { request_stream: RequestStream, // Ok or `REQUEST_HEADER_FIELDS_TO_LARGE` which needs to be sent decoded: Result, max_field_section_size: u64, } impl> ResolveRequest { pub fn new( request_stream: RequestStream, decoded: Result, max_field_section_size: u64, ) -> Self { Self { request_stream, decoded, max_field_section_size, } } /// Finishes the resolution of the request #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn resolve( mut self, ) -> Result<(Request<()>, RequestStream), Error> { let fields = match self.decoded { Ok(v) => v.fields, Err(cancel_size) => { // Send and await the error response self.request_stream .send_response( http::Response::builder() .status(StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE) .body(()) .expect("header too big response"), ) .await?; return Err(Error::header_too_big( cancel_size, self.max_field_section_size, )); } }; // Parse the request headers let (method, uri, protocol, headers) = match Header::try_from(fields) { Ok(header) => match header.into_request_parts() { Ok(parts) => parts, Err(err) => { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1.2 //# Malformed requests or responses that are //# detected MUST be treated as a stream error of type H3_MESSAGE_ERROR. let error: Error = err.into(); self.request_stream .stop_stream(error.try_get_code().unwrap_or(Code::H3_MESSAGE_ERROR)); return Err(error); } }, Err(err) => { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1.2 //# Malformed requests or responses that are //# detected MUST be treated as a stream error of type H3_MESSAGE_ERROR. let error: Error = err.into(); self.request_stream .stop_stream(error.try_get_code().unwrap_or(Code::H3_MESSAGE_ERROR)); return Err(error); } }; // request_stream.stop_stream(Code::H3_MESSAGE_ERROR).await; let mut req = http::Request::new(()); *req.method_mut() = method; *req.uri_mut() = uri; *req.headers_mut() = headers; // NOTE: insert `Protocol` and not `Option` if let Some(protocol) = protocol { req.extensions_mut().insert(protocol); } *req.version_mut() = http::Version::HTTP_3; // send the grease frame only once // self.inner.send_grease_frame = false; #[cfg(feature = "tracing")] tracing::trace!("replying with: {:?}", req); Ok((req, self.request_stream)) } } h3-0.0.6/src/server/stream.rs000064400000000000000000000162001046102023000140760ustar 00000000000000//! 
Server-side HTTP/3 stream management use bytes::Buf; use crate::{ connection::{ConnectionState, SharedStateRef}, ext::Datagram, quic::{self, RecvDatagramExt}, Error, }; use pin_project_lite::pin_project; use super::connection::{Connection, RequestEnd}; use std::{marker::PhantomData, sync::Arc}; use std::{ option::Option, result::Result, task::{Context, Poll}, }; use bytes::BytesMut; use futures_util::{future::Future, ready}; use http::{response, HeaderMap, Response}; use quic::StreamId; use crate::{ error::Code, proto::{frame::Frame, headers::Header}, qpack, quic::SendStream as _, stream::{self}, }; #[cfg(feature = "tracing")] use tracing::{error, instrument}; /// Manage request and response transfer for an incoming request /// /// The [`RequestStream`] struct is used to send and/or receive /// information from the client. pub struct RequestStream { pub(super) inner: crate::connection::RequestStream, pub(super) request_end: Arc, } impl AsMut> for RequestStream { fn as_mut(&mut self) -> &mut crate::connection::RequestStream { &mut self.inner } } impl ConnectionState for RequestStream { fn shared_state(&self) -> &SharedStateRef { &self.inner.conn_state } } impl RequestStream where S: quic::RecvStream, B: Buf, { /// Receive data sent from the client #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_data(&mut self) -> Result, Error> { self.inner.recv_data().await } /// Poll for data sent from the client #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn poll_recv_data( &mut self, cx: &mut Context<'_>, ) -> Poll, Error>> { self.inner.poll_recv_data(cx) } /// Receive an optional set of trailers for the request #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub async fn recv_trailers(&mut self) -> Result, Error> { self.inner.recv_trailers().await } /// Tell the peer to stop sending into the underlying QUIC stream #[cfg_attr(feature = "tracing", instrument(skip_all, level = "trace"))] pub fn stop_sending(&mut self, error_code: crate::error::Code) { self.inner.stream.stop_sending(error_code) } /// Returns the underlying stream id pub fn id(&self) -> StreamId { self.inner.stream.id() } } impl RequestStream where S: quic::SendStream, B: Buf, { /// Send the HTTP/3 response /// /// This should be called before trying to send any data with /// [`RequestStream::send_data`]. pub async fn send_response(&mut self, resp: Response<()>) -> Result<(), Error> { let (parts, _) = resp.into_parts(); let response::Parts { status, headers, .. } = parts; let headers = Header::response(status, headers); let mut block = BytesMut::new(); let mem_size = qpack::encode_stateless(&mut block, headers)?; let max_mem_size = self .inner .conn_state .read("send_response") .peer_config .max_field_section_size; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. if mem_size > max_mem_size { return Err(Error::header_too_big(mem_size, max_mem_size)); } stream::write(&mut self.inner.stream, Frame::Headers(block.freeze())) .await .map_err(|e| self.maybe_conn_err(e))?; Ok(()) } /// Send some data on the response body. pub async fn send_data(&mut self, buf: B) -> Result<(), Error> { self.inner.send_data(buf).await } /// Stop a stream with an error code /// /// The code can be [`Code::H3_NO_ERROR`]. 
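    ///
    /// For example, a handler that gives up on a request after reading its
    /// headers might call this with [`Code::H3_REQUEST_REJECTED`] before dropping
    /// the stream (illustrative usage, not a requirement).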
pub fn stop_stream(&mut self, error_code: Code) { self.inner.stop_stream(error_code); } /// Send a set of trailers to end the response. /// /// Either [`RequestStream::finish`] or /// [`RequestStream::send_trailers`] must be called to finalize a /// request. pub async fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), Error> { self.inner.send_trailers(trailers).await } /// End the response without trailers. /// /// Either [`RequestStream::finish`] or /// [`RequestStream::send_trailers`] must be called to finalize a /// request. pub async fn finish(&mut self) -> Result<(), Error> { self.inner.finish().await } //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1.1 //= type=TODO //# Implementations SHOULD cancel requests by abruptly terminating any //# directions of a stream that are still open. To do so, an //# implementation resets the sending parts of streams and aborts reading //# on the receiving parts of streams; see Section 2.4 of //# [QUIC-TRANSPORT]. /// Returns the underlying stream id pub fn send_id(&self) -> StreamId { self.inner.stream.send_id() } } impl RequestStream where S: quic::BidiStream, B: Buf, { /// Splits the Request-Stream into send and receive. /// This can be used the send and receive data on different tasks. pub fn split( self, ) -> ( RequestStream, RequestStream, ) { let (send, recv) = self.inner.split(); ( RequestStream { inner: send, request_end: self.request_end.clone(), }, RequestStream { inner: recv, request_end: self.request_end, }, ) } } impl Drop for RequestEnd { fn drop(&mut self) { if let Err(_error) = self.request_end.send(self.stream_id) { #[cfg(feature = "tracing")] error!( "failed to notify connection of request end: {} {}", self.stream_id, _error ); } } } pin_project! { /// Future for [`Connection::read_datagram`] pub struct ReadDatagram<'a, C, B> where C: quic::Connection, B: Buf, { pub(super) conn: &'a mut Connection, pub(super) _marker: PhantomData, } } impl<'a, C, B> Future for ReadDatagram<'a, C, B> where C: quic::Connection + RecvDatagramExt, B: Buf, { type Output = Result>, Error>; fn poll(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { #[cfg(feature = "tracing")] tracing::trace!("poll: read_datagram"); match ready!(self.conn.inner.conn.poll_accept_datagram(cx))? { Some(v) => Poll::Ready(Ok(Some(Datagram::decode(v)?))), None => Poll::Ready(Ok(None)), } } } h3-0.0.6/src/stream.rs000064400000000000000000000517151046102023000126020ustar 00000000000000use std::{ marker::PhantomData, pin::Pin, task::{Context, Poll}, }; use bytes::{Buf, BufMut, Bytes}; use futures_util::{future, ready}; use pin_project_lite::pin_project; use tokio::io::ReadBuf; use crate::{ buf::BufList, error::{Code, ErrorLevel}, frame::FrameStream, proto::{ coding::{Decode as _, Encode}, frame::{Frame, Settings}, stream::StreamType, varint::VarInt, }, quic::{self, BidiStream, RecvStream, SendStream, SendStreamUnframed}, webtransport::SessionId, Error, }; #[inline] /// Transmits data by encoding in wire format. pub(crate) async fn write(stream: &mut S, data: D) -> Result<(), Error> where S: SendStream, D: Into>, B: Buf, { stream.send_data(data)?; future::poll_fn(|cx| stream.poll_ready(cx)).await?; Ok(()) } const WRITE_BUF_ENCODE_SIZE: usize = StreamType::MAX_ENCODED_SIZE + Frame::MAX_ENCODED_SIZE; /// Wrap frames to encode their header on the stack before sending them on the wire /// /// Implements `Buf` so wire data is seamlessly available for transport layer transmits: /// `Buf::chunk()` will yield the encoded header, then the payload. 
For unidirectional streams, /// this type makes it possible to prefix wire data with the `StreamType`. /// /// Conveying frames as `Into` makes it possible to encode only when generating wire-format /// data is necessary (say, in `quic::SendStream::send_data`). It also has a public API ergonomy /// advantage: `WriteBuf` doesn't have to appear in public associated types. On the other hand, /// QUIC implementers have to call `into()`, which will encode the header in `Self::buf`. pub struct WriteBuf { buf: [u8; WRITE_BUF_ENCODE_SIZE], len: usize, pos: usize, frame: Option>, } impl WriteBuf where B: Buf, { fn encode_stream_type(&mut self, ty: StreamType) { let mut buf_mut = &mut self.buf[self.len..]; ty.encode(&mut buf_mut); self.len = WRITE_BUF_ENCODE_SIZE - buf_mut.remaining_mut(); } fn encode_value(&mut self, value: impl Encode) { let mut buf_mut = &mut self.buf[self.len..]; value.encode(&mut buf_mut); self.len = WRITE_BUF_ENCODE_SIZE - buf_mut.remaining_mut(); } fn encode_frame_header(&mut self) { if let Some(frame) = self.frame.as_ref() { let mut buf_mut = &mut self.buf[self.len..]; frame.encode(&mut buf_mut); self.len = WRITE_BUF_ENCODE_SIZE - buf_mut.remaining_mut(); } } } impl From for WriteBuf where B: Buf, { fn from(ty: StreamType) -> Self { let mut me = Self { buf: [0; WRITE_BUF_ENCODE_SIZE], len: 0, pos: 0, frame: None, }; me.encode_stream_type(ty); me } } impl From for WriteBuf where B: Buf, { fn from(header: UniStreamHeader) -> Self { let mut this = Self { buf: [0; WRITE_BUF_ENCODE_SIZE], len: 0, pos: 0, frame: None, }; this.encode_value(header); this } } pub enum UniStreamHeader { Control(Settings), WebTransportUni(SessionId), Encoder, Decoder, } impl Encode for UniStreamHeader { fn encode(&self, buf: &mut B) { match self { Self::Control(settings) => { StreamType::CONTROL.encode(buf); settings.encode(buf); } Self::WebTransportUni(session_id) => { StreamType::WEBTRANSPORT_UNI.encode(buf); session_id.encode(buf); } UniStreamHeader::Encoder => { StreamType::ENCODER.encode(buf); } UniStreamHeader::Decoder => { StreamType::DECODER.encode(buf); } } } } impl From for WriteBuf where B: Buf, { fn from(header: BidiStreamHeader) -> Self { let mut this = Self { buf: [0; WRITE_BUF_ENCODE_SIZE], len: 0, pos: 0, frame: None, }; this.encode_value(header); this } } pub enum BidiStreamHeader { WebTransportBidi(SessionId), } impl Encode for BidiStreamHeader { fn encode(&self, buf: &mut B) { match self { Self::WebTransportBidi(session_id) => { StreamType::WEBTRANSPORT_BIDI.encode(buf); session_id.encode(buf); } } } } impl From> for WriteBuf where B: Buf, { fn from(frame: Frame) -> Self { let mut me = Self { buf: [0; WRITE_BUF_ENCODE_SIZE], len: 0, pos: 0, frame: Some(frame), }; me.encode_frame_header(); me } } impl From<(StreamType, Frame)> for WriteBuf where B: Buf, { fn from(ty_stream: (StreamType, Frame)) -> Self { let (ty, frame) = ty_stream; let mut me = Self { buf: [0; WRITE_BUF_ENCODE_SIZE], len: 0, pos: 0, frame: Some(frame), }; me.encode_value(ty); me.encode_frame_header(); me } } impl Buf for WriteBuf where B: Buf, { fn remaining(&self) -> usize { self.len - self.pos + self .frame .as_ref() .and_then(|f| f.payload()) .map_or(0, |x| x.remaining()) } fn chunk(&self) -> &[u8] { if self.len - self.pos > 0 { &self.buf[self.pos..self.len] } else if let Some(payload) = self.frame.as_ref().and_then(|f| f.payload()) { payload.chunk() } else { &[] } } fn advance(&mut self, mut cnt: usize) { let remaining_header = self.len - self.pos; if remaining_header > 0 { let advanced = usize::min(cnt, 
remaining_header); self.pos += advanced; cnt -= advanced; } if let Some(payload) = self.frame.as_mut().and_then(|f| f.payload_mut()) { payload.advance(cnt); } } } pub(super) enum AcceptedRecvStream where S: quic::RecvStream, B: Buf, { Control(FrameStream), Push(FrameStream), Encoder(BufRecvStream), Decoder(BufRecvStream), WebTransportUni(SessionId, BufRecvStream), Reserved, } /// Resolves an incoming streams type as well as `PUSH_ID`s and `SESSION_ID`s pub(super) struct AcceptRecvStream { stream: BufRecvStream, ty: Option, /// push_id or session_id id: Option, expected: Option, } impl AcceptRecvStream where S: RecvStream, B: Buf, { pub fn new(stream: S) -> Self { Self { stream: BufRecvStream::new(stream), ty: None, id: None, expected: None, } } pub fn into_stream(self) -> Result, Error> { Ok(match self.ty.expect("Stream type not resolved yet") { StreamType::CONTROL => AcceptedRecvStream::Control(FrameStream::new(self.stream)), StreamType::PUSH => AcceptedRecvStream::Push(FrameStream::new(self.stream)), StreamType::ENCODER => AcceptedRecvStream::Encoder(self.stream), StreamType::DECODER => AcceptedRecvStream::Decoder(self.stream), StreamType::WEBTRANSPORT_UNI => AcceptedRecvStream::WebTransportUni( SessionId::from_varint(self.id.expect("Session ID not resolved yet")), self.stream, ), t if t.value() > 0x21 && (t.value() - 0x21) % 0x1f == 0 => AcceptedRecvStream::Reserved, //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2 //# Recipients of unknown stream types MUST //# either abort reading of the stream or discard incoming data without //# further processing. //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2 //# If reading is aborted, the recipient SHOULD use //# the H3_STREAM_CREATION_ERROR error code or a reserved error code //# (Section 8.1). //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2 //= type=implication //# The recipient MUST NOT consider unknown stream types //# to be a connection error of any kind. t => { return Err(Code::H3_STREAM_CREATION_ERROR.with_reason( format!("unknown stream type 0x{:x}", t.value()), crate::error::ErrorLevel::ConnectionError, )) } }) } pub fn poll_type(&mut self, cx: &mut Context) -> Poll> { loop { // Return if all identification data is met match self.ty { Some(StreamType::PUSH | StreamType::WEBTRANSPORT_UNI) => { if self.id.is_some() { return Poll::Ready(Ok(())); } } Some(_) => return Poll::Ready(Ok(())), None => (), }; if ready!(self.stream.poll_read(cx))? { return Poll::Ready(Err(Code::H3_STREAM_CREATION_ERROR.with_reason( "Stream closed before type received", ErrorLevel::ConnectionError, ))); }; let mut buf = self.stream.buf_mut(); if self.expected.is_none() && buf.remaining() >= 1 { self.expected = Some(VarInt::encoded_size(buf.chunk()[0])); } if let Some(expected) = self.expected { // Poll for more data if buf.remaining() < expected { continue; } } else { continue; } // Parse ty and then id if self.ty.is_none() { // Parse StreamType self.ty = Some(StreamType::decode(&mut buf).map_err(|_| { Code::H3_INTERNAL_ERROR.with_reason( "Unexpected end parsing stream type", ErrorLevel::ConnectionError, ) })?); // Get the next VarInt for PUSH_ID on the next iteration self.expected = None; } else { // Parse PUSH_ID self.id = Some(VarInt::decode(&mut buf).map_err(|_| { Code::H3_INTERNAL_ERROR.with_reason( "Unexpected end parsing push or session id", ErrorLevel::ConnectionError, ) })?); } } } } pin_project! { /// A stream which allows partial reading of the data without data loss. 
/// /// This fixes the problem where `poll_data` returns more than the needed amount of bytes, /// requiring correct implementations to hold on to that extra data and return it later. /// /// # Usage /// /// Implements `quic::RecvStream` which will first return buffered data, and then read from the /// stream pub struct BufRecvStream { buf: BufList, // Indicates that the end of the stream has been reached // // Data may still be available as buffered eos: bool, stream: S, _marker: PhantomData, } } impl std::fmt::Debug for BufRecvStream { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BufRecvStream") .field("buf", &self.buf) .field("eos", &self.eos) .field("stream", &"...") .finish() } } impl BufRecvStream { pub fn new(stream: S) -> Self { Self { buf: BufList::new(), eos: false, stream, _marker: PhantomData, } } } impl BufRecvStream { /// Reads more data into the buffer, returning the number of bytes read. /// /// Returns `true` if the end of the stream is reached. pub fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll> { let data = ready!(self.stream.poll_data(cx))?; if let Some(mut data) = data { self.buf.push_bytes(&mut data); Poll::Ready(Ok(false)) } else { self.eos = true; Poll::Ready(Ok(true)) } } /// Returns the currently buffered data, allowing it to be partially read #[inline] pub(crate) fn buf_mut(&mut self) -> &mut BufList { &mut self.buf } /// Returns the next chunk of data from the stream /// /// Return `None` when there is no more buffered data; use [`Self::poll_read`]. pub fn take_chunk(&mut self, limit: usize) -> Option { self.buf.take_chunk(limit) } /// Returns true if there is remaining buffered data pub fn has_remaining(&mut self) -> bool { self.buf.has_remaining() } #[inline] pub(crate) fn buf(&self) -> &BufList { &self.buf } pub fn is_eos(&self) -> bool { self.eos } } impl RecvStream for BufRecvStream { type Buf = Bytes; type Error = S::Error; fn poll_data( &mut self, cx: &mut std::task::Context<'_>, ) -> Poll, Self::Error>> { // There is data buffered, return that immediately if let Some(chunk) = self.buf.take_first_chunk() { return Poll::Ready(Ok(Some(chunk))); } if let Some(mut data) = ready!(self.stream.poll_data(cx))? 
{ Poll::Ready(Ok(Some(data.copy_to_bytes(data.remaining())))) } else { self.eos = true; Poll::Ready(Ok(None)) } } fn stop_sending(&mut self, error_code: u64) { self.stream.stop_sending(error_code) } fn recv_id(&self) -> quic::StreamId { self.stream.recv_id() } } impl SendStream for BufRecvStream where B: Buf, S: SendStream, { type Error = S::Error; fn poll_finish(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { self.stream.poll_finish(cx) } fn reset(&mut self, reset_code: u64) { self.stream.reset(reset_code) } fn send_id(&self) -> quic::StreamId { self.stream.send_id() } fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { self.stream.poll_ready(cx) } fn send_data>>(&mut self, data: T) -> Result<(), Self::Error> { self.stream.send_data(data) } } impl SendStreamUnframed for BufRecvStream where B: Buf, S: SendStreamUnframed, { #[inline] fn poll_send( &mut self, cx: &mut std::task::Context<'_>, buf: &mut D, ) -> Poll> { self.stream.poll_send(cx, buf) } } impl BidiStream for BufRecvStream where B: Buf, S: BidiStream, { type SendStream = BufRecvStream; type RecvStream = BufRecvStream; fn split(self) -> (Self::SendStream, Self::RecvStream) { let (send, recv) = self.stream.split(); ( BufRecvStream { // Sending is not buffered buf: BufList::new(), eos: self.eos, stream: send, _marker: PhantomData, }, BufRecvStream { buf: self.buf, eos: self.eos, stream: recv, _marker: PhantomData, }, ) } } impl futures_util::io::AsyncRead for BufRecvStream where B: Buf, S: RecvStream, S::Error: Into, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { let p = &mut *self; // Poll for data if the buffer is empty // // If there is data available *do not* poll for more data, as that may suspend indefinitely // if no more data is sent, causing data loss. if !p.has_remaining() { let eos = ready!(p.poll_read(cx).map_err(Into::into))?; if eos { return Poll::Ready(Ok(0)); } } let chunk = p.buf_mut().take_chunk(buf.len()); if let Some(chunk) = chunk { assert!(chunk.len() <= buf.len()); let len = chunk.len().min(buf.len()); // Write the subset into the destination buf[..len].copy_from_slice(&chunk); Poll::Ready(Ok(len)) } else { Poll::Ready(Ok(0)) } } } impl tokio::io::AsyncRead for BufRecvStream where B: Buf, S: RecvStream, S::Error: Into, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { let p = &mut *self; // Poll for data if the buffer is empty // // If there is data available *do not* poll for more data, as that may suspend indefinitely // if no more data is sent, causing data loss. 
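        // (Note: this impl mirrors the `futures_util::io::AsyncRead` impl above; Tokio's
        // `ReadBuf` records how many bytes were filled, so no byte count is returned here.)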
if !p.has_remaining() { let eos = ready!(p.poll_read(cx).map_err(Into::into))?; if eos { return Poll::Ready(Ok(())); } } let chunk = p.buf_mut().take_chunk(buf.remaining()); if let Some(chunk) = chunk { assert!(chunk.len() <= buf.remaining()); // Write the subset into the destination buf.put_slice(&chunk); Poll::Ready(Ok(())) } else { Poll::Ready(Ok(())) } } } impl futures_util::io::AsyncWrite for BufRecvStream where B: Buf, S: SendStreamUnframed, S::Error: Into, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: &[u8], ) -> Poll> { let p = &mut *self; p.poll_send(cx, &mut buf).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let p = &mut *self; p.poll_finish(cx).map_err(Into::into) } } impl tokio::io::AsyncWrite for BufRecvStream where B: Buf, S: SendStreamUnframed, S::Error: Into, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: &[u8], ) -> Poll> { let p = &mut *self; p.poll_send(cx, &mut buf).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let p = &mut *self; p.poll_finish(cx).map_err(Into::into) } } #[cfg(test)] mod tests { use quinn_proto::coding::BufExt; use super::*; #[test] fn write_wt_uni_header() { let mut w = WriteBuf::::from(UniStreamHeader::WebTransportUni( SessionId::from_varint(VarInt(5)), )); let ty = w.get_var().unwrap(); println!("Got type: {ty} {ty:#x}"); assert_eq!(ty, 0x54); let id = w.get_var().unwrap(); println!("Got id: {id}"); } #[test] fn write_buf_encode_streamtype() { let wbuf = WriteBuf::::from(StreamType::ENCODER); assert_eq!(wbuf.chunk(), b"\x02"); assert_eq!(wbuf.len, 1); } #[test] fn write_buf_encode_frame() { let wbuf = WriteBuf::::from(Frame::Goaway(VarInt(2))); assert_eq!(wbuf.chunk(), b"\x07\x01\x02"); assert_eq!(wbuf.len, 3); } #[test] fn write_buf_encode_streamtype_then_frame() { let wbuf = WriteBuf::::from((StreamType::ENCODER, Frame::Goaway(VarInt(2)))); assert_eq!(wbuf.chunk(), b"\x02\x07\x01\x02"); } #[test] fn write_buf_advances() { let mut wbuf = WriteBuf::::from((StreamType::ENCODER, Frame::Data(Bytes::from("hey")))); assert_eq!(wbuf.chunk(), b"\x02\x00\x03"); wbuf.advance(3); assert_eq!(wbuf.remaining(), 3); assert_eq!(wbuf.chunk(), b"hey"); wbuf.advance(2); assert_eq!(wbuf.chunk(), b"y"); wbuf.advance(1); assert_eq!(wbuf.remaining(), 0); } #[test] fn write_buf_advance_jumps_header_and_payload_start() { let mut wbuf = WriteBuf::::from((StreamType::ENCODER, Frame::Data(Bytes::from("hey")))); wbuf.advance(4); assert_eq!(wbuf.chunk(), b"ey"); } } h3-0.0.6/src/tests/connection.rs000064400000000000000000000623541046102023000146110ustar 00000000000000// identity_op: we write out how test values are computed #![allow(clippy::identity_op)] use std::{borrow::BorrowMut, time::Duration}; use assert_matches::assert_matches; use bytes::{Buf, Bytes, BytesMut}; use futures_util::future; use http::{Request, Response, StatusCode}; use tokio::sync::oneshot::{self}; use crate::client::SendRequest; use crate::{client, server}; use crate::{ connection::ConnectionState, error::{Code, Error, Kind}, proto::{ coding::Encode as _, frame::{Frame, Settings}, push::PushId, stream::StreamType, varint::VarInt, }, quic::{self, SendStream}, }; use super::h3_quinn; use super::{init_tracing, Pair}; #[tokio::test] async fn connect() { let mut pair = 
Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut drive, _client) = client::new(pair.client().await).await.expect("client init"); future::poll_fn(|cx| drive.poll_close(cx)).await.unwrap(); }; let server_fut = async { let conn = server.next().await; let _server = server::Connection::new(conn).await.unwrap(); }; tokio::select!(() = server_fut => (), () = client_fut => panic!("client resolved first")); } #[tokio::test] async fn accept_request_end_on_client_close() { let mut pair = Pair::default(); let mut server = pair.server(); let (tx, rx) = oneshot::channel::<()>(); let client_fut = async { let client = pair.client().await; let client = client::new(client).await.expect("client init"); // wait for the server to accept the connection rx.await.unwrap(); // client is dropped, it will send H3_NO_ERROR drop(client); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); tx.send(()).unwrap(); // Accept returns Ok(None) assert!(incoming.accept().await.unwrap().is_none()); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn server_drop_close() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let server_fut = async { let conn = server.next().await; let _ = server::Connection::new(conn).await.unwrap(); }; let client_fut = async { let (mut conn, mut send) = client::new(pair.client().await).await.expect("client init"); let request_fut = async move { let mut request_stream = send .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let response = request_stream.recv_response().await; assert_matches!(response.unwrap_err().kind(), Kind::Closed); }; let drive_fut = async { let drive = future::poll_fn(|cx| conn.poll_close(cx)).await; assert_matches!(drive, Ok(())); }; tokio::select! {biased; _ = request_fut => (), _ = drive_fut => () } }; tokio::join!(server_fut, client_fut); } // In this test the client calls send_data() without doing a finish(), // i.e client keeps the body stream open. 
And client expects server to // read_data() and send a response #[tokio::test] async fn server_send_data_without_finish() { let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (_driver, mut send_request) = client::new(pair.client().await).await.unwrap(); let mut req = send_request .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let data = vec![0; 100]; req.send_data(bytes::Bytes::copy_from_slice(&data)) .await .unwrap(); let _ = req.recv_response().await.unwrap(); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); let (_, mut stream) = incoming.accept().await.unwrap().unwrap(); let mut data = stream.recv_data().await.unwrap().unwrap(); let data = data.copy_to_bytes(data.remaining()); assert_eq!(data.len(), 100); response(stream).await; server.endpoint.wait_idle().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn client_close_only_on_last_sender_drop() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert!(incoming.accept().await.unwrap().is_some()); assert!(incoming.accept().await.unwrap().is_some()); assert!(incoming.accept().await.unwrap().is_none()); }; let client_fut = async { let (mut conn, mut send1) = client::new(pair.client().await).await.expect("client init"); let mut send2 = send1.clone(); let mut request_stream_1 = send1 .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let _ = request_stream_1.recv_response().await; let _ = request_stream_1.finish().await; let mut request_stream_2 = send2 .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let _ = request_stream_2.recv_response().await; let _ = request_stream_2.finish().await; drop(send1); drop(send2); let drive = future::poll_fn(|cx| conn.poll_close(cx)).await; assert_matches!(drive, Ok(())); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn settings_exchange_client() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut conn, client) = client::new(pair.client().await).await.expect("client init"); let settings_change = async { for _ in 0..10 { if client .shared_state() .read("client") .peer_config .max_field_section_size == 12 { return; } tokio::time::sleep(Duration::from_millis(2)).await; } panic!("peer's max_field_section_size didn't change"); }; let drive = async move { future::poll_fn(|cx| conn.poll_close(cx)).await.unwrap(); }; tokio::select! { _ = settings_change => (), _ = drive => panic!("driver resolved first") }; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::builder() .max_field_section_size(12) .build(conn) .await .unwrap(); incoming.accept().await.unwrap() }; tokio::select! { _ = server_fut => panic!("server resolved first"), _ = client_fut => () }; } #[tokio::test] async fn settings_exchange_server() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut conn, _client) = client::builder() .max_field_section_size(12) .build::<_, _, Bytes>(pair.client().await) .await .expect("client init"); let drive = async move { future::poll_fn(|cx| conn.poll_close(cx)).await.unwrap(); }; tokio::select! 
{ _ = drive => () }; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); let state = incoming.shared_state().clone(); let accept = async { incoming.accept().await.unwrap() }; let settings_change = async { for _ in 0..10 { if state .read("setting_change") .peer_config .max_field_section_size == 12 { return; } tokio::time::sleep(Duration::from_millis(2)).await; } panic!("peer's max_field_section_size didn't change"); }; tokio::select! { _ = accept => panic!("server resolved first"), _ = settings_change => () }; }; tokio::select! { _ = server_fut => (), _ = client_fut => () }; } #[tokio::test] async fn client_error_on_bidi_recv() { let mut pair = Pair::default(); let server = pair.server(); macro_rules! check_err { ($e:expr) => { assert_matches!( $e.map(|_| ()).unwrap_err().kind(), Kind::Application { reason: Some(reason), code: Code::H3_STREAM_CREATION_ERROR, .. } if *reason == *"client received a bidirectional stream"); } } let client_fut = async { let (mut conn, mut send) = client::new(pair.client().await).await.expect("client init"); //= https://www.rfc-editor.org/rfc/rfc9114#section-6.1 //= type=test //# Clients MUST treat //# receipt of a server-initiated bidirectional stream as a connection //# error of type H3_STREAM_CREATION_ERROR unless such an extension has //# been negotiated. let driver = future::poll_fn(|cx| conn.poll_close(cx)); check_err!(driver.await); check_err!( send.send_request(Request::get("http://no.way").body(()).unwrap()) .await ); }; let server_fut = async { let connection = server.endpoint.accept().await.unwrap().await.unwrap(); let (mut send, _recv) = connection.open_bi().await.unwrap(); for _ in 0..100 { match send.write(b"I'm not really a server").await { Err(quinn::WriteError::ConnectionLost( quinn::ConnectionError::ApplicationClosed(quinn::ApplicationClose { error_code, .. }), )) if Code::H3_STREAM_CREATION_ERROR == error_code.into_inner() => break, Err(e) => panic!("got err: {}", e), Ok(_) => tokio::time::sleep(Duration::from_millis(1)).await, } } }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn two_control_streams() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let connection = pair.client_inner().await; //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //= type=test //# Only one control stream per peer is permitted; //# receipt of a second stream claiming to be a control stream MUST be //# treated as a connection error of type H3_STREAM_CREATION_ERROR. for _ in 0..=1 { let mut control_stream = connection.open_uni().await.unwrap(); let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); } tokio::time::sleep(Duration::from_secs(10)).await; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Application { code: Code::H3_STREAM_CREATION_ERROR, .. } ); }; tokio::select! 
{ _ = server_fut => (), _ = client_fut => panic!("client resolved first") }; } #[tokio::test] async fn control_close_send_error() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let connection = pair.client_inner().await; let mut control_stream = connection.open_uni().await.unwrap(); let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //= type=test //# If either control //# stream is closed at any point, this MUST be treated as a connection //# error of type H3_CLOSED_CRITICAL_STREAM. control_stream.finish().unwrap(); // close the client control stream immediately // create the Connection manually, so it does not open a second Control stream let connection_error = loop { let accepted = connection.accept_bi().await; match accepted { // do nothing with the stream Ok(_) => continue, Err(err) => break err, } }; let err_code = match connection_error { quinn::ConnectionError::ApplicationClosed(quinn::ApplicationClose { error_code, .. }) => error_code.into_inner(), e => panic!("unexpected error: {:?}", e), }; assert_eq!(err_code, Code::H3_CLOSED_CRITICAL_STREAM.value()); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); // Driver detects that the receiving side of the control stream has been closed assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Application { reason: Some(reason), code: Code::H3_CLOSED_CRITICAL_STREAM, .. } if *reason == *"control stream closed"); // Poll it once again returns the previously stored error assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Application { reason: Some(reason), code: Code::H3_CLOSED_CRITICAL_STREAM, .. } if *reason == *"control stream closed"); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn missing_settings() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let connection = pair.client_inner().await; let mut control_stream = connection.open_uni().await.unwrap(); let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); //= https://www.rfc-editor.org/rfc/rfc9114#section-6.2.1 //= type=test //# If the first frame of the control stream is any other frame //# type, this MUST be treated as a connection error of type //# H3_MISSING_SETTINGS. Frame::::CancelPush(PushId(0)).encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); tokio::time::sleep(Duration::from_secs(10)).await; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Application { code: Code::H3_MISSING_SETTINGS, .. } ); }; tokio::select! { _ = server_fut => (), _ = client_fut => panic!("client resolved first") }; } #[tokio::test] async fn control_stream_frame_unexpected() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let connection = pair.client_inner().await; let mut control_stream = connection.open_uni().await.unwrap(); //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.1 //= type=test //# If //# a DATA frame is received on a control stream, the recipient MUST //# respond with a connection error of type H3_FRAME_UNEXPECTED. 
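        // The client half of this test advertises the unidirectional stream as the control
        // stream and then writes a DATA frame on it; the server half expects the connection
        // to fail with H3_FRAME_UNEXPECTED.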
let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); Frame::Data(Bytes::from("")).encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); tokio::time::sleep(Duration::from_secs(10)).await; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Application { code: Code::H3_FRAME_UNEXPECTED, .. } ); }; tokio::select! { _ = server_fut => (), _ = client_fut => panic!("client resolved first") }; } #[tokio::test] async fn timeout_on_control_frame_read() { init_tracing(); let mut pair = Pair::default(); pair.with_timeout(Duration::from_millis(10)); let mut server = pair.server(); let client_fut = async { let (mut driver, _send_request) = client::new(pair.client().await).await.unwrap(); let _ = future::poll_fn(|cx| driver.poll_close(cx)).await; }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert_matches!( incoming.accept().await.map(|_| ()).unwrap_err().kind(), Kind::Timeout ); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn goaway_from_server_not_request_id() { init_tracing(); let mut pair = Pair::default(); let server = pair.server_inner(); let client_fut = async { let connection = pair.client_inner().await; let mut control_stream = connection.open_uni().await.unwrap(); let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); control_stream.finish().unwrap(); // close the client control stream immediately let (mut driver, _send) = client::new(h3_quinn::Connection::new(connection)) .await .unwrap(); assert_matches!( future::poll_fn(|cx| driver.poll_close(cx)) .await .unwrap_err() .kind(), Kind::Application { // The sent in the GoAway frame from the client is not a Request: code: Code::H3_ID_ERROR, .. } ) }; let server_fut = async { let conn = server.accept().await.unwrap().await.unwrap(); let mut control_stream = conn.open_uni().await.unwrap(); let mut buf = BytesMut::new(); StreamType::CONTROL.encode(&mut buf); Frame::::Settings(Settings::default()).encode(&mut buf); //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.6 //= type=test //# A client MUST treat receipt of a GOAWAY frame containing a stream ID //# of any other type as a connection error of type H3_ID_ERROR. // StreamId(index=0 << 2 | dir=Uni << 1 | initiator=Server as u64) Frame::::Goaway(VarInt(0u64 << 2 | 0 << 1 | 1)).encode(&mut buf); control_stream.write_all(&buf[..]).await.unwrap(); tokio::time::sleep(Duration::from_secs(10)).await; }; tokio::select! { _ = server_fut => panic!("client resolved first"), _ = client_fut => () }; } #[tokio::test] async fn graceful_shutdown_server_rejects() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (_driver, mut send_request) = client::new(pair.client().await).await.unwrap(); let mut first = send_request .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let mut rejected = send_request .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let first = first.recv_response().await; let rejected = rejected.recv_response().await; assert_matches!(first, Ok(_)); assert_matches!( rejected.unwrap_err().kind(), Kind::Application { code: Code::H3_REQUEST_REJECTED, .. 
} ); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); let (_, stream) = incoming.accept().await.unwrap().unwrap(); response(stream).await; incoming.shutdown(0).await.unwrap(); assert_matches!(incoming.accept().await.map(|x| x.map(|_| ())), Ok(None)); server.endpoint.wait_idle().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn graceful_shutdown_grace_interval() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut send_request) = client::new(pair.client().await).await.unwrap(); // Sent as the connection is not shutting down let mut first = send_request .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); // Sent as the connection is shutting down, but GoAway has not been received yet let mut in_flight = send_request .send_request(Request::get("http://no.way").body(()).unwrap()) .await .unwrap(); let first = first.recv_response().await; let in_flight = in_flight.recv_response().await; // Will not be sent as client's driver already received the GoAway let too_late = async move { tokio::time::sleep(Duration::from_millis(15)).await; request(send_request).await }; let driver = future::poll_fn(|cx| driver.poll_close(cx)); let (too_late, driver) = tokio::join!(too_late, driver); assert_matches!(first, Ok(_)); assert_matches!(in_flight, Ok(_)); assert_matches!(too_late.unwrap_err().kind(), Kind::Closing); assert_matches!(driver, Ok(_)); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); let (_, first) = incoming.accept().await.unwrap().unwrap(); incoming.shutdown(1).await.unwrap(); let (_, in_flight) = incoming.accept().await.unwrap().unwrap(); response(first).await; response(in_flight).await; while let Ok(Some((_, stream))) = incoming.accept().await { response(stream).await; } // Ensure `too_late` request is executed as the connection is still // closing (no QUIC `Close` frame has been fired yet) tokio::time::sleep(Duration::from_millis(50)).await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn graceful_shutdown_closes_when_idle() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut send_request) = client::new(pair.client().await).await.unwrap(); // Make continuous requests, ignoring GoAway because the connection is not driven while request(&mut send_request).await.is_ok() { tokio::task::yield_now().await; } assert_matches!( future::poll_fn(|cx| { println!("client drive"); driver.poll_close(cx) }) .await, Ok(()) ); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); let mut count = 0; while let Ok(Some((_, stream))) = incoming.accept().await { count += 1; if count == 4 { incoming.shutdown(2).await.unwrap(); } response(stream).await; } }; tokio::select! 
{ _ = client_fut => (), r = tokio::time::timeout(Duration::from_millis(100), server_fut) => assert_matches!(r, Ok(())), }; } #[tokio::test] async fn graceful_shutdown_client() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut _send_request) = client::new(pair.client().await).await.unwrap(); driver.shutdown(0).await.unwrap(); assert_matches!( future::poll_fn(|cx| { println!("client drive"); driver.poll_close(cx) }) .await, Ok(()) ); }; let server_fut = async { let conn = server.next().await; let mut incoming = server::Connection::new(conn).await.unwrap(); assert!(incoming.accept().await.unwrap().is_none()); }; tokio::join!(server_fut, client_fut); } async fn request(mut send_request: T) -> Result, Error> where T: BorrowMut>, O: quic::OpenStreams, B: Buf, { let mut request_stream = send_request .borrow_mut() .send_request(Request::get("http://no.way").body(()).unwrap()) .await?; request_stream.recv_response().await } async fn response(mut stream: server::RequestStream) where S: quic::RecvStream + SendStream, B: Buf, { stream .send_response( Response::builder() .status(StatusCode::IM_A_TEAPOT) .body(()) .unwrap(), ) .await .unwrap(); stream.finish().await.unwrap(); } h3-0.0.6/src/tests/mod.rs000064400000000000000000000110431046102023000132160ustar 00000000000000// This is to avoid an import loop: // h3 tests depend on having private access to the crate. // They must be part of the crate so as not to break privacy. // They also depend on h3_quinn which depends on the crate. // Having a dev-dependency on h3_quinn would work as far as cargo is // concerned, but quic traits wouldn't match between the "h3" crate that // comes before h3_quinn and the one that comes after and runs the tests #[path = "../../../h3-quinn/src/lib.rs"] mod h3_quinn; mod connection; mod request; use std::{ convert::TryInto, net::{Ipv6Addr, ToSocketAddrs}, sync::Arc, time::Duration, }; use bytes::Bytes; use quinn::crypto::rustls::{QuicClientConfig, QuicServerConfig}; use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use crate::quic; use h3_quinn::{quinn::TransportConfig, Connection}; pub fn init_tracing() { let _ = tracing_subscriber::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL) .with_test_writer() .try_init(); } pub struct Pair { port: u16, cert: CertificateDer<'static>, key: PrivateKeyDer<'static>, config: Arc, } impl Default for Pair { fn default() -> Self { let (cert, key) = build_certs(); Self { cert, key, port: 0, config: Arc::new(TransportConfig::default()), } } } impl Pair { pub fn with_timeout(&mut self, duration: Duration) { Arc::get_mut(&mut self.config) .unwrap() .max_idle_timeout(Some( duration.try_into().expect("idle timeout duration invalid"), )) .initial_rtt(Duration::from_millis(10)); } pub fn server_inner(&mut self) -> h3_quinn::Endpoint { let mut crypto = rustls::ServerConfig::builder_with_provider(Arc::new( rustls::crypto::ring::default_provider(), )) .with_protocol_versions(&[&rustls::version::TLS13]) .unwrap() .with_no_client_auth() .with_single_cert(vec![self.cert.clone()], self.key.clone_key()) .unwrap(); crypto.max_early_data_size = u32::MAX; crypto.alpn_protocols = vec![b"h3".to_vec()]; let mut server_config = h3_quinn::quinn::ServerConfig::with_crypto(Arc::new( QuicServerConfig::try_from(crypto).unwrap(), )); server_config.transport = self.config.clone(); let endpoint = h3_quinn::quinn::Endpoint::server(server_config, 
"[::]:0".parse().unwrap()).unwrap(); self.port = endpoint.local_addr().unwrap().port(); endpoint } pub fn server(&mut self) -> Server { let endpoint = self.server_inner(); Server { endpoint } } pub async fn client_inner(&self) -> quinn::Connection { let addr = (Ipv6Addr::LOCALHOST, self.port) .to_socket_addrs() .unwrap() .next() .unwrap(); let mut root_cert_store = rustls::RootCertStore::empty(); root_cert_store.add(self.cert.clone()).unwrap(); let mut crypto = rustls::ClientConfig::builder_with_provider(Arc::new( rustls::crypto::ring::default_provider(), )) .with_protocol_versions(&[&rustls::version::TLS13]) .unwrap() .with_root_certificates(root_cert_store) .with_no_client_auth(); crypto.enable_early_data = true; crypto.alpn_protocols = vec![b"h3".to_vec()]; let client_config = h3_quinn::quinn::ClientConfig::new(Arc::new( QuicClientConfig::try_from(crypto).unwrap(), )); let mut client_endpoint = h3_quinn::quinn::Endpoint::client("[::]:0".parse().unwrap()).unwrap(); client_endpoint.set_default_client_config(client_config); client_endpoint .connect(addr, "localhost") .unwrap() .await .unwrap() } pub async fn client(&self) -> h3_quinn::Connection { Connection::new(self.client_inner().await) } } pub struct Server { pub endpoint: h3_quinn::Endpoint, } impl Server { pub async fn next(&mut self) -> impl quic::Connection { Connection::new(self.endpoint.accept().await.unwrap().await.unwrap()) } } pub fn build_certs() -> (CertificateDer<'static>, PrivateKeyDer<'static>) { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); ( cert.cert.into(), PrivateKeyDer::Pkcs8(cert.key_pair.serialize_der().into()), ) } h3-0.0.6/src/tests/request.rs000064400000000000000000001356031046102023000141400ustar 00000000000000use std::time::Duration; use assert_matches::assert_matches; use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures_util::future; use http::{request, HeaderMap, Request, Response, StatusCode}; use crate::{ client, connection::ConnectionState, error::{Code, Error, Kind}, proto::{ coding::Encode, frame::{self, Frame, FrameType}, headers::Header, push::PushId, varint::VarInt, }, qpack, server, }; use super::h3_quinn; use super::{init_tracing, Pair}; #[tokio::test] async fn get() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); let response = request_stream.recv_response().await.expect("recv response"); assert_eq!(response.status(), StatusCode::OK); let body = request_stream .recv_data() .await .expect("recv data") .expect("body"); assert_eq!(body.chunk(), b"wonderful hypertext"); }; tokio::select! 
{ _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); request_stream .send_data("wonderful hypertext".into()) .await .expect("send_data"); request_stream.finish().await.expect("finish"); let _ = incoming_req.accept().await.unwrap(); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn get_with_trailers_unknown_content_type() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.recv_response().await.expect("recv response"); request_stream .recv_data() .await .expect("recv data") .expect("body"); assert!(request_stream.recv_data().await.unwrap().is_none()); let trailers = request_stream .recv_trailers() .await .expect("recv trailers") .expect("trailers none"); assert_eq!(trailers.get("trailer").unwrap(), &"value"); }; tokio::select! { _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); request_stream .send_data("wonderful hypertext".into()) .await .expect("send_data"); let mut trailers = HeaderMap::new(); trailers.insert("trailer", "value".parse().unwrap()); request_stream .send_trailers(trailers) .await .expect("send_trailers"); request_stream.finish().await.expect("finish"); let _ = incoming_req.accept().await.unwrap(); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn get_with_trailers_known_content_type() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.recv_response().await.expect("recv response"); request_stream .recv_data() .await .expect("recv data") .expect("body"); let trailers = request_stream .recv_trailers() .await .expect("recv trailers") .expect("trailers none"); assert_eq!(trailers.get("trailer").unwrap(), &"value"); }; tokio::select! 
{ _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); request_stream .send_data("wonderful hypertext".into()) .await .expect("send_data"); let mut trailers = HeaderMap::new(); trailers.insert("trailer", "value".parse().unwrap()); request_stream .send_trailers(trailers) .await .expect("send_trailers"); request_stream.finish().await.expect("finish"); let _ = incoming_req.accept().await.unwrap(); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn post() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream .send_data("wonderful json".into()) .await .expect("send_data"); request_stream.finish().await.expect("client finish"); request_stream.recv_response().await.expect("recv response"); }; tokio::select! { _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); let request_body = request_stream .recv_data() .await .expect("recv data") .expect("server recv body"); assert_eq!(request_body.chunk(), b"wonderful json"); request_stream.finish().await.expect("client finish"); // keep connection until client is finished let _ = incoming_req.accept().await.expect("accept"); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_response_from_server() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.finish().await.expect("client finish"); let response = request_stream.recv_response().await.unwrap(); assert_eq!( response.status(), StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE ); }; tokio::select! {biased; _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. let mut incoming_req = server::builder() .max_field_section_size(12) .build(conn) .await .unwrap(); let err_kind = incoming_req.accept().await.map(|_| ()).unwrap_err().kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 42, max_size: 12, .. 
} ); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_response_from_server_trailers() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream .send_data("wonderful json".into()) .await .expect("send_data"); let mut trailers = HeaderMap::new(); trailers.insert("trailer", "A".repeat(200).parse().unwrap()); request_stream .send_trailers(trailers) .await .expect("send trailers"); request_stream.finish().await.expect("client finish"); let _ = request_stream.recv_response().await; }; tokio::select! {biased; _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. let mut incoming_req = server::builder() .max_field_section_size(207) .build(conn) .await .unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); let _ = request_stream .recv_data() .await .expect("recv data") .expect("body"); let err_kind = request_stream.recv_trailers().await.unwrap_err().kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 239, max_size: 207, .. } ); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_client_error() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { let err = future::poll_fn(|cx| driver.poll_close(cx)) .await .unwrap_err(); match err.kind() { // The client never sends a data on the request stream Kind::Application { code, .. } => { assert_eq!(code, Code::H3_REQUEST_INCOMPLETE) } _ => panic!("unexpected error: {:?}", err), } }; let req_fut = async { // pretend client already received server's settings client .shared_state() .write("client") .peer_config .max_field_section_size = 12; let req = Request::get("http://localhost/salut").body(()).unwrap(); let err_kind = client .send_request(req) .await .map(|_| ()) .unwrap_err() .kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 179, max_size: 12, .. } ); }; tokio::join! 
{req_fut, drive_fut } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::builder() .max_field_section_size(12) .build(conn) .await .unwrap(); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_client_error_trailer() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await).await.expect("client init"); let drive_fut = async { let err = future::poll_fn(|cx| driver.poll_close(cx)) .await .unwrap_err(); match err.kind() { Kind::Timeout => (), _ => panic!("unexpected error: {:?}", err), } }; let req_fut = async { client .shared_state() .write("client") .peer_config .max_field_section_size = 200; let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream .send_data("wonderful json".into()) .await .expect("send_data"); let mut trailers = HeaderMap::new(); trailers.insert("trailer", "A".repeat(200).parse().unwrap()); let err_kind = request_stream .send_trailers(trailers) .await .unwrap_err() .kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 239, max_size: 200, .. } ); request_stream.finish().await.expect("client finish"); }; tokio::join! {req_fut,drive_fut}; }; let server_fut = async { let conn = server.next().await; //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An HTTP/3 implementation MAY impose a limit on the maximum size of //# the message header it will accept on an individual HTTP message. let mut incoming_req = server::builder() .max_field_section_size(207) .build(conn) .await .unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); let _ = request_stream .recv_data() .await .expect("recv data") .expect("body"); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_discard_from_client() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. let (mut driver, mut client) = client::builder() .max_field_section_size(12) // Don't send settings, so server doesn't know about the low max_field_section_size .send_settings(false) .build::<_, _, Bytes>(pair.client().await) .await .expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.finish().await.expect("client finish"); let err_kind = request_stream.recv_response().await.unwrap_err().kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 42, max_size: 12, .. } ); let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.finish().await.expect("client finish"); let _ = request_stream.recv_response().await.unwrap_err(); }; tokio::select! 
{biased; _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); // Keep sending: wait for the stream to be cancelled by the client let mut err = None; for _ in 0..100 { if let Err(e) = request_stream.send_data("some data".into()).await { err = Some(e); break; } tokio::time::sleep(Duration::from_millis(2)).await; } assert_matches!( err.as_ref().unwrap().kind(), Kind::Application { code: Code::H3_REQUEST_CANCELLED, .. } ); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_discard_from_client_trailers() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. let (mut driver, mut client) = client::builder() .max_field_section_size(200) // Don't send settings, so server doesn't know about the low max_field_section_size .send_settings(false) .build::<_, _, Bytes>(pair.client().await) .await .expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let mut request_stream = client .send_request(Request::get("http://localhost/salut").body(()).unwrap()) .await .expect("request"); request_stream.recv_response().await.expect("recv response"); request_stream.recv_data().await.expect("recv data"); let err_kind = request_stream.recv_trailers().await.unwrap_err().kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 539, max_size: 200, .. } ); request_stream.finish().await.expect("client finish"); }; tokio::select! {biased; _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .expect("send_response"); request_stream .send_data("wonderful hypertext".into()) .await .expect("send_data"); let mut trailers = HeaderMap::new(); trailers.insert("trailer", "value".repeat(100).parse().unwrap()); request_stream .send_trailers(trailers) .await .expect("send_trailers"); request_stream.finish().await.expect("finish"); let _ = incoming_req.accept().await; }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_server_error() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await) // header size limit faked for brevity .await .expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let req = Request::get("http://localhost/salut").body(()).unwrap(); let _ = client .send_request(req) .await .unwrap() .recv_response() .await; }; tokio::select! 
{ _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. // pretend the server received a smaller max_field_section_size incoming_req .shared_state() .write("server") .peer_config .max_field_section_size = 12; let err_kind = request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .map(|_| ()) .unwrap_err() .kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 42, max_size: 12, .. } ); }; tokio::join!(server_fut, client_fut); } #[tokio::test] async fn header_too_big_server_error_trailers() { init_tracing(); let mut pair = Pair::default(); let mut server = pair.server(); let client_fut = async { let (mut driver, mut client) = client::new(pair.client().await) // header size limit faked for brevity .await .expect("client init"); let drive_fut = async { future::poll_fn(|cx| driver.poll_close(cx)).await }; let req_fut = async { let req = Request::get("http://localhost/salut").body(()).unwrap(); let _ = client .send_request(req) .await .unwrap() .recv_response() .await; }; tokio::select! { _ = req_fut => (), _ = drive_fut => () } }; let server_fut = async { let conn = server.next().await; let mut incoming_req = server::Connection::new(conn).await.unwrap(); let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap(); request_stream .send_response( Response::builder() .status(200) .body(()) .expect("build response"), ) .await .unwrap(); request_stream .send_data("wonderful hypertext".into()) .await .expect("send_data"); //= https://www.rfc-editor.org/rfc/rfc9114#section-4.2.2 //= type=test //# An implementation that //# has received this parameter SHOULD NOT send an HTTP message header //# that exceeds the indicated size, as the peer will likely refuse to //# process it. // pretend the server already received client's settings incoming_req .shared_state() .write("write") .peer_config .max_field_section_size = 200; let mut trailers = HeaderMap::new(); trailers.insert("trailer", "value".repeat(100).parse().unwrap()); let err_kind = request_stream .send_trailers(trailers) .await .unwrap_err() .kind(); assert_matches!( err_kind, Kind::HeaderTooBig { actual_size: 539, max_size: 200, .. 
    };

    tokio::join!(server_fut, client_fut);
}

#[tokio::test]
async fn get_timeout_client_recv_response() {
    init_tracing();
    let mut pair = Pair::default();
    pair.with_timeout(Duration::from_millis(100));
    let mut server = pair.server();

    let client_fut = async {
        let (mut conn, mut client) = client::new(pair.client().await).await.expect("client init");
        let request_fut = async {
            let mut request_stream = client
                .send_request(Request::get("http://localhost/salut").body(()).unwrap())
                .await
                .expect("request");
            let response = request_stream.recv_response().await;
            assert_matches!(response.unwrap_err().kind(), Kind::Timeout);
        };

        let drive_fut = async move {
            let result = future::poll_fn(|cx| conn.poll_close(cx)).await;
            assert_matches!(result.unwrap_err().kind(), Kind::Timeout);
        };

        tokio::join!(drive_fut, request_fut);
    };

    let server_fut = async {
        let conn = server.next().await;
        let mut incoming_req = server::Connection::new(conn).await.unwrap();
        // _req must not be dropped, else the connection will be closed and the timeout
        // won't be triggered
        let _req = incoming_req.accept().await.expect("accept").unwrap();
        tokio::time::sleep(Duration::from_millis(500)).await;
    };

    tokio::join!(server_fut, client_fut);
}

#[tokio::test]
async fn get_timeout_client_recv_data() {
    init_tracing();
    let mut pair = Pair::default();
    pair.with_timeout(Duration::from_millis(200));
    let mut server = pair.server();

    let client_fut = async {
        let (mut conn, mut client) = client::new(pair.client().await).await.expect("client init");
        let request_fut = async {
            let mut request_stream = client
                .send_request(Request::get("http://localhost/salut").body(()).unwrap())
                .await
                .expect("request");
            let _ = request_stream.recv_response().await.unwrap();
            let data = request_stream.recv_data().await;
            assert_matches!(data.map(|_| ()).unwrap_err().kind(), Kind::Timeout);
        };

        let drive_fut = async move {
            let result = future::poll_fn(|cx| conn.poll_close(cx)).await;
            assert_matches!(result.unwrap_err().kind(), Kind::Timeout);
        };

        tokio::join!(drive_fut, request_fut);
    };

    let server_fut = async {
        let conn = server.next().await;
        let mut incoming_req = server::Connection::new(conn).await.unwrap();
        let (_request, mut request_stream) = incoming_req.accept().await.expect("accept").unwrap();
        request_stream
            .send_response(
                Response::builder()
                    .status(200)
                    .body(())
                    .expect("build response"),
            )
            .await
            .expect("send_response");
        tokio::time::sleep(Duration::from_millis(500)).await;
    };

    tokio::join!(server_fut, client_fut);
}

#[tokio::test]
async fn get_timeout_server_accept() {
    init_tracing();
    let mut pair = Pair::default();
    pair.with_timeout(Duration::from_millis(200));
    let mut server = pair.server();

    let client_fut = async {
        let (mut conn, _client) = client::new(pair.client().await).await.expect("client init");
        let request_fut = async {
            tokio::time::sleep(Duration::from_millis(500)).await;
        };

        let drive_fut = async move {
            let result = future::poll_fn(|cx| conn.poll_close(cx)).await;
            assert_matches!(result.unwrap_err().kind(), Kind::Timeout);
        };

        tokio::join!(drive_fut, request_fut);
    };

    let server_fut = async {
        let conn = server.next().await;
        let mut incoming_req = server::Connection::new(conn).await.unwrap();
        assert_matches!(
            incoming_req.accept().await.map(|_| ()).unwrap_err().kind(),
            Kind::Timeout
        );
    };

    tokio::join!(server_fut, client_fut);
}

#[tokio::test]
async fn post_timeout_server_recv_data() {
    init_tracing();
    let mut pair = Pair::default();
    pair.with_timeout(Duration::from_millis(100));
    let mut server = pair.server();

    let client_fut = async {
        let (_conn, mut client) =
            client::new(pair.client().await).await.expect("client init");
        let _request_stream = client
            .send_request(Request::post("http://localhost/salut").body(()).unwrap())
            .await
            .expect("request");
        tokio::time::sleep(Duration::from_millis(500)).await;
    };

    let server_fut = async {
        let conn = server.next().await;
        let mut incoming_req = server::Connection::new(conn).await.unwrap();
        let (_, mut req_stream) = incoming_req.accept().await.expect("accept").unwrap();
        assert_matches!(
            req_stream.recv_data().await.map(|_| ()).unwrap_err().kind(),
            Kind::Timeout
        );
    };

    tokio::join!(server_fut, client_fut);
}

// 4.1. HTTP Message Exchanges
// An HTTP message (request or response) consists of:
// * the header section, sent as a single HEADERS frame (see Section 7.2.2),
// * optionally, the content, if present, sent as a series of DATA frames (see Section 7.2.1),
// * and optionally, the trailer section, if present, sent as a single HEADERS frame.

#[tokio::test]
async fn request_valid_one_header() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
    })
    .await;
}

#[tokio::test]
async fn request_valid_header_data() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_valid_header_data_trailer() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
    })
    .await;
}

#[tokio::test]
async fn request_valid_header_multiple_data_trailer() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
    })
    .await;
}

#[tokio::test]
async fn request_valid_header_trailer() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
    })
    .await;
}

// Frames of unknown types (Section 9), including reserved frames (Section
// 7.2.8) MAY be sent on a request or push stream before, after, or interleaved
// with other frames described in this section.
#[tokio::test]
async fn request_valid_unknown_frame_before() {
    request_sequence_ok(|mut buf| {
        unknown_frame_encode(buf);
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_after_one_header() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        unknown_frame_encode(buf);
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_interleaved_after_header() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        unknown_frame_encode(buf);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_interleaved_between_data() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        unknown_frame_encode(buf);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_interleaved_after_data() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        unknown_frame_encode(buf);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_interleaved_before_trailers() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        unknown_frame_encode(buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
    })
    .await;
}

#[tokio::test]
async fn request_valid_unknown_frame_after_trailers() {
    request_sequence_ok(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
        unknown_frame_encode(buf);
    })
    .await;
}

//= https://www.rfc-editor.org/rfc/rfc9114#section-4.1
//= type=test
//# Receipt of an invalid sequence of frames MUST be treated as a
//# connection error of type H3_FRAME_UNEXPECTED.
fn invalid_request_frames() -> Vec<Frame<Bytes>> {
    vec![
        Frame::CancelPush(PushId(0)),
        Frame::Settings(frame::Settings::default()),
        Frame::Goaway(VarInt(1)),
        Frame::MaxPushId(PushId(1)),
    ]
}

#[tokio::test]
async fn request_invalid_frame_first() {
    //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.3
    //= type=test
    //# Receiving a
    //# CANCEL_PUSH frame on a stream other than the control stream MUST be
    //# treated as a connection error of type H3_FRAME_UNEXPECTED.

    //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.4
    //= type=test
    //# If an endpoint receives a SETTINGS frame on a different
    //# stream, the endpoint MUST respond with a connection error of type
    //# H3_FRAME_UNEXPECTED.

    //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.6
    //= type=test
    //# A client MUST treat a GOAWAY frame on a stream other than
    //# the control stream as a connection error of type H3_FRAME_UNEXPECTED.

    //= https://www.rfc-editor.org/rfc/rfc9114#section-7.2.7
    //= type=test
    //# The MAX_PUSH_ID frame is always sent on the control stream.  Receipt
    //# of a MAX_PUSH_ID frame on any other stream MUST be treated as a
    //# connection error of type H3_FRAME_UNEXPECTED.
    for frame in invalid_request_frames() {
        request_sequence_unexpected(|mut buf| frame.encode(&mut buf)).await;
    }
}

#[tokio::test]
async fn request_invalid_frame_after_header() {
    for frame in invalid_request_frames() {
        request_sequence_unexpected(|mut buf| {
            request_encode(
                &mut buf,
                Request::post("http://localhost/salut").body(()).unwrap(),
            );
            frame.encode(&mut buf);
        })
        .await;
    }
}

#[tokio::test]
async fn request_invalid_frame_after_data() {
    for frame in invalid_request_frames() {
        request_sequence_unexpected(|mut buf| {
            request_encode(
                &mut buf,
                Request::post("http://localhost/salut").body(()).unwrap(),
            );
            Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
            frame.encode(&mut buf);
        })
        .await;
    }
}

#[tokio::test]
async fn request_invalid_frame_after_trailers() {
    for frame in invalid_request_frames() {
        request_sequence_unexpected(|mut buf| {
            request_encode(
                &mut buf,
                Request::post("http://localhost/salut").body(()).unwrap(),
            );
            Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
            let mut trailers = HeaderMap::new();
            trailers.insert("trailer", "value".parse().unwrap());
            trailers_encode(buf, trailers);
            frame.encode(&mut buf);
        })
        .await;
    }
}

#[tokio::test]
async fn request_invalid_data_after_trailers() {
    request_sequence_unexpected(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_invalid_data_first() {
    request_sequence_unexpected(|mut buf| {
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
    })
    .await;
}

#[tokio::test]
async fn request_invalid_two_trailers() {
    request_sequence_unexpected(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers.clone());
        trailers_encode(buf, trailers);
    })
    .await;
}

#[tokio::test]
async fn request_invalid_trailing_byte() {
    request_sequence_frame_error(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        Frame::Data(Bytes::from("fada")).encode_with_payload(&mut buf);
        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);

        //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1
        //= type=test
        //# A frame payload that contains additional bytes
        //# after the identified fields or a frame payload that terminates before
        //# the end of the identified fields MUST be treated as a connection
        //# error of type H3_FRAME_ERROR.
        buf.put_u8(255);
    })
    .await;
}

#[tokio::test]
async fn request_invalid_data_frame_length_too_large() {
    request_sequence_frame_error(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        FrameType::DATA.encode(&mut buf);

        //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1
        //= type=test
        //# In particular, redundant length
        //# encodings MUST be verified to be self-consistent; see Section 10.8.
        VarInt::from(5u32).encode(&mut buf);
        buf.put_slice(b"fada");

        let mut trailers = HeaderMap::new();
        trailers.insert("trailer", "value".parse().unwrap());
        trailers_encode(buf, trailers);
    })
    .await;
}

#[tokio::test]
async fn request_invalid_data_frame_length_too_short() {
    request_sequence_frame_error(|mut buf| {
        request_encode(
            &mut buf,
            Request::post("http://localhost/salut").body(()).unwrap(),
        );
        FrameType::DATA.encode(&mut buf);

        //= https://www.rfc-editor.org/rfc/rfc9114#section-7.1
        //= type=test
        //# In particular, redundant length
        //# encodings MUST be verified to be self-consistent; see Section 10.8.
        VarInt::from(3u32).encode(&mut buf);
        buf.put_slice(b"fada");
    })
    .await;
}

// Helpers

fn request_encode<B: BufMut>(buf: &mut B, req: http::Request<()>) {
    let (parts, _) = req.into_parts();
    let request::Parts {
        method,
        uri,
        headers,
        extensions,
        ..
    } = parts;
    let headers = Header::request(method, uri, headers, extensions).unwrap();
    let mut block = BytesMut::new();
    qpack::encode_stateless(&mut block, headers).unwrap();
    Frame::headers(block).encode_with_payload(buf);
}

fn trailers_encode<B: BufMut>(buf: &mut B, fields: HeaderMap) {
    let headers = Header::trailer(fields);
    let mut block = BytesMut::new();
    qpack::encode_stateless(&mut block, headers).unwrap();
    Frame::headers(block).encode_with_payload(buf);
}

fn unknown_frame_encode<B: BufMut>(buf: &mut B) {
    // frame of unknown type 22, length 4, followed by a 4-byte payload
    buf.put_slice(&[22, 4, 0, 255, 128, 0]);
}

async fn request_sequence_ok<F>(request: F)
where
    F: Fn(&mut BytesMut),
{
    request_sequence_check(request, |res| assert_matches!(res, Ok(_))).await;
}

async fn request_sequence_unexpected<F>(request: F)
where
    F: Fn(&mut BytesMut),
{
    request_sequence_check(request, |err| {
        //= https://www.rfc-editor.org/rfc/rfc9114#section-4.1
        //= type=test
        //# Receipt of an invalid sequence of frames MUST be treated as a
        //# connection error of type H3_FRAME_UNEXPECTED.
        assert_matches!(
            err.unwrap_err().kind(),
            Kind::Application {
                code: Code::H3_FRAME_UNEXPECTED,
                ..
            }
        )
    })
    .await;
}

async fn request_sequence_frame_error<F>(request: F)
where
    F: Fn(&mut BytesMut),
{
    request_sequence_check(request, |err| {
        assert_matches!(
            err.unwrap_err().kind(),
            Kind::Application {
                code: Code::H3_FRAME_ERROR,
                ..
            }
        )
    })
    .await;
}

// Sends a raw, hand-encoded request stream to the server and runs `check`
// against the outcome observed on both the server side and the client driver.
async fn request_sequence_check<F, FC>(request: F, check: FC)
where
    F: Fn(&mut BytesMut),
    FC: Fn(Result<(), Error>),
{
    init_tracing();
    let mut pair = Pair::default();
    let mut server = pair.server();

    let client_fut = async {
        let connection = pair.client_inner().await;
        let (mut req_send, mut req_recv) = connection.open_bi().await.unwrap();

        let mut buf = BytesMut::new();
        request(&mut buf);
        req_send.write_all(&buf[..]).await.unwrap();
        req_send.finish().unwrap();

        let res = req_recv
            .read(&mut buf)
            .await
            .map_err(Into::::into)
            .map_err(Into::::into)
            .map(|_| ());
        check(res);

        let (mut driver, _send) = client::new(h3_quinn::Connection::new(connection))
            .await
            .unwrap();
        let res = future::poll_fn(|cx| driver.poll_close(cx))
            .await
            .map_err(Into::::into)
            .map(|_| ());
        check(res);
    };

    let server_fut = async {
        let conn = server.next().await;
        let mut incoming = server::Connection::new(conn).await.unwrap();
        let (_, mut stream) = incoming
            .accept()
            .await?
            .expect("request stream end unexpected");
        while stream.recv_data().await?.is_some() {}
        stream.recv_trailers().await?;
        Result::<(), Error>::Ok(())
    };

    tokio::select! {
        res = server_fut => check(res),
        _ = client_fut => panic!("client resolved first")
    };
}
h3-0.0.6/src/webtransport/mod.rs000064400000000000000000000000571046102023000146110ustar 00000000000000mod session_id;
pub use session_id::SessionId;
h3-0.0.6/src/webtransport/session_id.rs000064400000000000000000000022411046102023000161660ustar 00000000000000use std::convert::TryFrom;

use crate::proto::{
    coding::{Decode, Encode},
    stream::{InvalidStreamId, StreamId},
    varint::VarInt,
};

/// Identifies a WebTransport session
///
/// The session id is the same as the stream id of the CONNECT request.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SessionId(u64);

impl SessionId {
    pub(crate) fn from_varint(id: VarInt) -> SessionId {
        Self(id.0)
    }

    pub(crate) fn into_inner(self) -> u64 {
        self.0
    }
}

impl TryFrom<u64> for SessionId {
    type Error = InvalidStreamId;

    fn try_from(v: u64) -> Result<Self, Self::Error> {
        if v > VarInt::MAX.0 {
            return Err(InvalidStreamId(v));
        }
        Ok(Self(v))
    }
}

impl Encode for SessionId {
    fn encode(&self, buf: &mut B) {
        VarInt::from_u64(self.0).unwrap().encode(buf);
    }
}

impl Decode for SessionId {
    fn decode(buf: &mut B) -> crate::proto::coding::Result<Self> {
        Ok(Self(VarInt::decode(buf)?.into_inner()))
    }
}

impl From<StreamId> for SessionId {
    fn from(value: StreamId) -> Self {
        Self(value.index())
    }
}
h3-0.0.6/tests/examples_server_client.rs000064400000000000000000000026301046102023000164140ustar 00000000000000use std::{
    path::PathBuf,
    process::{Child, Command, Stdio},
};

struct ChildGuard(Child);

impl Drop for ChildGuard {
    fn drop(&mut self) {
        if std::thread::panicking() {
            println!("Cleaning up child process while unwinding");
            if let Err(e) = self.0.kill() {
                println!("Failed to kill child process: {}", e);
            }
        }
    }
}

#[test]
fn server_and_client_should_connect_successfully() {
    // A little hack since CARGO_BIN_EXE_ is not set for examples
    let mut command = PathBuf::from(std::env!("CARGO_MANIFEST_DIR"));
    command.push("../target/debug/examples/server");

    let server = Command::new(command.as_path())
        .arg("--listen=[::]:4433")
        .arg("--cert=../examples/server.cert")
        .arg("--key=../examples/server.key")
        .spawn()
        .expect("Failed to run server example");
    let mut server = ChildGuard(server);
    assert!(server.0.stderr.is_none(), "Failed to listen on localhost");

    command.pop();
    command.push("client");
    assert!(
        Command::new(command)
            .arg("https://localhost:4433")
            .arg("--ca=../examples/ca.cert")
            .stderr(Stdio::null())
            .status()
            .expect("Failed to run client example")
            .success(),
        "Failed to connect to server"
    );

    server.0.kill().expect("Failed to terminate server");
}