h2-0.1.26/.gitignore010066400017500001750000000002201330331306600123140ustar0000000000000000target Cargo.lock h2spec # These are backup files generated by rustfmt **/*.rs.bk # Files generated by honggfuzz hfuzz_target hfuzz_workspace h2-0.1.26/.travis.yml010066400017500001750000000034451350175035200124540ustar0000000000000000--- language: rust dist: trusty sudo: false cache: cargo: true apt: true addons: apt: packages: - libssl-dev matrix: include: - rust: nightly - rust: stable before_deploy: cargo doc --no-deps allow_failures: - rust: nightly before_script: - cargo clean script: # Build without unstable flag - cargo build # Test examples in README. - rustdoc --test README.md -L target/debug -L target/debug/deps # Check with unstable flag - cargo check --features unstable # Run tests, this includes lib tests and doc tests - RUST_TEST_THREADS=1 cargo test # Run integration tests - cargo test -p h2-tests # Run h2spec on stable - if [ "${TRAVIS_RUST_VERSION}" = "stable" ]; then ./ci/h2spec.sh; fi # Check minimal versions - if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then cargo clean; cargo check -Z minimal-versions; fi deploy: provider: pages skip_cleanup: true github_token: $GH_TOKEN target_branch: gh-pages local_dir: target/doc on: branch: master repo: hyperium/h2 rust: stable env: global: secure: LkjG3IYPu7GY7zuMdYyLtdvjR4a6elX6or1Du7LTBz4JSlQXYAaj6DxhfZfm4d1kECIlnJJ2T21BqDoJDnld5lLu6VcXQ2ZEo/2f2k77GQ/9w3erwcDtqxK02rPoslFNzSd2SCdafjGKdbcvGW2HVBEu5gYEfOdu1Cdy6Av3+vLPk5To50khBQY90Kk+cmSd7J0+CHw/wSXnVgIVoO4742+aj5pxZQLx3lsi3ZPzIh1VL4QOUlaI98ybrCVNxADQCeXRRDzj0d8NzeKlkm8eXpgpiMVRJWURMa3rU2sHU9wh+YjMyoqGZWv2LlzG5LBqde3RWPQ99ebxVhlly6RgEom8yvZbavcGJ4BA0OjviLYAMb1Wjlu1paLZikEqlvTojhpzz3PVuIBZHl+rUgnUfkuhfmMzTBJTPHPMP0GtqpIAGpyRwbv56DquuEiubl70FZmz52sXGDseoABv9jQ4SNJrDrA+bfIWkPpWwqnKaWIgGPl0n3GKeceQM3RshpaE59awYUDS4ybjtacb2Fr99fx25mTO2W4x5hcDqAvBohxRPXgRB2y0ZmrcJyCV3rfkiGFUK7H8ZBqNQ6GG/GYilgj40q6TgcnXxUxyKkykDiS9VU0QAjAwz0pkCNipJ+ImS1j0LHEOcKMKZ7OsGOuSqBmF24ewBs+XzXY7dTnM/Xc= notifications: email: on_success: never h2-0.1.26/CHANGELOG.md010066400017500001750000000074571351645762400121770ustar0000000000000000# 0.1.26 (July 25, 2019) * Fix handling of `SETTING_HEADER_TABLE_SIZE` (#387). # 0.1.25 (June 28, 2019) * Fix to send a `RST_STREAM` instead of `GOAWAY` if receiving a frame on a previously closed stream. * Fix receiving trailers without an end-stream flag to be a stream error instead of connection error. # 0.1.24 (June 17, 2019) * Fix server wrongly rejecting requests that don't have an `:authority` header (#372). # 0.1.23 (June 4, 2019) * Fix leaking of received DATA frames if the `RecvStream` is never polled (#368). # 0.1.22 (June 3, 2019) * Fix rare panic when remote sends `RST_STREAM` or `GOAWAY` for a stream pending window capacity (#364). # 0.1.21 (May 30, 2019) * Fix write loop when a header didn't fit in write buffer. # 0.1.20 (May 16, 2019) * Fix lifetime conflict for older compilers. # 0.1.19 (May 15, 2019) * Fix rare crash if `CONTINUATION` frame resumed in the middle of headers with the same name. * Fix HPACK encoder using an old evicted index for repeated header names. # 0.1.18 (April 9, 2019) * Fix `server::Connection::abrupt_shutdown` to no longer return the same error the user sent (#352). # 0.1.17 (March 12, 2019) * Add user PING support (#346). * Fix notifying a `RecvStream` task if locally sending a reset. * Fix connections "hanging" when all handles are dropped but some streams had been reset. # 0.1.16 (January 24, 2019) * Log header values when malformed (#342). 
# 0.1.15 (January 12, 2019) * Fix race condition bug related to shutting down the client (#338). # 0.1.14 (December 5, 2018) * Fix closed streams to always return window capacity to the connection (#334). * Fix locking when `Debug` printing an `OpaqueStreamRef` (#333). * Fix inverted split for DATA frame padding (#330). * Reduce `Debug` noise for `Frame` (#329). # 0.1.13 (October 16, 2018) * Add client support for Push Promises (#314). * Expose `io::Error` from `h2::Error` (#311) * Misc bug fixes (#304, #309, #319, #313, #320). # 0.1.12 (August 8, 2018) * Fix initial send window size (#301). * Fix panic when calling `reserve_capacity` after connection has been closed (#302). * Fix handling of incoming `SETTINGS_INITIAL_WINDOW_SIZE`. (#299) # 0.1.11 (July 31, 2018) * Add `stream_id` accessors to public API types (#292). * Fix potential panic when dropping clients (#295). * Fix busy loop when shutting down server (#296). # 0.1.10 (June 15, 2018) * Fix potential panic in `SendRequest::poll_ready()` (#281). * Fix infinite loop on reset connection during prefix (#285). # 0.1.9 (May 31, 2018) * Add `poll_reset` to `SendResponse` and `SendStream` (#279). # 0.1.8 (May 23, 2018) * Fix client bug when max streams is reached. (#277) # 0.1.7 (May 14, 2018) * Misc bug fixes (#266, #273, #261, #275). # 0.1.6 (April 24, 2018) * Misc bug fixes related to stream management (#258, #260, #262). # 0.1.5 (April 6, 2018) * Fix the `last_stream_id` sent during graceful GOAWAY (#254). # 0.1.4 (April 5, 2018) * Add `initial_connection_window_size` to client and server `Builder`s (#249). * Add `graceful_shutdown` and `abrupt_shutdown` to `server::Connection`, deprecating `close_connection` (#250). # 0.1.3 (March 28, 2018) * Allow configuring max streams before the peer's settings frame is received (#242). * Fix HPACK decoding bug with regards to large literals (#244). * Fix state transition bug triggered by receiving a RST_STREAM frame (#247). # 0.1.2 (March 13, 2018) * Fix another bug relating to resetting connections and reaching max concurrency (#238). # 0.1.1 (March 8, 2018) * When streams are dropped, close the connection (#222). * Notify send tasks on connection error (#231). * Fix bug relating to resetting connections and reaching max concurrency (#235). * Normalize HTTP request path to satisfy HTTP/2.0 specification (#228). * Update internal dependencies. # 0.1.0 (Jan 12, 2018) * Initial release h2-0.1.26/CONTRIBUTING.md010066400017500001750000000047171350175035200125770ustar0000000000000000# Contributing to _h2_ # :balloon: Thanks for your help improving the project! ## Getting Help ## If you have a question about the h2 library or have encountered problems using it, you may [file an issue][issue] or ask ask a question on the [Tokio Gitter][gitter]. ## Submitting a Pull Request ## Do you have an improvement? 1. Submit an [issue][issue] describing your proposed change. 2. We will try to respond to your issue promptly. 3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository. 4. Submit a pull request against this repo's `master` branch. 6. Your branch may be merged once all configured checks pass, including: - Code review has been completed. - The branch has passed tests in CI. ## Committing ## When initially submitting a pull request, we prefer a single squashed commit. It is preferable to split up contributions into multiple pull requests if the changes are unrelated. 
All pull requests are squashed when merged, but squashing yourself gives you better control over the commit message. After the pull request is submitted, all changes should be done in separate commits. This makes reviewing the evolution of the pull request easier. We will squash all the changes into a single commit when we merge the pull request. ### Commit messages ### Finalized commit messages should be in the following format: ``` Subject Problem Solution Validation ``` #### Subject #### - one line, <= 50 characters - describe what is done; not the result - use the active voice - capitalize first word and proper nouns - do not end in a period — this is a title/subject - reference the github issue by number ##### Examples ##### ``` bad: server disconnects should cause dst client disconnects. good: Propagate disconnects from source to destination ``` ``` bad: support tls servers good: Introduce support for server-side TLS (#347) ``` #### Problem #### Explain the context and why you're making that change. What is the problem you're trying to solve? In some cases there is not a problem and this can be thought of as being the motivation for your change. #### Solution #### Describe the modifications you've made. #### Validation #### Describe the testing you've done to validate your change. Performance-related changes should include before- and after- benchmark results. [issue]: https://github.com/hyperium/h2/issues/new [gitter]: https://gitter.im/tokio-rs/tokio h2-0.1.26/Cargo.toml.orig010066400017500001750000000030371351645762400132430ustar0000000000000000[package] name = "h2" # When releasing to crates.io: # - Update doc URL. # - html_root_url. # - Update CHANGELOG.md. # - Create git tag version = "0.1.26" license = "MIT" authors = ["Carl Lerche "] description = "An HTTP/2.0 client and server" documentation = "https://docs.rs/h2/0.1.25/h2/" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] [badges.travis-ci] repository = "hyperium/h2" branch = "master" [features] # Enables **unstable** APIs. Any API exposed by this feature has no backwards # compatibility guarantees. In other words, you should not use this feature for # anything besides experimentation. Definitely **do not** publish a crate that # depends on this feature. 
unstable = [] [workspace] members = [ "tests/h2-fuzz", "tests/h2-tests", "tests/h2-support", "util/genfixture", "util/genhuff", ] [dependencies] futures = "0.1" tokio-io = "0.1.4" bytes = "0.4.7" http = "0.1.8" byteorder = "1.0" log = "0.4.1" fnv = "1.0.5" slab = "0.4.0" string = "0.2" indexmap = "1.0" [dev-dependencies] # Fuzzing quickcheck = { version = "0.4.1", default-features = false } rand = "0.3.15" # HPACK fixtures hex = "0.2.0" walkdir = "1.0.0" serde = "1.0.0" serde_json = "1.0.0" # Akamai example tokio = "0.1.8" env_logger = { version = "0.5.3", default-features = false } rustls = "0.12" tokio-rustls = "0.5.0" webpki = "0.18" webpki-roots = "0.14" h2-0.1.26/Cargo.toml0000644000000037620000000000000075000ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "h2" version = "0.1.26" authors = ["Carl Lerche "] exclude = ["fixtures/**", "ci/**"] description = "An HTTP/2.0 client and server" documentation = "https://docs.rs/h2/0.1.25/h2/" readme = "README.md" keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] license = "MIT" repository = "https://github.com/hyperium/h2" [dependencies.byteorder] version = "1.0" [dependencies.bytes] version = "0.4.7" [dependencies.fnv] version = "1.0.5" [dependencies.futures] version = "0.1" [dependencies.http] version = "0.1.8" [dependencies.indexmap] version = "1.0" [dependencies.log] version = "0.4.1" [dependencies.slab] version = "0.4.0" [dependencies.string] version = "0.2" [dependencies.tokio-io] version = "0.1.4" [dev-dependencies.env_logger] version = "0.5.3" default-features = false [dev-dependencies.hex] version = "0.2.0" [dev-dependencies.quickcheck] version = "0.4.1" default-features = false [dev-dependencies.rand] version = "0.3.15" [dev-dependencies.rustls] version = "0.12" [dev-dependencies.serde] version = "1.0.0" [dev-dependencies.serde_json] version = "1.0.0" [dev-dependencies.tokio] version = "0.1.8" [dev-dependencies.tokio-rustls] version = "0.5.0" [dev-dependencies.walkdir] version = "1.0.0" [dev-dependencies.webpki] version = "0.18" [dev-dependencies.webpki-roots] version = "0.14" [features] unstable = [] [badges.travis-ci] branch = "master" repository = "hyperium/h2" h2-0.1.26/LICENSE010066400017500001750000000020361321601246600113430ustar0000000000000000Copyright (c) 2017 h2 authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. h2-0.1.26/README.md010066400017500001750000000036711351636560000116270ustar0000000000000000# H2 A Tokio aware, HTTP/2.0 client & server implementation for Rust. [![Build Status](https://travis-ci.org/hyperium/h2.svg?branch=master)](https://travis-ci.org/hyperium/h2) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2) [![Documentation](https://docs.rs/h2/badge.svg)][dox] More information about this crate can be found in the [crate documentation][dox]. [dox]: https://docs.rs/h2 ## Features * Client and server HTTP/2.0 implementation. * Implements the full HTTP/2.0 specification. * Passes [h2spec](https://github.com/summerwind/h2spec). * Focus on performance and correctness. * Built on [Tokio](https://tokio.rs). ## Non goals This crate is intended to only be an implementation of the HTTP/2.0 specification. It does not handle: * Managing TCP connections * HTTP 1.0 upgrade * TLS * Any feature not described by the HTTP/2.0 specification. The intent is that this crate will eventually be used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features. ## Usage To use `h2`, first add this to your `Cargo.toml`: ```toml [dependencies] h2 = "0.1" ``` Next, add this to your crate: ```rust extern crate h2; use h2::server::Connection; fn main() { // ... } ``` ## FAQ **How does h2 compare to [solicit] or [rust-http2]?** The h2 library has implemented more of the details of the HTTP/2.0 specification than any other Rust library. It also passes the [h2spec] set of tests. The h2 library is rapidly approaching "production ready" quality. Besides the above, Solicit is built on blocking I/O and does not appear to be actively maintained. **Is this an embedded Java SQL database engine?** [No](http://www.h2database.com). [solicit]: https://github.com/mlalic/solicit [rust-http2]: https://github.com/stepancheg/rust-http2 [h2spec]: https://github.com/summerwind/h2spec h2-0.1.26/examples/akamai.rs010066400017500001750000000046031340203217400137420ustar0000000000000000extern crate env_logger; extern crate futures; extern crate h2; extern crate http; extern crate rustls; extern crate tokio; extern crate tokio_rustls; extern crate webpki; extern crate webpki_roots; use h2::client; use futures::*; use http::{Method, Request}; use tokio::net::TcpStream; use rustls::Session; use tokio_rustls::ClientConfigExt; use webpki::DNSNameRef; use std::net::ToSocketAddrs; const ALPN_H2: &str = "h2"; pub fn main() { let _ = env_logger::try_init(); let tls_client_config = std::sync::Arc::new({ let mut c = rustls::ClientConfig::new(); c.root_store .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); c.alpn_protocols.push(ALPN_H2.to_owned()); c }); // Sync DNS resolution. 
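// `to_socket_addrs` uses the standard library's blocking resolver, so the
// lookup happens here on the main thread before the Tokio runtime is
// started with `tokio::run` at the end of `main`.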
let addr = "http2.akamai.com:443" .to_socket_addrs() .unwrap() .next() .unwrap(); println!("ADDR: {:?}", addr); let tcp = TcpStream::connect(&addr); let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap(); let tcp = tcp.then(move |res| { let tcp = res.unwrap(); tls_client_config .connect_async(dns_name, tcp) .then(|res| { let tls = res.unwrap(); { let (_, session) = tls.get_ref(); let negotiated_protocol = session.get_alpn_protocol(); assert_eq!(Some(ALPN_H2), negotiated_protocol.as_ref().map(|x| &**x)); } println!("Starting client handshake"); client::handshake(tls) }) .then(|res| { let (mut client, h2) = res.unwrap(); let request = Request::builder() .method(Method::GET) .uri("https://http2.akamai.com/") .body(()) .unwrap(); let (response, _) = client.send_request(request, true).unwrap(); let stream = response.and_then(|response| { let (_, body) = response.into_parts(); body.for_each(|chunk| { println!("RX: {:?}", chunk); Ok(()) }) }); h2.join(stream) }) }) .map_err(|e| eprintln!("ERROR: {:?}", e)) .map(|((), ())| ()); tokio::run(tcp); } h2-0.1.26/examples/client.rs010066400017500001750000000044211340203217400137730ustar0000000000000000extern crate env_logger; extern crate futures; extern crate h2; extern crate http; extern crate tokio; use h2::client; use h2::RecvStream; use futures::*; use http::*; use tokio::net::TcpStream; struct Process { body: RecvStream, trailers: bool, } impl Future for Process { type Item = (); type Error = h2::Error; fn poll(&mut self) -> Poll<(), h2::Error> { loop { if self.trailers { let trailers = try_ready!(self.body.poll_trailers()); println!("GOT TRAILERS: {:?}", trailers); return Ok(().into()); } else { match try_ready!(self.body.poll()) { Some(chunk) => { println!("GOT CHUNK = {:?}", chunk); }, None => { self.trailers = true; }, } } } } } pub fn main() { let _ = env_logger::try_init(); let tcp = TcpStream::connect(&"127.0.0.1:5928".parse().unwrap()); let tcp = tcp.then(|res| { let tcp = res.unwrap(); client::handshake(tcp) }).then(|res| { let (mut client, h2) = res.unwrap(); println!("sending request"); let request = Request::builder() .uri("https://http2.akamai.com/") .body(()) .unwrap(); let mut trailers = HeaderMap::new(); trailers.insert("zomg", "hello".parse().unwrap()); let (response, mut stream) = client.send_request(request, false).unwrap(); // send trailers stream.send_trailers(trailers).unwrap(); // Spawn a task to run the conn... 
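// The `Connection` future (`h2`) drives all I/O for the socket; neither the
// request just sent nor the response future below makes progress unless it
// is polled, so it is handed off to the runtime here.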
tokio::spawn(h2.map_err(|e| println!("GOT ERR={:?}", e))); response .and_then(|response| { println!("GOT RESPONSE: {:?}", response); // Get the body let (_, body) = response.into_parts(); Process { body, trailers: false, } }) .map_err(|e| { println!("GOT ERR={:?}", e); }) }); tokio::run(tcp); } h2-0.1.26/examples/server.rs010066400017500001750000000036561340203217400140340ustar0000000000000000extern crate bytes; extern crate env_logger; extern crate futures; extern crate h2; extern crate http; extern crate tokio; use h2::server; use bytes::*; use futures::*; use http::*; use tokio::net::TcpListener; pub fn main() { let _ = env_logger::try_init(); let listener = TcpListener::bind(&"127.0.0.1:5928".parse().unwrap()).unwrap(); println!("listening on {:?}", listener.local_addr()); let server = listener.incoming().for_each(move |socket| { // let socket = io_dump::Dump::to_stdout(socket); let connection = server::handshake(socket) .and_then(|conn| { println!("H2 connection bound"); conn.for_each(|(request, mut respond)| { println!("GOT request: {:?}", request); let response = Response::builder().status(StatusCode::OK).body(()).unwrap(); let mut send = match respond.send_response(response, false) { Ok(send) => send, Err(e) => { println!(" error respond; err={:?}", e); return Ok(()); } }; println!(">>>> sending data"); if let Err(e) = send.send_data(Bytes::from_static(b"hello world"), true) { println!(" -> err={:?}", e); } Ok(()) }) }) .and_then(|_| { println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); Ok(()) }) .then(|res| { if let Err(e) = res { println!(" -> err={:?}", e); } Ok(()) }); tokio::spawn(Box::new(connection)); Ok(()) }) .map_err(|e| eprintln!("accept error: {}", e)); tokio::run(server); } h2-0.1.26/src/client.rs010066400017500001750000001472241351644257100127710ustar0000000000000000//! Client implementation of the HTTP/2.0 protocol. //! //! # Getting started //! //! Running an HTTP/2.0 client requires the caller to establish the underlying //! connection as well as get the connection to a state that is ready to begin //! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote //! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], which will //! begin the [HTTP/2.0 handshake]. This returns a future that completes once //! the handshake process is performed and HTTP/2.0 streams may be initialized. //! //! [`handshake`] uses default configuration values. There are a number of //! settings that can be changed by using [`Builder`] instead. //! //! Once the handshake future completes, the caller is provided with a //! [`Connection`] instance and a [`SendRequest`] instance. The [`Connection`] //! instance is used to drive the connection (see [Managing the connection]). //! The [`SendRequest`] instance is used to initialize new streams (see [Making //! requests]). //! //! # Making requests //! //! Requests are made using the [`SendRequest`] handle provided by the handshake //! future. Once a request is submitted, an HTTP/2.0 stream is initialized and //! the request is sent to the server. //! //! A request body and request trailers are sent using [`SendRequest`] and the //! server's response is returned once the [`ResponseFuture`] future completes. //! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by //! 
[`SendRequest::send_request`] and are tied to the HTTP/2.0 stream //! initialized by the sent request. //! //! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0 //! stream can be created, i.e. as long as the current number of active streams //! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the //! caller will be notified once an existing stream closes, freeing capacity for //! the caller. The caller should use [`SendRequest::poll_ready`] to check for //! capacity before sending a request to the server. //! //! [`SendRequest`] enforces the [`MAX_CONCURRENT_STREAMS`] setting. The user //! must not send a request if `poll_ready` does not return `Ready`. Attempting //! to do so will result in an [`Error`] being returned. //! //! # Managing the connection //! //! The [`Connection`] instance is used to manage connection state. The caller //! is required to call [`Connection::poll`] in order to advance state. //! [`SendRequest::send_request`] and other functions have no effect unless //! [`Connection::poll`] is called. //! //! The [`Connection`] instance should only be dropped once [`Connection::poll`] //! returns `Ready`. At this point, the underlying socket has been closed and no //! further work needs to be done. //! //! The easiest way to ensure that the [`Connection`] instance gets polled is to //! submit the [`Connection`] instance to an [executor]. The executor will then //! manage polling the connection until the connection is complete. //! Alternatively, the caller can call `poll` manually. //! //! # Example //! //! ```rust //! extern crate futures; //! extern crate h2; //! extern crate http; //! extern crate tokio; //! //! use h2::client; //! //! use futures::*; //! use http::*; //! //! use tokio::net::TcpStream; //! //! pub fn main() { //! let addr = "127.0.0.1:5928".parse().unwrap(); //! //! tokio::run( //! // Establish TCP connection to the server. //! TcpStream::connect(&addr) //! .map_err(|_| { //! panic!("failed to establish TCP connection") //! }) //! .and_then(|tcp| client::handshake(tcp)) //! .and_then(|(h2, connection)| { //! let connection = connection //! .map_err(|_| panic!("HTTP/2.0 connection failed")); //! //! // Spawn a new task to drive the connection state //! tokio::spawn(connection); //! //! // Wait until the `SendRequest` handle has available //! // capacity. //! h2.ready() //! }) //! .and_then(|mut h2| { //! // Prepare the HTTP request to send to the server. //! let request = Request::builder() //! .method(Method::GET) //! .uri("https://www.example.com/") //! .body(()) //! .unwrap(); //! //! // Send the request. The second tuple item allows the caller //! // to stream a request body. //! let (response, _) = h2.send_request(request, true).unwrap(); //! //! response.and_then(|response| { //! let (head, mut body) = response.into_parts(); //! //! println!("Received response: {:?}", head); //! //! // The `release_capacity` handle allows the caller to manage //! // flow control. //! // //! // Whenever data is received, the caller is responsible for //! // releasing capacity back to the server once it has freed //! // the data from memory. //! let mut release_capacity = body.release_capacity().clone(); //! //! body.for_each(move |chunk| { //! println!("RX: {:?}", chunk); //! //! // Let the server send more data. //! let _ = release_capacity.release_capacity(chunk.len()); //! //! Ok(()) //! }) //! }) //! }) //! .map_err(|e| panic!("failed to perform HTTP/2.0 request: {:?}", e)) //! ) //! } //! ``` //! //! 
[`TcpStream`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpStream.html //! [`handshake`]: fn.handshake.html //! [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html //! [`SendRequest`]: struct.SendRequest.html //! [`SendStream`]: ../struct.SendStream.html //! [Making requests]: #making-requests //! [Managing the connection]: #managing-the-connection //! [`Connection`]: struct.Connection.html //! [`Connection::poll`]: struct.Connection.html#method.poll //! [`SendRequest::send_request`]: struct.SendRequest.html#method.send_request //! [`MAX_CONCURRENT_STREAMS`]: http://httpwg.org/specs/rfc7540.html#SettingValues //! [`SendRequest`]: struct.SendRequest.html //! [`ResponseFuture`]: struct.ResponseFuture.html //! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready //! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! [`Error`]: ../struct.Error.html use {SendStream, RecvStream, ReleaseCapacity, PingPong}; use codec::{Codec, RecvError, SendError, UserError}; use frame::{Headers, Pseudo, Reason, Settings, StreamId}; use proto; use bytes::{Bytes, IntoBuf}; use futures::{Async, Future, Poll, Stream}; use http::{uri, HeaderMap, Request, Response, Method, Version}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::WriteAll; use std::fmt; use std::marker::PhantomData; use std::time::Duration; use std::usize; /// Performs the HTTP/2.0 connection handshake. /// /// This type implements `Future`, yielding a `(SendRequest, Connection)` /// instance once the handshake has completed. /// /// The handshake is completed once both the connection preface and the initial /// settings frame is sent by the client. /// /// The handshake future does not wait for the initial settings frame from the /// server. /// /// See [module] level documentation for more details. /// /// [module]: index.html #[must_use = "futures do nothing unless polled"] pub struct Handshake { builder: Builder, inner: WriteAll, _marker: PhantomData, } /// Initializes new HTTP/2.0 streams on a connection by sending a request. /// /// This type does no work itself. Instead, it is a handle to the inner /// connection state held by [`Connection`]. If the associated connection /// instance is dropped, all `SendRequest` functions will return [`Error`]. /// /// [`SendRequest`] instances are able to move to and operate on separate tasks /// / threads than their associated [`Connection`] instance. Internally, there /// is a buffer used to stage requests before they get written to the /// connection. There is no guarantee that requests get written to the /// connection in FIFO order as HTTP/2.0 prioritization logic can play a role. /// /// [`SendRequest`] implements [`Clone`], enabling the creation of many /// instances that are backed by a single connection. /// /// See [module] level documentation for more details. /// /// [module]: index.html /// [`Connection`]: struct.Connection.html /// [`Clone`]: https://doc.rust-lang.org/std/clone/trait.Clone.html /// [`Error`]: ../struct.Error.html pub struct SendRequest { inner: proto::Streams, pending: Option, } /// Returns a `SendRequest` instance once it is ready to send at least one /// request. #[derive(Debug)] pub struct ReadySendRequest { inner: Option>, } /// Manages all state associated with an HTTP/2.0 client connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and /// implements the HTTP/2.0 client logic for that connection. 
It is responsible /// for driving the internal state forward, performing the work requested of the /// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], /// [`RecvStream`]). /// /// `Connection` values are created by calling [`handshake`]. Once a /// `Connection` value is obtained, the caller must repeatedly call [`poll`] /// until `Ready` is returned. The easiest way to do this is to submit the /// `Connection` instance to an [executor]. /// /// [module]: index.html /// [`handshake`]: fn.handshake.html /// [`SendRequest`]: struct.SendRequest.html /// [`ResponseFuture`]: struct.ResponseFuture.html /// [`SendStream`]: ../struct.SendStream.html /// [`RecvStream`]: ../struct.RecvStream.html /// [`poll`]: #method.poll /// [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html /// /// # Examples /// /// ``` /// # extern crate bytes; /// # extern crate futures; /// # extern crate h2; /// # extern crate tokio_io; /// # use futures::{Future, Stream}; /// # use futures::future::Executor; /// # use tokio_io::*; /// # use h2::client; /// # use h2::client::*; /// # /// # fn doc(my_io: T, my_executor: E) /// # where T: AsyncRead + AsyncWrite + 'static, /// # E: Executor>>, /// # { /// client::handshake(my_io) /// .and_then(|(send_request, connection)| { /// // Submit the connection handle to an executor. /// my_executor.execute( /// # Box::new( /// connection.map_err(|_| panic!("connection failed")) /// # ) /// ).unwrap(); /// /// // Now, use `send_request` to initialize HTTP/2.0 streams. /// // ... /// # drop(send_request); /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # /// # pub fn main() {} /// ``` #[must_use = "futures do nothing unless polled"] pub struct Connection { inner: proto::Connection, } /// A future of an HTTP response. #[derive(Debug)] #[must_use = "futures do nothing unless polled"] pub struct ResponseFuture { inner: proto::OpaqueStreamRef, push_promise_consumed: bool, } /// A future of a pushed HTTP response. /// /// We have to differentiate between pushed and non pushed because of the spec /// /// > PUSH_PROMISE frames MUST only be sent on a peer-initiated stream /// > that is in either the "open" or "half-closed (remote)" state. #[derive(Debug)] #[must_use = "futures do nothing unless polled"] pub struct PushedResponseFuture { inner: ResponseFuture, } /// A pushed response and corresponding request headers #[derive(Debug)] pub struct PushPromise { /// The request headers request: Request<()>, /// The pushed response response: PushedResponseFuture, } /// A stream of pushed responses and corresponding promised requests #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct PushPromises { inner: proto::OpaqueStreamRef, } /// Builds client connections with custom configuration values. /// /// Methods can be chained in order to set the configuration values. /// /// The client is constructed by calling [`handshake`] and passing the I/O /// handle that will back the HTTP/2.0 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. /// /// See function level documentation for details on the various client /// configuration settings. 
/// /// [`Builder::new`]: struct.Builder.html#method.new /// [`handshake`]: struct.Builder.html#method.handshake /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` #[derive(Clone, Debug)] pub struct Builder { /// Time to keep locally reset streams around before reaping. reset_stream_duration: Duration, /// Initial maximum number of locally initiated (send) streams. /// After receiving a Settings frame from the remote peer, /// the connection will overwrite this value with the /// MAX_CONCURRENT_STREAMS specified in the frame. initial_max_send_streams: usize, /// Initial target window size for new connections. initial_target_connection_window_size: Option, /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, /// Initial `Settings` frame to send as part of the handshake. settings: Settings, /// The stream ID of the first (lowest) stream. Subsequent streams will use /// monotonically increasing stream IDs. stream_id: StreamId, } #[derive(Debug)] pub(crate) struct Peer; // ===== impl SendRequest ===== impl SendRequest where B: IntoBuf, B::Buf: 'static, { /// Returns `Ready` when the connection can initialize a new HTTP/2.0 /// stream. /// /// This function must return `Ready` before `send_request` is called. When /// `NotReady` is returned, the task will be notified once the readiness /// state changes. /// /// See [module] level docs for more details. /// /// [module]: index.html pub fn poll_ready(&mut self) -> Poll<(), ::Error> { try_ready!(self.inner.poll_pending_open(self.pending.as_ref())); self.pending = None; Ok(().into()) } /// Consumes `self`, returning a future that returns `self` back once it is /// ready to send a request. /// /// This function should be called before calling `send_request`. /// /// This is a functional combinator for [`poll_ready`]. The returned future /// will call `SendStream::poll_ready` until `Ready`, then returns `self` to /// the caller. /// /// # Examples /// /// ```rust /// # extern crate futures; /// # extern crate h2; /// # extern crate http; /// # use futures::*; /// # use h2::client::*; /// # use http::*; /// # fn doc(send_request: SendRequest<&'static [u8]>) /// # { /// // First, wait until the `send_request` handle is ready to send a new /// // request /// send_request.ready() /// .and_then(|mut send_request| { /// // Use `send_request` here. /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # pub fn main() {} /// ``` /// /// See [module] level docs for more details. /// /// [`poll_ready`]: #method.poll_ready /// [module]: index.html pub fn ready(self) -> ReadySendRequest { ReadySendRequest { inner: Some(self) } } /// Sends a HTTP/2.0 request to the server. /// /// `send_request` initializes a new HTTP/2.0 stream on the associated /// connection, then sends the given request using this new stream. Only the /// request head is sent. /// /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance /// are returned. 
The [`ResponseFuture`] instance is used to get the /// server's response and the [`SendStream`] instance is used to send a /// request body or trailers to the server over the same HTTP/2.0 stream. /// /// To send a request body or trailers, set `end_of_stream` to `false`. /// Then, use the returned [`SendStream`] instance to stream request body /// chunks or send trailers. If `end_of_stream` is **not** set to `false` /// then attempting to call [`SendStream::send_data`] or /// [`SendStream::send_trailers`] will result in an error. /// /// If no request body or trailers are to be sent, set `end_of_stream` to /// `true` and drop the returned [`SendStream`] instance. /// /// # A note on HTTP versions /// /// The provided `Request` will be encoded differently depending on the /// value of its version field. If the version is set to 2.0, then the /// request is encoded as per the specification recommends. /// /// If the version is set to a lower value, then the request is encoded to /// preserve the characteristics of HTTP 1.1 and lower. Specifically, host /// headers are permitted and the `:authority` pseudo header is not /// included. /// /// The caller should always set the request's version field to 2.0 unless /// specifically transmitting an HTTP 1.1 request over 2.0. /// /// # Examples /// /// Sending a request with no body /// /// ```rust /// # extern crate futures; /// # extern crate h2; /// # extern crate http; /// # use futures::*; /// # use h2::client::*; /// # use http::*; /// # fn doc(send_request: SendRequest<&'static [u8]>) /// # { /// // First, wait until the `send_request` handle is ready to send a new /// // request /// send_request.ready() /// .and_then(|mut send_request| { /// // Prepare the HTTP request to send to the server. /// let request = Request::get("https://www.example.com/") /// .body(()) /// .unwrap(); /// /// // Send the request to the server. Since we are not sending a /// // body or trailers, we can drop the `SendStream` instance. /// let (response, _) = send_request /// .send_request(request, true).unwrap(); /// /// response /// }) /// .and_then(|response| { /// // Process the response /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # pub fn main() {} /// ``` /// /// Sending a request with a body and trailers /// /// ```rust /// # extern crate futures; /// # extern crate h2; /// # extern crate http; /// # use futures::*; /// # use h2::client::*; /// # use http::*; /// # fn doc(send_request: SendRequest<&'static [u8]>) /// # { /// // First, wait until the `send_request` handle is ready to send a new /// // request /// send_request.ready() /// .and_then(|mut send_request| { /// // Prepare the HTTP request to send to the server. /// let request = Request::get("https://www.example.com/") /// .body(()) /// .unwrap(); /// /// // Send the request to the server. If we are not sending a /// // body or trailers, we can drop the `SendStream` instance. /// let (response, mut send_stream) = send_request /// .send_request(request, false).unwrap(); /// /// // At this point, one option would be to wait for send capacity. /// // Doing so would allow us to not hold data in memory that /// // cannot be sent. However, this is not a requirement, so this /// // example will skip that step. See `SendStream` documentation /// // for more details. /// send_stream.send_data(b"hello", false).unwrap(); /// send_stream.send_data(b"world", false).unwrap(); /// /// // Send the trailers. 
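    /// // Trailers are delivered as a final HEADERS frame with the
    /// // END_STREAM flag set, so sending them also ends the request body.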
/// let mut trailers = HeaderMap::new(); /// trailers.insert( /// header::HeaderName::from_bytes(b"my-trailer").unwrap(), /// header::HeaderValue::from_bytes(b"hello").unwrap()); /// /// send_stream.send_trailers(trailers).unwrap(); /// /// response /// }) /// .and_then(|response| { /// // Process the response /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # pub fn main() {} /// ``` /// /// [`ResponseFuture`]: struct.ResponseFuture.html /// [`SendStream`]: ../struct.SendStream.html /// [`SendStream::send_data`]: ../struct.SendStream.html#method.send_data /// [`SendStream::send_trailers`]: ../struct.SendStream.html#method.send_trailers pub fn send_request( &mut self, request: Request<()>, end_of_stream: bool, ) -> Result<(ResponseFuture, SendStream), ::Error> { self.inner .send_request(request, end_of_stream, self.pending.as_ref()) .map_err(Into::into) .map(|stream| { if stream.is_pending_open() { self.pending = Some(stream.clone_to_opaque()); } let response = ResponseFuture { inner: stream.clone_to_opaque(), push_promise_consumed: false, }; let stream = SendStream::new(stream); (response, stream) }) } } impl fmt::Debug for SendRequest where B: IntoBuf, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("SendRequest").finish() } } impl Clone for SendRequest where B: IntoBuf, { fn clone(&self) -> Self { SendRequest { inner: self.inner.clone(), pending: None, } } } #[cfg(feature = "unstable")] impl SendRequest where B: IntoBuf, { /// Returns the number of active streams. /// /// An active stream is a stream that has not yet transitioned to a closed /// state. pub fn num_active_streams(&self) -> usize { self.inner.num_active_streams() } /// Returns the number of streams that are held in memory. /// /// A wired stream is a stream that is either active or is closed but must /// stay in memory for some reason. For example, there are still outstanding /// userspace handles pointing to the slot. pub fn num_wired_streams(&self) -> usize { self.inner.num_wired_streams() } } // ===== impl ReadySendRequest ===== impl Future for ReadySendRequest where B: IntoBuf, B::Buf: 'static, { type Item = SendRequest; type Error = ::Error; fn poll(&mut self) -> Poll { match self.inner { Some(ref mut send_request) => { let _ = try_ready!(send_request.poll_ready()); } None => panic!("called `poll` after future completed"), } Ok(self.inner.take().unwrap().into()) } } // ===== impl Builder ===== impl Builder { /// Returns a new client builder instance initialized with default /// configuration values. /// /// Configuration methods can be chained on the return value. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn new() -> Builder { Builder { reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, initial_target_connection_window_size: None, initial_max_send_streams: usize::MAX, settings: Default::default(), stream_id: 1.into(), } } /// Indicates the initial window size (in octets) for stream-level /// flow control for received data. 
/// /// The initial window of a stream is used as part of flow control. For more /// details, see [`ReleaseCapacity`]. /// /// The default value is 65,535. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn initial_window_size(&mut self, size: u32) -> &mut Self { self.settings.set_initial_window_size(Some(size)); self } /// Indicates the initial window size (in octets) for connection-level flow control /// for received data. /// /// The initial window of a connection is used as part of flow control. For more details, /// see [`ReleaseCapacity`]. /// /// The default value is 65,535. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .initial_connection_window_size(1_000_000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { self.initial_target_connection_window_size = Some(size); self } /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the /// configured client is able to accept. /// /// The sender may send data frames that are **smaller** than this value, /// but any data larger than `max` will be broken up into multiple `DATA` /// frames. /// /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .max_frame_size(1_000_000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` /// /// # Panics /// /// This function panics if `max` is not within the legal range specified /// above. pub fn max_frame_size(&mut self, max: u32) -> &mut Self { self.settings.set_max_frame_size(Some(max)); self } /// Sets the max size of received header frames. /// /// This advisory setting informs a peer of the maximum size of header list /// that the sender is prepared to accept, in octets. The value is based on /// the uncompressed size of header fields, including the length of the name /// and value in octets plus an overhead of 32 octets for each header field. /// /// This setting is also used to limit the maximum amount of data that is /// buffered to decode HEADERS frames. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. 
/// let client_fut = Builder::new() /// .max_header_list_size(16 * 1024) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { self.settings.set_max_header_list_size(Some(max)); self } /// Sets the maximum table size of the dynamic header decoder. /// /// By default, this value is 4,096 bytes. pub fn header_table_size(&mut self, max: u32) -> &mut Self { self.settings.set_header_table_size(Some(max)); self } /// Sets the maximum number of concurrent streams. /// /// The maximum concurrent streams setting only controls the maximum number /// of streams that can be initiated by the remote peer. In other words, /// when this setting is set to 100, this does not limit the number of /// concurrent streams that can be created by the caller. /// /// It is recommended that this value be no smaller than 100, so as to not /// unnecessarily limit parallelism. However, any value is legal, including /// 0. If `max` is set to 0, then the remote will not be permitted to /// initiate streams. /// /// Note that streams in the reserved state, i.e., push promises that have /// been reserved but the stream has not started, do not count against this /// setting. /// /// Also note that if the remote *does* exceed the value set here, it is not /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { self.settings.set_max_concurrent_streams(Some(max)); self } /// Sets the initial maximum of locally initiated (send) streams. /// /// The initial settings will be overwritten by the remote peer when /// the Settings frame is received. The new value will be set to the /// `max_concurrent_streams()` from the frame. /// /// This setting prevents the caller from exceeding this number of /// streams that are counted towards the concurrency limit. /// /// Sending streams past the limit returned by the peer will be treated /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. /// /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .initial_max_send_streams(1000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn initial_max_send_streams(&mut self, initial: usize) -> &mut Self { self.initial_max_send_streams = initial; self } /// Sets the maximum number of concurrent locally reset streams. 
/// /// When a stream is explicitly reset, the HTTP/2.0 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// /// In order to satisfy the specification, internal state must be maintained /// to implement the behavior. This state grows linearly with the number of /// streams that are locally reset. /// /// The `max_concurrent_reset_streams` setting configures sets an upper /// bound on the amount of state that is maintained. When this max value is /// reached, the oldest reset stream is purged from memory. /// /// Once the stream has been fully purged from memory, any additional frames /// received for that stream will result in a connection level protocol /// error, forcing the connection to terminate. /// /// The default value is 10. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_reset_streams(1000) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { self.reset_stream_max = max; self } /// Sets the duration to remember locally reset streams. /// /// When a stream is explicitly reset, the HTTP/2.0 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// /// In order to satisfy the specification, internal state must be maintained /// to implement the behavior. This state grows linearly with the number of /// streams that are locally reset. /// /// The `reset_stream_duration` setting configures the max amount of time /// this state will be maintained in memory. Once the duration elapses, the /// stream state is purged from memory. /// /// Once the stream has been fully purged from memory, any additional frames /// received for that stream will result in a connection level protocol /// error, forcing the connection to terminate. /// /// The default value is 30 seconds. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # use std::time::Duration; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { self.reset_stream_duration = dur; self } /// Enables or disables server push promises. /// /// This value is included in the initial SETTINGS handshake. When set, the /// server MUST NOT send a push promise. Setting this value to value to /// false in the initial SETTINGS handshake guarantees that the remote server /// will never send a push promise. /// /// This setting can be changed during the life of a single HTTP/2.0 /// connection by sending another settings frame updating the value. /// /// Default value: `true`. 
/// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # use std::time::Duration; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .enable_push(false) /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn enable_push(&mut self, enabled: bool) -> &mut Self { self.settings.set_enable_push(enabled); self } /// Sets the first stream ID to something other than 1. #[cfg(feature = "unstable")] pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self { self.stream_id = stream_id.into(); assert!( self.stream_id.is_client_initiated(), "stream id must be odd" ); self } /// Creates a new configured HTTP/2.0 client backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence /// the [HTTP/2.0 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] /// tuple once the HTTP/2.0 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html /// [Outbound data type]: ../index.html#outbound-data-type. /// /// # Examples /// /// Basic usage: /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut = Builder::new() /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` /// /// Configures the send-payload data type. In this case, the outbound data /// type will be `&'static [u8]`. /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let client_fut: Handshake<_, &'static [u8]> = Builder::new() /// .handshake(my_io); /// # client_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn handshake(&self, io: T) -> Handshake where T: AsyncRead + AsyncWrite, B: IntoBuf, B::Buf: 'static, { Connection::handshake2(io, self.clone()) } } impl Default for Builder { fn default() -> Builder { Builder::new() } } /// Creates a new configured HTTP/2.0 client with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence /// the [HTTP/2.0 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] /// tuple once the HTTP/2.0 handshake has been completed. The returned /// [`Connection`] instance will be using default configuration values. Use /// [`Builder`] to customize the configuration values used by a [`Connection`] /// instance. 
/// /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html /// /// # Examples /// /// ``` /// # extern crate futures; /// # extern crate h2; /// # extern crate tokio_io; /// # use futures::*; /// # use tokio_io::*; /// # use h2::client; /// # use h2::client::*; /// # /// # fn doc(my_io: T) /// # { /// client::handshake(my_io) /// .and_then(|(send_request, connection)| { /// // The HTTP/2.0 handshake has completed, now start polling /// // `connection` and use `send_request` to send requests to the /// // server. /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # /// # pub fn main() {} /// ``` pub fn handshake(io: T) -> Handshake where T: AsyncRead + AsyncWrite, { Builder::new().handshake(io) } // ===== impl Connection ===== impl Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, { fn handshake2(io: T, builder: Builder) -> Handshake { use tokio_io::io; debug!("binding client connection"); let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; let handshake = io::write_all(io, msg); Handshake { builder, inner: handshake, _marker: PhantomData, } } /// Sets the target window size for the whole connection. /// /// If `size` is greater than the current value, then a `WINDOW_UPDATE` /// frame will be immediately sent to the remote, increasing the connection /// level window by `size - current_value`. /// /// If `size` is less than the current value, nothing will happen /// immediately. However, as window capacity is released by /// [`ReleaseCapacity`] instances, no `WINDOW_UPDATE` frames will be sent /// out until the number of "in flight" bytes drops below `size`. /// /// The default value is 65,535. /// /// See [`ReleaseCapacity`] documentation for more details. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// [library level]: ../index.html#flow-control pub fn set_target_window_size(&mut self, size: u32) { assert!(size <= proto::MAX_WINDOW_SIZE); self.inner.set_target_window_size(size); } /// Takes a `PingPong` instance from the connection. /// /// # Note /// /// This may only be called once. Calling multiple times will return `None`. pub fn ping_pong(&mut self) -> Option { self.inner .take_user_pings() .map(PingPong::new) } } impl Future for Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, { type Item = (); type Error = ::Error; fn poll(&mut self) -> Poll<(), ::Error> { self.inner.maybe_close_connection_if_no_streams(); self.inner.poll().map_err(Into::into) } } impl fmt::Debug for Connection where T: AsyncRead + AsyncWrite, T: fmt::Debug, B: fmt::Debug + IntoBuf, B::Buf: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.inner, fmt) } } // ===== impl Handshake ===== impl Future for Handshake where T: AsyncRead + AsyncWrite, B: IntoBuf, B::Buf: 'static, { type Item = (SendRequest, Connection); type Error = ::Error; fn poll(&mut self) -> Poll { let res = self.inner.poll() .map_err(::Error::from); let (io, _) = try_ready!(res); debug!("client connection bound"); // Create the codec let mut codec = Codec::new(io); if let Some(max) = self.builder.settings.max_frame_size() { codec.set_max_recv_frame_size(max as usize); } if let Some(max) = self.builder.settings.max_header_list_size() { codec.set_max_recv_header_list_size(max as usize); } // The server receives the settings first, so we can just assume // they will be ACKed, or else bad server. 
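        // SETTINGS_HEADER_TABLE_SIZE advertises how large an HPACK dynamic
        // table the peer's encoder may use, so size the local decoder to
        // match the value that is about to be sent.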
if let Some(max) = self.builder.settings.header_table_size() { codec.set_max_recv_header_table_size(max as usize); } // Send initial settings frame codec .buffer(self.builder.settings.clone().into()) .expect("invalid SETTINGS frame"); let inner = proto::Connection::new(codec, proto::Config { next_stream_id: self.builder.stream_id, initial_max_send_streams: self.builder.initial_max_send_streams, reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, settings: self.builder.settings.clone(), }); let send_request = SendRequest { inner: inner.streams().clone(), pending: None, }; let mut connection = Connection { inner }; if let Some(sz) = self.builder.initial_target_connection_window_size { connection.set_target_window_size(sz); } Ok(Async::Ready((send_request, connection))) } } impl fmt::Debug for Handshake where T: AsyncRead + AsyncWrite, T: fmt::Debug, B: fmt::Debug + IntoBuf, B::Buf: fmt::Debug + IntoBuf, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "client::Handshake") } } // ===== impl ResponseFuture ===== impl Future for ResponseFuture { type Item = Response; type Error = ::Error; fn poll(&mut self) -> Poll { let (parts, _) = try_ready!(self.inner.poll_response()).into_parts(); let body = RecvStream::new(ReleaseCapacity::new(self.inner.clone())); Ok(Response::from_parts(parts, body).into()) } } impl ResponseFuture { /// Returns the stream ID of the response stream. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> ::StreamId { ::StreamId::from_internal(self.inner.stream_id()) } /// Returns a stream of PushPromises /// /// # Panics /// /// If this method has been called before /// or the stream was itself was pushed pub fn push_promises(&mut self) -> PushPromises { if self.push_promise_consumed { panic!("Reference to push promises stream taken!"); } self.push_promise_consumed = true; PushPromises { inner: self.inner.clone() } } } // ===== impl PushPromises ===== impl Stream for PushPromises { type Item = PushPromise; type Error = ::Error; fn poll(&mut self) -> Poll, Self::Error> { match try_ready!(self.inner.poll_pushed()) { Some((request, response)) => { let response = PushedResponseFuture { inner: ResponseFuture { inner: response, push_promise_consumed: false } }; Ok(Async::Ready(Some(PushPromise{request, response}))) } None => Ok(Async::Ready(None)), } } } // ===== impl PushPromise ===== impl PushPromise { /// Returns a reference to the push promise's request headers. pub fn request(&self) -> &Request<()> { &self.request } /// Returns a mutable reference to the push promise's request headers. pub fn request_mut(&mut self) -> &mut Request<()> { &mut self.request } /// Consumes `self`, returning the push promise's request headers and /// response future. pub fn into_parts(self) -> (Request<()>, PushedResponseFuture) { (self.request, self.response) } } // ===== impl PushedResponseFuture ===== impl Future for PushedResponseFuture { type Item = Response; type Error = ::Error; fn poll(&mut self) -> Poll { self.inner.poll() } } impl PushedResponseFuture { /// Returns the stream ID of the response stream. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> ::StreamId { self.inner.stream_id() } } // ===== impl Peer ===== impl Peer { pub fn convert_send_message( id: StreamId, request: Request<()>, end_of_stream: bool) -> Result { use http::request::Parts; let ( Parts { method, uri, headers, version, .. 
}, _, ) = request.into_parts(); let is_connect = method == Method::CONNECT; // Build the set pseudo header set. All requests will include `method` // and `path`. let mut pseudo = Pseudo::request(method, uri); if pseudo.scheme.is_none() { // If the scheme is not set, then there are a two options. // // 1) Authority is not set. In this case, a request was issued with // a relative URI. This is permitted **only** when forwarding // HTTP 1.x requests. If the HTTP version is set to 2.0, then // this is an error. // // 2) Authority is set, then the HTTP method *must* be CONNECT. // // It is not possible to have a scheme but not an authority set (the // `http` crate does not allow it). // if pseudo.authority.is_none() { if version == Version::HTTP_2 { return Err(UserError::MissingUriSchemeAndAuthority.into()); } else { // This is acceptable as per the above comment. However, // HTTP/2.0 requires that a scheme is set. Since we are // forwarding an HTTP 1.1 request, the scheme is set to // "http". pseudo.set_scheme(uri::Scheme::HTTP); } } else if !is_connect { // TODO: Error } } // Create the HEADERS frame let mut frame = Headers::new(id, pseudo, headers); if end_of_stream { frame.set_end_stream() } Ok(frame) } } impl proto::Peer for Peer { type Poll = Response<()>; fn dyn() -> proto::DynPeer { proto::DynPeer::Client } fn is_server() -> bool { false } fn convert_poll_message( pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId ) -> Result { let mut b = Response::builder(); b.version(Version::HTTP_2); if let Some(status) = pseudo.status { b.status(status); } let mut response = match b.body(()) { Ok(response) => response, Err(_) => { // TODO: Should there be more specialized handling for different // kinds of errors return Err(RecvError::Stream { id: stream_id, reason: Reason::PROTOCOL_ERROR, }); }, }; *response.headers_mut() = fields; Ok(response) } } h2-0.1.26/src/codec/error.rs010066400017500001750000000074401347362176000137150ustar0000000000000000use frame::{Reason, StreamId}; use std::{error, fmt, io}; /// Errors that are received #[derive(Debug)] pub enum RecvError { Connection(Reason), Stream { id: StreamId, reason: Reason }, Io(io::Error), } /// Errors caused by sending a message #[derive(Debug)] pub enum SendError { /// User error User(UserError), /// Connection error prevents sending. Connection(Reason), /// I/O error Io(io::Error), } /// Errors caused by users of the library #[derive(Debug)] pub enum UserError { /// The stream ID is no longer accepting frames. InactiveStreamId, /// The stream is not currently expecting a frame of this type. UnexpectedFrameType, /// The payload size is too big PayloadTooBig, /// A header size is too big HeaderTooBig, /// The application attempted to initiate too many streams to remote. Rejected, /// The released capacity is larger than claimed capacity. ReleaseCapacityTooBig, /// The stream ID space is overflowed. /// /// A new connection is needed. OverflowedStreamId, /// Illegal headers, such as connection-specific headers. MalformedHeaders, /// Request submitted with relative URI. MissingUriSchemeAndAuthority, /// Calls `SendResponse::poll_reset` after having called `send_response`. PollResetAfterSendResponse, /// Calls `PingPong::send_ping` before receiving a pong. 
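// Illustrative sketch (hypothetical module/test names): `Peer::convert_send_message`
// above derives the request pseudo-headers from the `http::Uri` parts -- an
// absolute-form URI supplies `:scheme`, `:authority`, and `:path`, which is why
// a relative URI is only accepted when forwarding an HTTP/1.x request. The field
// access below mirrors the `uri::Parts` usage in `Pseudo::request`.
#[cfg(test)]
mod pseudo_header_sketch {
    #[test]
    fn absolute_uri_supplies_all_request_pseudo_headers() {
        let uri: ::http::Uri = "https://example.com/index.html".parse().unwrap();
        let parts = ::http::uri::Parts::from(uri);
        assert!(parts.scheme.is_some());         // becomes `:scheme`
        assert!(parts.authority.is_some());      // becomes `:authority`
        assert!(parts.path_and_query.is_some()); // becomes `:path`
    }
}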
SendPingWhilePending, } // ===== impl RecvError ===== impl From for RecvError { fn from(src: io::Error) -> Self { RecvError::Io(src) } } impl error::Error for RecvError { fn description(&self) -> &str { use self::RecvError::*; match *self { Connection(ref reason) => reason.description(), Stream { ref reason, .. } => reason.description(), Io(ref e) => e.description(), } } } impl fmt::Display for RecvError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use std::error::Error; write!(fmt, "{}", self.description()) } } // ===== impl SendError ===== impl error::Error for SendError { fn description(&self) -> &str { use self::SendError::*; match *self { User(ref e) => e.description(), Connection(ref reason) => reason.description(), Io(ref e) => e.description(), } } } impl fmt::Display for SendError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use std::error::Error; write!(fmt, "{}", self.description()) } } impl From for SendError { fn from(src: io::Error) -> Self { SendError::Io(src) } } impl From for SendError { fn from(src: UserError) -> Self { SendError::User(src) } } // ===== impl UserError ===== impl error::Error for UserError { fn description(&self) -> &str { use self::UserError::*; match *self { InactiveStreamId => "inactive stream", UnexpectedFrameType => "unexpected frame type", PayloadTooBig => "payload too big", HeaderTooBig => "header too big", Rejected => "rejected", ReleaseCapacityTooBig => "release capacity too big", OverflowedStreamId => "stream ID overflowed", MalformedHeaders => "malformed headers", MissingUriSchemeAndAuthority => "request URI missing scheme and authority", PollResetAfterSendResponse => "poll_reset after send_response is illegal", SendPingWhilePending => "send_ping before received previous pong", } } } impl fmt::Display for UserError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use std::error::Error; write!(fmt, "{}", self.description()) } } h2-0.1.26/src/codec/framed_read.rs010066400017500001750000000352761351644257100150240ustar0000000000000000use codec::RecvError; use frame::{self, Frame, Kind, Reason}; use frame::{DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE}; use hpack; use futures::*; use bytes::BytesMut; use std::io; use tokio_io::AsyncRead; use tokio_io::codec::length_delimited; // 16 MB "sane default" taken from golang http2 const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20; #[derive(Debug)] pub struct FramedRead { inner: length_delimited::FramedRead, // hpack decoder state hpack: hpack::Decoder, max_header_list_size: usize, partial: Option, } /// Partially loaded headers frame #[derive(Debug)] struct Partial { /// Empty frame frame: Continuable, /// Partial header payload buf: BytesMut, } #[derive(Debug)] enum Continuable { Headers(frame::Headers), PushPromise(frame::PushPromise), } impl FramedRead { pub fn new(inner: length_delimited::FramedRead) -> FramedRead { FramedRead { inner: inner, hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE), max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, partial: None, } } fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { use self::RecvError::*; trace!("decoding frame from {}B", bytes.len()); // Parse the head let head = frame::Head::parse(&bytes); if self.partial.is_some() && head.kind() != Kind::Continuation { proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); return Err(Connection(Reason::PROTOCOL_ERROR)); } let kind = head.kind(); trace!(" -> kind={:?}", kind); 
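// Illustrative sketch (hypothetical module/test names, std-only): `decode_frame`
// above starts from the 9-octet frame header that `frame::Head::parse` reads --
// a 24-bit payload length, an 8-bit type, an 8-bit flag byte, and a 31-bit
// stream identifier whose high (reserved) bit is ignored.
#[cfg(test)]
mod frame_head_sketch {
    #[test]
    fn nine_octet_header_layout() {
        // A SETTINGS frame header (type 0x4), no flags, stream 0, empty payload.
        let raw: [u8; 9] = [0, 0, 0, 0x4, 0, 0, 0, 0, 0];
        let len = ((raw[0] as u32) << 16) | ((raw[1] as u32) << 8) | raw[2] as u32;
        let kind = raw[3];
        let flags = raw[4];
        let stream_id = (((raw[5] as u32) << 24)
            | ((raw[6] as u32) << 16)
            | ((raw[7] as u32) << 8)
            | raw[8] as u32)
            & 0x7FFF_FFFF;
        assert_eq!((len, kind, flags, stream_id), (0, 0x4, 0, 0));
    }
}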
macro_rules! header_block { ($frame:ident, $head:ident, $bytes:ident) => ({ // Drop the frame header // TODO: Change to drain: carllerche/bytes#130 let _ = $bytes.split_to(frame::HEADER_LEN); // Parse the header frame w/o parsing the payload let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { Ok(res) => res, Err(frame::Error::InvalidDependencyId) => { proto_err!(stream: "invalid HEADERS dependency ID"); // A stream cannot depend on itself. An endpoint MUST // treat this as a stream error (Section 5.4.2) of type // `PROTOCOL_ERROR`. return Err(Stream { id: $head.stream_id(), reason: Reason::PROTOCOL_ERROR, }); }, Err(e) => { proto_err!(conn: "failed to load frame; err={:?}", e); return Err(Connection(Reason::PROTOCOL_ERROR)); } }; let is_end_headers = frame.is_end_headers(); // Load the HPACK encoded headers match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) { Ok(_) => {}, Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, Err(frame::Error::Hpack(e)) => { debug!("connection error COMPRESSION_ERROR -- {:?};", e); return Err(Connection(Reason::COMPRESSION_ERROR)); }, Err(frame::Error::MalformedMessage) => { let id = $head.stream_id(); proto_err!(stream: "malformed header block; stream={:?}", id); return Err(Stream { id, reason: Reason::PROTOCOL_ERROR, }); }, Err(e) => { proto_err!(conn: "failed HPACK decoding; err={:?}", e); return Err(Connection(Reason::PROTOCOL_ERROR)); } } if is_end_headers { frame.into() } else { trace!("loaded partial header block"); // Defer returning the frame self.partial = Some(Partial { frame: Continuable::$frame(frame), buf: payload, }); return Ok(None); } }); } let frame = match kind { Kind::Settings => { let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::Ping => { let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load PING frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::WindowUpdate => { let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::Data => { let _ = bytes.split_to(frame::HEADER_LEN); let res = frame::Data::load(head, bytes.freeze()); // TODO: Should this always be connection level? Probably not... 
res.map_err(|e| { proto_err!(conn: "failed to load DATA frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::Headers => { header_block!(Headers, head, bytes) }, Kind::Reset => { let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load RESET frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::GoAway => { let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); Connection(Reason::PROTOCOL_ERROR) })?.into() }, Kind::PushPromise => { header_block!(PushPromise, head, bytes) }, Kind::Priority => { if head.stream_id() == 0 { // Invalid stream identifier proto_err!(conn: "invalid stream ID 0"); return Err(Connection(Reason::PROTOCOL_ERROR)); } match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { Ok(frame) => frame.into(), Err(frame::Error::InvalidDependencyId) => { // A stream cannot depend on itself. An endpoint MUST // treat this as a stream error (Section 5.4.2) of type // `PROTOCOL_ERROR`. let id = head.stream_id(); proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); return Err(Stream { id, reason: Reason::PROTOCOL_ERROR, }); }, Err(e) => { proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); return Err(Connection(Reason::PROTOCOL_ERROR)); } } }, Kind::Continuation => { let is_end_headers = (head.flag() & 0x4) == 0x4; let mut partial = match self.partial.take() { Some(partial) => partial, None => { proto_err!(conn: "received unexpected CONTINUATION frame"); return Err(Connection(Reason::PROTOCOL_ERROR)); } }; // The stream identifiers must match if partial.frame.stream_id() != head.stream_id() { proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); return Err(Connection(Reason::PROTOCOL_ERROR)); } // Extend the buf if partial.buf.is_empty() { partial.buf = bytes.split_off(frame::HEADER_LEN); } else { if partial.frame.is_over_size() { // If there was left over bytes previously, they may be // needed to continue decoding, even though we will // be ignoring this frame. This is done to keep the HPACK // decoder state up-to-date. // // Still, we need to be careful, because if a malicious // attacker were to try to send a gigantic string, such // that it fits over multiple header blocks, we could // grow memory uncontrollably again, and that'd be a shame. // // Instead, we use a simple heuristic to determine if // we should continue to ignore decoding, or to tell // the attacker to go away. 
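// Illustrative sketch (hypothetical names; the guard is simplified relative to
// the real code that follows): `Partial` above concatenates the header block
// fragment of a HEADERS/PUSH_PROMISE frame with subsequent CONTINUATION
// payloads until a frame carries END_HEADERS, and gives up once the buffered
// block grows past the configured limit.
#[cfg(test)]
mod continuation_sketch {
    const END_HEADERS: u8 = 0x4;

    fn assemble(fragments: &[(&[u8], u8)], max: usize) -> Result<Vec<u8>, &'static str> {
        let mut block = Vec::new();
        for &(payload, flags) in fragments {
            if block.len() + payload.len() > max {
                return Err("header block over ignorable limit");
            }
            block.extend_from_slice(payload);
            if flags & END_HEADERS == END_HEADERS {
                return Ok(block);
            }
        }
        Err("header block never terminated with END_HEADERS")
    }

    #[test]
    fn fragments_accumulate_until_end_headers() {
        let frames: &[(&[u8], u8)] = &[(b"abc", 0), (b"def", END_HEADERS)];
        assert_eq!(assemble(frames, 64).unwrap(), b"abcdef".to_vec());
        assert!(assemble(frames, 4).is_err());
    }
}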
if partial.buf.len() + bytes.len() > self.max_header_list_size { proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); return Err(Connection(Reason::COMPRESSION_ERROR)); } } partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); } match partial.frame.load_hpack(&mut partial.buf, self.max_header_list_size, &mut self.hpack) { Ok(_) => {}, Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, Err(frame::Error::MalformedMessage) => { let id = head.stream_id(); proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); return Err(Stream { id, reason: Reason::PROTOCOL_ERROR, }); }, Err(e) => { proto_err!(conn: "failed HPACK decoding; err={:?}", e); return Err(Connection(Reason::PROTOCOL_ERROR)); }, } if is_end_headers { partial.frame.into() } else { self.partial = Some(partial); return Ok(None); } }, Kind::Unknown => { // Unknown frames are ignored return Ok(None); }, }; Ok(Some(frame)) } pub fn get_ref(&self) -> &T { self.inner.get_ref() } pub fn get_mut(&mut self) -> &mut T { self.inner.get_mut() } /// Returns the current max frame size setting #[cfg(feature = "unstable")] #[inline] pub fn max_frame_size(&self) -> usize { self.inner.max_frame_length() } /// Updates the max frame size setting. /// /// Must be within 16,384 and 16,777,215. #[inline] pub fn set_max_frame_size(&mut self, val: usize) { assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize); self.inner.set_max_frame_length(val) } /// Update the max header list size setting. #[inline] pub fn set_max_header_list_size(&mut self, val: usize) { self.max_header_list_size = val; } /// Update the max header dynamic table size setting. pub fn set_max_header_table_size(&mut self, val: usize) { self.hpack.queue_size_update(val); } } impl Stream for FramedRead where T: AsyncRead, { type Item = Frame; type Error = RecvError; fn poll(&mut self) -> Poll, Self::Error> { loop { trace!("poll"); let bytes = match try_ready!(self.inner.poll().map_err(map_err)) { Some(bytes) => bytes, None => return Ok(Async::Ready(None)), }; trace!("poll; bytes={}B", bytes.len()); if let Some(frame) = self.decode_frame(bytes)? 
{ debug!("received; frame={:?}", frame); return Ok(Async::Ready(Some(frame))); } } } } fn map_err(err: io::Error) -> RecvError { use tokio_io::codec::length_delimited::FrameTooBig; if let io::ErrorKind::InvalidData = err.kind() { if let Some(custom) = err.get_ref() { if custom.is::() { return RecvError::Connection(Reason::FRAME_SIZE_ERROR); } } } err.into() } // ===== impl Continuable ===== impl Continuable { fn stream_id(&self) -> frame::StreamId { match *self { Continuable::Headers(ref h) => h.stream_id(), Continuable::PushPromise(ref p) => p.stream_id(), } } fn is_over_size(&self) -> bool { match *self { Continuable::Headers(ref h) => h.is_over_size(), Continuable::PushPromise(ref p) => p.is_over_size(), } } fn load_hpack( &mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder, ) -> Result<(), frame::Error> { match *self { Continuable::Headers(ref mut h) => h.load_hpack(src, max_header_list_size, decoder), Continuable::PushPromise(ref mut p) => p.load_hpack(src, max_header_list_size, decoder), } } } impl From for Frame { fn from(cont: Continuable) -> Self { match cont { Continuable::Headers(mut headers) => { headers.set_end_headers(); headers.into() } Continuable::PushPromise(mut push) => { push.set_end_headers(); push.into() } } } } h2-0.1.26/src/codec/framed_write.rs010066400017500001750000000225171351644257100152350ustar0000000000000000use codec::UserError; use codec::UserError::*; use frame::{self, Frame, FrameSize}; use hpack; use bytes::{Buf, BufMut, BytesMut}; use futures::*; use tokio_io::{AsyncRead, AsyncWrite}; use std::io::{self, Cursor}; #[derive(Debug)] pub struct FramedWrite { /// Upstream `AsyncWrite` inner: T, /// HPACK encoder hpack: hpack::Encoder, /// Write buffer /// /// TODO: Should this be a ring buffer? buf: Cursor, /// Next frame to encode next: Option>, /// Last data frame last_data_frame: Option>, /// Max frame size, this is specified by the peer max_frame_size: FrameSize, } #[derive(Debug)] enum Next { Data(frame::Data), Continuation(frame::Continuation), } /// Initialze the connection with this amount of write buffer. /// /// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS /// frame that big. const DEFAULT_BUFFER_CAPACITY: usize = 16 * 1_024; /// Min buffer required to attempt to write a frame const MIN_BUFFER_CAPACITY: usize = frame::HEADER_LEN + CHAIN_THRESHOLD; /// Chain payloads bigger than this. The remote will never advertise a max frame /// size less than this (well, the spec says the max frame size can't be less /// than 16kb, so not even close). const CHAIN_THRESHOLD: usize = 256; // TODO: Make generic impl FramedWrite where T: AsyncWrite, B: Buf, { pub fn new(inner: T) -> FramedWrite { FramedWrite { inner: inner, hpack: hpack::Encoder::default(), buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), next: None, last_data_frame: None, max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, } } /// Returns `Ready` when `send` is able to accept a frame /// /// Calling this function may result in the current contents of the buffer /// to be flushed to `T`. pub fn poll_ready(&mut self) -> Poll<(), io::Error> { if !self.has_capacity() { // Try flushing self.flush()?; if !self.has_capacity() { return Ok(Async::NotReady); } } Ok(Async::Ready(())) } /// Buffer a frame. /// /// `poll_ready` must be called first to ensure that a frame may be /// accepted. pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { // Ensure that we have enough capacity to accept the write. 
assert!(self.has_capacity()); debug!("send; frame={:?}", item); match item { Frame::Data(mut v) => { // Ensure that the payload is not greater than the max frame. let len = v.payload().remaining(); if len > self.max_frame_size() { return Err(PayloadTooBig); } if len >= CHAIN_THRESHOLD { let head = v.head(); // Encode the frame head to the buffer head.encode(len, self.buf.get_mut()); // Save the data frame self.next = Some(Next::Data(v)); } else { v.encode_chunk(self.buf.get_mut()); // The chunk has been fully encoded, so there is no need to // keep it around assert_eq!(v.payload().remaining(), 0, "chunk not fully encoded"); // Save off the last frame... self.last_data_frame = Some(v); } }, Frame::Headers(v) => { if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { self.next = Some(Next::Continuation(continuation)); } }, Frame::PushPromise(v) => { if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { self.next = Some(Next::Continuation(continuation)); } }, Frame::Settings(v) => { v.encode(self.buf.get_mut()); trace!("encoded settings; rem={:?}", self.buf.remaining()); }, Frame::GoAway(v) => { v.encode(self.buf.get_mut()); trace!("encoded go_away; rem={:?}", self.buf.remaining()); }, Frame::Ping(v) => { v.encode(self.buf.get_mut()); trace!("encoded ping; rem={:?}", self.buf.remaining()); }, Frame::WindowUpdate(v) => { v.encode(self.buf.get_mut()); trace!("encoded window_update; rem={:?}", self.buf.remaining()); }, Frame::Priority(_) => { /* v.encode(self.buf.get_mut()); trace!("encoded priority; rem={:?}", self.buf.remaining()); */ unimplemented!(); }, Frame::Reset(v) => { v.encode(self.buf.get_mut()); trace!("encoded reset; rem={:?}", self.buf.remaining()); }, } Ok(()) } /// Flush buffered data to the wire pub fn flush(&mut self) -> Poll<(), io::Error> { trace!("flush"); loop { while !self.is_empty() { match self.next { Some(Next::Data(ref mut frame)) => { trace!(" -> queued data frame"); let mut buf = Buf::by_ref(&mut self.buf).chain(frame.payload_mut()); try_ready!(self.inner.write_buf(&mut buf)); }, _ => { trace!(" -> not a queued data frame"); try_ready!(self.inner.write_buf(&mut self.buf)); }, } } // Clear internal buffer self.buf.set_position(0); self.buf.get_mut().clear(); // The data frame has been written, so unset it match self.next.take() { Some(Next::Data(frame)) => { self.last_data_frame = Some(frame); debug_assert!(self.is_empty()); break; }, Some(Next::Continuation(frame)) => { // Buffer the continuation frame, then try to write again if let Some(continuation) = frame.encode(&mut self.hpack, self.buf.get_mut()) { // We previously had a CONTINUATION, and after encoding // it, we got *another* one? Let's just double check // that at least some progress is being made... if self.buf.get_ref().len() == frame::HEADER_LEN { // If *only* the CONTINUATION frame header was // written, and *no* header fields, we're stuck // in a loop... 
panic!("CONTINUATION frame write loop; header value too big to encode"); } self.next = Some(Next::Continuation(continuation)); } }, None => { break; } } } trace!("flushing buffer"); // Flush the upstream try_nb!(self.inner.flush()); Ok(Async::Ready(())) } /// Close the codec pub fn shutdown(&mut self) -> Poll<(), io::Error> { try_ready!(self.flush()); self.inner.shutdown().map_err(Into::into) } fn has_capacity(&self) -> bool { self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY } fn is_empty(&self) -> bool { match self.next { Some(Next::Data(ref frame)) => !frame.payload().has_remaining(), _ => !self.buf.has_remaining(), } } } impl FramedWrite { /// Returns the max frame size that can be sent pub fn max_frame_size(&self) -> usize { self.max_frame_size as usize } /// Set the peer's max frame size. pub fn set_max_frame_size(&mut self, val: usize) { assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); self.max_frame_size = val as FrameSize; } /// Set the peer's max header table size. pub fn set_max_header_table_size(&mut self, val: usize) { self.hpack.update_max_size(val); } /// Retrieve the last data frame that has been sent pub fn take_last_data_frame(&mut self) -> Option> { self.last_data_frame.take() } pub fn get_mut(&mut self) -> &mut T { &mut self.inner } } impl io::Read for FramedWrite { fn read(&mut self, dst: &mut [u8]) -> io::Result { self.inner.read(dst) } } impl AsyncRead for FramedWrite { fn read_buf(&mut self, buf: &mut B2) -> Poll where Self: Sized, { self.inner.read_buf(buf) } unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { self.inner.prepare_uninitialized_buffer(buf) } } #[cfg(feature = "unstable")] mod unstable { use super::*; impl FramedWrite { pub fn get_ref(&self) -> &T { &self.inner } } } h2-0.1.26/src/codec/mod.rs010066400017500001750000000130311351644257100133330ustar0000000000000000// Until tokio-rs/tokio#680 is fixed #![allow(deprecated)] mod error; mod framed_read; mod framed_write; pub use self::error::{RecvError, SendError, UserError}; use self::framed_read::FramedRead; use self::framed_write::FramedWrite; use frame::{self, Data, Frame}; use futures::*; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::codec::length_delimited; use bytes::Buf; use std::io; #[derive(Debug)] pub struct Codec { inner: FramedRead>, } impl Codec where T: AsyncRead + AsyncWrite, B: Buf, { /// Returns a new `Codec` with the default max frame size #[inline] pub fn new(io: T) -> Self { Self::with_max_recv_frame_size(io, frame::DEFAULT_MAX_FRAME_SIZE as usize) } /// Returns a new `Codec` with the given maximum frame size pub fn with_max_recv_frame_size(io: T, max_frame_size: usize) -> Self { // Wrap with writer let framed_write = FramedWrite::new(io); // Delimit the frames let delimited = length_delimited::Builder::new() .big_endian() .length_field_length(3) .length_adjustment(9) .num_skip(0) // Don't skip the header .new_read(framed_write); let mut inner = FramedRead::new(delimited); // Use FramedRead's method since it checks the value is within range. inner.set_max_frame_size(max_frame_size); Codec { inner, } } } impl Codec { /// Updates the max received frame size. /// /// The change takes effect the next time a frame is decoded. In other /// words, if a frame is currently in process of being decoded with a frame /// size greater than `val` but less than the max frame size in effect /// before calling this function, then the frame will be allowed. 
#[inline] pub fn set_max_recv_frame_size(&mut self, val: usize) { self.inner.set_max_frame_size(val) } /// Returns the current max received frame size setting. /// /// This is the largest size this codec will accept from the wire. Larger /// frames will be rejected. #[cfg(feature = "unstable")] #[inline] pub fn max_recv_frame_size(&self) -> usize { self.inner.max_frame_size() } /// Returns the max frame size that can be sent to the peer. pub fn max_send_frame_size(&self) -> usize { self.inner.get_ref().max_frame_size() } /// Set the peer's max frame size. pub fn set_max_send_frame_size(&mut self, val: usize) { self.framed_write().set_max_frame_size(val) } /// Set the max header list size that can be received. pub fn set_max_recv_header_list_size(&mut self, val: usize) { self.inner.set_max_header_list_size(val); } /// Set the max dynamic header table size that can be received. pub fn set_max_recv_header_table_size(&mut self, val: usize) { self.inner.set_max_header_table_size(val); } /// Set the max dynamic header table size that can be sent. pub fn set_max_send_header_table_size(&mut self, val: usize) { self.framed_write().set_max_header_table_size(val) } /// Get a reference to the inner stream. #[cfg(feature = "unstable")] pub fn get_ref(&self) -> &T { self.inner.get_ref().get_ref() } /// Get a mutable reference to the inner stream. pub fn get_mut(&mut self) -> &mut T { self.inner.get_mut().get_mut() } /// Takes the data payload value that was fully written to the socket pub(crate) fn take_last_data_frame(&mut self) -> Option> { self.framed_write().take_last_data_frame() } fn framed_write(&mut self) -> &mut FramedWrite { self.inner.get_mut() } } impl Codec where T: AsyncWrite, B: Buf, { /// Returns `Ready` when the codec can buffer a frame pub fn poll_ready(&mut self) -> Poll<(), io::Error> { self.framed_write().poll_ready() } /// Buffer a frame. /// /// `poll_ready` must be called first to ensure that a frame may be /// accepted. /// /// TODO: Rename this to avoid conflicts with Sink::buffer pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { self.framed_write().buffer(item) } /// Flush buffered data to the wire pub fn flush(&mut self) -> Poll<(), io::Error> { self.framed_write().flush() } /// Shutdown the send half pub fn shutdown(&mut self) -> Poll<(), io::Error> { self.framed_write().shutdown() } } impl Stream for Codec where T: AsyncRead, { type Item = Frame; type Error = RecvError; fn poll(&mut self) -> Poll, Self::Error> { self.inner.poll() } } impl Sink for Codec where T: AsyncWrite, B: Buf, { type SinkItem = Frame; type SinkError = SendError; fn start_send(&mut self, item: Self::SinkItem) -> StartSend { if !self.poll_ready()?.is_ready() { return Ok(AsyncSink::NotReady(item)); } self.buffer(item)?; Ok(AsyncSink::Ready) } fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { self.flush()?; Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), Self::SinkError> { self.shutdown()?; Ok(Async::Ready(())) } } // TODO: remove (or improve) this impl From for Codec> where T: AsyncRead + AsyncWrite, { fn from(src: T) -> Self { Self::new(src) } } h2-0.1.26/src/error.rs010066400017500001750000000070731340203217400126250ustar0000000000000000use codec::{SendError, UserError}; use proto; use std::{error, fmt, io}; pub use frame::Reason; /// Represents HTTP/2.0 operation errors. /// /// `Error` covers error cases raised by protocol errors caused by the /// peer, I/O (transport) errors, and errors caused by the user of the library. 
/// /// If the error was caused by the remote peer, then it will contain a /// [`Reason`] which can be obtained with the [`reason`] function. /// /// [`Reason`]: struct.Reason.html /// [`reason`]: #method.reason #[derive(Debug)] pub struct Error { kind: Kind, } #[derive(Debug)] enum Kind { /// An error caused by an action taken by the remote peer. /// /// This is either an error received by the peer or caused by an invalid /// action taken by the peer (i.e. a protocol error). Proto(Reason), /// An error resulting from an invalid action taken by the user of this /// library. User(UserError), /// An `io::Error` occurred while trying to read or write. Io(io::Error), } // ===== impl Error ===== impl Error { /// If the error was caused by the remote peer, the error reason. /// /// This is either an error received by the peer or caused by an invalid /// action taken by the peer (i.e. a protocol error). pub fn reason(&self) -> Option { match self.kind { Kind::Proto(reason) => Some(reason), _ => None, } } /// Returns the true if the error is an io::Error pub fn is_io(&self) -> bool { match self.kind { Kind::Io(_) => true, _ => false, } } /// Returns the error if the error is an io::Error pub fn get_io(&self) -> Option<&io::Error> { match self.kind { Kind::Io(ref e) => Some(e), _ => None, } } /// Returns the error if the error is an io::Error pub fn into_io(self) -> Option { match self.kind { Kind::Io(e) => Some(e), _ => None, } } } impl From for Error { fn from(src: proto::Error) -> Error { use proto::Error::*; Error { kind: match src { Proto(reason) => Kind::Proto(reason), Io(e) => Kind::Io(e), }, } } } impl From for Error { fn from(src: io::Error) -> Error { Error { kind: Kind::Io(src), } } } impl From for Error { fn from(src: Reason) -> Error { Error { kind: Kind::Proto(src), } } } impl From for Error { fn from(src: SendError) -> Error { match src { SendError::User(e) => e.into(), SendError::Connection(reason) => reason.into(), SendError::Io(e) => e.into(), } } } impl From for Error { fn from(src: UserError) -> Error { Error { kind: Kind::User(src), } } } impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use self::Kind::*; match self.kind { Proto(ref reason) => write!(fmt, "protocol error: {}", reason), User(ref e) => write!(fmt, "user error: {}", e), Io(ref e) => fmt::Display::fmt(e, fmt), } } } impl error::Error for Error { fn description(&self) -> &str { use self::Kind::*; match self.kind { Io(ref e) => error::Error::description(e), Proto(ref reason) => reason.description(), User(ref user) => user.description(), } } } h2-0.1.26/src/frame/data.rs010066400017500001750000000132461347433203500135060ustar0000000000000000use bytes::{Buf, BufMut, Bytes}; use frame::{util, Error, Frame, Head, Kind, StreamId}; use std::fmt; /// Data frame /// /// Data frames convey arbitrary, variable-length sequences of octets associated /// with a stream. One or more DATA frames are used, for instance, to carry HTTP /// request or response payloads. #[derive(Eq, PartialEq)] pub struct Data { stream_id: StreamId, data: T, flags: DataFlags, pad_len: Option, } #[derive(Copy, Clone, Eq, PartialEq)] struct DataFlags(u8); const END_STREAM: u8 = 0x1; const PADDED: u8 = 0x8; const ALL: u8 = END_STREAM | PADDED; impl Data { /// Creates a new DATA frame. 
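// Illustrative sketch (hypothetical module/test names) relating to the `Error`
// conversions in src/error.rs above: a protocol-level `Reason` converted into
// an `Error` is recoverable through `reason()`, while an I/O failure reports
// `None` there and `true` from `is_io()`. `Reason` being comparable and
// printable here is implied by `GoAway` deriving `PartialEq`/`Debug` while
// holding a `Reason`.
#[cfg(test)]
mod error_reason_sketch {
    #[test]
    fn reason_round_trips_through_error() {
        let proto_err: ::Error = ::Reason::PROTOCOL_ERROR.into();
        assert_eq!(proto_err.reason(), Some(::Reason::PROTOCOL_ERROR));
        assert!(!proto_err.is_io());

        let io_err: ::Error =
            ::std::io::Error::new(::std::io::ErrorKind::Other, "boom").into();
        assert!(io_err.reason().is_none());
        assert!(io_err.is_io());
    }
}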
pub fn new(stream_id: StreamId, payload: T) -> Self { assert!(!stream_id.is_zero()); Data { stream_id: stream_id, data: payload, flags: DataFlags::default(), pad_len: None, } } /// Returns the stream identifer that this frame is associated with. /// /// This cannot be a zero stream identifier. pub fn stream_id(&self) -> StreamId { self.stream_id } /// Gets the value of the `END_STREAM` flag for this frame. /// /// If true, this frame is the last that the endpoint will send for the /// identified stream. /// /// Setting this flag causes the stream to enter one of the "half-closed" /// states or the "closed" state (Section 5.1). pub fn is_end_stream(&self) -> bool { self.flags.is_end_stream() } /// Sets the value for the `END_STREAM` flag on this frame. pub fn set_end_stream(&mut self, val: bool) { if val { self.flags.set_end_stream(); } else { self.flags.unset_end_stream(); } } /// Returns whther the `PADDED` flag is set on this frame. #[cfg(feature = "unstable")] pub fn is_padded(&self) -> bool { self.flags.is_padded() } /// Sets the value for the `PADDED` flag on this frame. #[cfg(feature = "unstable")] pub fn set_padded(&mut self) { self.flags.set_padded(); } /// Returns a reference to this frame's payload. /// /// This does **not** include any padding that might have been originally /// included. pub fn payload(&self) -> &T { &self.data } /// Returns a mutable reference to this frame's payload. /// /// This does **not** include any padding that might have been originally /// included. pub fn payload_mut(&mut self) -> &mut T { &mut self.data } /// Consumes `self` and returns the frame's payload. /// /// This does **not** include any padding that might have been originally /// included. pub fn into_payload(self) -> T { self.data } pub(crate) fn head(&self) -> Head { Head::new(Kind::Data, self.flags.into(), self.stream_id) } pub(crate) fn map(self, f: F) -> Data where F: FnOnce(T) -> U, { Data { stream_id: self.stream_id, data: f(self.data), flags: self.flags, pad_len: self.pad_len, } } } impl Data { pub(crate) fn load(head: Head, mut payload: Bytes) -> Result { let flags = DataFlags::load(head.flag()); // The stream identifier must not be zero if head.stream_id().is_zero() { return Err(Error::InvalidStreamId); } let pad_len = if flags.is_padded() { let len = util::strip_padding(&mut payload)?; Some(len) } else { None }; Ok(Data { stream_id: head.stream_id(), data: payload, flags: flags, pad_len: pad_len, }) } } impl Data { /// Encode the data frame into the `dst` buffer. /// /// # Panics /// /// Panics if `dst` cannot contain the data frame. 
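// Illustrative sketch (hypothetical helper name, std-only): `Data::load`
// above strips padding via `util::strip_padding` when the PADDED flag is set.
// The first payload octet is the pad length and that many trailing octets are
// removed; padding that covers the whole payload is an error.
#[cfg(test)]
mod padding_sketch {
    fn strip_padding(payload: &[u8]) -> Result<&[u8], &'static str> {
        let (&pad_len, rest) = match payload.split_first() {
            Some(split) => split,
            None => return Err("padded frame with no pad length octet"),
        };
        if pad_len as usize > rest.len() {
            return Err("padding larger than remaining payload");
        }
        Ok(&rest[..rest.len() - pad_len as usize])
    }

    #[test]
    fn trailing_padding_is_dropped() {
        // Pad length 3, two data octets, three zero padding octets.
        let payload = [3u8, b'h', b'i', 0, 0, 0];
        assert_eq!(strip_padding(&payload).unwrap(), &b"hi"[..]);
        assert!(strip_padding(&[6, b'h', b'i', 0, 0, 0]).is_err());
    }
}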
pub(crate) fn encode_chunk(&mut self, dst: &mut U) { let len = self.data.remaining() as usize; assert!(dst.remaining_mut() >= len); self.head().encode(len, dst); dst.put(&mut self.data); } } impl From> for Frame { fn from(src: Data) -> Self { Frame::Data(src) } } impl fmt::Debug for Data { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut f = fmt.debug_struct("Data"); f.field("stream_id", &self.stream_id); if !self.flags.is_empty() { f.field("flags", &self.flags); } if let Some(ref pad_len) = self.pad_len { f.field("pad_len", pad_len); } // `data` bytes purposefully excluded f.finish() } } // ===== impl DataFlags ===== impl DataFlags { fn load(bits: u8) -> DataFlags { DataFlags(bits & ALL) } fn is_empty(&self) -> bool { self.0 == 0 } fn is_end_stream(&self) -> bool { self.0 & END_STREAM == END_STREAM } fn set_end_stream(&mut self) { self.0 |= END_STREAM } fn unset_end_stream(&mut self) { self.0 &= !END_STREAM } fn is_padded(&self) -> bool { self.0 & PADDED == PADDED } #[cfg(feature = "unstable")] fn set_padded(&mut self) { self.0 |= PADDED } } impl Default for DataFlags { fn default() -> Self { DataFlags(0) } } impl From for u8 { fn from(src: DataFlags) -> u8 { src.0 } } impl fmt::Debug for DataFlags { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { util::debug_flags(fmt, self.0) .flag_if(self.is_end_stream(), "END_STREAM") .flag_if(self.is_padded(), "PADDED") .finish() } } h2-0.1.26/src/frame/go_away.rs010066400017500001750000000025221327314171300142140ustar0000000000000000use frame::{self, Error, Head, Kind, Reason, StreamId}; use bytes::{BufMut}; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct GoAway { last_stream_id: StreamId, error_code: Reason, } impl GoAway { pub fn new(last_stream_id: StreamId, reason: Reason) -> Self { GoAway { last_stream_id, error_code: reason, } } pub fn last_stream_id(&self) -> StreamId { self.last_stream_id } pub fn reason(&self) -> Reason { self.error_code } pub fn load(payload: &[u8]) -> Result { if payload.len() < 8 { return Err(Error::BadFrameSize); } let (last_stream_id, _) = StreamId::parse(&payload[..4]); let error_code = unpack_octets_4!(payload, 4, u32); Ok(GoAway { last_stream_id: last_stream_id, error_code: error_code.into(), }) } pub fn encode(&self, dst: &mut B) { trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); head.encode(8, dst); dst.put_u32_be(self.last_stream_id.into()); dst.put_u32_be(self.error_code.into()); } } impl From for frame::Frame { fn from(src: GoAway) -> Self { frame::Frame::GoAway(src) } } h2-0.1.26/src/frame/head.rs010066400017500001750000000036441327314171300134750ustar0000000000000000use super::StreamId; use bytes::{BufMut}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Head { kind: Kind, flag: u8, stream_id: StreamId, } #[repr(u8)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Kind { Data = 0, Headers = 1, Priority = 2, Reset = 3, Settings = 4, PushPromise = 5, Ping = 6, GoAway = 7, WindowUpdate = 8, Continuation = 9, Unknown, } // ===== impl Head ===== impl Head { pub fn new(kind: Kind, flag: u8, stream_id: StreamId) -> Head { Head { kind: kind, flag: flag, stream_id: stream_id, } } /// Parse an HTTP/2.0 frame header pub fn parse(header: &[u8]) -> Head { let (stream_id, _) = StreamId::parse(&header[5..]); Head { kind: Kind::new(header[3]), flag: header[4], stream_id, } } pub fn stream_id(&self) -> StreamId { self.stream_id } pub fn kind(&self) -> Kind { self.kind } pub fn flag(&self) -> u8 { self.flag } pub fn 
encode_len(&self) -> usize { super::HEADER_LEN } pub fn encode(&self, payload_len: usize, dst: &mut T) { debug_assert!(self.encode_len() <= dst.remaining_mut()); dst.put_uint_be(payload_len as u64, 3); dst.put_u8(self.kind as u8); dst.put_u8(self.flag); dst.put_u32_be(self.stream_id.into()); } } // ===== impl Kind ===== impl Kind { pub fn new(byte: u8) -> Kind { match byte { 0 => Kind::Data, 1 => Kind::Headers, 2 => Kind::Priority, 3 => Kind::Reset, 4 => Kind::Settings, 5 => Kind::PushPromise, 6 => Kind::Ping, 7 => Kind::GoAway, 8 => Kind::WindowUpdate, 9 => Kind::Continuation, _ => Kind::Unknown, } } } h2-0.1.26/src/frame/headers.rs010066400017500001750000000611061347362176000142130ustar0000000000000000use super::{util, StreamDependency, StreamId}; use frame::{Error, Frame, Head, Kind}; use hpack; use http::{uri, HeaderMap, Method, StatusCode, Uri}; use http::header::{self, HeaderName, HeaderValue}; use byteorder::{BigEndian, ByteOrder}; use bytes::{Bytes, BytesMut}; use string::String; use std::fmt; use std::io::Cursor; // Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame // head and other header bits. const MAX_HEADER_LENGTH: usize = 1024 * 16 - 100; /// Header frame /// /// This could be either a request or a response. #[derive(Eq, PartialEq)] pub struct Headers { /// The ID of the stream with which this frame is associated. stream_id: StreamId, /// The stream dependency information, if any. stream_dep: Option, /// The header block fragment header_block: HeaderBlock, /// The associated flags flags: HeadersFlag, } #[derive(Copy, Clone, Eq, PartialEq)] pub struct HeadersFlag(u8); #[derive(Eq, PartialEq)] pub struct PushPromise { /// The ID of the stream with which this frame is associated. stream_id: StreamId, /// The ID of the stream being reserved by this PushPromise. promised_id: StreamId, /// The header block fragment header_block: HeaderBlock, /// The associated flags flags: PushPromiseFlag, } #[derive(Copy, Clone, Eq, PartialEq)] pub struct PushPromiseFlag(u8); #[derive(Debug)] pub struct Continuation { /// Stream ID of continuation frame stream_id: StreamId, header_block: EncodingHeaderBlock, } // TODO: These fields shouldn't be `pub` #[derive(Debug, Default, Eq, PartialEq)] pub struct Pseudo { // Request pub method: Option, pub scheme: Option>, pub authority: Option>, pub path: Option>, // Response pub status: Option, } #[derive(Debug)] pub struct Iter { /// Pseudo headers pseudo: Option, /// Header fields fields: header::IntoIter, } #[derive(Debug, PartialEq, Eq)] struct HeaderBlock { /// The decoded header fields fields: HeaderMap, /// Set to true if decoding went over the max header list size. is_over_size: bool, /// Pseudo headers, these are broken out as they must be sent as part of the /// headers frame. 
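// Illustrative sketch (hypothetical helper names, std-only) relating to
// `GoAway::encode` in frame/go_away.rs above: the GOAWAY frames this crate
// emits carry an 8-octet payload -- a 31-bit last-stream-id followed by a
// 32-bit error code -- and no additional debug data.
#[cfg(test)]
mod go_away_sketch {
    fn be32(v: u32) -> [u8; 4] {
        [(v >> 24) as u8, (v >> 16) as u8, (v >> 8) as u8, v as u8]
    }

    fn go_away_payload(last_stream_id: u32, error_code: u32) -> [u8; 8] {
        let mut buf = [0u8; 8];
        buf[..4].copy_from_slice(&be32(last_stream_id & 0x7FFF_FFFF));
        buf[4..].copy_from_slice(&be32(error_code));
        buf
    }

    #[test]
    fn no_error_go_away_for_stream_five() {
        assert_eq!(go_away_payload(5, 0), [0, 0, 0, 5, 0, 0, 0, 0]);
    }
}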
pseudo: Pseudo, } #[derive(Debug)] struct EncodingHeaderBlock { /// Argument to pass to the HPACK encoder to resume encoding hpack: Option, /// remaining headers to encode headers: Iter, } const END_STREAM: u8 = 0x1; const END_HEADERS: u8 = 0x4; const PADDED: u8 = 0x8; const PRIORITY: u8 = 0x20; const ALL: u8 = END_STREAM | END_HEADERS | PADDED | PRIORITY; // ===== impl Headers ===== impl Headers { /// Create a new HEADERS frame pub fn new(stream_id: StreamId, pseudo: Pseudo, fields: HeaderMap) -> Self { Headers { stream_id: stream_id, stream_dep: None, header_block: HeaderBlock { fields: fields, is_over_size: false, pseudo: pseudo, }, flags: HeadersFlag::default(), } } pub fn trailers(stream_id: StreamId, fields: HeaderMap) -> Self { let mut flags = HeadersFlag::default(); flags.set_end_stream(); Headers { stream_id, stream_dep: None, header_block: HeaderBlock { fields: fields, is_over_size: false, pseudo: Pseudo::default(), }, flags: flags, } } /// Loads the header frame but doesn't actually do HPACK decoding. /// /// HPACK decoding is done in the `load_hpack` step. pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { let flags = HeadersFlag(head.flag()); let mut pad = 0; trace!("loading headers; flags={:?}", flags); // Read the padding length if flags.is_padded() { if src.len() < 1 { return Err(Error::MalformedMessage); } pad = src[0] as usize; // Drop the padding let _ = src.split_to(1); } // Read the stream dependency let stream_dep = if flags.is_priority() { if src.len() < 5 { return Err(Error::MalformedMessage); } let stream_dep = StreamDependency::load(&src[..5])?; if stream_dep.dependency_id() == head.stream_id() { return Err(Error::InvalidDependencyId); } // Drop the next 5 bytes let _ = src.split_to(5); Some(stream_dep) } else { None }; if pad > 0 { if pad > src.len() { return Err(Error::TooMuchPadding); } let len = src.len() - pad; src.truncate(len); } let headers = Headers { stream_id: head.stream_id(), stream_dep: stream_dep, header_block: HeaderBlock { fields: HeaderMap::new(), is_over_size: false, pseudo: Pseudo::default(), }, flags: flags, }; Ok((headers, src)) } pub fn load_hpack(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { self.header_block.load(src, max_header_list_size, decoder) } pub fn stream_id(&self) -> StreamId { self.stream_id } pub fn is_end_headers(&self) -> bool { self.flags.is_end_headers() } pub fn set_end_headers(&mut self) { self.flags.set_end_headers(); } pub fn is_end_stream(&self) -> bool { self.flags.is_end_stream() } pub fn set_end_stream(&mut self) { self.flags.set_end_stream() } pub fn is_over_size(&self) -> bool { self.header_block.is_over_size } pub(crate) fn has_too_big_field(&self) -> bool { self.header_block.has_too_big_field() } pub fn into_parts(self) -> (Pseudo, HeaderMap) { (self.header_block.pseudo, self.header_block.fields) } #[cfg(feature = "unstable")] pub fn pseudo_mut(&mut self) -> &mut Pseudo { &mut self.header_block.pseudo } pub fn fields(&self) -> &HeaderMap { &self.header_block.fields } pub fn into_fields(self) -> HeaderMap { self.header_block.fields } pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { // At this point, the `is_end_headers` flag should always be set debug_assert!(self.flags.is_end_headers()); // Get the HEADERS frame head let head = self.head(); self.header_block.into_encoding() .encode(&head, encoder, dst, |_| { }) } fn head(&self) -> Head { Head::new(Kind::Headers, self.flags.into(), 
self.stream_id) } } impl From for Frame { fn from(src: Headers) -> Self { Frame::Headers(src) } } impl fmt::Debug for Headers { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut builder = f.debug_struct("Headers"); builder .field("stream_id", &self.stream_id) .field("flags", &self.flags); if let Some(ref dep) = self.stream_dep { builder.field("stream_dep", dep); } // `fields` and `pseudo` purposefully not included builder.finish() } } // ===== impl PushPromise ===== impl PushPromise { /// Loads the push promise frame but doesn't actually do HPACK decoding. /// /// HPACK decoding is done in the `load_hpack` step. pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { let flags = PushPromiseFlag(head.flag()); let mut pad = 0; // Read the padding length if flags.is_padded() { if src.len() < 1 { return Err(Error::MalformedMessage); } // TODO: Ensure payload is sized correctly pad = src[0] as usize; // Drop the padding let _ = src.split_to(1); } if src.len() < 5 { return Err(Error::MalformedMessage); } let (promised_id, _) = StreamId::parse(&src[..4]); // Drop promised_id bytes let _ = src.split_to(4); if pad > 0 { if pad > src.len() { return Err(Error::TooMuchPadding); } let len = src.len() - pad; src.truncate(len); } let frame = PushPromise { flags: flags, header_block: HeaderBlock { fields: HeaderMap::new(), is_over_size: false, pseudo: Pseudo::default(), }, promised_id: promised_id, stream_id: head.stream_id(), }; Ok((frame, src)) } pub fn load_hpack(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { self.header_block.load(src, max_header_list_size, decoder) } pub fn stream_id(&self) -> StreamId { self.stream_id } pub fn promised_id(&self) -> StreamId { self.promised_id } pub fn is_end_headers(&self) -> bool { self.flags.is_end_headers() } pub fn set_end_headers(&mut self) { self.flags.set_end_headers(); } pub fn is_over_size(&self) -> bool { self.header_block.is_over_size } pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { use bytes::BufMut; // At this point, the `is_end_headers` flag should always be set debug_assert!(self.flags.is_end_headers()); let head = self.head(); let promised_id = self.promised_id; self.header_block.into_encoding() .encode(&head, encoder, dst, |dst| { dst.put_u32_be(promised_id.into()); }) } fn head(&self) -> Head { Head::new(Kind::PushPromise, self.flags.into(), self.stream_id) } } impl PushPromise { /// Consume `self`, returning the parts of the frame pub fn into_parts(self) -> (Pseudo, HeaderMap) { (self.header_block.pseudo, self.header_block.fields) } } #[cfg(feature = "unstable")] impl PushPromise { pub fn new( stream_id: StreamId, promised_id: StreamId, pseudo: Pseudo, fields: HeaderMap, ) -> Self { PushPromise { flags: PushPromiseFlag::default(), header_block: HeaderBlock { fields, is_over_size: false, pseudo, }, promised_id, stream_id, } } pub fn fields(&self) -> &HeaderMap { &self.header_block.fields } pub fn into_fields(self) -> HeaderMap { self.header_block.fields } } impl From for Frame { fn from(src: PushPromise) -> Self { Frame::PushPromise(src) } } impl fmt::Debug for PushPromise { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("PushPromise") .field("stream_id", &self.stream_id) .field("promised_id", &self.promised_id) .field("flags", &self.flags) // `fields` and `pseudo` purposefully not included .finish() } } // ===== impl Continuation ===== impl Continuation { fn head(&self) -> Head { 
Head::new(Kind::Continuation, END_HEADERS, self.stream_id) } pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { // Get the CONTINUATION frame head let head = self.head(); self.header_block .encode(&head, encoder, dst, |_| { }) } } // ===== impl Pseudo ===== impl Pseudo { pub fn request(method: Method, uri: Uri) -> Self { let parts = uri::Parts::from(uri); let mut path = parts .path_and_query .map(|v| v.into()) .unwrap_or_else(|| Bytes::new()); if path.is_empty() && method != Method::OPTIONS { path = Bytes::from_static(b"/"); } let mut pseudo = Pseudo { method: Some(method), scheme: None, authority: None, path: Some(to_string(path)), status: None, }; // If the URI includes a scheme component, add it to the pseudo headers // // TODO: Scheme must be set... if let Some(scheme) = parts.scheme { pseudo.set_scheme(scheme); } // If the URI includes an authority component, add it to the pseudo // headers if let Some(authority) = parts.authority { pseudo.set_authority(to_string(authority.into())); } pseudo } pub fn response(status: StatusCode) -> Self { Pseudo { method: None, scheme: None, authority: None, path: None, status: Some(status), } } pub fn set_scheme(&mut self, scheme: uri::Scheme) { self.scheme = Some(to_string(scheme.into())); } pub fn set_authority(&mut self, authority: String) { self.authority = Some(authority); } } fn to_string(src: Bytes) -> String { unsafe { String::from_utf8_unchecked(src) } } // ===== impl EncodingHeaderBlock ===== impl EncodingHeaderBlock { fn encode(mut self, head: &Head, encoder: &mut hpack::Encoder, dst: &mut BytesMut, f: F) -> Option where F: FnOnce(&mut BytesMut), { let head_pos = dst.len(); // At this point, we don't know how big the h2 frame will be. // So, we write the head with length 0, then write the body, and // finally write the length once we know the size. 
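// Illustrative sketch (hypothetical module/test names, std-only): the code
// that follows implements the pattern described above -- write the frame head
// with a zero length, encode the HPACK payload whose size is not yet known,
// then patch the real 24-bit length back into the head.
#[cfg(test)]
mod back_patch_sketch {
    #[test]
    fn length_is_patched_after_encoding() {
        let mut dst: Vec<u8> = Vec::new();
        let head_pos = dst.len();
        dst.extend_from_slice(&[0, 0, 0]); // placeholder 24-bit length
        let payload_pos = dst.len();
        dst.extend_from_slice(b"payload of unknown size");
        let payload_len = dst.len() - payload_pos;
        dst[head_pos] = (payload_len >> 16) as u8;
        dst[head_pos + 1] = (payload_len >> 8) as u8;
        dst[head_pos + 2] = payload_len as u8;
        assert_eq!(&dst[..3], &[0u8, 0, 23][..]);
    }
}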
head.encode(0, dst); let payload_pos = dst.len(); f(dst); // Now, encode the header payload let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) { hpack::Encode::Full => None, hpack::Encode::Partial(state) => Some(Continuation { stream_id: head.stream_id(), header_block: EncodingHeaderBlock { hpack: Some(state), headers: self.headers, }, }), }; // Compute the header block length let payload_len = (dst.len() - payload_pos) as u64; // Write the frame length BigEndian::write_uint(&mut dst[head_pos..head_pos + 3], payload_len, 3); if continuation.is_some() { // There will be continuation frames, so the `is_end_headers` flag // must be unset debug_assert!(dst[head_pos + 4] & END_HEADERS == END_HEADERS); dst[head_pos + 4] -= END_HEADERS; } continuation } } // ===== impl Iter ===== impl Iterator for Iter { type Item = hpack::Header>; fn next(&mut self) -> Option { use hpack::Header::*; if let Some(ref mut pseudo) = self.pseudo { if let Some(method) = pseudo.method.take() { return Some(Method(method)); } if let Some(scheme) = pseudo.scheme.take() { return Some(Scheme(scheme)); } if let Some(authority) = pseudo.authority.take() { return Some(Authority(authority)); } if let Some(path) = pseudo.path.take() { return Some(Path(path)); } if let Some(status) = pseudo.status.take() { return Some(Status(status)); } } self.pseudo = None; self.fields.next().map(|(name, value)| { Field { name: name, value: value, } }) } } // ===== impl HeadersFlag ===== impl HeadersFlag { pub fn empty() -> HeadersFlag { HeadersFlag(0) } pub fn load(bits: u8) -> HeadersFlag { HeadersFlag(bits & ALL) } pub fn is_end_stream(&self) -> bool { self.0 & END_STREAM == END_STREAM } pub fn set_end_stream(&mut self) { self.0 |= END_STREAM; } pub fn is_end_headers(&self) -> bool { self.0 & END_HEADERS == END_HEADERS } pub fn set_end_headers(&mut self) { self.0 |= END_HEADERS; } pub fn is_padded(&self) -> bool { self.0 & PADDED == PADDED } pub fn is_priority(&self) -> bool { self.0 & PRIORITY == PRIORITY } } impl Default for HeadersFlag { /// Returns a `HeadersFlag` value with `END_HEADERS` set. fn default() -> Self { HeadersFlag(END_HEADERS) } } impl From for u8 { fn from(src: HeadersFlag) -> u8 { src.0 } } impl fmt::Debug for HeadersFlag { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { util::debug_flags(fmt, self.0) .flag_if(self.is_end_headers(), "END_HEADERS") .flag_if(self.is_end_stream(), "END_STREAM") .flag_if(self.is_padded(), "PADDED") .flag_if(self.is_priority(), "PRIORITY") .finish() } } // ===== impl PushPromiseFlag ===== impl PushPromiseFlag { pub fn empty() -> PushPromiseFlag { PushPromiseFlag(0) } pub fn load(bits: u8) -> PushPromiseFlag { PushPromiseFlag(bits & ALL) } pub fn is_end_headers(&self) -> bool { self.0 & END_HEADERS == END_HEADERS } pub fn set_end_headers(&mut self) { self.0 |= END_HEADERS; } pub fn is_padded(&self) -> bool { self.0 & PADDED == PADDED } } impl Default for PushPromiseFlag { /// Returns a `PushPromiseFlag` value with `END_HEADERS` set. 
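// Illustrative sketch (hypothetical module/test names, std-only):
// `HeadersFlag::load` and `PushPromiseFlag::load` above mask the incoming
// flag byte with `ALL`, so flag bits that HTTP/2 does not define for the
// frame type are silently dropped. The constants mirror those defined above.
#[cfg(test)]
mod flag_mask_sketch {
    const END_STREAM: u8 = 0x1;
    const END_HEADERS: u8 = 0x4;
    const PADDED: u8 = 0x8;
    const PRIORITY: u8 = 0x20;
    const ALL: u8 = END_STREAM | END_HEADERS | PADDED | PRIORITY;

    #[test]
    fn unknown_bits_are_masked_off() {
        let wire_flags = 0x80 | END_HEADERS | END_STREAM; // 0x80 is not defined
        assert_eq!(wire_flags & ALL, END_HEADERS | END_STREAM);
    }
}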
fn default() -> Self { PushPromiseFlag(END_HEADERS) } } impl From for u8 { fn from(src: PushPromiseFlag) -> u8 { src.0 } } impl fmt::Debug for PushPromiseFlag { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { util::debug_flags(fmt, self.0) .flag_if(self.is_end_headers(), "END_HEADERS") .flag_if(self.is_padded(), "PADDED") .finish() } } // ===== HeaderBlock ===== impl HeaderBlock { fn load(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { let mut reg = !self.fields.is_empty(); let mut malformed = false; let mut headers_size = self.calculate_header_list_size(); macro_rules! set_pseudo { ($field:ident, $val:expr) => {{ if reg { trace!("load_hpack; header malformed -- pseudo not at head of block"); malformed = true; } else if self.pseudo.$field.is_some() { trace!("load_hpack; header malformed -- repeated pseudo"); malformed = true; } else { let __val = $val; headers_size += decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len()); if headers_size < max_header_list_size { self.pseudo.$field = Some(__val); } else if !self.is_over_size { trace!("load_hpack; header list size over max"); self.is_over_size = true; } } }} } let mut cursor = Cursor::new(src); // If the header frame is malformed, we still have to continue decoding // the headers. A malformed header frame is a stream level error, but // the hpack state is connection level. In order to maintain correct // state for other streams, the hpack decoding process must complete. let res = decoder.decode(&mut cursor, |header| { use hpack::Header::*; match header { Field { name, value, } => { // Connection level header fields are not supported and must // result in a protocol error. if name == header::CONNECTION || name == header::TRANSFER_ENCODING || name == header::UPGRADE || name == "keep-alive" || name == "proxy-connection" { trace!("load_hpack; connection level header"); malformed = true; } else if name == header::TE && value != "trailers" { trace!("load_hpack; TE header not set to trailers; val={:?}", value); malformed = true; } else { reg = true; headers_size += decoded_header_size(name.as_str().len(), value.len()); if headers_size < max_header_list_size { self.fields.append(name, value); } else if !self.is_over_size { trace!("load_hpack; header list size over max"); self.is_over_size = true; } } }, Authority(v) => set_pseudo!(authority, v), Method(v) => set_pseudo!(method, v), Scheme(v) => set_pseudo!(scheme, v), Path(v) => set_pseudo!(path, v), Status(v) => set_pseudo!(status, v), } }); if let Err(e) = res { trace!("hpack decoding error; err={:?}", e); return Err(e.into()); } if malformed { trace!("malformed message"); return Err(Error::MalformedMessage.into()); } Ok(()) } fn into_encoding(self) -> EncodingHeaderBlock { EncodingHeaderBlock { hpack: None, headers: Iter { pseudo: Some(self.pseudo), fields: self.fields.into_iter(), }, } } /// Calculates the size of the currently decoded header list. /// /// According to http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE /// /// > The value is based on the uncompressed size of header fields, /// > including the length of the name and value in octets plus an /// > overhead of 32 octets for each header field. fn calculate_header_list_size(&self) -> usize { macro_rules! 
pseudo_size { ($name:ident) => ({ self.pseudo .$name .as_ref() .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) .unwrap_or(0) }); } pseudo_size!(method) + pseudo_size!(scheme) + pseudo_size!(status) + pseudo_size!(authority) + pseudo_size!(path) + self.fields.iter() .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) .sum::() } /// Iterate over all pseudos and headers to see if any individual pair /// would be too large to encode. pub(crate) fn has_too_big_field(&self) -> bool { macro_rules! pseudo_size { ($name:ident) => ({ self.pseudo .$name .as_ref() .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) .unwrap_or(0) }); } if pseudo_size!(method) > MAX_HEADER_LENGTH { return true; } if pseudo_size!(scheme) > MAX_HEADER_LENGTH { return true; } if pseudo_size!(authority) > MAX_HEADER_LENGTH { return true; } if pseudo_size!(path) > MAX_HEADER_LENGTH { return true; } // skip :status, its never going to be too big for (name, value) in &self.fields { if decoded_header_size(name.as_str().len(), value.len()) > MAX_HEADER_LENGTH { return true; } } false } } fn decoded_header_size(name: usize, value: usize) -> usize { name + value + 32 } // Stupid hack to make the set_pseudo! macro happy, since all other values // have a method `as_str` except for `String`. trait AsStr { fn as_str(&self) -> &str; } impl AsStr for String { fn as_str(&self) -> &str { self } } h2-0.1.26/src/frame/mod.rs010066400017500001750000000104651347426626300133650ustar0000000000000000use hpack; use bytes::Bytes; use std::fmt; /// A helper macro that unpacks a sequence of 4 bytes found in the buffer with /// the given identifier, starting at the given offset, into the given integer /// type. Obviously, the integer type should be able to support at least 4 /// bytes. /// /// # Examples /// /// ```rust /// let buf: [u8; 4] = [0, 0, 0, 1]; /// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); /// ``` #[macro_escape] macro_rules! 
unpack_octets_4 { // TODO: Get rid of this macro ($buf:expr, $offset:expr, $tip:ty) => ( (($buf[$offset + 0] as $tip) << 24) | (($buf[$offset + 1] as $tip) << 16) | (($buf[$offset + 2] as $tip) << 8) | (($buf[$offset + 3] as $tip) << 0) ); } mod data; mod go_away; mod head; mod headers; mod ping; mod priority; mod reason; mod reset; mod settings; mod stream_id; mod util; mod window_update; pub use self::data::Data; pub use self::go_away::GoAway; pub use self::head::{Head, Kind}; pub use self::headers::{Continuation, Headers, Pseudo, PushPromise}; pub use self::ping::Ping; pub use self::priority::{Priority, StreamDependency}; pub use self::reason::Reason; pub use self::reset::Reset; pub use self::settings::Settings; pub use self::stream_id::{StreamId, StreamIdOverflow}; pub use self::window_update::WindowUpdate; // Re-export some constants pub use self::settings::{ DEFAULT_INITIAL_WINDOW_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_INITIAL_WINDOW_SIZE, MAX_MAX_FRAME_SIZE, }; pub type FrameSize = u32; pub const HEADER_LEN: usize = 9; #[derive(Eq, PartialEq)] pub enum Frame { Data(Data), Headers(Headers), Priority(Priority), PushPromise(PushPromise), Settings(Settings), Ping(Ping), GoAway(GoAway), WindowUpdate(WindowUpdate), Reset(Reset), } impl Frame { pub fn map(self, f: F) -> Frame where F: FnOnce(T) -> U, { use self::Frame::*; match self { Data(frame) => frame.map(f).into(), Headers(frame) => frame.into(), Priority(frame) => frame.into(), PushPromise(frame) => frame.into(), Settings(frame) => frame.into(), Ping(frame) => frame.into(), GoAway(frame) => frame.into(), WindowUpdate(frame) => frame.into(), Reset(frame) => frame.into(), } } } impl fmt::Debug for Frame { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use self::Frame::*; match *self { Data(ref frame) => fmt::Debug::fmt(frame, fmt), Headers(ref frame) => fmt::Debug::fmt(frame, fmt), Priority(ref frame) => fmt::Debug::fmt(frame, fmt), PushPromise(ref frame) => fmt::Debug::fmt(frame, fmt), Settings(ref frame) => fmt::Debug::fmt(frame, fmt), Ping(ref frame) => fmt::Debug::fmt(frame, fmt), GoAway(ref frame) => fmt::Debug::fmt(frame, fmt), WindowUpdate(ref frame) => fmt::Debug::fmt(frame, fmt), Reset(ref frame) => fmt::Debug::fmt(frame, fmt), } } } /// Errors that can occur during parsing an HTTP/2 frame. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Error { /// A length value other than 8 was set on a PING message. BadFrameSize, /// The padding length was larger than the frame-header-specified /// length of the payload. TooMuchPadding, /// An invalid setting value was provided InvalidSettingValue, /// An invalid window update value InvalidWindowUpdateValue, /// The payload length specified by the frame header was not the /// value necessary for the specific frame type. InvalidPayloadLength, /// Received a payload with an ACK settings frame InvalidPayloadAckSettings, /// An invalid stream identifier was provided. /// /// This is returned if a SETTINGS or PING frame is received with a stream /// identifier other than zero. InvalidStreamId, /// A request or response is malformed. MalformedMessage, /// An invalid stream dependency ID was provided /// /// This is returned if a HEADERS or PRIORITY frame is received with an /// invalid stream identifier. 
    InvalidDependencyId,

    /// Failed to perform HPACK decoding
    Hpack(hpack::DecoderError),
}
h2-0.1.26/src/frame/ping.rs010066400017500001750000000057631343264346100135410ustar0000000000000000
use bytes::{Buf, BufMut, IntoBuf};
use frame::{Error, Frame, Head, Kind, StreamId};

const ACK_FLAG: u8 = 0x1;

pub type Payload = [u8; 8];

#[derive(Debug, Eq, PartialEq)]
pub struct Ping {
    ack: bool,
    payload: Payload,
}

// This was just 8 randomly generated bytes. We use something besides just
// zeroes to distinguish this specific PING from any other.
const SHUTDOWN_PAYLOAD: Payload = [0x0b, 0x7b, 0xa2, 0xf0, 0x8b, 0x9b, 0xfe, 0x54];
const USER_PAYLOAD: Payload = [0x3b, 0x7c, 0xdb, 0x7a, 0x0b, 0x87, 0x16, 0xb4];

impl Ping {
    #[cfg(feature = "unstable")]
    pub const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD;

    #[cfg(not(feature = "unstable"))]
    pub(crate) const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD;

    #[cfg(feature = "unstable")]
    pub const USER: Payload = USER_PAYLOAD;

    #[cfg(not(feature = "unstable"))]
    pub(crate) const USER: Payload = USER_PAYLOAD;

    pub fn new(payload: Payload) -> Ping {
        Ping {
            ack: false,
            payload,
        }
    }

    pub fn pong(payload: Payload) -> Ping {
        Ping {
            ack: true,
            payload,
        }
    }

    pub fn is_ack(&self) -> bool {
        self.ack
    }

    pub fn payload(&self) -> &Payload {
        &self.payload
    }

    pub fn into_payload(self) -> Payload {
        self.payload
    }

    /// Builds a `Ping` frame from a raw frame.
    pub fn load(head: Head, bytes: &[u8]) -> Result<Ping, Error> {
        debug_assert_eq!(head.kind(), ::frame::Kind::Ping);

        // PING frames are not associated with any individual stream. If a PING
        // frame is received with a stream identifier field value other than
        // 0x0, the recipient MUST respond with a connection error
        // (Section 5.4.1) of type PROTOCOL_ERROR.
        if !head.stream_id().is_zero() {
            return Err(Error::InvalidStreamId);
        }

        // In addition to the frame header, PING frames MUST contain 8 octets of opaque
        // data in the payload.
        if bytes.len() != 8 {
            return Err(Error::BadFrameSize);
        }

        let mut payload = [0; 8];
        bytes.into_buf().copy_to_slice(&mut payload);

        // The PING frame defines the following flags:
        //
        // ACK (0x1): When set, bit 0 indicates that this PING frame is a PING
        // response. An endpoint MUST set this flag in PING responses. An
        // endpoint MUST NOT respond to PING frames containing this flag.
        let ack = head.flag() & ACK_FLAG != 0;

        Ok(Ping {
            ack,
            payload,
        })
    }

    pub fn encode<B: BufMut>(&self, dst: &mut B) {
        let sz = self.payload.len();
        trace!("encoding PING; ack={} len={}", self.ack, sz);

        let flags = if self.ack { ACK_FLAG } else { 0 };
        let head = Head::new(Kind::Ping, flags, StreamId::zero());

        head.encode(sz, dst);
        dst.put_slice(&self.payload);
    }
}

impl<T> From<Ping> for Frame<T> {
    fn from(src: Ping) -> Frame<T> {
        Frame::Ping(src)
    }
}
h2-0.1.26/src/frame/priority.rs010066400017500001750000000034271315635047700144650ustar0000000000000000
use frame::*;

#[derive(Debug, Eq, PartialEq)]
pub struct Priority {
    stream_id: StreamId,
    dependency: StreamDependency,
}

#[derive(Debug, Eq, PartialEq)]
pub struct StreamDependency {
    /// The ID of the stream dependency target
    dependency_id: StreamId,

    /// The weight for the stream. The value exposed (and set) here is always in
    /// the range [0, 255], instead of [1, 256] (as defined in section 5.3.2.)
    /// so that the value fits into a `u8`.
    weight: u8,

    /// True if the stream dependency is exclusive.
is_exclusive: bool, } impl Priority { pub fn load(head: Head, payload: &[u8]) -> Result { let dependency = StreamDependency::load(payload)?; if dependency.dependency_id() == head.stream_id() { return Err(Error::InvalidDependencyId); } Ok(Priority { stream_id: head.stream_id(), dependency: dependency, }) } } impl From for Frame { fn from(src: Priority) -> Self { Frame::Priority(src) } } // ===== impl StreamDependency ===== impl StreamDependency { pub fn new(dependency_id: StreamId, weight: u8, is_exclusive: bool) -> Self { StreamDependency { dependency_id, weight, is_exclusive, } } pub fn load(src: &[u8]) -> Result { if src.len() != 5 { return Err(Error::InvalidPayloadLength); } // Parse the stream ID and exclusive flag let (dependency_id, is_exclusive) = StreamId::parse(&src[..4]); // Read the weight let weight = src[4]; Ok(StreamDependency::new(dependency_id, weight, is_exclusive)) } pub fn dependency_id(&self) -> StreamId { self.dependency_id } } h2-0.1.26/src/frame/reason.rs010066400017500001750000000120651322620427000140540ustar0000000000000000use std::fmt; /// HTTP/2.0 error codes. /// /// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the /// reasons for the stream or connection error. For example, /// [`SendStream::send_reset`] takes a `Reason` argument. Also, the `Error` type /// may contain a `Reason`. /// /// Error codes share a common code space. Some error codes apply only to /// streams, others apply only to connections, and others may apply to either. /// See [RFC 7540] for more information. /// /// See [Error Codes in the spec][spec]. /// /// [spec]: http://httpwg.org/specs/rfc7540.html#ErrorCodes /// [`SendStream::send_reset`]: struct.SendStream.html#method.send_reset #[derive(PartialEq, Eq, Clone, Copy)] pub struct Reason(u32); impl Reason { /// The associated condition is not a result of an error. /// /// For example, a GOAWAY might include this code to indicate graceful /// shutdown of a connection. pub const NO_ERROR: Reason = Reason(0); /// The endpoint detected an unspecific protocol error. /// /// This error is for use when a more specific error code is not available. pub const PROTOCOL_ERROR: Reason = Reason(1); /// The endpoint encountered an unexpected internal error. pub const INTERNAL_ERROR: Reason = Reason(2); /// The endpoint detected that its peer violated the flow-control protocol. pub const FLOW_CONTROL_ERROR: Reason = Reason(3); /// The endpoint sent a SETTINGS frame but did not receive a response in /// a timely manner. pub const SETTINGS_TIMEOUT: Reason = Reason(4); /// The endpoint received a frame after a stream was half-closed. pub const STREAM_CLOSED: Reason = Reason(5); /// The endpoint received a frame with an invalid size. pub const FRAME_SIZE_ERROR: Reason = Reason(6); /// The endpoint refused the stream prior to performing any application /// processing. pub const REFUSED_STREAM: Reason = Reason(7); /// Used by the endpoint to indicate that the stream is no longer needed. pub const CANCEL: Reason = Reason(8); /// The endpoint is unable to maintain the header compression context for /// the connection. pub const COMPRESSION_ERROR: Reason = Reason(9); /// The connection established in response to a CONNECT request was reset /// or abnormally closed. pub const CONNECT_ERROR: Reason = Reason(10); /// The endpoint detected that its peer is exhibiting a behavior that might /// be generating excessive load. 
pub const ENHANCE_YOUR_CALM: Reason = Reason(11); /// The underlying transport has properties that do not meet minimum /// security requirements. pub const INADEQUATE_SECURITY: Reason = Reason(12); /// The endpoint requires that HTTP/1.1 be used instead of HTTP/2. pub const HTTP_1_1_REQUIRED: Reason = Reason(13); /// Get a string description of the error code. pub fn description(&self) -> &str { match self.0 { 0 => "not a result of an error", 1 => "unspecific protocol error detected", 2 => "unexpected internal error encountered", 3 => "flow-control protocol violated", 4 => "settings ACK not received in timely manner", 5 => "received frame when stream half-closed", 6 => "frame with invalid size", 7 => "refused stream before processing any application logic", 8 => "stream no longer needed", 9 => "unable to maintain the header compression context", 10 => { "connection established in response to a CONNECT request was reset or abnormally \ closed" }, 11 => "detected excessive load generating behavior", 12 => "security properties do not meet minimum requirements", 13 => "endpoint requires HTTP/1.1", _ => "unknown reason", } } } impl From for Reason { fn from(src: u32) -> Reason { Reason(src) } } impl From for u32 { fn from(src: Reason) -> u32 { src.0 } } impl fmt::Debug for Reason { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match self.0 { 0 => "NO_ERROR", 1 => "PROTOCOL_ERROR", 2 => "INTERNAL_ERROR", 3 => "FLOW_CONTROL_ERROR", 4 => "SETTINGS_TIMEOUT", 5 => "STREAM_CLOSED", 6 => "FRAME_SIZE_ERROR", 7 => "REFUSED_STREAM", 8 => "CANCEL", 9 => "COMPRESSION_ERROR", 10 => "CONNECT_ERROR", 11 => "ENHANCE_YOUR_CALM", 12 => "INADEQUATE_SECURITY", 13 => "HTTP_1_1_REQUIRED", other => return f.debug_tuple("Reason") .field(&Hex(other)) .finish(), }; f.write_str(name) } } struct Hex(u32); impl fmt::Debug for Hex { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(&self.0, f) } } impl fmt::Display for Reason { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}", self.description()) } } h2-0.1.26/src/frame/reset.rs010066400017500001750000000024001327314171300137030ustar0000000000000000use frame::{self, Error, Head, Kind, Reason, StreamId}; use bytes::{BufMut}; #[derive(Debug, Eq, PartialEq)] pub struct Reset { stream_id: StreamId, error_code: Reason, } impl Reset { pub fn new(stream_id: StreamId, error: Reason) -> Reset { Reset { stream_id, error_code: error, } } pub fn stream_id(&self) -> StreamId { self.stream_id } pub fn reason(&self) -> Reason { self.error_code } pub fn load(head: Head, payload: &[u8]) -> Result { if payload.len() != 4 { return Err(Error::InvalidPayloadLength); } let error_code = unpack_octets_4!(payload, 0, u32); Ok(Reset { stream_id: head.stream_id(), error_code: error_code.into(), }) } pub fn encode(&self, dst: &mut B) { trace!( "encoding RESET; id={:?} code={:?}", self.stream_id, self.error_code ); let head = Head::new(Kind::Reset, 0, self.stream_id); head.encode(4, dst); dst.put_u32_be(self.error_code.into()); } } impl From for frame::Frame { fn from(src: Reset) -> Self { frame::Frame::Reset(src) } } h2-0.1.26/src/frame/settings.rs010066400017500001750000000237571351644257100144510ustar0000000000000000use std::fmt; use bytes::{BufMut, BytesMut}; use frame::{util, Error, Frame, FrameSize, Head, Kind, StreamId}; #[derive(Clone, Default, Eq, PartialEq)] pub struct Settings { flags: SettingsFlags, // Fields header_table_size: Option, enable_push: Option, max_concurrent_streams: Option, initial_window_size: Option, 
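    // Each field mirrors one SETTINGS parameter; `None` means the parameter
    // is absent (so the protocol default applies) and is skipped entirely
    // when the frame is encoded -- see `for_each` further below, which only
    // visits `Some` values.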
max_frame_size: Option, max_header_list_size: Option, } /// An enum that lists all valid settings that can be sent in a SETTINGS /// frame. /// /// Each setting has a value that is a 32 bit unsigned integer (6.5.1.). #[derive(Debug)] pub enum Setting { HeaderTableSize(u32), EnablePush(u32), MaxConcurrentStreams(u32), InitialWindowSize(u32), MaxFrameSize(u32), MaxHeaderListSize(u32), } #[derive(Copy, Clone, Eq, PartialEq, Default)] pub struct SettingsFlags(u8); const ACK: u8 = 0x1; const ALL: u8 = ACK; /// The default value of SETTINGS_HEADER_TABLE_SIZE pub const DEFAULT_SETTINGS_HEADER_TABLE_SIZE: usize = 4_096; /// The default value of SETTINGS_INITIAL_WINDOW_SIZE pub const DEFAULT_INITIAL_WINDOW_SIZE: u32 = 65_535; /// The default value of MAX_FRAME_SIZE pub const DEFAULT_MAX_FRAME_SIZE: FrameSize = 16_384; /// INITIAL_WINDOW_SIZE upper bound pub const MAX_INITIAL_WINDOW_SIZE: usize = (1 << 31) - 1; /// MAX_FRAME_SIZE upper bound pub const MAX_MAX_FRAME_SIZE: FrameSize = (1 << 24) - 1; // ===== impl Settings ===== impl Settings { pub fn ack() -> Settings { Settings { flags: SettingsFlags::ack(), ..Settings::default() } } pub fn is_ack(&self) -> bool { self.flags.is_ack() } pub fn initial_window_size(&self) -> Option { self.initial_window_size } pub fn set_initial_window_size(&mut self, size: Option) { self.initial_window_size = size; } pub fn max_concurrent_streams(&self) -> Option { self.max_concurrent_streams } pub fn set_max_concurrent_streams(&mut self, max: Option) { self.max_concurrent_streams = max; } pub fn max_frame_size(&self) -> Option { self.max_frame_size } pub fn set_max_frame_size(&mut self, size: Option) { if let Some(val) = size { assert!(DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE); } self.max_frame_size = size; } pub fn max_header_list_size(&self) -> Option { self.max_header_list_size } pub fn set_max_header_list_size(&mut self, size: Option) { self.max_header_list_size = size; } pub fn header_table_size(&self) -> Option { self.header_table_size } pub fn set_header_table_size(&mut self, size: Option) { self.header_table_size = size; } pub fn is_push_enabled(&self) -> bool { self.enable_push.unwrap_or(1) != 0 } pub fn set_enable_push(&mut self, enable: bool) { self.enable_push = Some(enable as u32); } pub fn load(head: Head, payload: &[u8]) -> Result { use self::Setting::*; debug_assert_eq!(head.kind(), ::frame::Kind::Settings); if !head.stream_id().is_zero() { return Err(Error::InvalidStreamId); } // Load the flag let flag = SettingsFlags::load(head.flag()); if flag.is_ack() { // Ensure that the payload is empty if payload.len() > 0 { return Err(Error::InvalidPayloadLength); } // Return the ACK frame return Ok(Settings::ack()); } // Ensure the payload length is correct, each setting is 6 bytes long. 
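        // For example, a SETTINGS payload carrying two parameters is exactly
        // 12 bytes: each entry is a 16-bit identifier followed by a 32-bit
        // value (RFC 7540, Section 6.5.1), so any length that is not a
        // multiple of 6 cannot be a whole number of entries and is rejected
        // by the check that follows.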
if payload.len() % 6 != 0 { debug!("invalid settings payload length; len={:?}", payload.len()); return Err(Error::InvalidPayloadAckSettings); } let mut settings = Settings::default(); debug_assert!(!settings.flags.is_ack()); for raw in payload.chunks(6) { match Setting::load(raw) { Some(HeaderTableSize(val)) => { settings.header_table_size = Some(val); }, Some(EnablePush(val)) => match val { 0 | 1 => { settings.enable_push = Some(val); }, _ => { return Err(Error::InvalidSettingValue); }, }, Some(MaxConcurrentStreams(val)) => { settings.max_concurrent_streams = Some(val); }, Some(InitialWindowSize(val)) => if val as usize > MAX_INITIAL_WINDOW_SIZE { return Err(Error::InvalidSettingValue); } else { settings.initial_window_size = Some(val); }, Some(MaxFrameSize(val)) => { if val < DEFAULT_MAX_FRAME_SIZE || val > MAX_MAX_FRAME_SIZE { return Err(Error::InvalidSettingValue); } else { settings.max_frame_size = Some(val); } }, Some(MaxHeaderListSize(val)) => { settings.max_header_list_size = Some(val); }, None => {}, } } Ok(settings) } fn payload_len(&self) -> usize { let mut len = 0; self.for_each(|_| len += 6); len } pub fn encode(&self, dst: &mut BytesMut) { // Create & encode an appropriate frame head let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); let payload_len = self.payload_len(); trace!("encoding SETTINGS; len={}", payload_len); head.encode(payload_len, dst); // Encode the settings self.for_each(|setting| { trace!("encoding setting; val={:?}", setting); setting.encode(dst) }); } fn for_each(&self, mut f: F) { use self::Setting::*; if let Some(v) = self.header_table_size { f(HeaderTableSize(v)); } if let Some(v) = self.enable_push { f(EnablePush(v)); } if let Some(v) = self.max_concurrent_streams { f(MaxConcurrentStreams(v)); } if let Some(v) = self.initial_window_size { f(InitialWindowSize(v)); } if let Some(v) = self.max_frame_size { f(MaxFrameSize(v)); } if let Some(v) = self.max_header_list_size { f(MaxHeaderListSize(v)); } } } impl From for Frame { fn from(src: Settings) -> Frame { Frame::Settings(src) } } impl fmt::Debug for Settings { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut builder = f.debug_struct("Settings"); builder.field("flags", &self.flags); self.for_each(|setting| match setting { Setting::EnablePush(v) => { builder.field("enable_push", &v); } Setting::HeaderTableSize(v) => { builder.field("header_table_size", &v); } Setting::InitialWindowSize(v) => { builder.field("initial_window_size", &v); } Setting::MaxConcurrentStreams(v) => { builder.field("max_concurrent_streams", &v); } Setting::MaxFrameSize(v) => { builder.field("max_frame_size", &v); } Setting::MaxHeaderListSize(v) => { builder.field("max_header_list_size", &v); } }); builder.finish() } } // ===== impl Setting ===== impl Setting { /// Creates a new `Setting` with the correct variant corresponding to the /// given setting id, based on the settings IDs defined in section /// 6.5.2. pub fn from_id(id: u16, val: u32) -> Option { use self::Setting::*; match id { 1 => Some(HeaderTableSize(val)), 2 => Some(EnablePush(val)), 3 => Some(MaxConcurrentStreams(val)), 4 => Some(InitialWindowSize(val)), 5 => Some(MaxFrameSize(val)), 6 => Some(MaxHeaderListSize(val)), _ => None, } } /// Creates a new `Setting` by parsing the given buffer of 6 bytes, which /// contains the raw byte representation of the setting, according to the /// "SETTINGS format" defined in section 6.5.1. 
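    ///
    /// For example, the six octets below identify setting `0x4`
    /// (`SETTINGS_INITIAL_WINDOW_SIZE`) with the value `65_535`, and decode
    /// to `Setting::InitialWindowSize(65_535)`:
    ///
    /// ```text
    /// +-------------+-------------------------+
    /// | 0x00  0x04  | 0x00  0x00  0xFF  0xFF  |
    /// +-------------+-------------------------+
    /// | Identifier  | Value (big-endian)      |
    /// +-------------+-------------------------+
    /// ```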
/// /// The `raw` parameter should have length at least 6 bytes, since the /// length of the raw setting is exactly 6 bytes. /// /// # Panics /// /// If given a buffer shorter than 6 bytes, the function will panic. fn load(raw: &[u8]) -> Option { let id: u16 = ((raw[0] as u16) << 8) | (raw[1] as u16); let val: u32 = unpack_octets_4!(raw, 2, u32); Setting::from_id(id, val) } fn encode(&self, dst: &mut BytesMut) { use self::Setting::*; let (kind, val) = match *self { HeaderTableSize(v) => (1, v), EnablePush(v) => (2, v), MaxConcurrentStreams(v) => (3, v), InitialWindowSize(v) => (4, v), MaxFrameSize(v) => (5, v), MaxHeaderListSize(v) => (6, v), }; dst.put_u16_be(kind); dst.put_u32_be(val); } } // ===== impl SettingsFlags ===== impl SettingsFlags { pub fn empty() -> SettingsFlags { SettingsFlags(0) } pub fn load(bits: u8) -> SettingsFlags { SettingsFlags(bits & ALL) } pub fn ack() -> SettingsFlags { SettingsFlags(ACK) } pub fn is_ack(&self) -> bool { self.0 & ACK == ACK } } impl From for u8 { fn from(src: SettingsFlags) -> u8 { src.0 } } impl fmt::Debug for SettingsFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { util::debug_flags(f, self.0) .flag_if(self.is_ack(), "ACK") .finish() } } h2-0.1.26/src/frame/stream_id.rs010066400017500001750000000053141332764616700145540ustar0000000000000000use byteorder::{BigEndian, ByteOrder}; use std::u32; /// A stream identifier, as described in [Section 5.1.1] of RFC 7540. /// /// Streams are identified with an unsigned 31-bit integer. Streams /// initiated by a client MUST use odd-numbered stream identifiers; those /// initiated by the server MUST use even-numbered stream identifiers. A /// stream identifier of zero (0x0) is used for connection control /// messages; the stream identifier of zero cannot be used to establish a /// new stream. /// /// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct StreamId(u32); #[derive(Debug, Copy, Clone)] pub struct StreamIdOverflow; const STREAM_ID_MASK: u32 = 1 << 31; impl StreamId { /// Stream ID 0. pub const ZERO: StreamId = StreamId(0); /// The maximum allowed stream ID. pub const MAX: StreamId = StreamId(u32::MAX >> 1); /// Parse the stream ID #[inline] pub fn parse(buf: &[u8]) -> (StreamId, bool) { let unpacked = BigEndian::read_u32(buf); let flag = unpacked & STREAM_ID_MASK == STREAM_ID_MASK; // Now clear the most significant bit, as that is reserved and MUST be // ignored when received. (StreamId(unpacked & !STREAM_ID_MASK), flag) } /// Returns true if this stream ID corresponds to a stream that /// was initiated by the client. pub fn is_client_initiated(&self) -> bool { let id = self.0; id != 0 && id % 2 == 1 } /// Returns true if this stream ID corresponds to a stream that /// was initiated by the server. pub fn is_server_initiated(&self) -> bool { let id = self.0; id != 0 && id % 2 == 0 } /// Return a new `StreamId` for stream 0. #[inline] pub fn zero() -> StreamId { StreamId::ZERO } /// Returns true if this stream ID is zero. pub fn is_zero(&self) -> bool { self.0 == 0 } /// Returns the next stream ID initiated by the same peer as this stream /// ID, or an error if incrementing this stream ID would overflow the /// maximum. 
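    ///
    /// For example, a client-initiated stream with ID 5 yields 7, while a
    /// server-initiated stream with ID 2 yields 4; calling this on
    /// `StreamId::MAX` returns `Err(StreamIdOverflow)`.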
pub fn next_id(&self) -> Result { let next = self.0 + 2; if next > StreamId::MAX.0 { Err(StreamIdOverflow) } else { Ok(StreamId(next)) } } } impl From for StreamId { fn from(src: u32) -> Self { assert_eq!(src & STREAM_ID_MASK, 0, "invalid stream ID -- MSB is set"); StreamId(src) } } impl From for u32 { fn from(src: StreamId) -> Self { src.0 } } impl PartialEq for StreamId { fn eq(&self, other: &u32) -> bool { self.0 == *other } } h2-0.1.26/src/frame/util.rs010066400017500001750000000041641346734666600135720ustar0000000000000000use std::fmt; use super::Error; use bytes::Bytes; /// Strip padding from the given payload. /// /// It is assumed that the frame had the padded flag set. This means that the /// first byte is the length of the padding with that many /// 0 bytes expected to follow the actual payload. /// /// # Returns /// /// A slice of the given payload where the actual one is found and the length /// of the padding. /// /// If the padded payload is invalid (e.g. the length of the padding is equal /// to the total length), returns `None`. pub fn strip_padding(payload: &mut Bytes) -> Result { let payload_len = payload.len(); if payload_len == 0 { // If this is the case, the frame is invalid as no padding length can be // extracted, even though the frame should be padded. return Err(Error::TooMuchPadding); } let pad_len = payload[0] as usize; if pad_len >= payload_len { // This is invalid: the padding length MUST be less than the // total frame size. return Err(Error::TooMuchPadding); } let _ = payload.split_to(1); let _ = payload.split_off(payload_len - pad_len - 1); Ok(pad_len as u8) } pub(super) fn debug_flags<'a, 'f: 'a>(fmt: &'a mut fmt::Formatter<'f>, bits: u8) -> DebugFlags<'a, 'f> { let result = write!(fmt, "({:#x}", bits); DebugFlags { fmt, result, started: false, } } pub(super) struct DebugFlags<'a, 'f: 'a> { fmt: &'a mut fmt::Formatter<'f>, result: fmt::Result, started: bool, } impl<'a, 'f: 'a> DebugFlags<'a, 'f> { pub(super) fn flag_if(&mut self, enabled: bool, name: &str) -> &mut Self { if enabled { self.result = self.result.and_then(|()| { let prefix = if self.started { " | " } else { self.started = true; ": " }; write!(self.fmt, "{}{}", prefix, name) }); } self } pub(super) fn finish(&mut self) -> fmt::Result { self.result.and_then(|()| { write!(self.fmt, ")") }) } } h2-0.1.26/src/frame/window_update.rs010066400017500001750000000032301347426626300154470ustar0000000000000000use frame::{self, Error, Head, Kind, StreamId}; use bytes::{BufMut}; const SIZE_INCREMENT_MASK: u32 = 1 << 31; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct WindowUpdate { stream_id: StreamId, size_increment: u32, } impl WindowUpdate { pub fn new(stream_id: StreamId, size_increment: u32) -> WindowUpdate { WindowUpdate { stream_id, size_increment, } } pub fn stream_id(&self) -> StreamId { self.stream_id } pub fn size_increment(&self) -> u32 { self.size_increment } /// Builds a `WindowUpdate` frame from a raw frame. pub fn load(head: Head, payload: &[u8]) -> Result { debug_assert_eq!(head.kind(), ::frame::Kind::WindowUpdate); if payload.len() != 4 { return Err(Error::BadFrameSize); } // Clear the most significant bit, as that is reserved and MUST be ignored // when received. 
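        // For example, raw payload bytes `[0x80, 0x00, 0x01, 0x00]` unpack to
        // `0x8000_0100`; clearing the reserved bit leaves a window size
        // increment of 256.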
let size_increment = unpack_octets_4!(payload, 0, u32) & !SIZE_INCREMENT_MASK; if size_increment == 0 { return Err(Error::InvalidWindowUpdateValue.into()); } Ok(WindowUpdate { stream_id: head.stream_id(), size_increment, }) } pub fn encode(&self, dst: &mut B) { trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); head.encode(4, dst); dst.put_u32_be(self.size_increment); } } impl From for frame::Frame { fn from(src: WindowUpdate) -> Self { frame::Frame::WindowUpdate(src) } } h2-0.1.26/src/hpack/decoder.rs010066400017500001750000000642551351644257100142100ustar0000000000000000use super::{huffman, Header}; use frame; use bytes::{Buf, Bytes, BytesMut}; use http::header; use http::method::{self, Method}; use http::status::{self, StatusCode}; use string::String; use std::cmp; use std::collections::VecDeque; use std::io::Cursor; use std::str::Utf8Error; /// Decodes headers using HPACK #[derive(Debug)] pub struct Decoder { // Protocol indicated that the max table size will update max_size_update: Option, last_max_update: usize, table: Table, buffer: BytesMut, } /// Represents all errors that can be encountered while performing the decoding /// of an HPACK header set. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DecoderError { InvalidRepresentation, InvalidIntegerPrefix, InvalidTableIndex, InvalidHuffmanCode, InvalidUtf8, InvalidStatusCode, InvalidPseudoheader, InvalidMaxDynamicSize, IntegerOverflow, NeedMore(NeedMore), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum NeedMore { UnexpectedEndOfStream, IntegerUnderflow, StringUnderflow, } #[derive(Debug)] enum Representation { /// Indexed header field representation /// /// An indexed header field representation identifies an entry in either the /// static table or the dynamic table (see Section 2.3). /// /// # Header encoding /// /// ```text /// 0 1 2 3 4 5 6 7 /// +---+---+---+---+---+---+---+---+ /// | 1 | Index (7+) | /// +---+---------------------------+ /// ``` Indexed, /// Literal Header Field with Incremental Indexing /// /// A literal header field with incremental indexing representation results /// in appending a header field to the decoded header list and inserting it /// as a new entry into the dynamic table. /// /// # Header encoding /// /// ```text /// 0 1 2 3 4 5 6 7 /// +---+---+---+---+---+---+---+---+ /// | 0 | 1 | Index (6+) | /// +---+---+-----------------------+ /// | H | Value Length (7+) | /// +---+---------------------------+ /// | Value String (Length octets) | /// +-------------------------------+ /// ``` LiteralWithIndexing, /// Literal Header Field without Indexing /// /// A literal header field without indexing representation results in /// appending a header field to the decoded header list without altering the /// dynamic table. /// /// # Header encoding /// /// ```text /// 0 1 2 3 4 5 6 7 /// +---+---+---+---+---+---+---+---+ /// | 0 | 0 | 0 | 0 | Index (4+) | /// +---+---+-----------------------+ /// | H | Value Length (7+) | /// +---+---------------------------+ /// | Value String (Length octets) | /// +-------------------------------+ /// ``` LiteralWithoutIndexing, /// Literal Header Field Never Indexed /// /// A literal header field never-indexed representation results in appending /// a header field to the decoded header list without altering the dynamic /// table. Intermediaries MUST use the same representation for encoding this /// header field. 
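    ///
    /// This representation is intended for sensitive values (for example an
    /// `authorization` or `set-cookie` header), which are not to be inserted
    /// into a dynamic table, either here or by any intermediary that
    /// re-encodes the header block.
    ///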
/// /// ```text /// 0 1 2 3 4 5 6 7 /// +---+---+---+---+---+---+---+---+ /// | 0 | 0 | 0 | 1 | Index (4+) | /// +---+---+-----------------------+ /// | H | Value Length (7+) | /// +---+---------------------------+ /// | Value String (Length octets) | /// +-------------------------------+ /// ``` LiteralNeverIndexed, /// Dynamic Table Size Update /// /// A dynamic table size update signals a change to the size of the dynamic /// table. /// /// # Header encoding /// /// ```text /// 0 1 2 3 4 5 6 7 /// +---+---+---+---+---+---+---+---+ /// | 0 | 0 | 1 | Max size (5+) | /// +---+---------------------------+ /// ``` SizeUpdate, } #[derive(Debug)] struct Table { entries: VecDeque
, size: usize, max_size: usize, } // ===== impl Decoder ===== impl Decoder { /// Creates a new `Decoder` with all settings set to default values. pub fn new(size: usize) -> Decoder { Decoder { max_size_update: None, last_max_update: size, table: Table::new(size), buffer: BytesMut::with_capacity(4096), } } /// Queues a potential size update pub fn queue_size_update(&mut self, size: usize) { let size = match self.max_size_update { Some(v) => cmp::max(v, size), None => size, }; // If size isn't changing, then an Update isn't required if self.table.max_size() != size { self.max_size_update = Some(size); } } /// Decodes the headers found in the given buffer. pub fn decode(&mut self, src: &mut Cursor<&mut BytesMut>, mut f: F) -> Result<(), DecoderError> where F: FnMut(Header), { use self::Representation::*; trace!("decode"); // If a max size update is expected, it MUST occur at the beginning // of the next header block. if let Some(size) = self.max_size_update { let ty = match peek_u8(src) { Some(byte) => Representation::load(byte)?, None => return Ok(()), }; match ty { SizeUpdate => { trace!(" SizeUpdate required and found"); self.last_max_update = size; self.max_size_update = None; // Handle the dynamic table size update self.process_size_update(src)?; consume(src); }, ty => { trace!(" expected SizeUpdate, received={:?}", ty); return Err(DecoderError::InvalidMaxDynamicSize); }, } } // Above, a SizeUpdate was *required* because of a SETTINGS change. // However, a SizeUpdate can happen without one... let mut can_resize = true; while let Some(ty) = peek_u8(src) { // At this point we are always at the beginning of the next block // within the HPACK data. The type of the block can always be // determined from the first byte. match Representation::load(ty)? { Indexed => { trace!(" Indexed; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_indexed(src)?; consume(src); f(entry); }, LiteralWithIndexing => { trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, true)?; // Insert the header into the table self.table.insert(entry.clone()); consume(src); f(entry); }, LiteralWithoutIndexing => { trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); f(entry); }, LiteralNeverIndexed => { trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); // TODO: Track that this should never be indexed f(entry); }, SizeUpdate => { trace!(" SizeUpdate; rem={:?}", src.remaining()); if !can_resize { return Err(DecoderError::InvalidMaxDynamicSize); } // Handle the dynamic table size update self.process_size_update(src)?; consume(src); }, } } Ok(()) } fn process_size_update(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<(), DecoderError> { let new_size = decode_int(buf, 5)?; if new_size > self.last_max_update { return Err(DecoderError::InvalidMaxDynamicSize); } debug!( "Decoder changed max table size from {} to {}", self.table.size(), new_size ); self.table.set_max_size(new_size); Ok(()) } fn decode_indexed(&self, buf: &mut Cursor<&mut BytesMut>) -> Result { let index = decode_int(buf, 7)?; self.table.get(index) } fn decode_literal( &mut self, buf: &mut Cursor<&mut BytesMut>, index: bool, ) -> Result { let prefix = if index { 6 } else { 4 }; // Extract the table index for the name, or 0 if not indexed let table_idx = decode_int(buf, prefix)?; // First, read the header 
name if table_idx == 0 { // Read the name as a literal let name = self.decode_string(buf)?; let value = self.decode_string(buf)?; Header::new(name, value) } else { let e = self.table.get(table_idx)?; let value = self.decode_string(buf)?; e.name().into_entry(value) } } fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { const HUFF_FLAG: u8 = 0b10000000; // The first bit in the first byte contains the huffman encoded flag. let huff = match peek_u8(buf) { Some(hdr) => (hdr & HUFF_FLAG) == HUFF_FLAG, None => return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)), }; // Decode the string length using 7 bit prefix let len = decode_int(buf, 7)?; if len > buf.remaining() { trace!( "decode_string underflow; len={}; remaining={}", len, buf.remaining() ); return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); } if huff { let ret = { let raw = &buf.bytes()[..len]; huffman::decode(raw, &mut self.buffer).map(Into::into) }; buf.advance(len); return ret; } Ok(take(buf, len)) } } impl Default for Decoder { fn default() -> Decoder { Decoder::new(4096) } } // ===== impl Representation ===== impl Representation { pub fn load(byte: u8) -> Result { const INDEXED: u8 = 0b10000000; const LITERAL_WITH_INDEXING: u8 = 0b01000000; const LITERAL_WITHOUT_INDEXING: u8 = 0b11110000; const LITERAL_NEVER_INDEXED: u8 = 0b00010000; const SIZE_UPDATE_MASK: u8 = 0b11100000; const SIZE_UPDATE: u8 = 0b00100000; // TODO: What did I even write here? if byte & INDEXED == INDEXED { Ok(Representation::Indexed) } else if byte & LITERAL_WITH_INDEXING == LITERAL_WITH_INDEXING { Ok(Representation::LiteralWithIndexing) } else if byte & LITERAL_WITHOUT_INDEXING == 0 { Ok(Representation::LiteralWithoutIndexing) } else if byte & LITERAL_WITHOUT_INDEXING == LITERAL_NEVER_INDEXED { Ok(Representation::LiteralNeverIndexed) } else if byte & SIZE_UPDATE_MASK == SIZE_UPDATE { Ok(Representation::SizeUpdate) } else { Err(DecoderError::InvalidRepresentation) } } } fn decode_int(buf: &mut B, prefix_size: u8) -> Result { // The octet limit is chosen such that the maximum allowed *value* can // never overflow an unsigned 32-bit integer. The maximum value of any // integer that can be encoded with 5 octets is ~2^28 const MAX_BYTES: usize = 5; const VARINT_MASK: u8 = 0b01111111; const VARINT_FLAG: u8 = 0b10000000; if prefix_size < 1 || prefix_size > 8 { return Err(DecoderError::InvalidIntegerPrefix); } if !buf.has_remaining() { return Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)); } let mask = if prefix_size == 8 { 0xFF } else { (1u8 << prefix_size).wrapping_sub(1) }; let mut ret = (buf.get_u8() & mask) as usize; if ret < mask as usize { // Value fits in the prefix bits return Ok(ret); } // The int did not fit in the prefix bits, so continue reading. // // The total number of bytes used to represent the int. The first byte was // the prefix, so start at 1. let mut bytes = 1; // The rest of the int is stored as a varint -- 7 bits for the value and 1 // bit to indicate if it is the last byte. 
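    // Worked example (matches RFC 7541, Section C.1.2): the value 1337 with
    // a 5-bit prefix is encoded as 0b0001_1111, 0b1001_1010, 0b0000_1010.
    // The prefix contributes 31, the first continuation byte adds
    // (0b001_1010 = 26) << 0, and the final byte (high bit clear) adds
    // 10 << 7 = 1280, giving 31 + 26 + 1280 = 1337.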
let mut shift = 0; while buf.has_remaining() { let b = buf.get_u8(); bytes += 1; ret += ((b & VARINT_MASK) as usize) << shift; shift += 7; if b & VARINT_FLAG == 0 { return Ok(ret); } if bytes == MAX_BYTES { // The spec requires that this situation is an error return Err(DecoderError::IntegerOverflow); } } Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)) } fn peek_u8(buf: &mut B) -> Option { if buf.has_remaining() { Some(buf.bytes()[0]) } else { None } } fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes { let pos = buf.position() as usize; let mut head = buf.get_mut().split_to(pos + n); buf.set_position(0); head.split_to(pos); head.freeze() } fn consume(buf: &mut Cursor<&mut BytesMut>) { // remove bytes from the internal BytesMut when they have been successfully // decoded. This is a more permanent cursor position, which will be // used to resume if decoding was only partial. take(buf, 0); } // ===== impl Table ===== impl Table { fn new(max_size: usize) -> Table { Table { entries: VecDeque::new(), size: 0, max_size: max_size, } } fn size(&self) -> usize { self.size } fn max_size(&self) -> usize { self.max_size } /// Returns the entry located at the given index. /// /// The table is 1-indexed and constructed in such a way that the first /// entries belong to the static table, followed by entries in the dynamic /// table. They are merged into a single index address space, though. /// /// This is according to the [HPACK spec, section 2.3.3.] /// (http://http2.github.io/http2-spec/compression.html#index.address.space) pub fn get(&self, index: usize) -> Result { if index == 0 { return Err(DecoderError::InvalidTableIndex); } if index <= 61 { return Ok(get_static(index)); } // Convert the index for lookup in the entries structure. match self.entries.get(index - 62) { Some(e) => Ok(e.clone()), None => Err(DecoderError::InvalidTableIndex), } } fn insert(&mut self, entry: Header) { let len = entry.len(); self.reserve(len); if self.size + len <= self.max_size { self.size += len; // Track the entry self.entries.push_front(entry); } } fn set_max_size(&mut self, size: usize) { self.max_size = size; // Make the table size fit within the new constraints. self.consolidate(); } fn reserve(&mut self, size: usize) { while self.size + size > self.max_size { match self.entries.pop_back() { Some(last) => { self.size -= last.len(); } None => return, } } } fn consolidate(&mut self) { while self.size > self.max_size { { let last = match self.entries.back() { Some(x) => x, None => { // Can never happen as the size of the table must reach // 0 by the time we've exhausted all elements. panic!("Size of table != 0, but no headers left!"); }, }; self.size -= last.len(); } self.entries.pop_back(); } } } // ===== impl DecoderError ===== impl From for DecoderError { fn from(_: Utf8Error) -> DecoderError { // TODO: Better error? DecoderError::InvalidUtf8 } } impl From for DecoderError { fn from(_: header::InvalidHeaderValue) -> DecoderError { // TODO: Better error? 
DecoderError::InvalidUtf8 } } impl From for DecoderError { fn from(_: header::InvalidHeaderName) -> DecoderError { // TODO: Better error DecoderError::InvalidUtf8 } } impl From for DecoderError { fn from(_: method::InvalidMethod) -> DecoderError { // TODO: Better error DecoderError::InvalidUtf8 } } impl From for DecoderError { fn from(_: status::InvalidStatusCode) -> DecoderError { // TODO: Better error DecoderError::InvalidUtf8 } } impl From for frame::Error { fn from(src: DecoderError) -> Self { frame::Error::Hpack(src) } } /// Get an entry from the static table pub fn get_static(idx: usize) -> Header { use http::header::HeaderValue; match idx { 1 => Header::Authority(from_static("")), 2 => Header::Method(Method::GET), 3 => Header::Method(Method::POST), 4 => Header::Path(from_static("/")), 5 => Header::Path(from_static("/index.html")), 6 => Header::Scheme(from_static("http")), 7 => Header::Scheme(from_static("https")), 8 => Header::Status(StatusCode::OK), 9 => Header::Status(StatusCode::NO_CONTENT), 10 => Header::Status(StatusCode::PARTIAL_CONTENT), 11 => Header::Status(StatusCode::NOT_MODIFIED), 12 => Header::Status(StatusCode::BAD_REQUEST), 13 => Header::Status(StatusCode::NOT_FOUND), 14 => Header::Status(StatusCode::INTERNAL_SERVER_ERROR), 15 => Header::Field { name: header::ACCEPT_CHARSET, value: HeaderValue::from_static(""), }, 16 => Header::Field { name: header::ACCEPT_ENCODING, value: HeaderValue::from_static("gzip, deflate"), }, 17 => Header::Field { name: header::ACCEPT_LANGUAGE, value: HeaderValue::from_static(""), }, 18 => Header::Field { name: header::ACCEPT_RANGES, value: HeaderValue::from_static(""), }, 19 => Header::Field { name: header::ACCEPT, value: HeaderValue::from_static(""), }, 20 => Header::Field { name: header::ACCESS_CONTROL_ALLOW_ORIGIN, value: HeaderValue::from_static(""), }, 21 => Header::Field { name: header::AGE, value: HeaderValue::from_static(""), }, 22 => Header::Field { name: header::ALLOW, value: HeaderValue::from_static(""), }, 23 => Header::Field { name: header::AUTHORIZATION, value: HeaderValue::from_static(""), }, 24 => Header::Field { name: header::CACHE_CONTROL, value: HeaderValue::from_static(""), }, 25 => Header::Field { name: header::CONTENT_DISPOSITION, value: HeaderValue::from_static(""), }, 26 => Header::Field { name: header::CONTENT_ENCODING, value: HeaderValue::from_static(""), }, 27 => Header::Field { name: header::CONTENT_LANGUAGE, value: HeaderValue::from_static(""), }, 28 => Header::Field { name: header::CONTENT_LENGTH, value: HeaderValue::from_static(""), }, 29 => Header::Field { name: header::CONTENT_LOCATION, value: HeaderValue::from_static(""), }, 30 => Header::Field { name: header::CONTENT_RANGE, value: HeaderValue::from_static(""), }, 31 => Header::Field { name: header::CONTENT_TYPE, value: HeaderValue::from_static(""), }, 32 => Header::Field { name: header::COOKIE, value: HeaderValue::from_static(""), }, 33 => Header::Field { name: header::DATE, value: HeaderValue::from_static(""), }, 34 => Header::Field { name: header::ETAG, value: HeaderValue::from_static(""), }, 35 => Header::Field { name: header::EXPECT, value: HeaderValue::from_static(""), }, 36 => Header::Field { name: header::EXPIRES, value: HeaderValue::from_static(""), }, 37 => Header::Field { name: header::FROM, value: HeaderValue::from_static(""), }, 38 => Header::Field { name: header::HOST, value: HeaderValue::from_static(""), }, 39 => Header::Field { name: header::IF_MATCH, value: HeaderValue::from_static(""), }, 40 => Header::Field { name: 
header::IF_MODIFIED_SINCE, value: HeaderValue::from_static(""), }, 41 => Header::Field { name: header::IF_NONE_MATCH, value: HeaderValue::from_static(""), }, 42 => Header::Field { name: header::IF_RANGE, value: HeaderValue::from_static(""), }, 43 => Header::Field { name: header::IF_UNMODIFIED_SINCE, value: HeaderValue::from_static(""), }, 44 => Header::Field { name: header::LAST_MODIFIED, value: HeaderValue::from_static(""), }, 45 => Header::Field { name: header::LINK, value: HeaderValue::from_static(""), }, 46 => Header::Field { name: header::LOCATION, value: HeaderValue::from_static(""), }, 47 => Header::Field { name: header::MAX_FORWARDS, value: HeaderValue::from_static(""), }, 48 => Header::Field { name: header::PROXY_AUTHENTICATE, value: HeaderValue::from_static(""), }, 49 => Header::Field { name: header::PROXY_AUTHORIZATION, value: HeaderValue::from_static(""), }, 50 => Header::Field { name: header::RANGE, value: HeaderValue::from_static(""), }, 51 => Header::Field { name: header::REFERER, value: HeaderValue::from_static(""), }, 52 => Header::Field { name: header::REFRESH, value: HeaderValue::from_static(""), }, 53 => Header::Field { name: header::RETRY_AFTER, value: HeaderValue::from_static(""), }, 54 => Header::Field { name: header::SERVER, value: HeaderValue::from_static(""), }, 55 => Header::Field { name: header::SET_COOKIE, value: HeaderValue::from_static(""), }, 56 => Header::Field { name: header::STRICT_TRANSPORT_SECURITY, value: HeaderValue::from_static(""), }, 57 => Header::Field { name: header::TRANSFER_ENCODING, value: HeaderValue::from_static(""), }, 58 => Header::Field { name: header::USER_AGENT, value: HeaderValue::from_static(""), }, 59 => Header::Field { name: header::VARY, value: HeaderValue::from_static(""), }, 60 => Header::Field { name: header::VIA, value: HeaderValue::from_static(""), }, 61 => Header::Field { name: header::WWW_AUTHENTICATE, value: HeaderValue::from_static(""), }, _ => unreachable!(), } } fn from_static(s: &'static str) -> String { unsafe { String::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) } } #[cfg(test)] mod test { use super::*; use hpack::Header; #[test] fn test_peek_u8() { let b = 0xff; let mut buf = Cursor::new(vec![b]); assert_eq!(peek_u8(&mut buf), Some(b)); assert_eq!(buf.get_u8(), b); assert_eq!(peek_u8(&mut buf), None); } #[test] fn test_decode_string_empty() { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); let err = de.decode_string(&mut Cursor::new(&mut buf)).unwrap_err(); assert_eq!(err, DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); } #[test] fn test_decode_empty() { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); let empty = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); assert_eq!(empty, ()); } #[test] fn test_decode_indexed_larger_than_table() { let mut de = Decoder::new(0); let mut buf = vec![0b01000000, 0x80 | 2]; buf.extend(huff_encode(b"foo")); buf.extend(&[0x80 | 3]); buf.extend(huff_encode(b"bar")); let mut buf = buf.into(); let mut res = vec![]; let _ = de.decode(&mut Cursor::new(&mut buf), |h| { res.push(h); }).unwrap(); assert_eq!(res.len(), 1); assert_eq!(de.table.size(), 0); match res[0] { Header::Field { ref name, ref value } => { assert_eq!(name, "foo"); assert_eq!(value, "bar"); } _ => panic!(), } } fn huff_encode(src: &[u8]) -> BytesMut { let mut buf = BytesMut::new(); huffman::encode(src, &mut buf).unwrap(); buf } } h2-0.1.26/src/hpack/encoder.rs010066400017500001750000000634471351644257100142240ustar0000000000000000use super::{huffman, Header}; use 
super::table::{Index, Table}; use bytes::{BufMut, BytesMut}; use http::header::{HeaderName, HeaderValue}; #[derive(Debug)] pub struct Encoder { table: Table, size_update: Option, } #[derive(Debug)] pub enum Encode { Full, Partial(EncodeState), } #[derive(Debug)] pub struct EncodeState { index: Index, value: Option, } #[derive(Debug, PartialEq, Eq)] pub enum EncoderError { BufferOverflow, } #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum SizeUpdate { One(usize), Two(usize, usize), // min, max } impl Encoder { pub fn new(max_size: usize, capacity: usize) -> Encoder { Encoder { table: Table::new(max_size, capacity), size_update: None, } } /// Queues a max size update. /// /// The next call to `encode` will include a dynamic size update frame. pub fn update_max_size(&mut self, val: usize) { match self.size_update { Some(SizeUpdate::One(old)) => if val > old { if old > self.table.max_size() { self.size_update = Some(SizeUpdate::One(val)); } else { self.size_update = Some(SizeUpdate::Two(old, val)); } } else { self.size_update = Some(SizeUpdate::One(val)); }, Some(SizeUpdate::Two(min, _)) => if val < min { self.size_update = Some(SizeUpdate::One(val)); } else { self.size_update = Some(SizeUpdate::Two(min, val)); }, None => { if val != self.table.max_size() { // Don't bother writing a frame if the value already matches // the table's max size. self.size_update = Some(SizeUpdate::One(val)); } }, } } /// Encode a set of headers into the provide buffer pub fn encode( &mut self, resume: Option, headers: &mut I, dst: &mut BytesMut, ) -> Encode where I: Iterator>>, { let len = dst.len(); if let Err(e) = self.encode_size_updates(dst) { if e == EncoderError::BufferOverflow { dst.truncate(len); } unreachable!("encode_size_updates errored"); } let mut last_index = None; if let Some(resume) = resume { let len = dst.len(); let res = match resume.value { Some(ref value) => self.encode_header_without_name(&resume.index, value, dst), None => self.encode_header(&resume.index, dst), }; if res.is_err() { dst.truncate(len); return Encode::Partial(resume); } last_index = Some(resume.index); } for header in headers { let len = dst.len(); match header.reify() { // The header has an associated name. In which case, try to // index it in the table. Ok(header) => { let index = self.table.index(header); let res = self.encode_header(&index, dst); if res.is_err() { dst.truncate(len); return Encode::Partial(EncodeState { index: index, value: None, }); } last_index = Some(index); }, // The header does not have an associated name. This means that // the name is the same as the previously yielded header. In // which case, we skip table lookup and just use the same index // as the previous entry. 
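                // Note: `last_index` was set above whenever a name-carrying
                // header (or the resumed entry) was encoded, so the
                // `unwrap_or_else` in this branch relies on the invariant
                // that a name-less header is never yielded first.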
Err(value) => { let res = self.encode_header_without_name( last_index.as_ref().unwrap_or_else(|| { panic!("encoding header without name, but no previous index to use for name"); }), &value, dst, ); if res.is_err() { dst.truncate(len); return Encode::Partial(EncodeState { index: last_index.unwrap(), // checked just above value: Some(value), }); } }, }; } Encode::Full } fn encode_size_updates(&mut self, dst: &mut BytesMut) -> Result<(), EncoderError> { match self.size_update.take() { Some(SizeUpdate::One(val)) => { self.table.resize(val); encode_size_update(val, dst)?; }, Some(SizeUpdate::Two(min, max)) => { self.table.resize(min); self.table.resize(max); encode_size_update(min, dst)?; encode_size_update(max, dst)?; }, None => {}, } Ok(()) } fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) -> Result<(), EncoderError> { match *index { Index::Indexed(idx, _) => { encode_int(idx, 7, 0x80, dst)?; }, Index::Name(idx, _) => { let header = self.table.resolve(&index); encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?; }, Index::Inserted(_) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); if !dst.has_remaining_mut() { return Err(EncoderError::BufferOverflow); } dst.put_u8(0b01000000); encode_str(header.name().as_slice(), dst)?; encode_str(header.value_slice(), dst)?; }, Index::InsertedValue(idx, _) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); encode_int(idx, 6, 0b01000000, dst)?; encode_str(header.value_slice(), dst)?; }, Index::NotIndexed(_) => { let header = self.table.resolve(&index); encode_not_indexed2( header.name().as_slice(), header.value_slice(), header.is_sensitive(), dst, )?; }, } Ok(()) } fn encode_header_without_name( &mut self, last: &Index, value: &HeaderValue, dst: &mut BytesMut, ) -> Result<(), EncoderError> { match *last { Index::Indexed(..) | Index::Name(..) | Index::Inserted(..) | Index::InsertedValue(..) 
=> { let idx = self.table.resolve_idx(last); encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?; }, Index::NotIndexed(_) => { let last = self.table.resolve(last); encode_not_indexed2( last.name().as_slice(), value.as_ref(), value.is_sensitive(), dst, )?; }, } Ok(()) } } impl Default for Encoder { fn default() -> Encoder { Encoder::new(4096, 0) } } fn encode_size_update(val: usize, dst: &mut B) -> Result<(), EncoderError> { encode_int(val, 5, 0b00100000, dst) } fn encode_not_indexed( name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut, ) -> Result<(), EncoderError> { if sensitive { encode_int(name, 4, 0b10000, dst)?; } else { encode_int(name, 4, 0, dst)?; } encode_str(value, dst)?; Ok(()) } fn encode_not_indexed2( name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut, ) -> Result<(), EncoderError> { if !dst.has_remaining_mut() { return Err(EncoderError::BufferOverflow); } if sensitive { dst.put_u8(0b10000); } else { dst.put_u8(0); } encode_str(name, dst)?; encode_str(value, dst)?; Ok(()) } fn encode_str(val: &[u8], dst: &mut BytesMut) -> Result<(), EncoderError> { use std::io::Cursor; if !dst.has_remaining_mut() { return Err(EncoderError::BufferOverflow); } if val.len() != 0 { let idx = dst.len(); // Push a placeholder byte for the length header dst.put_u8(0); // Encode with huffman huffman::encode(val, dst)?; let huff_len = dst.len() - (idx + 1); if encode_int_one_byte(huff_len, 7) { // Write the string head dst[idx] = 0x80 | huff_len as u8; } else { // Write the head to a placeholer let mut buf = [0; 8]; let head_len = { let mut head_dst = Cursor::new(&mut buf); encode_int(huff_len, 7, 0x80, &mut head_dst)?; head_dst.position() as usize }; if dst.remaining_mut() < head_len { return Err(EncoderError::BufferOverflow); } // This is just done to reserve space in the destination dst.put_slice(&buf[1..head_len]); // Shift the header forward for i in 0..huff_len { let src_i = idx + 1 + (huff_len - (i + 1)); let dst_i = idx + head_len + (huff_len - (i + 1)); dst[dst_i] = dst[src_i]; } // Copy in the head for i in 0..head_len { dst[idx + i] = buf[i]; } } } else { // Write an empty string dst.put_u8(0); } Ok(()) } /// Encode an integer into the given destination buffer fn encode_int( mut value: usize, // The integer to encode prefix_bits: usize, // The number of bits in the prefix first_byte: u8, // The base upon which to start encoding the int dst: &mut B, ) -> Result<(), EncoderError> { let mut rem = dst.remaining_mut(); if rem == 0 { return Err(EncoderError::BufferOverflow); } if encode_int_one_byte(value, prefix_bits) { dst.put_u8(first_byte | value as u8); return Ok(()); } let low = (1 << prefix_bits) - 1; value -= low; if value > 0x0fffffff { panic!("value out of range"); } dst.put_u8(first_byte | low as u8); rem -= 1; while value >= 128 { if rem == 0 { return Err(EncoderError::BufferOverflow); } dst.put_u8(0b10000000 | value as u8); rem -= 1; value = value >> 7; } if rem == 0 { return Err(EncoderError::BufferOverflow); } dst.put_u8(value as u8); Ok(()) } /// Returns true if the in the int can be fully encoded in the first byte. 
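/// For example, with the 7-bit prefix used for indexed fields and string
/// lengths, any value up to 126 fits in a single byte, while 127 and above
/// spill into continuation octets.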
fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool { value < (1 << prefix_bits) - 1 } #[cfg(test)] mod test { use super::*; use hpack::Header; use http::*; #[test] fn test_encode_method_get() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![method("GET")]); assert_eq!(*res, [0x80 | 2]); assert_eq!(encoder.table.len(), 0); } #[test] fn test_encode_method_post() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![method("POST")]); assert_eq!(*res, [0x80 | 3]); assert_eq!(encoder.table.len(), 0); } #[test] fn test_encode_method_patch() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![method("PATCH")]); assert_eq!(res[0], 0b01000000 | 2); // Incremental indexing w/ name pulled from table assert_eq!(res[1], 0x80 | 5); // header value w/ huffman coding assert_eq!("PATCH", huff_decode(&res[2..7])); assert_eq!(encoder.table.len(), 1); let res = encode(&mut encoder, vec![method("PATCH")]); assert_eq!(1 << 7 | 62, res[0]); assert_eq!(1, res.len()); } #[test] fn test_encode_indexed_name_literal_value() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![header("content-language", "foo")]); assert_eq!(res[0], 0b01000000 | 27); // Indexed name assert_eq!(res[1], 0x80 | 2); // header value w/ huffman coding assert_eq!("foo", huff_decode(&res[2..4])); // Same name, new value should still use incremental let res = encode(&mut encoder, vec![header("content-language", "bar")]); assert_eq!(res[0], 0b01000000 | 27); // Indexed name assert_eq!(res[1], 0x80 | 3); // header value w/ huffman coding assert_eq!("bar", huff_decode(&res[2..5])); } #[test] fn test_repeated_headers_are_indexed() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![header("foo", "hello")]); assert_eq!(&[0b01000000, 0x80 | 2], &res[0..2]); assert_eq!("foo", huff_decode(&res[2..4])); assert_eq!(0x80 | 4, res[4]); assert_eq!("hello", huff_decode(&res[5..])); assert_eq!(9, res.len()); assert_eq!(1, encoder.table.len()); let res = encode(&mut encoder, vec![header("foo", "hello")]); assert_eq!([0x80 | 62], *res); assert_eq!(encoder.table.len(), 1); } #[test] fn test_evicting_headers() { let mut encoder = Encoder::default(); // Fill the table for i in 0..64 { let key = format!("x-hello-world-{:02}", i); let res = encode(&mut encoder, vec![header(&key, &key)]); assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); assert_eq!(key, huff_decode(&res[2..14])); assert_eq!(0x80 | 12, res[14]); assert_eq!(key, huff_decode(&res[15..])); assert_eq!(27, res.len()); // Make sure the header can be found... 
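        // (HPACK addresses the 61-entry static table as indices 1..=61;
        // dynamic-table entries follow from index 62, newest first, so a
        // re-encoded header should come back as an indexed field with the
        // 0x80 bit set. See RFC 7541, Section 2.3.3.)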
let res = encode(&mut encoder, vec![header(&key, &key)]); // Only check that it is found assert_eq!(0x80, res[0] & 0x80); } assert_eq!(4096, encoder.table.size()); assert_eq!(64, encoder.table.len()); // Find existing headers for i in 0..64 { let key = format!("x-hello-world-{:02}", i); let res = encode(&mut encoder, vec![header(&key, &key)]); assert_eq!(0x80, res[0] & 0x80); } // Insert a new header let key = "x-hello-world-64"; let res = encode(&mut encoder, vec![header(key, key)]); assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); assert_eq!(key, huff_decode(&res[2..14])); assert_eq!(0x80 | 12, res[14]); assert_eq!(key, huff_decode(&res[15..])); assert_eq!(27, res.len()); assert_eq!(64, encoder.table.len()); // Now try encoding entries that should exist in the table for i in 1..65 { let key = format!("x-hello-world-{:02}", i); let res = encode(&mut encoder, vec![header(&key, &key)]); assert_eq!(0x80 | (61 + (65 - i)), res[0]); } } #[test] fn test_large_headers_are_not_indexed() { let mut encoder = Encoder::new(128, 0); let key = "hello-world-hello-world-HELLO-zzz"; let res = encode(&mut encoder, vec![header(key, key)]); assert_eq!(&[0, 0x80 | 25], &res[..2]); assert_eq!(0, encoder.table.len()); assert_eq!(0, encoder.table.size()); } #[test] fn test_sensitive_headers_are_never_indexed() { use http::header::HeaderValue; let name = "my-password".parse().unwrap(); let mut value = HeaderValue::from_bytes(b"12345").unwrap(); value.set_sensitive(true); let header = Header::Field { name: Some(name), value: value, }; // Now, try to encode the sensitive header let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![header]); assert_eq!(&[0b10000, 0x80 | 8], &res[..2]); assert_eq!("my-password", huff_decode(&res[2..10])); assert_eq!(0x80 | 4, res[10]); assert_eq!("12345", huff_decode(&res[11..])); // Now, try to encode a sensitive header w/ a name in the static table let name = "authorization".parse().unwrap(); let mut value = HeaderValue::from_bytes(b"12345").unwrap(); value.set_sensitive(true); let header = Header::Field { name: Some(name), value: value, }; let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![header]); assert_eq!(&[0b11111, 8], &res[..2]); assert_eq!(0x80 | 4, res[2]); assert_eq!("12345", huff_decode(&res[3..])); // Using the name component of a previously indexed header (without // sensitive flag set) let _ = encode( &mut encoder, vec![self::header("my-password", "not-so-secret")], ); let name = "my-password".parse().unwrap(); let mut value = HeaderValue::from_bytes(b"12345").unwrap(); value.set_sensitive(true); let header = Header::Field { name: Some(name), value: value, }; let res = encode(&mut encoder, vec![header]); assert_eq!(&[0b11111, 47], &res[..2]); assert_eq!(0x80 | 4, res[2]); assert_eq!("12345", huff_decode(&res[3..])); } #[test] fn test_content_length_value_not_indexed() { let mut encoder = Encoder::default(); let res = encode(&mut encoder, vec![header("content-length", "1234")]); assert_eq!(&[15, 13, 0x80 | 3], &res[0..3]); assert_eq!("1234", huff_decode(&res[3..])); assert_eq!(6, res.len()); } #[test] fn test_encoding_headers_with_same_name() { let mut encoder = Encoder::default(); let name = "hello"; // Encode first one let _ = encode(&mut encoder, vec![header(name, "one")]); // Encode second one let res = encode(&mut encoder, vec![header(name, "two")]); assert_eq!(&[0x40 | 62, 0x80 | 3], &res[0..2]); assert_eq!("two", huff_decode(&res[2..])); assert_eq!(5, res.len()); // Encode the first one again let res = encode(&mut 
encoder, vec![header(name, "one")]); assert_eq!(&[0x80 | 63], &res[..]); // Now the second one let res = encode(&mut encoder, vec![header(name, "two")]); assert_eq!(&[0x80 | 62], &res[..]); } #[test] fn test_evicting_headers_when_multiple_of_same_name_are_in_table() { // The encoder only has space for 2 headers let mut encoder = Encoder::new(76, 0); let _ = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(1, encoder.table.len()); let _ = encode(&mut encoder, vec![header("bar", "foo")]); assert_eq!(2, encoder.table.len()); // This will evict the first header, while still referencing the header // name let res = encode(&mut encoder, vec![header("foo", "baz")]); assert_eq!(&[0x40 | 63, 0, 0x80 | 3], &res[..3]); assert_eq!(2, encoder.table.len()); // Try adding the same header again let res = encode(&mut encoder, vec![header("foo", "baz")]); assert_eq!(&[0x80 | 62], &res[..]); assert_eq!(2, encoder.table.len()); } #[test] fn test_max_size_zero() { // Static table only let mut encoder = Encoder::new(0, 0); let res = encode(&mut encoder, vec![method("GET")]); assert_eq!(*res, [0x80 | 2]); assert_eq!(encoder.table.len(), 0); let res = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(&[0, 0x80 | 2], &res[..2]); assert_eq!("foo", huff_decode(&res[2..4])); assert_eq!(0x80 | 3, res[4]); assert_eq!("bar", huff_decode(&res[5..8])); assert_eq!(0, encoder.table.len()); // Encode a custom value let res = encode(&mut encoder, vec![header("transfer-encoding", "chunked")]); assert_eq!(&[15, 42, 0x80 | 6], &res[..3]); assert_eq!("chunked", huff_decode(&res[3..])); } #[test] fn test_update_max_size_combos() { let mut encoder = Encoder::default(); assert!(encoder.size_update.is_none()); assert_eq!(4096, encoder.table.max_size()); encoder.update_max_size(4096); // Default size assert!(encoder.size_update.is_none()); encoder.update_max_size(0); assert_eq!(Some(SizeUpdate::One(0)), encoder.size_update); encoder.update_max_size(100); assert_eq!(Some(SizeUpdate::Two(0, 100)), encoder.size_update); let mut encoder = Encoder::default(); encoder.update_max_size(8000); assert_eq!(Some(SizeUpdate::One(8000)), encoder.size_update); encoder.update_max_size(100); assert_eq!(Some(SizeUpdate::One(100)), encoder.size_update); encoder.update_max_size(8000); assert_eq!(Some(SizeUpdate::Two(100, 8000)), encoder.size_update); encoder.update_max_size(4000); assert_eq!(Some(SizeUpdate::Two(100, 4000)), encoder.size_update); encoder.update_max_size(50); assert_eq!(Some(SizeUpdate::One(50)), encoder.size_update); } #[test] fn test_resizing_table() { let mut encoder = Encoder::default(); // Add a header let _ = encode(&mut encoder, vec![header("foo", "bar")]); encoder.update_max_size(1); assert_eq!(1, encoder.table.len()); let res = encode(&mut encoder, vec![method("GET")]); assert_eq!(&[32 | 1, 0x80 | 2], &res[..]); assert_eq!(0, encoder.table.len()); let res = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(0, res[0]); encoder.update_max_size(100); let res = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(&[32 | 31, 69, 64], &res[..3]); encoder.update_max_size(0); let res = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(&[32, 0], &res[..2]); } #[test] fn test_decreasing_table_size_without_eviction() { let mut encoder = Encoder::default(); // Add a header let _ = encode(&mut encoder, vec![header("foo", "bar")]); encoder.update_max_size(100); assert_eq!(1, encoder.table.len()); let res = encode(&mut encoder, vec![header("foo", "bar")]); assert_eq!(&[32 | 31, 69, 0x80 | 
62], &res[..]); } #[test] fn test_nameless_header() { let mut encoder = Encoder::default(); let res = encode( &mut encoder, vec![ Header::Field { name: Some("hello".parse().unwrap()), value: HeaderValue::from_bytes(b"world").unwrap(), }, Header::Field { name: None, value: HeaderValue::from_bytes(b"zomg").unwrap(), }, ], ); assert_eq!(&[0x40, 0x80 | 4], &res[0..2]); assert_eq!("hello", huff_decode(&res[2..6])); assert_eq!(0x80 | 4, res[6]); assert_eq!("world", huff_decode(&res[7..11])); // Next is not indexed assert_eq!(&[15, 47, 0x80 | 3], &res[11..14]); assert_eq!("zomg", huff_decode(&res[14..])); } #[test] fn test_nameless_header_at_resume() { let mut encoder = Encoder::default(); let mut dst = BytesMut::from(Vec::with_capacity(15)); let mut input = vec![ Header::Field { name: Some("hello".parse().unwrap()), value: HeaderValue::from_bytes(b"world").unwrap(), }, Header::Field { name: None, value: HeaderValue::from_bytes(b"zomg").unwrap(), }, Header::Field { name: None, value: HeaderValue::from_bytes(b"sup").unwrap(), }, ].into_iter(); let resume = match encoder.encode(None, &mut input, &mut dst) { Encode::Partial(r) => r, _ => panic!(), }; assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]); assert_eq!("hello", huff_decode(&dst[2..6])); assert_eq!(0x80 | 4, dst[6]); assert_eq!("world", huff_decode(&dst[7..11])); dst.clear(); match encoder.encode(Some(resume), &mut input, &mut dst) { Encode::Full => {}, unexpected => panic!("resume returned unexpected: {:?}", unexpected), } // Next is not indexed assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]); assert_eq!("zomg", huff_decode(&dst[3..6])); assert_eq!(&[15, 47, 0x80 | 3], &dst[6..9]); assert_eq!("sup", huff_decode(&dst[9..])); } #[test] #[ignore] fn test_evicted_overflow() { // Not sure what the best way to do this is. } fn encode(e: &mut Encoder, hdrs: Vec>>) -> BytesMut { let mut dst = BytesMut::with_capacity(1024); e.encode(None, &mut hdrs.into_iter(), &mut dst); dst } fn method(s: &str) -> Header> { Header::Method(Method::from_bytes(s.as_bytes()).unwrap()) } fn header(name: &str, val: &str) -> Header> { let name = HeaderName::from_bytes(name.as_bytes()).unwrap(); let value = HeaderValue::from_bytes(val.as_bytes()).unwrap(); Header::Field { name: Some(name), value: value, } } fn huff_decode(src: &[u8]) -> BytesMut { let mut buf = BytesMut::new(); huffman::decode(src, &mut buf).unwrap() } } h2-0.1.26/src/hpack/header.rs010066400017500001750000000176021347357406400140320ustar0000000000000000use super::{DecoderError, NeedMore}; use bytes::Bytes; use http::{Method, StatusCode}; use http::header::{HeaderName, HeaderValue}; use string::{String, TryFrom}; /// HTTP/2.0 Header #[derive(Debug, Clone, Eq, PartialEq)] pub enum Header { Field { name: T, value: HeaderValue }, // TODO: Change these types to `http::uri` types. 
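    // The remaining variants carry the HTTP/2 pseudo-header fields
    // (`:authority`, `:method`, `:scheme`, `:path`, and `:status`)
    // described in RFC 7540, Section 8.1.2.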
Authority(String), Method(Method), Scheme(String), Path(String), Status(StatusCode), } /// The header field name #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub enum Name<'a> { Field(&'a HeaderName), Authority, Method, Scheme, Path, Status, } pub fn len(name: &HeaderName, value: &HeaderValue) -> usize { let n: &str = name.as_ref(); 32 + n.len() + value.len() } impl Header> { pub fn reify(self) -> Result { use self::Header::*; Ok(match self { Field { name: Some(n), value, } => Field { name: n, value: value, }, Field { name: None, value, } => return Err(value), Authority(v) => Authority(v), Method(v) => Method(v), Scheme(v) => Scheme(v), Path(v) => Path(v), Status(v) => Status(v), }) } } impl Header { pub fn new(name: Bytes, value: Bytes) -> Result { if name.len() == 0 { return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); } if name[0] == b':' { match &name[1..] { b"authority" => { let value = String::try_from(value)?; Ok(Header::Authority(value)) }, b"method" => { let method = Method::from_bytes(&value)?; Ok(Header::Method(method)) }, b"scheme" => { let value = String::try_from(value)?; Ok(Header::Scheme(value)) }, b"path" => { let value = String::try_from(value)?; Ok(Header::Path(value)) }, b"status" => { let status = StatusCode::from_bytes(&value)?; Ok(Header::Status(status)) }, _ => Err(DecoderError::InvalidPseudoheader), } } else { // HTTP/2 requires lower case header names let name = HeaderName::from_lowercase(&name)?; let value = HeaderValue::from_bytes(&value)?; Ok(Header::Field { name: name, value: value, }) } } pub fn len(&self) -> usize { match *self { Header::Field { ref name, ref value, } => len(name, value), Header::Authority(ref v) => 32 + 10 + v.len(), Header::Method(ref v) => 32 + 7 + v.as_ref().len(), Header::Scheme(ref v) => 32 + 7 + v.len(), Header::Path(ref v) => 32 + 5 + v.len(), Header::Status(_) => 32 + 7 + 3, } } /// Returns the header name pub fn name(&self) -> Name { match *self { Header::Field { ref name, .. } => Name::Field(name), Header::Authority(..) => Name::Authority, Header::Method(..) => Name::Method, Header::Scheme(..) => Name::Scheme, Header::Path(..) => Name::Path, Header::Status(..) => Name::Status, } } pub fn value_slice(&self) -> &[u8] { match *self { Header::Field { ref value, .. } => value.as_ref(), Header::Authority(ref v) => v.as_ref(), Header::Method(ref v) => v.as_ref().as_ref(), Header::Scheme(ref v) => v.as_ref(), Header::Path(ref v) => v.as_ref(), Header::Status(ref v) => v.as_str().as_ref(), } } pub fn value_eq(&self, other: &Header) -> bool { match *self { Header::Field { ref value, .. } => { let a = value; match *other { Header::Field { ref value, .. } => a == value, _ => false, } }, Header::Authority(ref a) => match *other { Header::Authority(ref b) => a == b, _ => false, }, Header::Method(ref a) => match *other { Header::Method(ref b) => a == b, _ => false, }, Header::Scheme(ref a) => match *other { Header::Scheme(ref b) => a == b, _ => false, }, Header::Path(ref a) => match *other { Header::Path(ref b) => a == b, _ => false, }, Header::Status(ref a) => match *other { Header::Status(ref b) => a == b, _ => false, }, } } pub fn is_sensitive(&self) -> bool { match *self { Header::Field { ref value, .. } => value.is_sensitive(), // TODO: Technically these other header values can be sensitive too. _ => false, } } pub fn skip_value_index(&self) -> bool { use http::header; match *self { Header::Field { ref name, .. 
} => match *name { header::AGE | header::AUTHORIZATION | header::CONTENT_LENGTH | header::ETAG | header::IF_MODIFIED_SINCE | header::IF_NONE_MATCH | header::LOCATION | header::COOKIE | header::SET_COOKIE => true, _ => false, }, Header::Path(..) => true, _ => false, } } } // Mostly for tests impl From
for Header> { fn from(src: Header) -> Self { match src { Header::Field { name, value, } => Header::Field { name: Some(name), value, }, Header::Authority(v) => Header::Authority(v), Header::Method(v) => Header::Method(v), Header::Scheme(v) => Header::Scheme(v), Header::Path(v) => Header::Path(v), Header::Status(v) => Header::Status(v), } } } impl<'a> Name<'a> { pub fn into_entry(self, value: Bytes) -> Result { match self { Name::Field(name) => Ok(Header::Field { name: name.clone(), value: HeaderValue::from_bytes(&*value)?, }), Name::Authority => Ok(Header::Authority(String::try_from(value)?)), Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), Name::Scheme => Ok(Header::Scheme(String::try_from(value)?)), Name::Path => Ok(Header::Path(String::try_from(value)?)), Name::Status => { match StatusCode::from_bytes(&value) { Ok(status) => Ok(Header::Status(status)), // TODO: better error handling Err(_) => Err(DecoderError::InvalidStatusCode), } }, } } pub fn as_slice(&self) -> &[u8] { match *self { Name::Field(ref name) => name.as_ref(), Name::Authority => b":authority", Name::Method => b":method", Name::Scheme => b":scheme", Name::Path => b":path", Name::Status => b":status", } } } h2-0.1.26/src/hpack/huffman/mod.rs010066400017500001750000000122641316574105100147720ustar0000000000000000mod table; use self::table::{DECODE_TABLE, ENCODE_TABLE}; use hpack::{DecoderError, EncoderError}; use bytes::{BufMut, BytesMut}; // Constructed in the generated `table.rs` file struct Decoder { state: usize, maybe_eos: bool, } // These flags must match the ones in genhuff.rs const MAYBE_EOS: u8 = 1; const DECODED: u8 = 2; const ERROR: u8 = 4; pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result { let mut decoder = Decoder::new(); // Max compression ratio is >= 0.5 buf.reserve(src.len() << 1); for b in src { if let Some(b) = decoder.decode4(b >> 4)? { buf.put_u8(b); } if let Some(b) = decoder.decode4(b & 0xf)? 
{ buf.put_u8(b); } } if !decoder.is_final() { return Err(DecoderError::InvalidHuffmanCode); } Ok(buf.take()) } // TODO: return error when there is not enough room to encode the value pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { let mut bits: u64 = 0; let mut bits_left = 40; let mut rem = dst.remaining_mut(); for &b in src { let (nbits, code) = ENCODE_TABLE[b as usize]; bits |= code << (bits_left - nbits); bits_left -= nbits; while bits_left <= 32 { if rem == 0 { return Err(EncoderError::BufferOverflow); } dst.put_u8((bits >> 32) as u8); bits <<= 8; bits_left += 8; rem -= 1; } } if bits_left != 40 { if rem == 0 { return Err(EncoderError::BufferOverflow); } // This writes the EOS token bits |= (1 << bits_left) - 1; dst.put_u8((bits >> 32) as u8); } Ok(()) } impl Decoder { fn new() -> Decoder { Decoder { state: 0, maybe_eos: false, } } // Decodes 4 bits fn decode4(&mut self, input: u8) -> Result, DecoderError> { // (next-state, byte, flags) let (next, byte, flags) = DECODE_TABLE[self.state][input as usize]; if flags & ERROR == ERROR { // Data followed the EOS marker return Err(DecoderError::InvalidHuffmanCode); } let mut ret = None; if flags & DECODED == DECODED { ret = Some(byte); } self.state = next; self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS; Ok(ret) } fn is_final(&self) -> bool { self.state == 0 || self.maybe_eos } } #[cfg(test)] mod test { use super::*; fn decode(src: &[u8]) -> Result { let mut buf = BytesMut::new(); super::decode(src, &mut buf) } #[test] fn decode_single_byte() { assert_eq!("o", decode(&[0b00111111]).unwrap()); assert_eq!("0", decode(&[0x0 + 7]).unwrap()); assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap()); } #[test] fn single_char_multi_byte() { assert_eq!("#", decode(&[255, 160 + 15]).unwrap()); assert_eq!("$", decode(&[255, 200 + 7]).unwrap()); assert_eq!("\x0a", decode(&[255, 255, 255, 240 + 3]).unwrap()); } #[test] fn multi_char() { assert_eq!("!0", decode(&[254, 1]).unwrap()); assert_eq!(" !", decode(&[0b01010011, 0b11111000]).unwrap()); } #[test] fn encode_single_byte() { let mut dst = Vec::with_capacity(1); encode(b"o", &mut dst).unwrap(); assert_eq!(&dst[..], &[0b00111111]); dst.clear(); encode(b"0", &mut dst).unwrap(); assert_eq!(&dst[..], &[0x0 + 7]); dst.clear(); encode(b"A", &mut dst).unwrap(); assert_eq!(&dst[..], &[(0x21 << 2) + 3]); } #[test] fn encode_decode_str() { const DATA: &'static [&'static str] = &[ "hello world", ":method", ":scheme", ":authority", "yahoo.co.jp", "GET", "http", ":path", "/images/top/sp2/cmn/logo-ns-130528.png", "example.com", "hpack-test", "xxxxxxx1", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20100101 Firefox/16.0", "accept", "Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "cookie", "B=76j09a189a6h4&b=3&s=0b", "TE", "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi non bibendum libero. \ Etiam ultrices lorem ut.", ]; for s in DATA { let mut dst = Vec::with_capacity(s.len()); encode(s.as_bytes(), &mut dst).unwrap(); let decoded = decode(&dst).unwrap(); assert_eq!(&decoded[..], s.as_bytes()); } } #[test] fn encode_decode_u8() { const DATA: &'static [&'static [u8]] = &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; for s in DATA { let mut dst = Vec::with_capacity(s.len()); encode(s, &mut dst).unwrap(); let decoded = decode(&dst).unwrap(); assert_eq!(&decoded[..], &s[..]); } } } h2-0.1.26/src/hpack/huffman/table.rs010066400017500001750000003264541312505306200153050ustar0000000000000000// !!! DO NOT EDIT !!! 
Generated by util/genhuff/src/main.rs // (num-bits, bits) pub const ENCODE_TABLE: [(usize, u64); 257] = [ (13, 0x1ff8), (23, 0x7fffd8), (28, 0xfffffe2), (28, 0xfffffe3), (28, 0xfffffe4), (28, 0xfffffe5), (28, 0xfffffe6), (28, 0xfffffe7), (28, 0xfffffe8), (24, 0xffffea), (30, 0x3ffffffc), (28, 0xfffffe9), (28, 0xfffffea), (30, 0x3ffffffd), (28, 0xfffffeb), (28, 0xfffffec), (28, 0xfffffed), (28, 0xfffffee), (28, 0xfffffef), (28, 0xffffff0), (28, 0xffffff1), (28, 0xffffff2), (30, 0x3ffffffe), (28, 0xffffff3), (28, 0xffffff4), (28, 0xffffff5), (28, 0xffffff6), (28, 0xffffff7), (28, 0xffffff8), (28, 0xffffff9), (28, 0xffffffa), (28, 0xffffffb), (6, 0x14), (10, 0x3f8), (10, 0x3f9), (12, 0xffa), (13, 0x1ff9), (6, 0x15), (8, 0xf8), (11, 0x7fa), (10, 0x3fa), (10, 0x3fb), (8, 0xf9), (11, 0x7fb), (8, 0xfa), (6, 0x16), (6, 0x17), (6, 0x18), (5, 0x0), (5, 0x1), (5, 0x2), (6, 0x19), (6, 0x1a), (6, 0x1b), (6, 0x1c), (6, 0x1d), (6, 0x1e), (6, 0x1f), (7, 0x5c), (8, 0xfb), (15, 0x7ffc), (6, 0x20), (12, 0xffb), (10, 0x3fc), (13, 0x1ffa), (6, 0x21), (7, 0x5d), (7, 0x5e), (7, 0x5f), (7, 0x60), (7, 0x61), (7, 0x62), (7, 0x63), (7, 0x64), (7, 0x65), (7, 0x66), (7, 0x67), (7, 0x68), (7, 0x69), (7, 0x6a), (7, 0x6b), (7, 0x6c), (7, 0x6d), (7, 0x6e), (7, 0x6f), (7, 0x70), (7, 0x71), (7, 0x72), (8, 0xfc), (7, 0x73), (8, 0xfd), (13, 0x1ffb), (19, 0x7fff0), (13, 0x1ffc), (14, 0x3ffc), (6, 0x22), (15, 0x7ffd), (5, 0x3), (6, 0x23), (5, 0x4), (6, 0x24), (5, 0x5), (6, 0x25), (6, 0x26), (6, 0x27), (5, 0x6), (7, 0x74), (7, 0x75), (6, 0x28), (6, 0x29), (6, 0x2a), (5, 0x7), (6, 0x2b), (7, 0x76), (6, 0x2c), (5, 0x8), (5, 0x9), (6, 0x2d), (7, 0x77), (7, 0x78), (7, 0x79), (7, 0x7a), (7, 0x7b), (15, 0x7ffe), (11, 0x7fc), (14, 0x3ffd), (13, 0x1ffd), (28, 0xffffffc), (20, 0xfffe6), (22, 0x3fffd2), (20, 0xfffe7), (20, 0xfffe8), (22, 0x3fffd3), (22, 0x3fffd4), (22, 0x3fffd5), (23, 0x7fffd9), (22, 0x3fffd6), (23, 0x7fffda), (23, 0x7fffdb), (23, 0x7fffdc), (23, 0x7fffdd), (23, 0x7fffde), (24, 0xffffeb), (23, 0x7fffdf), (24, 0xffffec), (24, 0xffffed), (22, 0x3fffd7), (23, 0x7fffe0), (24, 0xffffee), (23, 0x7fffe1), (23, 0x7fffe2), (23, 0x7fffe3), (23, 0x7fffe4), (21, 0x1fffdc), (22, 0x3fffd8), (23, 0x7fffe5), (22, 0x3fffd9), (23, 0x7fffe6), (23, 0x7fffe7), (24, 0xffffef), (22, 0x3fffda), (21, 0x1fffdd), (20, 0xfffe9), (22, 0x3fffdb), (22, 0x3fffdc), (23, 0x7fffe8), (23, 0x7fffe9), (21, 0x1fffde), (23, 0x7fffea), (22, 0x3fffdd), (22, 0x3fffde), (24, 0xfffff0), (21, 0x1fffdf), (22, 0x3fffdf), (23, 0x7fffeb), (23, 0x7fffec), (21, 0x1fffe0), (21, 0x1fffe1), (22, 0x3fffe0), (21, 0x1fffe2), (23, 0x7fffed), (22, 0x3fffe1), (23, 0x7fffee), (23, 0x7fffef), (20, 0xfffea), (22, 0x3fffe2), (22, 0x3fffe3), (22, 0x3fffe4), (23, 0x7ffff0), (22, 0x3fffe5), (22, 0x3fffe6), (23, 0x7ffff1), (26, 0x3ffffe0), (26, 0x3ffffe1), (20, 0xfffeb), (19, 0x7fff1), (22, 0x3fffe7), (23, 0x7ffff2), (22, 0x3fffe8), (25, 0x1ffffec), (26, 0x3ffffe2), (26, 0x3ffffe3), (26, 0x3ffffe4), (27, 0x7ffffde), (27, 0x7ffffdf), (26, 0x3ffffe5), (24, 0xfffff1), (25, 0x1ffffed), (19, 0x7fff2), (21, 0x1fffe3), (26, 0x3ffffe6), (27, 0x7ffffe0), (27, 0x7ffffe1), (26, 0x3ffffe7), (27, 0x7ffffe2), (24, 0xfffff2), (21, 0x1fffe4), (21, 0x1fffe5), (26, 0x3ffffe8), (26, 0x3ffffe9), (28, 0xffffffd), (27, 0x7ffffe3), (27, 0x7ffffe4), (27, 0x7ffffe5), (20, 0xfffec), (24, 0xfffff3), (20, 0xfffed), (21, 0x1fffe6), (22, 0x3fffe9), (21, 0x1fffe7), (21, 0x1fffe8), (23, 0x7ffff3), (22, 0x3fffea), (22, 0x3fffeb), (25, 0x1ffffee), (25, 0x1ffffef), (24, 0xfffff4), (24, 0xfffff5), (26, 0x3ffffea), (23, 
0x7ffff4), (26, 0x3ffffeb), (27, 0x7ffffe6), (26, 0x3ffffec), (26, 0x3ffffed), (27, 0x7ffffe7), (27, 0x7ffffe8), (27, 0x7ffffe9), (27, 0x7ffffea), (27, 0x7ffffeb), (28, 0xffffffe), (27, 0x7ffffec), (27, 0x7ffffed), (27, 0x7ffffee), (27, 0x7ffffef), (27, 0x7fffff0), (26, 0x3ffffee), (30, 0x3fffffff), ]; // (next-state, byte, flags) pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = [ // 0 [ (4, 0, 0x00), (5, 0, 0x00), (7, 0, 0x00), (8, 0, 0x00), (11, 0, 0x00), (12, 0, 0x00), (16, 0, 0x00), (19, 0, 0x00), (25, 0, 0x00), (28, 0, 0x00), (32, 0, 0x00), (35, 0, 0x00), (42, 0, 0x00), (49, 0, 0x00), (57, 0, 0x00), (64, 0, 0x01), ], // 1 [ (0, 48, 0x02), (0, 49, 0x02), (0, 50, 0x02), (0, 97, 0x02), (0, 99, 0x02), (0, 101, 0x02), (0, 105, 0x02), (0, 111, 0x02), (0, 115, 0x02), (0, 116, 0x02), (13, 0, 0x00), (14, 0, 0x00), (17, 0, 0x00), (18, 0, 0x00), (20, 0, 0x00), (21, 0, 0x00), ], // 2 [ (1, 48, 0x02), (22, 48, 0x03), (1, 49, 0x02), (22, 49, 0x03), (1, 50, 0x02), (22, 50, 0x03), (1, 97, 0x02), (22, 97, 0x03), (1, 99, 0x02), (22, 99, 0x03), (1, 101, 0x02), (22, 101, 0x03), (1, 105, 0x02), (22, 105, 0x03), (1, 111, 0x02), (22, 111, 0x03), ], // 3 [ (2, 48, 0x02), (9, 48, 0x02), (23, 48, 0x02), (40, 48, 0x03), (2, 49, 0x02), (9, 49, 0x02), (23, 49, 0x02), (40, 49, 0x03), (2, 50, 0x02), (9, 50, 0x02), (23, 50, 0x02), (40, 50, 0x03), (2, 97, 0x02), (9, 97, 0x02), (23, 97, 0x02), (40, 97, 0x03), ], // 4 [ (3, 48, 0x02), (6, 48, 0x02), (10, 48, 0x02), (15, 48, 0x02), (24, 48, 0x02), (31, 48, 0x02), (41, 48, 0x02), (56, 48, 0x03), (3, 49, 0x02), (6, 49, 0x02), (10, 49, 0x02), (15, 49, 0x02), (24, 49, 0x02), (31, 49, 0x02), (41, 49, 0x02), (56, 49, 0x03), ], // 5 [ (3, 50, 0x02), (6, 50, 0x02), (10, 50, 0x02), (15, 50, 0x02), (24, 50, 0x02), (31, 50, 0x02), (41, 50, 0x02), (56, 50, 0x03), (3, 97, 0x02), (6, 97, 0x02), (10, 97, 0x02), (15, 97, 0x02), (24, 97, 0x02), (31, 97, 0x02), (41, 97, 0x02), (56, 97, 0x03), ], // 6 [ (2, 99, 0x02), (9, 99, 0x02), (23, 99, 0x02), (40, 99, 0x03), (2, 101, 0x02), (9, 101, 0x02), (23, 101, 0x02), (40, 101, 0x03), (2, 105, 0x02), (9, 105, 0x02), (23, 105, 0x02), (40, 105, 0x03), (2, 111, 0x02), (9, 111, 0x02), (23, 111, 0x02), (40, 111, 0x03), ], // 7 [ (3, 99, 0x02), (6, 99, 0x02), (10, 99, 0x02), (15, 99, 0x02), (24, 99, 0x02), (31, 99, 0x02), (41, 99, 0x02), (56, 99, 0x03), (3, 101, 0x02), (6, 101, 0x02), (10, 101, 0x02), (15, 101, 0x02), (24, 101, 0x02), (31, 101, 0x02), (41, 101, 0x02), (56, 101, 0x03), ], // 8 [ (3, 105, 0x02), (6, 105, 0x02), (10, 105, 0x02), (15, 105, 0x02), (24, 105, 0x02), (31, 105, 0x02), (41, 105, 0x02), (56, 105, 0x03), (3, 111, 0x02), (6, 111, 0x02), (10, 111, 0x02), (15, 111, 0x02), (24, 111, 0x02), (31, 111, 0x02), (41, 111, 0x02), (56, 111, 0x03), ], // 9 [ (1, 115, 0x02), (22, 115, 0x03), (1, 116, 0x02), (22, 116, 0x03), (0, 32, 0x02), (0, 37, 0x02), (0, 45, 0x02), (0, 46, 0x02), (0, 47, 0x02), (0, 51, 0x02), (0, 52, 0x02), (0, 53, 0x02), (0, 54, 0x02), (0, 55, 0x02), (0, 56, 0x02), (0, 57, 0x02), ], // 10 [ (2, 115, 0x02), (9, 115, 0x02), (23, 115, 0x02), (40, 115, 0x03), (2, 116, 0x02), (9, 116, 0x02), (23, 116, 0x02), (40, 116, 0x03), (1, 32, 0x02), (22, 32, 0x03), (1, 37, 0x02), (22, 37, 0x03), (1, 45, 0x02), (22, 45, 0x03), (1, 46, 0x02), (22, 46, 0x03), ], // 11 [ (3, 115, 0x02), (6, 115, 0x02), (10, 115, 0x02), (15, 115, 0x02), (24, 115, 0x02), (31, 115, 0x02), (41, 115, 0x02), (56, 115, 0x03), (3, 116, 0x02), (6, 116, 0x02), (10, 116, 0x02), (15, 116, 0x02), (24, 116, 0x02), (31, 116, 0x02), (41, 116, 0x02), (56, 116, 0x03), 
], // 12 [ (2, 32, 0x02), (9, 32, 0x02), (23, 32, 0x02), (40, 32, 0x03), (2, 37, 0x02), (9, 37, 0x02), (23, 37, 0x02), (40, 37, 0x03), (2, 45, 0x02), (9, 45, 0x02), (23, 45, 0x02), (40, 45, 0x03), (2, 46, 0x02), (9, 46, 0x02), (23, 46, 0x02), (40, 46, 0x03), ], // 13 [ (3, 32, 0x02), (6, 32, 0x02), (10, 32, 0x02), (15, 32, 0x02), (24, 32, 0x02), (31, 32, 0x02), (41, 32, 0x02), (56, 32, 0x03), (3, 37, 0x02), (6, 37, 0x02), (10, 37, 0x02), (15, 37, 0x02), (24, 37, 0x02), (31, 37, 0x02), (41, 37, 0x02), (56, 37, 0x03), ], // 14 [ (3, 45, 0x02), (6, 45, 0x02), (10, 45, 0x02), (15, 45, 0x02), (24, 45, 0x02), (31, 45, 0x02), (41, 45, 0x02), (56, 45, 0x03), (3, 46, 0x02), (6, 46, 0x02), (10, 46, 0x02), (15, 46, 0x02), (24, 46, 0x02), (31, 46, 0x02), (41, 46, 0x02), (56, 46, 0x03), ], // 15 [ (1, 47, 0x02), (22, 47, 0x03), (1, 51, 0x02), (22, 51, 0x03), (1, 52, 0x02), (22, 52, 0x03), (1, 53, 0x02), (22, 53, 0x03), (1, 54, 0x02), (22, 54, 0x03), (1, 55, 0x02), (22, 55, 0x03), (1, 56, 0x02), (22, 56, 0x03), (1, 57, 0x02), (22, 57, 0x03), ], // 16 [ (2, 47, 0x02), (9, 47, 0x02), (23, 47, 0x02), (40, 47, 0x03), (2, 51, 0x02), (9, 51, 0x02), (23, 51, 0x02), (40, 51, 0x03), (2, 52, 0x02), (9, 52, 0x02), (23, 52, 0x02), (40, 52, 0x03), (2, 53, 0x02), (9, 53, 0x02), (23, 53, 0x02), (40, 53, 0x03), ], // 17 [ (3, 47, 0x02), (6, 47, 0x02), (10, 47, 0x02), (15, 47, 0x02), (24, 47, 0x02), (31, 47, 0x02), (41, 47, 0x02), (56, 47, 0x03), (3, 51, 0x02), (6, 51, 0x02), (10, 51, 0x02), (15, 51, 0x02), (24, 51, 0x02), (31, 51, 0x02), (41, 51, 0x02), (56, 51, 0x03), ], // 18 [ (3, 52, 0x02), (6, 52, 0x02), (10, 52, 0x02), (15, 52, 0x02), (24, 52, 0x02), (31, 52, 0x02), (41, 52, 0x02), (56, 52, 0x03), (3, 53, 0x02), (6, 53, 0x02), (10, 53, 0x02), (15, 53, 0x02), (24, 53, 0x02), (31, 53, 0x02), (41, 53, 0x02), (56, 53, 0x03), ], // 19 [ (2, 54, 0x02), (9, 54, 0x02), (23, 54, 0x02), (40, 54, 0x03), (2, 55, 0x02), (9, 55, 0x02), (23, 55, 0x02), (40, 55, 0x03), (2, 56, 0x02), (9, 56, 0x02), (23, 56, 0x02), (40, 56, 0x03), (2, 57, 0x02), (9, 57, 0x02), (23, 57, 0x02), (40, 57, 0x03), ], // 20 [ (3, 54, 0x02), (6, 54, 0x02), (10, 54, 0x02), (15, 54, 0x02), (24, 54, 0x02), (31, 54, 0x02), (41, 54, 0x02), (56, 54, 0x03), (3, 55, 0x02), (6, 55, 0x02), (10, 55, 0x02), (15, 55, 0x02), (24, 55, 0x02), (31, 55, 0x02), (41, 55, 0x02), (56, 55, 0x03), ], // 21 [ (3, 56, 0x02), (6, 56, 0x02), (10, 56, 0x02), (15, 56, 0x02), (24, 56, 0x02), (31, 56, 0x02), (41, 56, 0x02), (56, 56, 0x03), (3, 57, 0x02), (6, 57, 0x02), (10, 57, 0x02), (15, 57, 0x02), (24, 57, 0x02), (31, 57, 0x02), (41, 57, 0x02), (56, 57, 0x03), ], // 22 [ (26, 0, 0x00), (27, 0, 0x00), (29, 0, 0x00), (30, 0, 0x00), (33, 0, 0x00), (34, 0, 0x00), (36, 0, 0x00), (37, 0, 0x00), (43, 0, 0x00), (46, 0, 0x00), (50, 0, 0x00), (53, 0, 0x00), (58, 0, 0x00), (61, 0, 0x00), (65, 0, 0x00), (68, 0, 0x01), ], // 23 [ (0, 61, 0x02), (0, 65, 0x02), (0, 95, 0x02), (0, 98, 0x02), (0, 100, 0x02), (0, 102, 0x02), (0, 103, 0x02), (0, 104, 0x02), (0, 108, 0x02), (0, 109, 0x02), (0, 110, 0x02), (0, 112, 0x02), (0, 114, 0x02), (0, 117, 0x02), (38, 0, 0x00), (39, 0, 0x00), ], // 24 [ (1, 61, 0x02), (22, 61, 0x03), (1, 65, 0x02), (22, 65, 0x03), (1, 95, 0x02), (22, 95, 0x03), (1, 98, 0x02), (22, 98, 0x03), (1, 100, 0x02), (22, 100, 0x03), (1, 102, 0x02), (22, 102, 0x03), (1, 103, 0x02), (22, 103, 0x03), (1, 104, 0x02), (22, 104, 0x03), ], // 25 [ (2, 61, 0x02), (9, 61, 0x02), (23, 61, 0x02), (40, 61, 0x03), (2, 65, 0x02), (9, 65, 0x02), (23, 65, 0x02), (40, 65, 0x03), (2, 95, 0x02), (9, 95, 
0x02), (23, 95, 0x02), (40, 95, 0x03), (2, 98, 0x02), (9, 98, 0x02), (23, 98, 0x02), (40, 98, 0x03), ], // 26 [ (3, 61, 0x02), (6, 61, 0x02), (10, 61, 0x02), (15, 61, 0x02), (24, 61, 0x02), (31, 61, 0x02), (41, 61, 0x02), (56, 61, 0x03), (3, 65, 0x02), (6, 65, 0x02), (10, 65, 0x02), (15, 65, 0x02), (24, 65, 0x02), (31, 65, 0x02), (41, 65, 0x02), (56, 65, 0x03), ], // 27 [ (3, 95, 0x02), (6, 95, 0x02), (10, 95, 0x02), (15, 95, 0x02), (24, 95, 0x02), (31, 95, 0x02), (41, 95, 0x02), (56, 95, 0x03), (3, 98, 0x02), (6, 98, 0x02), (10, 98, 0x02), (15, 98, 0x02), (24, 98, 0x02), (31, 98, 0x02), (41, 98, 0x02), (56, 98, 0x03), ], // 28 [ (2, 100, 0x02), (9, 100, 0x02), (23, 100, 0x02), (40, 100, 0x03), (2, 102, 0x02), (9, 102, 0x02), (23, 102, 0x02), (40, 102, 0x03), (2, 103, 0x02), (9, 103, 0x02), (23, 103, 0x02), (40, 103, 0x03), (2, 104, 0x02), (9, 104, 0x02), (23, 104, 0x02), (40, 104, 0x03), ], // 29 [ (3, 100, 0x02), (6, 100, 0x02), (10, 100, 0x02), (15, 100, 0x02), (24, 100, 0x02), (31, 100, 0x02), (41, 100, 0x02), (56, 100, 0x03), (3, 102, 0x02), (6, 102, 0x02), (10, 102, 0x02), (15, 102, 0x02), (24, 102, 0x02), (31, 102, 0x02), (41, 102, 0x02), (56, 102, 0x03), ], // 30 [ (3, 103, 0x02), (6, 103, 0x02), (10, 103, 0x02), (15, 103, 0x02), (24, 103, 0x02), (31, 103, 0x02), (41, 103, 0x02), (56, 103, 0x03), (3, 104, 0x02), (6, 104, 0x02), (10, 104, 0x02), (15, 104, 0x02), (24, 104, 0x02), (31, 104, 0x02), (41, 104, 0x02), (56, 104, 0x03), ], // 31 [ (1, 108, 0x02), (22, 108, 0x03), (1, 109, 0x02), (22, 109, 0x03), (1, 110, 0x02), (22, 110, 0x03), (1, 112, 0x02), (22, 112, 0x03), (1, 114, 0x02), (22, 114, 0x03), (1, 117, 0x02), (22, 117, 0x03), (0, 58, 0x02), (0, 66, 0x02), (0, 67, 0x02), (0, 68, 0x02), ], // 32 [ (2, 108, 0x02), (9, 108, 0x02), (23, 108, 0x02), (40, 108, 0x03), (2, 109, 0x02), (9, 109, 0x02), (23, 109, 0x02), (40, 109, 0x03), (2, 110, 0x02), (9, 110, 0x02), (23, 110, 0x02), (40, 110, 0x03), (2, 112, 0x02), (9, 112, 0x02), (23, 112, 0x02), (40, 112, 0x03), ], // 33 [ (3, 108, 0x02), (6, 108, 0x02), (10, 108, 0x02), (15, 108, 0x02), (24, 108, 0x02), (31, 108, 0x02), (41, 108, 0x02), (56, 108, 0x03), (3, 109, 0x02), (6, 109, 0x02), (10, 109, 0x02), (15, 109, 0x02), (24, 109, 0x02), (31, 109, 0x02), (41, 109, 0x02), (56, 109, 0x03), ], // 34 [ (3, 110, 0x02), (6, 110, 0x02), (10, 110, 0x02), (15, 110, 0x02), (24, 110, 0x02), (31, 110, 0x02), (41, 110, 0x02), (56, 110, 0x03), (3, 112, 0x02), (6, 112, 0x02), (10, 112, 0x02), (15, 112, 0x02), (24, 112, 0x02), (31, 112, 0x02), (41, 112, 0x02), (56, 112, 0x03), ], // 35 [ (2, 114, 0x02), (9, 114, 0x02), (23, 114, 0x02), (40, 114, 0x03), (2, 117, 0x02), (9, 117, 0x02), (23, 117, 0x02), (40, 117, 0x03), (1, 58, 0x02), (22, 58, 0x03), (1, 66, 0x02), (22, 66, 0x03), (1, 67, 0x02), (22, 67, 0x03), (1, 68, 0x02), (22, 68, 0x03), ], // 36 [ (3, 114, 0x02), (6, 114, 0x02), (10, 114, 0x02), (15, 114, 0x02), (24, 114, 0x02), (31, 114, 0x02), (41, 114, 0x02), (56, 114, 0x03), (3, 117, 0x02), (6, 117, 0x02), (10, 117, 0x02), (15, 117, 0x02), (24, 117, 0x02), (31, 117, 0x02), (41, 117, 0x02), (56, 117, 0x03), ], // 37 [ (2, 58, 0x02), (9, 58, 0x02), (23, 58, 0x02), (40, 58, 0x03), (2, 66, 0x02), (9, 66, 0x02), (23, 66, 0x02), (40, 66, 0x03), (2, 67, 0x02), (9, 67, 0x02), (23, 67, 0x02), (40, 67, 0x03), (2, 68, 0x02), (9, 68, 0x02), (23, 68, 0x02), (40, 68, 0x03), ], // 38 [ (3, 58, 0x02), (6, 58, 0x02), (10, 58, 0x02), (15, 58, 0x02), (24, 58, 0x02), (31, 58, 0x02), (41, 58, 0x02), (56, 58, 0x03), (3, 66, 0x02), (6, 66, 0x02), (10, 66, 0x02), (15, 
66, 0x02), (24, 66, 0x02), (31, 66, 0x02), (41, 66, 0x02), (56, 66, 0x03), ], // 39 [ (3, 67, 0x02), (6, 67, 0x02), (10, 67, 0x02), (15, 67, 0x02), (24, 67, 0x02), (31, 67, 0x02), (41, 67, 0x02), (56, 67, 0x03), (3, 68, 0x02), (6, 68, 0x02), (10, 68, 0x02), (15, 68, 0x02), (24, 68, 0x02), (31, 68, 0x02), (41, 68, 0x02), (56, 68, 0x03), ], // 40 [ (44, 0, 0x00), (45, 0, 0x00), (47, 0, 0x00), (48, 0, 0x00), (51, 0, 0x00), (52, 0, 0x00), (54, 0, 0x00), (55, 0, 0x00), (59, 0, 0x00), (60, 0, 0x00), (62, 0, 0x00), (63, 0, 0x00), (66, 0, 0x00), (67, 0, 0x00), (69, 0, 0x00), (72, 0, 0x01), ], // 41 [ (0, 69, 0x02), (0, 70, 0x02), (0, 71, 0x02), (0, 72, 0x02), (0, 73, 0x02), (0, 74, 0x02), (0, 75, 0x02), (0, 76, 0x02), (0, 77, 0x02), (0, 78, 0x02), (0, 79, 0x02), (0, 80, 0x02), (0, 81, 0x02), (0, 82, 0x02), (0, 83, 0x02), (0, 84, 0x02), ], // 42 [ (1, 69, 0x02), (22, 69, 0x03), (1, 70, 0x02), (22, 70, 0x03), (1, 71, 0x02), (22, 71, 0x03), (1, 72, 0x02), (22, 72, 0x03), (1, 73, 0x02), (22, 73, 0x03), (1, 74, 0x02), (22, 74, 0x03), (1, 75, 0x02), (22, 75, 0x03), (1, 76, 0x02), (22, 76, 0x03), ], // 43 [ (2, 69, 0x02), (9, 69, 0x02), (23, 69, 0x02), (40, 69, 0x03), (2, 70, 0x02), (9, 70, 0x02), (23, 70, 0x02), (40, 70, 0x03), (2, 71, 0x02), (9, 71, 0x02), (23, 71, 0x02), (40, 71, 0x03), (2, 72, 0x02), (9, 72, 0x02), (23, 72, 0x02), (40, 72, 0x03), ], // 44 [ (3, 69, 0x02), (6, 69, 0x02), (10, 69, 0x02), (15, 69, 0x02), (24, 69, 0x02), (31, 69, 0x02), (41, 69, 0x02), (56, 69, 0x03), (3, 70, 0x02), (6, 70, 0x02), (10, 70, 0x02), (15, 70, 0x02), (24, 70, 0x02), (31, 70, 0x02), (41, 70, 0x02), (56, 70, 0x03), ], // 45 [ (3, 71, 0x02), (6, 71, 0x02), (10, 71, 0x02), (15, 71, 0x02), (24, 71, 0x02), (31, 71, 0x02), (41, 71, 0x02), (56, 71, 0x03), (3, 72, 0x02), (6, 72, 0x02), (10, 72, 0x02), (15, 72, 0x02), (24, 72, 0x02), (31, 72, 0x02), (41, 72, 0x02), (56, 72, 0x03), ], // 46 [ (2, 73, 0x02), (9, 73, 0x02), (23, 73, 0x02), (40, 73, 0x03), (2, 74, 0x02), (9, 74, 0x02), (23, 74, 0x02), (40, 74, 0x03), (2, 75, 0x02), (9, 75, 0x02), (23, 75, 0x02), (40, 75, 0x03), (2, 76, 0x02), (9, 76, 0x02), (23, 76, 0x02), (40, 76, 0x03), ], // 47 [ (3, 73, 0x02), (6, 73, 0x02), (10, 73, 0x02), (15, 73, 0x02), (24, 73, 0x02), (31, 73, 0x02), (41, 73, 0x02), (56, 73, 0x03), (3, 74, 0x02), (6, 74, 0x02), (10, 74, 0x02), (15, 74, 0x02), (24, 74, 0x02), (31, 74, 0x02), (41, 74, 0x02), (56, 74, 0x03), ], // 48 [ (3, 75, 0x02), (6, 75, 0x02), (10, 75, 0x02), (15, 75, 0x02), (24, 75, 0x02), (31, 75, 0x02), (41, 75, 0x02), (56, 75, 0x03), (3, 76, 0x02), (6, 76, 0x02), (10, 76, 0x02), (15, 76, 0x02), (24, 76, 0x02), (31, 76, 0x02), (41, 76, 0x02), (56, 76, 0x03), ], // 49 [ (1, 77, 0x02), (22, 77, 0x03), (1, 78, 0x02), (22, 78, 0x03), (1, 79, 0x02), (22, 79, 0x03), (1, 80, 0x02), (22, 80, 0x03), (1, 81, 0x02), (22, 81, 0x03), (1, 82, 0x02), (22, 82, 0x03), (1, 83, 0x02), (22, 83, 0x03), (1, 84, 0x02), (22, 84, 0x03), ], // 50 [ (2, 77, 0x02), (9, 77, 0x02), (23, 77, 0x02), (40, 77, 0x03), (2, 78, 0x02), (9, 78, 0x02), (23, 78, 0x02), (40, 78, 0x03), (2, 79, 0x02), (9, 79, 0x02), (23, 79, 0x02), (40, 79, 0x03), (2, 80, 0x02), (9, 80, 0x02), (23, 80, 0x02), (40, 80, 0x03), ], // 51 [ (3, 77, 0x02), (6, 77, 0x02), (10, 77, 0x02), (15, 77, 0x02), (24, 77, 0x02), (31, 77, 0x02), (41, 77, 0x02), (56, 77, 0x03), (3, 78, 0x02), (6, 78, 0x02), (10, 78, 0x02), (15, 78, 0x02), (24, 78, 0x02), (31, 78, 0x02), (41, 78, 0x02), (56, 78, 0x03), ], // 52 [ (3, 79, 0x02), (6, 79, 0x02), (10, 79, 0x02), (15, 79, 0x02), (24, 79, 0x02), (31, 79, 0x02), 
(41, 79, 0x02), (56, 79, 0x03), (3, 80, 0x02), (6, 80, 0x02), (10, 80, 0x02), (15, 80, 0x02), (24, 80, 0x02), (31, 80, 0x02), (41, 80, 0x02), (56, 80, 0x03), ], // 53 [ (2, 81, 0x02), (9, 81, 0x02), (23, 81, 0x02), (40, 81, 0x03), (2, 82, 0x02), (9, 82, 0x02), (23, 82, 0x02), (40, 82, 0x03), (2, 83, 0x02), (9, 83, 0x02), (23, 83, 0x02), (40, 83, 0x03), (2, 84, 0x02), (9, 84, 0x02), (23, 84, 0x02), (40, 84, 0x03), ], // 54 [ (3, 81, 0x02), (6, 81, 0x02), (10, 81, 0x02), (15, 81, 0x02), (24, 81, 0x02), (31, 81, 0x02), (41, 81, 0x02), (56, 81, 0x03), (3, 82, 0x02), (6, 82, 0x02), (10, 82, 0x02), (15, 82, 0x02), (24, 82, 0x02), (31, 82, 0x02), (41, 82, 0x02), (56, 82, 0x03), ], // 55 [ (3, 83, 0x02), (6, 83, 0x02), (10, 83, 0x02), (15, 83, 0x02), (24, 83, 0x02), (31, 83, 0x02), (41, 83, 0x02), (56, 83, 0x03), (3, 84, 0x02), (6, 84, 0x02), (10, 84, 0x02), (15, 84, 0x02), (24, 84, 0x02), (31, 84, 0x02), (41, 84, 0x02), (56, 84, 0x03), ], // 56 [ (0, 85, 0x02), (0, 86, 0x02), (0, 87, 0x02), (0, 89, 0x02), (0, 106, 0x02), (0, 107, 0x02), (0, 113, 0x02), (0, 118, 0x02), (0, 119, 0x02), (0, 120, 0x02), (0, 121, 0x02), (0, 122, 0x02), (70, 0, 0x00), (71, 0, 0x00), (73, 0, 0x00), (74, 0, 0x01), ], // 57 [ (1, 85, 0x02), (22, 85, 0x03), (1, 86, 0x02), (22, 86, 0x03), (1, 87, 0x02), (22, 87, 0x03), (1, 89, 0x02), (22, 89, 0x03), (1, 106, 0x02), (22, 106, 0x03), (1, 107, 0x02), (22, 107, 0x03), (1, 113, 0x02), (22, 113, 0x03), (1, 118, 0x02), (22, 118, 0x03), ], // 58 [ (2, 85, 0x02), (9, 85, 0x02), (23, 85, 0x02), (40, 85, 0x03), (2, 86, 0x02), (9, 86, 0x02), (23, 86, 0x02), (40, 86, 0x03), (2, 87, 0x02), (9, 87, 0x02), (23, 87, 0x02), (40, 87, 0x03), (2, 89, 0x02), (9, 89, 0x02), (23, 89, 0x02), (40, 89, 0x03), ], // 59 [ (3, 85, 0x02), (6, 85, 0x02), (10, 85, 0x02), (15, 85, 0x02), (24, 85, 0x02), (31, 85, 0x02), (41, 85, 0x02), (56, 85, 0x03), (3, 86, 0x02), (6, 86, 0x02), (10, 86, 0x02), (15, 86, 0x02), (24, 86, 0x02), (31, 86, 0x02), (41, 86, 0x02), (56, 86, 0x03), ], // 60 [ (3, 87, 0x02), (6, 87, 0x02), (10, 87, 0x02), (15, 87, 0x02), (24, 87, 0x02), (31, 87, 0x02), (41, 87, 0x02), (56, 87, 0x03), (3, 89, 0x02), (6, 89, 0x02), (10, 89, 0x02), (15, 89, 0x02), (24, 89, 0x02), (31, 89, 0x02), (41, 89, 0x02), (56, 89, 0x03), ], // 61 [ (2, 106, 0x02), (9, 106, 0x02), (23, 106, 0x02), (40, 106, 0x03), (2, 107, 0x02), (9, 107, 0x02), (23, 107, 0x02), (40, 107, 0x03), (2, 113, 0x02), (9, 113, 0x02), (23, 113, 0x02), (40, 113, 0x03), (2, 118, 0x02), (9, 118, 0x02), (23, 118, 0x02), (40, 118, 0x03), ], // 62 [ (3, 106, 0x02), (6, 106, 0x02), (10, 106, 0x02), (15, 106, 0x02), (24, 106, 0x02), (31, 106, 0x02), (41, 106, 0x02), (56, 106, 0x03), (3, 107, 0x02), (6, 107, 0x02), (10, 107, 0x02), (15, 107, 0x02), (24, 107, 0x02), (31, 107, 0x02), (41, 107, 0x02), (56, 107, 0x03), ], // 63 [ (3, 113, 0x02), (6, 113, 0x02), (10, 113, 0x02), (15, 113, 0x02), (24, 113, 0x02), (31, 113, 0x02), (41, 113, 0x02), (56, 113, 0x03), (3, 118, 0x02), (6, 118, 0x02), (10, 118, 0x02), (15, 118, 0x02), (24, 118, 0x02), (31, 118, 0x02), (41, 118, 0x02), (56, 118, 0x03), ], // 64 [ (1, 119, 0x02), (22, 119, 0x03), (1, 120, 0x02), (22, 120, 0x03), (1, 121, 0x02), (22, 121, 0x03), (1, 122, 0x02), (22, 122, 0x03), (0, 38, 0x02), (0, 42, 0x02), (0, 44, 0x02), (0, 59, 0x02), (0, 88, 0x02), (0, 90, 0x02), (75, 0, 0x00), (78, 0, 0x00), ], // 65 [ (2, 119, 0x02), (9, 119, 0x02), (23, 119, 0x02), (40, 119, 0x03), (2, 120, 0x02), (9, 120, 0x02), (23, 120, 0x02), (40, 120, 0x03), (2, 121, 0x02), (9, 121, 0x02), (23, 121, 0x02), (40, 121, 
0x03), (2, 122, 0x02), (9, 122, 0x02), (23, 122, 0x02), (40, 122, 0x03), ], // 66 [ (3, 119, 0x02), (6, 119, 0x02), (10, 119, 0x02), (15, 119, 0x02), (24, 119, 0x02), (31, 119, 0x02), (41, 119, 0x02), (56, 119, 0x03), (3, 120, 0x02), (6, 120, 0x02), (10, 120, 0x02), (15, 120, 0x02), (24, 120, 0x02), (31, 120, 0x02), (41, 120, 0x02), (56, 120, 0x03), ], // 67 [ (3, 121, 0x02), (6, 121, 0x02), (10, 121, 0x02), (15, 121, 0x02), (24, 121, 0x02), (31, 121, 0x02), (41, 121, 0x02), (56, 121, 0x03), (3, 122, 0x02), (6, 122, 0x02), (10, 122, 0x02), (15, 122, 0x02), (24, 122, 0x02), (31, 122, 0x02), (41, 122, 0x02), (56, 122, 0x03), ], // 68 [ (1, 38, 0x02), (22, 38, 0x03), (1, 42, 0x02), (22, 42, 0x03), (1, 44, 0x02), (22, 44, 0x03), (1, 59, 0x02), (22, 59, 0x03), (1, 88, 0x02), (22, 88, 0x03), (1, 90, 0x02), (22, 90, 0x03), (76, 0, 0x00), (77, 0, 0x00), (79, 0, 0x00), (81, 0, 0x00), ], // 69 [ (2, 38, 0x02), (9, 38, 0x02), (23, 38, 0x02), (40, 38, 0x03), (2, 42, 0x02), (9, 42, 0x02), (23, 42, 0x02), (40, 42, 0x03), (2, 44, 0x02), (9, 44, 0x02), (23, 44, 0x02), (40, 44, 0x03), (2, 59, 0x02), (9, 59, 0x02), (23, 59, 0x02), (40, 59, 0x03), ], // 70 [ (3, 38, 0x02), (6, 38, 0x02), (10, 38, 0x02), (15, 38, 0x02), (24, 38, 0x02), (31, 38, 0x02), (41, 38, 0x02), (56, 38, 0x03), (3, 42, 0x02), (6, 42, 0x02), (10, 42, 0x02), (15, 42, 0x02), (24, 42, 0x02), (31, 42, 0x02), (41, 42, 0x02), (56, 42, 0x03), ], // 71 [ (3, 44, 0x02), (6, 44, 0x02), (10, 44, 0x02), (15, 44, 0x02), (24, 44, 0x02), (31, 44, 0x02), (41, 44, 0x02), (56, 44, 0x03), (3, 59, 0x02), (6, 59, 0x02), (10, 59, 0x02), (15, 59, 0x02), (24, 59, 0x02), (31, 59, 0x02), (41, 59, 0x02), (56, 59, 0x03), ], // 72 [ (2, 88, 0x02), (9, 88, 0x02), (23, 88, 0x02), (40, 88, 0x03), (2, 90, 0x02), (9, 90, 0x02), (23, 90, 0x02), (40, 90, 0x03), (0, 33, 0x02), (0, 34, 0x02), (0, 40, 0x02), (0, 41, 0x02), (0, 63, 0x02), (80, 0, 0x00), (82, 0, 0x00), (84, 0, 0x00), ], // 73 [ (3, 88, 0x02), (6, 88, 0x02), (10, 88, 0x02), (15, 88, 0x02), (24, 88, 0x02), (31, 88, 0x02), (41, 88, 0x02), (56, 88, 0x03), (3, 90, 0x02), (6, 90, 0x02), (10, 90, 0x02), (15, 90, 0x02), (24, 90, 0x02), (31, 90, 0x02), (41, 90, 0x02), (56, 90, 0x03), ], // 74 [ (1, 33, 0x02), (22, 33, 0x03), (1, 34, 0x02), (22, 34, 0x03), (1, 40, 0x02), (22, 40, 0x03), (1, 41, 0x02), (22, 41, 0x03), (1, 63, 0x02), (22, 63, 0x03), (0, 39, 0x02), (0, 43, 0x02), (0, 124, 0x02), (83, 0, 0x00), (85, 0, 0x00), (88, 0, 0x00), ], // 75 [ (2, 33, 0x02), (9, 33, 0x02), (23, 33, 0x02), (40, 33, 0x03), (2, 34, 0x02), (9, 34, 0x02), (23, 34, 0x02), (40, 34, 0x03), (2, 40, 0x02), (9, 40, 0x02), (23, 40, 0x02), (40, 40, 0x03), (2, 41, 0x02), (9, 41, 0x02), (23, 41, 0x02), (40, 41, 0x03), ], // 76 [ (3, 33, 0x02), (6, 33, 0x02), (10, 33, 0x02), (15, 33, 0x02), (24, 33, 0x02), (31, 33, 0x02), (41, 33, 0x02), (56, 33, 0x03), (3, 34, 0x02), (6, 34, 0x02), (10, 34, 0x02), (15, 34, 0x02), (24, 34, 0x02), (31, 34, 0x02), (41, 34, 0x02), (56, 34, 0x03), ], // 77 [ (3, 40, 0x02), (6, 40, 0x02), (10, 40, 0x02), (15, 40, 0x02), (24, 40, 0x02), (31, 40, 0x02), (41, 40, 0x02), (56, 40, 0x03), (3, 41, 0x02), (6, 41, 0x02), (10, 41, 0x02), (15, 41, 0x02), (24, 41, 0x02), (31, 41, 0x02), (41, 41, 0x02), (56, 41, 0x03), ], // 78 [ (2, 63, 0x02), (9, 63, 0x02), (23, 63, 0x02), (40, 63, 0x03), (1, 39, 0x02), (22, 39, 0x03), (1, 43, 0x02), (22, 43, 0x03), (1, 124, 0x02), (22, 124, 0x03), (0, 35, 0x02), (0, 62, 0x02), (86, 0, 0x00), (87, 0, 0x00), (89, 0, 0x00), (90, 0, 0x00), ], // 79 [ (3, 63, 0x02), (6, 63, 0x02), (10, 63, 0x02), (15, 
63, 0x02), (24, 63, 0x02), (31, 63, 0x02), (41, 63, 0x02), (56, 63, 0x03), (2, 39, 0x02), (9, 39, 0x02), (23, 39, 0x02), (40, 39, 0x03), (2, 43, 0x02), (9, 43, 0x02), (23, 43, 0x02), (40, 43, 0x03), ], // 80 [ (3, 39, 0x02), (6, 39, 0x02), (10, 39, 0x02), (15, 39, 0x02), (24, 39, 0x02), (31, 39, 0x02), (41, 39, 0x02), (56, 39, 0x03), (3, 43, 0x02), (6, 43, 0x02), (10, 43, 0x02), (15, 43, 0x02), (24, 43, 0x02), (31, 43, 0x02), (41, 43, 0x02), (56, 43, 0x03), ], // 81 [ (2, 124, 0x02), (9, 124, 0x02), (23, 124, 0x02), (40, 124, 0x03), (1, 35, 0x02), (22, 35, 0x03), (1, 62, 0x02), (22, 62, 0x03), (0, 0, 0x02), (0, 36, 0x02), (0, 64, 0x02), (0, 91, 0x02), (0, 93, 0x02), (0, 126, 0x02), (91, 0, 0x00), (92, 0, 0x00), ], // 82 [ (3, 124, 0x02), (6, 124, 0x02), (10, 124, 0x02), (15, 124, 0x02), (24, 124, 0x02), (31, 124, 0x02), (41, 124, 0x02), (56, 124, 0x03), (2, 35, 0x02), (9, 35, 0x02), (23, 35, 0x02), (40, 35, 0x03), (2, 62, 0x02), (9, 62, 0x02), (23, 62, 0x02), (40, 62, 0x03), ], // 83 [ (3, 35, 0x02), (6, 35, 0x02), (10, 35, 0x02), (15, 35, 0x02), (24, 35, 0x02), (31, 35, 0x02), (41, 35, 0x02), (56, 35, 0x03), (3, 62, 0x02), (6, 62, 0x02), (10, 62, 0x02), (15, 62, 0x02), (24, 62, 0x02), (31, 62, 0x02), (41, 62, 0x02), (56, 62, 0x03), ], // 84 [ (1, 0, 0x02), (22, 0, 0x03), (1, 36, 0x02), (22, 36, 0x03), (1, 64, 0x02), (22, 64, 0x03), (1, 91, 0x02), (22, 91, 0x03), (1, 93, 0x02), (22, 93, 0x03), (1, 126, 0x02), (22, 126, 0x03), (0, 94, 0x02), (0, 125, 0x02), (93, 0, 0x00), (94, 0, 0x00), ], // 85 [ (2, 0, 0x02), (9, 0, 0x02), (23, 0, 0x02), (40, 0, 0x03), (2, 36, 0x02), (9, 36, 0x02), (23, 36, 0x02), (40, 36, 0x03), (2, 64, 0x02), (9, 64, 0x02), (23, 64, 0x02), (40, 64, 0x03), (2, 91, 0x02), (9, 91, 0x02), (23, 91, 0x02), (40, 91, 0x03), ], // 86 [ (3, 0, 0x02), (6, 0, 0x02), (10, 0, 0x02), (15, 0, 0x02), (24, 0, 0x02), (31, 0, 0x02), (41, 0, 0x02), (56, 0, 0x03), (3, 36, 0x02), (6, 36, 0x02), (10, 36, 0x02), (15, 36, 0x02), (24, 36, 0x02), (31, 36, 0x02), (41, 36, 0x02), (56, 36, 0x03), ], // 87 [ (3, 64, 0x02), (6, 64, 0x02), (10, 64, 0x02), (15, 64, 0x02), (24, 64, 0x02), (31, 64, 0x02), (41, 64, 0x02), (56, 64, 0x03), (3, 91, 0x02), (6, 91, 0x02), (10, 91, 0x02), (15, 91, 0x02), (24, 91, 0x02), (31, 91, 0x02), (41, 91, 0x02), (56, 91, 0x03), ], // 88 [ (2, 93, 0x02), (9, 93, 0x02), (23, 93, 0x02), (40, 93, 0x03), (2, 126, 0x02), (9, 126, 0x02), (23, 126, 0x02), (40, 126, 0x03), (1, 94, 0x02), (22, 94, 0x03), (1, 125, 0x02), (22, 125, 0x03), (0, 60, 0x02), (0, 96, 0x02), (0, 123, 0x02), (95, 0, 0x00), ], // 89 [ (3, 93, 0x02), (6, 93, 0x02), (10, 93, 0x02), (15, 93, 0x02), (24, 93, 0x02), (31, 93, 0x02), (41, 93, 0x02), (56, 93, 0x03), (3, 126, 0x02), (6, 126, 0x02), (10, 126, 0x02), (15, 126, 0x02), (24, 126, 0x02), (31, 126, 0x02), (41, 126, 0x02), (56, 126, 0x03), ], // 90 [ (2, 94, 0x02), (9, 94, 0x02), (23, 94, 0x02), (40, 94, 0x03), (2, 125, 0x02), (9, 125, 0x02), (23, 125, 0x02), (40, 125, 0x03), (1, 60, 0x02), (22, 60, 0x03), (1, 96, 0x02), (22, 96, 0x03), (1, 123, 0x02), (22, 123, 0x03), (96, 0, 0x00), (110, 0, 0x00), ], // 91 [ (3, 94, 0x02), (6, 94, 0x02), (10, 94, 0x02), (15, 94, 0x02), (24, 94, 0x02), (31, 94, 0x02), (41, 94, 0x02), (56, 94, 0x03), (3, 125, 0x02), (6, 125, 0x02), (10, 125, 0x02), (15, 125, 0x02), (24, 125, 0x02), (31, 125, 0x02), (41, 125, 0x02), (56, 125, 0x03), ], // 92 [ (2, 60, 0x02), (9, 60, 0x02), (23, 60, 0x02), (40, 60, 0x03), (2, 96, 0x02), (9, 96, 0x02), (23, 96, 0x02), (40, 96, 0x03), (2, 123, 0x02), (9, 123, 0x02), (23, 123, 0x02), (40, 123, 
0x03), (97, 0, 0x00), (101, 0, 0x00), (111, 0, 0x00), (133, 0, 0x00), ], // 93 [ (3, 60, 0x02), (6, 60, 0x02), (10, 60, 0x02), (15, 60, 0x02), (24, 60, 0x02), (31, 60, 0x02), (41, 60, 0x02), (56, 60, 0x03), (3, 96, 0x02), (6, 96, 0x02), (10, 96, 0x02), (15, 96, 0x02), (24, 96, 0x02), (31, 96, 0x02), (41, 96, 0x02), (56, 96, 0x03), ], // 94 [ (3, 123, 0x02), (6, 123, 0x02), (10, 123, 0x02), (15, 123, 0x02), (24, 123, 0x02), (31, 123, 0x02), (41, 123, 0x02), (56, 123, 0x03), (98, 0, 0x00), (99, 0, 0x00), (102, 0, 0x00), (105, 0, 0x00), (112, 0, 0x00), (119, 0, 0x00), (134, 0, 0x00), (153, 0, 0x00), ], // 95 [ (0, 92, 0x02), (0, 195, 0x02), (0, 208, 0x02), (100, 0, 0x00), (103, 0, 0x00), (104, 0, 0x00), (106, 0, 0x00), (107, 0, 0x00), (113, 0, 0x00), (116, 0, 0x00), (120, 0, 0x00), (126, 0, 0x00), (135, 0, 0x00), (142, 0, 0x00), (154, 0, 0x00), (169, 0, 0x00), ], // 96 [ (1, 92, 0x02), (22, 92, 0x03), (1, 195, 0x02), (22, 195, 0x03), (1, 208, 0x02), (22, 208, 0x03), (0, 128, 0x02), (0, 130, 0x02), (0, 131, 0x02), (0, 162, 0x02), (0, 184, 0x02), (0, 194, 0x02), (0, 224, 0x02), (0, 226, 0x02), (108, 0, 0x00), (109, 0, 0x00), ], // 97 [ (2, 92, 0x02), (9, 92, 0x02), (23, 92, 0x02), (40, 92, 0x03), (2, 195, 0x02), (9, 195, 0x02), (23, 195, 0x02), (40, 195, 0x03), (2, 208, 0x02), (9, 208, 0x02), (23, 208, 0x02), (40, 208, 0x03), (1, 128, 0x02), (22, 128, 0x03), (1, 130, 0x02), (22, 130, 0x03), ], // 98 [ (3, 92, 0x02), (6, 92, 0x02), (10, 92, 0x02), (15, 92, 0x02), (24, 92, 0x02), (31, 92, 0x02), (41, 92, 0x02), (56, 92, 0x03), (3, 195, 0x02), (6, 195, 0x02), (10, 195, 0x02), (15, 195, 0x02), (24, 195, 0x02), (31, 195, 0x02), (41, 195, 0x02), (56, 195, 0x03), ], // 99 [ (3, 208, 0x02), (6, 208, 0x02), (10, 208, 0x02), (15, 208, 0x02), (24, 208, 0x02), (31, 208, 0x02), (41, 208, 0x02), (56, 208, 0x03), (2, 128, 0x02), (9, 128, 0x02), (23, 128, 0x02), (40, 128, 0x03), (2, 130, 0x02), (9, 130, 0x02), (23, 130, 0x02), (40, 130, 0x03), ], // 100 [ (3, 128, 0x02), (6, 128, 0x02), (10, 128, 0x02), (15, 128, 0x02), (24, 128, 0x02), (31, 128, 0x02), (41, 128, 0x02), (56, 128, 0x03), (3, 130, 0x02), (6, 130, 0x02), (10, 130, 0x02), (15, 130, 0x02), (24, 130, 0x02), (31, 130, 0x02), (41, 130, 0x02), (56, 130, 0x03), ], // 101 [ (1, 131, 0x02), (22, 131, 0x03), (1, 162, 0x02), (22, 162, 0x03), (1, 184, 0x02), (22, 184, 0x03), (1, 194, 0x02), (22, 194, 0x03), (1, 224, 0x02), (22, 224, 0x03), (1, 226, 0x02), (22, 226, 0x03), (0, 153, 0x02), (0, 161, 0x02), (0, 167, 0x02), (0, 172, 0x02), ], // 102 [ (2, 131, 0x02), (9, 131, 0x02), (23, 131, 0x02), (40, 131, 0x03), (2, 162, 0x02), (9, 162, 0x02), (23, 162, 0x02), (40, 162, 0x03), (2, 184, 0x02), (9, 184, 0x02), (23, 184, 0x02), (40, 184, 0x03), (2, 194, 0x02), (9, 194, 0x02), (23, 194, 0x02), (40, 194, 0x03), ], // 103 [ (3, 131, 0x02), (6, 131, 0x02), (10, 131, 0x02), (15, 131, 0x02), (24, 131, 0x02), (31, 131, 0x02), (41, 131, 0x02), (56, 131, 0x03), (3, 162, 0x02), (6, 162, 0x02), (10, 162, 0x02), (15, 162, 0x02), (24, 162, 0x02), (31, 162, 0x02), (41, 162, 0x02), (56, 162, 0x03), ], // 104 [ (3, 184, 0x02), (6, 184, 0x02), (10, 184, 0x02), (15, 184, 0x02), (24, 184, 0x02), (31, 184, 0x02), (41, 184, 0x02), (56, 184, 0x03), (3, 194, 0x02), (6, 194, 0x02), (10, 194, 0x02), (15, 194, 0x02), (24, 194, 0x02), (31, 194, 0x02), (41, 194, 0x02), (56, 194, 0x03), ], // 105 [ (2, 224, 0x02), (9, 224, 0x02), (23, 224, 0x02), (40, 224, 0x03), (2, 226, 0x02), (9, 226, 0x02), (23, 226, 0x02), (40, 226, 0x03), (1, 153, 0x02), (22, 153, 0x03), (1, 161, 0x02), (22, 161, 
0x03), (1, 167, 0x02), (22, 167, 0x03), (1, 172, 0x02), (22, 172, 0x03), ], // 106 [ (3, 224, 0x02), (6, 224, 0x02), (10, 224, 0x02), (15, 224, 0x02), (24, 224, 0x02), (31, 224, 0x02), (41, 224, 0x02), (56, 224, 0x03), (3, 226, 0x02), (6, 226, 0x02), (10, 226, 0x02), (15, 226, 0x02), (24, 226, 0x02), (31, 226, 0x02), (41, 226, 0x02), (56, 226, 0x03), ], // 107 [ (2, 153, 0x02), (9, 153, 0x02), (23, 153, 0x02), (40, 153, 0x03), (2, 161, 0x02), (9, 161, 0x02), (23, 161, 0x02), (40, 161, 0x03), (2, 167, 0x02), (9, 167, 0x02), (23, 167, 0x02), (40, 167, 0x03), (2, 172, 0x02), (9, 172, 0x02), (23, 172, 0x02), (40, 172, 0x03), ], // 108 [ (3, 153, 0x02), (6, 153, 0x02), (10, 153, 0x02), (15, 153, 0x02), (24, 153, 0x02), (31, 153, 0x02), (41, 153, 0x02), (56, 153, 0x03), (3, 161, 0x02), (6, 161, 0x02), (10, 161, 0x02), (15, 161, 0x02), (24, 161, 0x02), (31, 161, 0x02), (41, 161, 0x02), (56, 161, 0x03), ], // 109 [ (3, 167, 0x02), (6, 167, 0x02), (10, 167, 0x02), (15, 167, 0x02), (24, 167, 0x02), (31, 167, 0x02), (41, 167, 0x02), (56, 167, 0x03), (3, 172, 0x02), (6, 172, 0x02), (10, 172, 0x02), (15, 172, 0x02), (24, 172, 0x02), (31, 172, 0x02), (41, 172, 0x02), (56, 172, 0x03), ], // 110 [ (114, 0, 0x00), (115, 0, 0x00), (117, 0, 0x00), (118, 0, 0x00), (121, 0, 0x00), (123, 0, 0x00), (127, 0, 0x00), (130, 0, 0x00), (136, 0, 0x00), (139, 0, 0x00), (143, 0, 0x00), (146, 0, 0x00), (155, 0, 0x00), (162, 0, 0x00), (170, 0, 0x00), (180, 0, 0x00), ], // 111 [ (0, 176, 0x02), (0, 177, 0x02), (0, 179, 0x02), (0, 209, 0x02), (0, 216, 0x02), (0, 217, 0x02), (0, 227, 0x02), (0, 229, 0x02), (0, 230, 0x02), (122, 0, 0x00), (124, 0, 0x00), (125, 0, 0x00), (128, 0, 0x00), (129, 0, 0x00), (131, 0, 0x00), (132, 0, 0x00), ], // 112 [ (1, 176, 0x02), (22, 176, 0x03), (1, 177, 0x02), (22, 177, 0x03), (1, 179, 0x02), (22, 179, 0x03), (1, 209, 0x02), (22, 209, 0x03), (1, 216, 0x02), (22, 216, 0x03), (1, 217, 0x02), (22, 217, 0x03), (1, 227, 0x02), (22, 227, 0x03), (1, 229, 0x02), (22, 229, 0x03), ], // 113 [ (2, 176, 0x02), (9, 176, 0x02), (23, 176, 0x02), (40, 176, 0x03), (2, 177, 0x02), (9, 177, 0x02), (23, 177, 0x02), (40, 177, 0x03), (2, 179, 0x02), (9, 179, 0x02), (23, 179, 0x02), (40, 179, 0x03), (2, 209, 0x02), (9, 209, 0x02), (23, 209, 0x02), (40, 209, 0x03), ], // 114 [ (3, 176, 0x02), (6, 176, 0x02), (10, 176, 0x02), (15, 176, 0x02), (24, 176, 0x02), (31, 176, 0x02), (41, 176, 0x02), (56, 176, 0x03), (3, 177, 0x02), (6, 177, 0x02), (10, 177, 0x02), (15, 177, 0x02), (24, 177, 0x02), (31, 177, 0x02), (41, 177, 0x02), (56, 177, 0x03), ], // 115 [ (3, 179, 0x02), (6, 179, 0x02), (10, 179, 0x02), (15, 179, 0x02), (24, 179, 0x02), (31, 179, 0x02), (41, 179, 0x02), (56, 179, 0x03), (3, 209, 0x02), (6, 209, 0x02), (10, 209, 0x02), (15, 209, 0x02), (24, 209, 0x02), (31, 209, 0x02), (41, 209, 0x02), (56, 209, 0x03), ], // 116 [ (2, 216, 0x02), (9, 216, 0x02), (23, 216, 0x02), (40, 216, 0x03), (2, 217, 0x02), (9, 217, 0x02), (23, 217, 0x02), (40, 217, 0x03), (2, 227, 0x02), (9, 227, 0x02), (23, 227, 0x02), (40, 227, 0x03), (2, 229, 0x02), (9, 229, 0x02), (23, 229, 0x02), (40, 229, 0x03), ], // 117 [ (3, 216, 0x02), (6, 216, 0x02), (10, 216, 0x02), (15, 216, 0x02), (24, 216, 0x02), (31, 216, 0x02), (41, 216, 0x02), (56, 216, 0x03), (3, 217, 0x02), (6, 217, 0x02), (10, 217, 0x02), (15, 217, 0x02), (24, 217, 0x02), (31, 217, 0x02), (41, 217, 0x02), (56, 217, 0x03), ], // 118 [ (3, 227, 0x02), (6, 227, 0x02), (10, 227, 0x02), (15, 227, 0x02), (24, 227, 0x02), (31, 227, 0x02), (41, 227, 0x02), (56, 227, 0x03), (3, 229, 0x02), 
(6, 229, 0x02), (10, 229, 0x02), (15, 229, 0x02), (24, 229, 0x02), (31, 229, 0x02), (41, 229, 0x02), (56, 229, 0x03), ], // 119 [ (1, 230, 0x02), (22, 230, 0x03), (0, 129, 0x02), (0, 132, 0x02), (0, 133, 0x02), (0, 134, 0x02), (0, 136, 0x02), (0, 146, 0x02), (0, 154, 0x02), (0, 156, 0x02), (0, 160, 0x02), (0, 163, 0x02), (0, 164, 0x02), (0, 169, 0x02), (0, 170, 0x02), (0, 173, 0x02), ], // 120 [ (2, 230, 0x02), (9, 230, 0x02), (23, 230, 0x02), (40, 230, 0x03), (1, 129, 0x02), (22, 129, 0x03), (1, 132, 0x02), (22, 132, 0x03), (1, 133, 0x02), (22, 133, 0x03), (1, 134, 0x02), (22, 134, 0x03), (1, 136, 0x02), (22, 136, 0x03), (1, 146, 0x02), (22, 146, 0x03), ], // 121 [ (3, 230, 0x02), (6, 230, 0x02), (10, 230, 0x02), (15, 230, 0x02), (24, 230, 0x02), (31, 230, 0x02), (41, 230, 0x02), (56, 230, 0x03), (2, 129, 0x02), (9, 129, 0x02), (23, 129, 0x02), (40, 129, 0x03), (2, 132, 0x02), (9, 132, 0x02), (23, 132, 0x02), (40, 132, 0x03), ], // 122 [ (3, 129, 0x02), (6, 129, 0x02), (10, 129, 0x02), (15, 129, 0x02), (24, 129, 0x02), (31, 129, 0x02), (41, 129, 0x02), (56, 129, 0x03), (3, 132, 0x02), (6, 132, 0x02), (10, 132, 0x02), (15, 132, 0x02), (24, 132, 0x02), (31, 132, 0x02), (41, 132, 0x02), (56, 132, 0x03), ], // 123 [ (2, 133, 0x02), (9, 133, 0x02), (23, 133, 0x02), (40, 133, 0x03), (2, 134, 0x02), (9, 134, 0x02), (23, 134, 0x02), (40, 134, 0x03), (2, 136, 0x02), (9, 136, 0x02), (23, 136, 0x02), (40, 136, 0x03), (2, 146, 0x02), (9, 146, 0x02), (23, 146, 0x02), (40, 146, 0x03), ], // 124 [ (3, 133, 0x02), (6, 133, 0x02), (10, 133, 0x02), (15, 133, 0x02), (24, 133, 0x02), (31, 133, 0x02), (41, 133, 0x02), (56, 133, 0x03), (3, 134, 0x02), (6, 134, 0x02), (10, 134, 0x02), (15, 134, 0x02), (24, 134, 0x02), (31, 134, 0x02), (41, 134, 0x02), (56, 134, 0x03), ], // 125 [ (3, 136, 0x02), (6, 136, 0x02), (10, 136, 0x02), (15, 136, 0x02), (24, 136, 0x02), (31, 136, 0x02), (41, 136, 0x02), (56, 136, 0x03), (3, 146, 0x02), (6, 146, 0x02), (10, 146, 0x02), (15, 146, 0x02), (24, 146, 0x02), (31, 146, 0x02), (41, 146, 0x02), (56, 146, 0x03), ], // 126 [ (1, 154, 0x02), (22, 154, 0x03), (1, 156, 0x02), (22, 156, 0x03), (1, 160, 0x02), (22, 160, 0x03), (1, 163, 0x02), (22, 163, 0x03), (1, 164, 0x02), (22, 164, 0x03), (1, 169, 0x02), (22, 169, 0x03), (1, 170, 0x02), (22, 170, 0x03), (1, 173, 0x02), (22, 173, 0x03), ], // 127 [ (2, 154, 0x02), (9, 154, 0x02), (23, 154, 0x02), (40, 154, 0x03), (2, 156, 0x02), (9, 156, 0x02), (23, 156, 0x02), (40, 156, 0x03), (2, 160, 0x02), (9, 160, 0x02), (23, 160, 0x02), (40, 160, 0x03), (2, 163, 0x02), (9, 163, 0x02), (23, 163, 0x02), (40, 163, 0x03), ], // 128 [ (3, 154, 0x02), (6, 154, 0x02), (10, 154, 0x02), (15, 154, 0x02), (24, 154, 0x02), (31, 154, 0x02), (41, 154, 0x02), (56, 154, 0x03), (3, 156, 0x02), (6, 156, 0x02), (10, 156, 0x02), (15, 156, 0x02), (24, 156, 0x02), (31, 156, 0x02), (41, 156, 0x02), (56, 156, 0x03), ], // 129 [ (3, 160, 0x02), (6, 160, 0x02), (10, 160, 0x02), (15, 160, 0x02), (24, 160, 0x02), (31, 160, 0x02), (41, 160, 0x02), (56, 160, 0x03), (3, 163, 0x02), (6, 163, 0x02), (10, 163, 0x02), (15, 163, 0x02), (24, 163, 0x02), (31, 163, 0x02), (41, 163, 0x02), (56, 163, 0x03), ], // 130 [ (2, 164, 0x02), (9, 164, 0x02), (23, 164, 0x02), (40, 164, 0x03), (2, 169, 0x02), (9, 169, 0x02), (23, 169, 0x02), (40, 169, 0x03), (2, 170, 0x02), (9, 170, 0x02), (23, 170, 0x02), (40, 170, 0x03), (2, 173, 0x02), (9, 173, 0x02), (23, 173, 0x02), (40, 173, 0x03), ], // 131 [ (3, 164, 0x02), (6, 164, 0x02), (10, 164, 0x02), (15, 164, 0x02), (24, 164, 0x02), (31, 164, 
0x02), (41, 164, 0x02), (56, 164, 0x03), (3, 169, 0x02), (6, 169, 0x02), (10, 169, 0x02), (15, 169, 0x02), (24, 169, 0x02), (31, 169, 0x02), (41, 169, 0x02), (56, 169, 0x03), ], // 132 [ (3, 170, 0x02), (6, 170, 0x02), (10, 170, 0x02), (15, 170, 0x02), (24, 170, 0x02), (31, 170, 0x02), (41, 170, 0x02), (56, 170, 0x03), (3, 173, 0x02), (6, 173, 0x02), (10, 173, 0x02), (15, 173, 0x02), (24, 173, 0x02), (31, 173, 0x02), (41, 173, 0x02), (56, 173, 0x03), ], // 133 [ (137, 0, 0x00), (138, 0, 0x00), (140, 0, 0x00), (141, 0, 0x00), (144, 0, 0x00), (145, 0, 0x00), (147, 0, 0x00), (150, 0, 0x00), (156, 0, 0x00), (159, 0, 0x00), (163, 0, 0x00), (166, 0, 0x00), (171, 0, 0x00), (174, 0, 0x00), (181, 0, 0x00), (190, 0, 0x00), ], // 134 [ (0, 178, 0x02), (0, 181, 0x02), (0, 185, 0x02), (0, 186, 0x02), (0, 187, 0x02), (0, 189, 0x02), (0, 190, 0x02), (0, 196, 0x02), (0, 198, 0x02), (0, 228, 0x02), (0, 232, 0x02), (0, 233, 0x02), (148, 0, 0x00), (149, 0, 0x00), (151, 0, 0x00), (152, 0, 0x00), ], // 135 [ (1, 178, 0x02), (22, 178, 0x03), (1, 181, 0x02), (22, 181, 0x03), (1, 185, 0x02), (22, 185, 0x03), (1, 186, 0x02), (22, 186, 0x03), (1, 187, 0x02), (22, 187, 0x03), (1, 189, 0x02), (22, 189, 0x03), (1, 190, 0x02), (22, 190, 0x03), (1, 196, 0x02), (22, 196, 0x03), ], // 136 [ (2, 178, 0x02), (9, 178, 0x02), (23, 178, 0x02), (40, 178, 0x03), (2, 181, 0x02), (9, 181, 0x02), (23, 181, 0x02), (40, 181, 0x03), (2, 185, 0x02), (9, 185, 0x02), (23, 185, 0x02), (40, 185, 0x03), (2, 186, 0x02), (9, 186, 0x02), (23, 186, 0x02), (40, 186, 0x03), ], // 137 [ (3, 178, 0x02), (6, 178, 0x02), (10, 178, 0x02), (15, 178, 0x02), (24, 178, 0x02), (31, 178, 0x02), (41, 178, 0x02), (56, 178, 0x03), (3, 181, 0x02), (6, 181, 0x02), (10, 181, 0x02), (15, 181, 0x02), (24, 181, 0x02), (31, 181, 0x02), (41, 181, 0x02), (56, 181, 0x03), ], // 138 [ (3, 185, 0x02), (6, 185, 0x02), (10, 185, 0x02), (15, 185, 0x02), (24, 185, 0x02), (31, 185, 0x02), (41, 185, 0x02), (56, 185, 0x03), (3, 186, 0x02), (6, 186, 0x02), (10, 186, 0x02), (15, 186, 0x02), (24, 186, 0x02), (31, 186, 0x02), (41, 186, 0x02), (56, 186, 0x03), ], // 139 [ (2, 187, 0x02), (9, 187, 0x02), (23, 187, 0x02), (40, 187, 0x03), (2, 189, 0x02), (9, 189, 0x02), (23, 189, 0x02), (40, 189, 0x03), (2, 190, 0x02), (9, 190, 0x02), (23, 190, 0x02), (40, 190, 0x03), (2, 196, 0x02), (9, 196, 0x02), (23, 196, 0x02), (40, 196, 0x03), ], // 140 [ (3, 187, 0x02), (6, 187, 0x02), (10, 187, 0x02), (15, 187, 0x02), (24, 187, 0x02), (31, 187, 0x02), (41, 187, 0x02), (56, 187, 0x03), (3, 189, 0x02), (6, 189, 0x02), (10, 189, 0x02), (15, 189, 0x02), (24, 189, 0x02), (31, 189, 0x02), (41, 189, 0x02), (56, 189, 0x03), ], // 141 [ (3, 190, 0x02), (6, 190, 0x02), (10, 190, 0x02), (15, 190, 0x02), (24, 190, 0x02), (31, 190, 0x02), (41, 190, 0x02), (56, 190, 0x03), (3, 196, 0x02), (6, 196, 0x02), (10, 196, 0x02), (15, 196, 0x02), (24, 196, 0x02), (31, 196, 0x02), (41, 196, 0x02), (56, 196, 0x03), ], // 142 [ (1, 198, 0x02), (22, 198, 0x03), (1, 228, 0x02), (22, 228, 0x03), (1, 232, 0x02), (22, 232, 0x03), (1, 233, 0x02), (22, 233, 0x03), (0, 1, 0x02), (0, 135, 0x02), (0, 137, 0x02), (0, 138, 0x02), (0, 139, 0x02), (0, 140, 0x02), (0, 141, 0x02), (0, 143, 0x02), ], // 143 [ (2, 198, 0x02), (9, 198, 0x02), (23, 198, 0x02), (40, 198, 0x03), (2, 228, 0x02), (9, 228, 0x02), (23, 228, 0x02), (40, 228, 0x03), (2, 232, 0x02), (9, 232, 0x02), (23, 232, 0x02), (40, 232, 0x03), (2, 233, 0x02), (9, 233, 0x02), (23, 233, 0x02), (40, 233, 0x03), ], // 144 [ (3, 198, 0x02), (6, 198, 0x02), (10, 198, 0x02), (15, 
198, 0x02), (24, 198, 0x02), (31, 198, 0x02), (41, 198, 0x02), (56, 198, 0x03), (3, 228, 0x02), (6, 228, 0x02), (10, 228, 0x02), (15, 228, 0x02), (24, 228, 0x02), (31, 228, 0x02), (41, 228, 0x02), (56, 228, 0x03), ], // 145 [ (3, 232, 0x02), (6, 232, 0x02), (10, 232, 0x02), (15, 232, 0x02), (24, 232, 0x02), (31, 232, 0x02), (41, 232, 0x02), (56, 232, 0x03), (3, 233, 0x02), (6, 233, 0x02), (10, 233, 0x02), (15, 233, 0x02), (24, 233, 0x02), (31, 233, 0x02), (41, 233, 0x02), (56, 233, 0x03), ], // 146 [ (1, 1, 0x02), (22, 1, 0x03), (1, 135, 0x02), (22, 135, 0x03), (1, 137, 0x02), (22, 137, 0x03), (1, 138, 0x02), (22, 138, 0x03), (1, 139, 0x02), (22, 139, 0x03), (1, 140, 0x02), (22, 140, 0x03), (1, 141, 0x02), (22, 141, 0x03), (1, 143, 0x02), (22, 143, 0x03), ], // 147 [ (2, 1, 0x02), (9, 1, 0x02), (23, 1, 0x02), (40, 1, 0x03), (2, 135, 0x02), (9, 135, 0x02), (23, 135, 0x02), (40, 135, 0x03), (2, 137, 0x02), (9, 137, 0x02), (23, 137, 0x02), (40, 137, 0x03), (2, 138, 0x02), (9, 138, 0x02), (23, 138, 0x02), (40, 138, 0x03), ], // 148 [ (3, 1, 0x02), (6, 1, 0x02), (10, 1, 0x02), (15, 1, 0x02), (24, 1, 0x02), (31, 1, 0x02), (41, 1, 0x02), (56, 1, 0x03), (3, 135, 0x02), (6, 135, 0x02), (10, 135, 0x02), (15, 135, 0x02), (24, 135, 0x02), (31, 135, 0x02), (41, 135, 0x02), (56, 135, 0x03), ], // 149 [ (3, 137, 0x02), (6, 137, 0x02), (10, 137, 0x02), (15, 137, 0x02), (24, 137, 0x02), (31, 137, 0x02), (41, 137, 0x02), (56, 137, 0x03), (3, 138, 0x02), (6, 138, 0x02), (10, 138, 0x02), (15, 138, 0x02), (24, 138, 0x02), (31, 138, 0x02), (41, 138, 0x02), (56, 138, 0x03), ], // 150 [ (2, 139, 0x02), (9, 139, 0x02), (23, 139, 0x02), (40, 139, 0x03), (2, 140, 0x02), (9, 140, 0x02), (23, 140, 0x02), (40, 140, 0x03), (2, 141, 0x02), (9, 141, 0x02), (23, 141, 0x02), (40, 141, 0x03), (2, 143, 0x02), (9, 143, 0x02), (23, 143, 0x02), (40, 143, 0x03), ], // 151 [ (3, 139, 0x02), (6, 139, 0x02), (10, 139, 0x02), (15, 139, 0x02), (24, 139, 0x02), (31, 139, 0x02), (41, 139, 0x02), (56, 139, 0x03), (3, 140, 0x02), (6, 140, 0x02), (10, 140, 0x02), (15, 140, 0x02), (24, 140, 0x02), (31, 140, 0x02), (41, 140, 0x02), (56, 140, 0x03), ], // 152 [ (3, 141, 0x02), (6, 141, 0x02), (10, 141, 0x02), (15, 141, 0x02), (24, 141, 0x02), (31, 141, 0x02), (41, 141, 0x02), (56, 141, 0x03), (3, 143, 0x02), (6, 143, 0x02), (10, 143, 0x02), (15, 143, 0x02), (24, 143, 0x02), (31, 143, 0x02), (41, 143, 0x02), (56, 143, 0x03), ], // 153 [ (157, 0, 0x00), (158, 0, 0x00), (160, 0, 0x00), (161, 0, 0x00), (164, 0, 0x00), (165, 0, 0x00), (167, 0, 0x00), (168, 0, 0x00), (172, 0, 0x00), (173, 0, 0x00), (175, 0, 0x00), (177, 0, 0x00), (182, 0, 0x00), (185, 0, 0x00), (191, 0, 0x00), (207, 0, 0x00), ], // 154 [ (0, 147, 0x02), (0, 149, 0x02), (0, 150, 0x02), (0, 151, 0x02), (0, 152, 0x02), (0, 155, 0x02), (0, 157, 0x02), (0, 158, 0x02), (0, 165, 0x02), (0, 166, 0x02), (0, 168, 0x02), (0, 174, 0x02), (0, 175, 0x02), (0, 180, 0x02), (0, 182, 0x02), (0, 183, 0x02), ], // 155 [ (1, 147, 0x02), (22, 147, 0x03), (1, 149, 0x02), (22, 149, 0x03), (1, 150, 0x02), (22, 150, 0x03), (1, 151, 0x02), (22, 151, 0x03), (1, 152, 0x02), (22, 152, 0x03), (1, 155, 0x02), (22, 155, 0x03), (1, 157, 0x02), (22, 157, 0x03), (1, 158, 0x02), (22, 158, 0x03), ], // 156 [ (2, 147, 0x02), (9, 147, 0x02), (23, 147, 0x02), (40, 147, 0x03), (2, 149, 0x02), (9, 149, 0x02), (23, 149, 0x02), (40, 149, 0x03), (2, 150, 0x02), (9, 150, 0x02), (23, 150, 0x02), (40, 150, 0x03), (2, 151, 0x02), (9, 151, 0x02), (23, 151, 0x02), (40, 151, 0x03), ], // 157 [ (3, 147, 0x02), (6, 147, 0x02), (10, 
147, 0x02), (15, 147, 0x02), (24, 147, 0x02), (31, 147, 0x02), (41, 147, 0x02), (56, 147, 0x03), (3, 149, 0x02), (6, 149, 0x02), (10, 149, 0x02), (15, 149, 0x02), (24, 149, 0x02), (31, 149, 0x02), (41, 149, 0x02), (56, 149, 0x03), ], // 158 [ (3, 150, 0x02), (6, 150, 0x02), (10, 150, 0x02), (15, 150, 0x02), (24, 150, 0x02), (31, 150, 0x02), (41, 150, 0x02), (56, 150, 0x03), (3, 151, 0x02), (6, 151, 0x02), (10, 151, 0x02), (15, 151, 0x02), (24, 151, 0x02), (31, 151, 0x02), (41, 151, 0x02), (56, 151, 0x03), ], // 159 [ (2, 152, 0x02), (9, 152, 0x02), (23, 152, 0x02), (40, 152, 0x03), (2, 155, 0x02), (9, 155, 0x02), (23, 155, 0x02), (40, 155, 0x03), (2, 157, 0x02), (9, 157, 0x02), (23, 157, 0x02), (40, 157, 0x03), (2, 158, 0x02), (9, 158, 0x02), (23, 158, 0x02), (40, 158, 0x03), ], // 160 [ (3, 152, 0x02), (6, 152, 0x02), (10, 152, 0x02), (15, 152, 0x02), (24, 152, 0x02), (31, 152, 0x02), (41, 152, 0x02), (56, 152, 0x03), (3, 155, 0x02), (6, 155, 0x02), (10, 155, 0x02), (15, 155, 0x02), (24, 155, 0x02), (31, 155, 0x02), (41, 155, 0x02), (56, 155, 0x03), ], // 161 [ (3, 157, 0x02), (6, 157, 0x02), (10, 157, 0x02), (15, 157, 0x02), (24, 157, 0x02), (31, 157, 0x02), (41, 157, 0x02), (56, 157, 0x03), (3, 158, 0x02), (6, 158, 0x02), (10, 158, 0x02), (15, 158, 0x02), (24, 158, 0x02), (31, 158, 0x02), (41, 158, 0x02), (56, 158, 0x03), ], // 162 [ (1, 165, 0x02), (22, 165, 0x03), (1, 166, 0x02), (22, 166, 0x03), (1, 168, 0x02), (22, 168, 0x03), (1, 174, 0x02), (22, 174, 0x03), (1, 175, 0x02), (22, 175, 0x03), (1, 180, 0x02), (22, 180, 0x03), (1, 182, 0x02), (22, 182, 0x03), (1, 183, 0x02), (22, 183, 0x03), ], // 163 [ (2, 165, 0x02), (9, 165, 0x02), (23, 165, 0x02), (40, 165, 0x03), (2, 166, 0x02), (9, 166, 0x02), (23, 166, 0x02), (40, 166, 0x03), (2, 168, 0x02), (9, 168, 0x02), (23, 168, 0x02), (40, 168, 0x03), (2, 174, 0x02), (9, 174, 0x02), (23, 174, 0x02), (40, 174, 0x03), ], // 164 [ (3, 165, 0x02), (6, 165, 0x02), (10, 165, 0x02), (15, 165, 0x02), (24, 165, 0x02), (31, 165, 0x02), (41, 165, 0x02), (56, 165, 0x03), (3, 166, 0x02), (6, 166, 0x02), (10, 166, 0x02), (15, 166, 0x02), (24, 166, 0x02), (31, 166, 0x02), (41, 166, 0x02), (56, 166, 0x03), ], // 165 [ (3, 168, 0x02), (6, 168, 0x02), (10, 168, 0x02), (15, 168, 0x02), (24, 168, 0x02), (31, 168, 0x02), (41, 168, 0x02), (56, 168, 0x03), (3, 174, 0x02), (6, 174, 0x02), (10, 174, 0x02), (15, 174, 0x02), (24, 174, 0x02), (31, 174, 0x02), (41, 174, 0x02), (56, 174, 0x03), ], // 166 [ (2, 175, 0x02), (9, 175, 0x02), (23, 175, 0x02), (40, 175, 0x03), (2, 180, 0x02), (9, 180, 0x02), (23, 180, 0x02), (40, 180, 0x03), (2, 182, 0x02), (9, 182, 0x02), (23, 182, 0x02), (40, 182, 0x03), (2, 183, 0x02), (9, 183, 0x02), (23, 183, 0x02), (40, 183, 0x03), ], // 167 [ (3, 175, 0x02), (6, 175, 0x02), (10, 175, 0x02), (15, 175, 0x02), (24, 175, 0x02), (31, 175, 0x02), (41, 175, 0x02), (56, 175, 0x03), (3, 180, 0x02), (6, 180, 0x02), (10, 180, 0x02), (15, 180, 0x02), (24, 180, 0x02), (31, 180, 0x02), (41, 180, 0x02), (56, 180, 0x03), ], // 168 [ (3, 182, 0x02), (6, 182, 0x02), (10, 182, 0x02), (15, 182, 0x02), (24, 182, 0x02), (31, 182, 0x02), (41, 182, 0x02), (56, 182, 0x03), (3, 183, 0x02), (6, 183, 0x02), (10, 183, 0x02), (15, 183, 0x02), (24, 183, 0x02), (31, 183, 0x02), (41, 183, 0x02), (56, 183, 0x03), ], // 169 [ (0, 188, 0x02), (0, 191, 0x02), (0, 197, 0x02), (0, 231, 0x02), (0, 239, 0x02), (176, 0, 0x00), (178, 0, 0x00), (179, 0, 0x00), (183, 0, 0x00), (184, 0, 0x00), (186, 0, 0x00), (187, 0, 0x00), (192, 0, 0x00), (199, 0, 0x00), (208, 0, 0x00), (223, 
0, 0x00), ], // 170 [ (1, 188, 0x02), (22, 188, 0x03), (1, 191, 0x02), (22, 191, 0x03), (1, 197, 0x02), (22, 197, 0x03), (1, 231, 0x02), (22, 231, 0x03), (1, 239, 0x02), (22, 239, 0x03), (0, 9, 0x02), (0, 142, 0x02), (0, 144, 0x02), (0, 145, 0x02), (0, 148, 0x02), (0, 159, 0x02), ], // 171 [ (2, 188, 0x02), (9, 188, 0x02), (23, 188, 0x02), (40, 188, 0x03), (2, 191, 0x02), (9, 191, 0x02), (23, 191, 0x02), (40, 191, 0x03), (2, 197, 0x02), (9, 197, 0x02), (23, 197, 0x02), (40, 197, 0x03), (2, 231, 0x02), (9, 231, 0x02), (23, 231, 0x02), (40, 231, 0x03), ], // 172 [ (3, 188, 0x02), (6, 188, 0x02), (10, 188, 0x02), (15, 188, 0x02), (24, 188, 0x02), (31, 188, 0x02), (41, 188, 0x02), (56, 188, 0x03), (3, 191, 0x02), (6, 191, 0x02), (10, 191, 0x02), (15, 191, 0x02), (24, 191, 0x02), (31, 191, 0x02), (41, 191, 0x02), (56, 191, 0x03), ], // 173 [ (3, 197, 0x02), (6, 197, 0x02), (10, 197, 0x02), (15, 197, 0x02), (24, 197, 0x02), (31, 197, 0x02), (41, 197, 0x02), (56, 197, 0x03), (3, 231, 0x02), (6, 231, 0x02), (10, 231, 0x02), (15, 231, 0x02), (24, 231, 0x02), (31, 231, 0x02), (41, 231, 0x02), (56, 231, 0x03), ], // 174 [ (2, 239, 0x02), (9, 239, 0x02), (23, 239, 0x02), (40, 239, 0x03), (1, 9, 0x02), (22, 9, 0x03), (1, 142, 0x02), (22, 142, 0x03), (1, 144, 0x02), (22, 144, 0x03), (1, 145, 0x02), (22, 145, 0x03), (1, 148, 0x02), (22, 148, 0x03), (1, 159, 0x02), (22, 159, 0x03), ], // 175 [ (3, 239, 0x02), (6, 239, 0x02), (10, 239, 0x02), (15, 239, 0x02), (24, 239, 0x02), (31, 239, 0x02), (41, 239, 0x02), (56, 239, 0x03), (2, 9, 0x02), (9, 9, 0x02), (23, 9, 0x02), (40, 9, 0x03), (2, 142, 0x02), (9, 142, 0x02), (23, 142, 0x02), (40, 142, 0x03), ], // 176 [ (3, 9, 0x02), (6, 9, 0x02), (10, 9, 0x02), (15, 9, 0x02), (24, 9, 0x02), (31, 9, 0x02), (41, 9, 0x02), (56, 9, 0x03), (3, 142, 0x02), (6, 142, 0x02), (10, 142, 0x02), (15, 142, 0x02), (24, 142, 0x02), (31, 142, 0x02), (41, 142, 0x02), (56, 142, 0x03), ], // 177 [ (2, 144, 0x02), (9, 144, 0x02), (23, 144, 0x02), (40, 144, 0x03), (2, 145, 0x02), (9, 145, 0x02), (23, 145, 0x02), (40, 145, 0x03), (2, 148, 0x02), (9, 148, 0x02), (23, 148, 0x02), (40, 148, 0x03), (2, 159, 0x02), (9, 159, 0x02), (23, 159, 0x02), (40, 159, 0x03), ], // 178 [ (3, 144, 0x02), (6, 144, 0x02), (10, 144, 0x02), (15, 144, 0x02), (24, 144, 0x02), (31, 144, 0x02), (41, 144, 0x02), (56, 144, 0x03), (3, 145, 0x02), (6, 145, 0x02), (10, 145, 0x02), (15, 145, 0x02), (24, 145, 0x02), (31, 145, 0x02), (41, 145, 0x02), (56, 145, 0x03), ], // 179 [ (3, 148, 0x02), (6, 148, 0x02), (10, 148, 0x02), (15, 148, 0x02), (24, 148, 0x02), (31, 148, 0x02), (41, 148, 0x02), (56, 148, 0x03), (3, 159, 0x02), (6, 159, 0x02), (10, 159, 0x02), (15, 159, 0x02), (24, 159, 0x02), (31, 159, 0x02), (41, 159, 0x02), (56, 159, 0x03), ], // 180 [ (0, 171, 0x02), (0, 206, 0x02), (0, 215, 0x02), (0, 225, 0x02), (0, 236, 0x02), (0, 237, 0x02), (188, 0, 0x00), (189, 0, 0x00), (193, 0, 0x00), (196, 0, 0x00), (200, 0, 0x00), (203, 0, 0x00), (209, 0, 0x00), (216, 0, 0x00), (224, 0, 0x00), (238, 0, 0x00), ], // 181 [ (1, 171, 0x02), (22, 171, 0x03), (1, 206, 0x02), (22, 206, 0x03), (1, 215, 0x02), (22, 215, 0x03), (1, 225, 0x02), (22, 225, 0x03), (1, 236, 0x02), (22, 236, 0x03), (1, 237, 0x02), (22, 237, 0x03), (0, 199, 0x02), (0, 207, 0x02), (0, 234, 0x02), (0, 235, 0x02), ], // 182 [ (2, 171, 0x02), (9, 171, 0x02), (23, 171, 0x02), (40, 171, 0x03), (2, 206, 0x02), (9, 206, 0x02), (23, 206, 0x02), (40, 206, 0x03), (2, 215, 0x02), (9, 215, 0x02), (23, 215, 0x02), (40, 215, 0x03), (2, 225, 0x02), (9, 225, 0x02), (23, 225, 
0x02), (40, 225, 0x03), ], // 183 [ (3, 171, 0x02), (6, 171, 0x02), (10, 171, 0x02), (15, 171, 0x02), (24, 171, 0x02), (31, 171, 0x02), (41, 171, 0x02), (56, 171, 0x03), (3, 206, 0x02), (6, 206, 0x02), (10, 206, 0x02), (15, 206, 0x02), (24, 206, 0x02), (31, 206, 0x02), (41, 206, 0x02), (56, 206, 0x03), ], // 184 [ (3, 215, 0x02), (6, 215, 0x02), (10, 215, 0x02), (15, 215, 0x02), (24, 215, 0x02), (31, 215, 0x02), (41, 215, 0x02), (56, 215, 0x03), (3, 225, 0x02), (6, 225, 0x02), (10, 225, 0x02), (15, 225, 0x02), (24, 225, 0x02), (31, 225, 0x02), (41, 225, 0x02), (56, 225, 0x03), ], // 185 [ (2, 236, 0x02), (9, 236, 0x02), (23, 236, 0x02), (40, 236, 0x03), (2, 237, 0x02), (9, 237, 0x02), (23, 237, 0x02), (40, 237, 0x03), (1, 199, 0x02), (22, 199, 0x03), (1, 207, 0x02), (22, 207, 0x03), (1, 234, 0x02), (22, 234, 0x03), (1, 235, 0x02), (22, 235, 0x03), ], // 186 [ (3, 236, 0x02), (6, 236, 0x02), (10, 236, 0x02), (15, 236, 0x02), (24, 236, 0x02), (31, 236, 0x02), (41, 236, 0x02), (56, 236, 0x03), (3, 237, 0x02), (6, 237, 0x02), (10, 237, 0x02), (15, 237, 0x02), (24, 237, 0x02), (31, 237, 0x02), (41, 237, 0x02), (56, 237, 0x03), ], // 187 [ (2, 199, 0x02), (9, 199, 0x02), (23, 199, 0x02), (40, 199, 0x03), (2, 207, 0x02), (9, 207, 0x02), (23, 207, 0x02), (40, 207, 0x03), (2, 234, 0x02), (9, 234, 0x02), (23, 234, 0x02), (40, 234, 0x03), (2, 235, 0x02), (9, 235, 0x02), (23, 235, 0x02), (40, 235, 0x03), ], // 188 [ (3, 199, 0x02), (6, 199, 0x02), (10, 199, 0x02), (15, 199, 0x02), (24, 199, 0x02), (31, 199, 0x02), (41, 199, 0x02), (56, 199, 0x03), (3, 207, 0x02), (6, 207, 0x02), (10, 207, 0x02), (15, 207, 0x02), (24, 207, 0x02), (31, 207, 0x02), (41, 207, 0x02), (56, 207, 0x03), ], // 189 [ (3, 234, 0x02), (6, 234, 0x02), (10, 234, 0x02), (15, 234, 0x02), (24, 234, 0x02), (31, 234, 0x02), (41, 234, 0x02), (56, 234, 0x03), (3, 235, 0x02), (6, 235, 0x02), (10, 235, 0x02), (15, 235, 0x02), (24, 235, 0x02), (31, 235, 0x02), (41, 235, 0x02), (56, 235, 0x03), ], // 190 [ (194, 0, 0x00), (195, 0, 0x00), (197, 0, 0x00), (198, 0, 0x00), (201, 0, 0x00), (202, 0, 0x00), (204, 0, 0x00), (205, 0, 0x00), (210, 0, 0x00), (213, 0, 0x00), (217, 0, 0x00), (220, 0, 0x00), (225, 0, 0x00), (231, 0, 0x00), (239, 0, 0x00), (246, 0, 0x00), ], // 191 [ (0, 192, 0x02), (0, 193, 0x02), (0, 200, 0x02), (0, 201, 0x02), (0, 202, 0x02), (0, 205, 0x02), (0, 210, 0x02), (0, 213, 0x02), (0, 218, 0x02), (0, 219, 0x02), (0, 238, 0x02), (0, 240, 0x02), (0, 242, 0x02), (0, 243, 0x02), (0, 255, 0x02), (206, 0, 0x00), ], // 192 [ (1, 192, 0x02), (22, 192, 0x03), (1, 193, 0x02), (22, 193, 0x03), (1, 200, 0x02), (22, 200, 0x03), (1, 201, 0x02), (22, 201, 0x03), (1, 202, 0x02), (22, 202, 0x03), (1, 205, 0x02), (22, 205, 0x03), (1, 210, 0x02), (22, 210, 0x03), (1, 213, 0x02), (22, 213, 0x03), ], // 193 [ (2, 192, 0x02), (9, 192, 0x02), (23, 192, 0x02), (40, 192, 0x03), (2, 193, 0x02), (9, 193, 0x02), (23, 193, 0x02), (40, 193, 0x03), (2, 200, 0x02), (9, 200, 0x02), (23, 200, 0x02), (40, 200, 0x03), (2, 201, 0x02), (9, 201, 0x02), (23, 201, 0x02), (40, 201, 0x03), ], // 194 [ (3, 192, 0x02), (6, 192, 0x02), (10, 192, 0x02), (15, 192, 0x02), (24, 192, 0x02), (31, 192, 0x02), (41, 192, 0x02), (56, 192, 0x03), (3, 193, 0x02), (6, 193, 0x02), (10, 193, 0x02), (15, 193, 0x02), (24, 193, 0x02), (31, 193, 0x02), (41, 193, 0x02), (56, 193, 0x03), ], // 195 [ (3, 200, 0x02), (6, 200, 0x02), (10, 200, 0x02), (15, 200, 0x02), (24, 200, 0x02), (31, 200, 0x02), (41, 200, 0x02), (56, 200, 0x03), (3, 201, 0x02), (6, 201, 0x02), (10, 201, 0x02), (15, 201, 
0x02), (24, 201, 0x02), (31, 201, 0x02), (41, 201, 0x02), (56, 201, 0x03), ], // 196 [ (2, 202, 0x02), (9, 202, 0x02), (23, 202, 0x02), (40, 202, 0x03), (2, 205, 0x02), (9, 205, 0x02), (23, 205, 0x02), (40, 205, 0x03), (2, 210, 0x02), (9, 210, 0x02), (23, 210, 0x02), (40, 210, 0x03), (2, 213, 0x02), (9, 213, 0x02), (23, 213, 0x02), (40, 213, 0x03), ], // 197 [ (3, 202, 0x02), (6, 202, 0x02), (10, 202, 0x02), (15, 202, 0x02), (24, 202, 0x02), (31, 202, 0x02), (41, 202, 0x02), (56, 202, 0x03), (3, 205, 0x02), (6, 205, 0x02), (10, 205, 0x02), (15, 205, 0x02), (24, 205, 0x02), (31, 205, 0x02), (41, 205, 0x02), (56, 205, 0x03), ], // 198 [ (3, 210, 0x02), (6, 210, 0x02), (10, 210, 0x02), (15, 210, 0x02), (24, 210, 0x02), (31, 210, 0x02), (41, 210, 0x02), (56, 210, 0x03), (3, 213, 0x02), (6, 213, 0x02), (10, 213, 0x02), (15, 213, 0x02), (24, 213, 0x02), (31, 213, 0x02), (41, 213, 0x02), (56, 213, 0x03), ], // 199 [ (1, 218, 0x02), (22, 218, 0x03), (1, 219, 0x02), (22, 219, 0x03), (1, 238, 0x02), (22, 238, 0x03), (1, 240, 0x02), (22, 240, 0x03), (1, 242, 0x02), (22, 242, 0x03), (1, 243, 0x02), (22, 243, 0x03), (1, 255, 0x02), (22, 255, 0x03), (0, 203, 0x02), (0, 204, 0x02), ], // 200 [ (2, 218, 0x02), (9, 218, 0x02), (23, 218, 0x02), (40, 218, 0x03), (2, 219, 0x02), (9, 219, 0x02), (23, 219, 0x02), (40, 219, 0x03), (2, 238, 0x02), (9, 238, 0x02), (23, 238, 0x02), (40, 238, 0x03), (2, 240, 0x02), (9, 240, 0x02), (23, 240, 0x02), (40, 240, 0x03), ], // 201 [ (3, 218, 0x02), (6, 218, 0x02), (10, 218, 0x02), (15, 218, 0x02), (24, 218, 0x02), (31, 218, 0x02), (41, 218, 0x02), (56, 218, 0x03), (3, 219, 0x02), (6, 219, 0x02), (10, 219, 0x02), (15, 219, 0x02), (24, 219, 0x02), (31, 219, 0x02), (41, 219, 0x02), (56, 219, 0x03), ], // 202 [ (3, 238, 0x02), (6, 238, 0x02), (10, 238, 0x02), (15, 238, 0x02), (24, 238, 0x02), (31, 238, 0x02), (41, 238, 0x02), (56, 238, 0x03), (3, 240, 0x02), (6, 240, 0x02), (10, 240, 0x02), (15, 240, 0x02), (24, 240, 0x02), (31, 240, 0x02), (41, 240, 0x02), (56, 240, 0x03), ], // 203 [ (2, 242, 0x02), (9, 242, 0x02), (23, 242, 0x02), (40, 242, 0x03), (2, 243, 0x02), (9, 243, 0x02), (23, 243, 0x02), (40, 243, 0x03), (2, 255, 0x02), (9, 255, 0x02), (23, 255, 0x02), (40, 255, 0x03), (1, 203, 0x02), (22, 203, 0x03), (1, 204, 0x02), (22, 204, 0x03), ], // 204 [ (3, 242, 0x02), (6, 242, 0x02), (10, 242, 0x02), (15, 242, 0x02), (24, 242, 0x02), (31, 242, 0x02), (41, 242, 0x02), (56, 242, 0x03), (3, 243, 0x02), (6, 243, 0x02), (10, 243, 0x02), (15, 243, 0x02), (24, 243, 0x02), (31, 243, 0x02), (41, 243, 0x02), (56, 243, 0x03), ], // 205 [ (3, 255, 0x02), (6, 255, 0x02), (10, 255, 0x02), (15, 255, 0x02), (24, 255, 0x02), (31, 255, 0x02), (41, 255, 0x02), (56, 255, 0x03), (2, 203, 0x02), (9, 203, 0x02), (23, 203, 0x02), (40, 203, 0x03), (2, 204, 0x02), (9, 204, 0x02), (23, 204, 0x02), (40, 204, 0x03), ], // 206 [ (3, 203, 0x02), (6, 203, 0x02), (10, 203, 0x02), (15, 203, 0x02), (24, 203, 0x02), (31, 203, 0x02), (41, 203, 0x02), (56, 203, 0x03), (3, 204, 0x02), (6, 204, 0x02), (10, 204, 0x02), (15, 204, 0x02), (24, 204, 0x02), (31, 204, 0x02), (41, 204, 0x02), (56, 204, 0x03), ], // 207 [ (211, 0, 0x00), (212, 0, 0x00), (214, 0, 0x00), (215, 0, 0x00), (218, 0, 0x00), (219, 0, 0x00), (221, 0, 0x00), (222, 0, 0x00), (226, 0, 0x00), (228, 0, 0x00), (232, 0, 0x00), (235, 0, 0x00), (240, 0, 0x00), (243, 0, 0x00), (247, 0, 0x00), (250, 0, 0x00), ], // 208 [ (0, 211, 0x02), (0, 212, 0x02), (0, 214, 0x02), (0, 221, 0x02), (0, 222, 0x02), (0, 223, 0x02), (0, 241, 0x02), (0, 244, 0x02), (0, 245, 
0x02), (0, 246, 0x02), (0, 247, 0x02), (0, 248, 0x02), (0, 250, 0x02), (0, 251, 0x02), (0, 252, 0x02), (0, 253, 0x02), ], // 209 [ (1, 211, 0x02), (22, 211, 0x03), (1, 212, 0x02), (22, 212, 0x03), (1, 214, 0x02), (22, 214, 0x03), (1, 221, 0x02), (22, 221, 0x03), (1, 222, 0x02), (22, 222, 0x03), (1, 223, 0x02), (22, 223, 0x03), (1, 241, 0x02), (22, 241, 0x03), (1, 244, 0x02), (22, 244, 0x03), ], // 210 [ (2, 211, 0x02), (9, 211, 0x02), (23, 211, 0x02), (40, 211, 0x03), (2, 212, 0x02), (9, 212, 0x02), (23, 212, 0x02), (40, 212, 0x03), (2, 214, 0x02), (9, 214, 0x02), (23, 214, 0x02), (40, 214, 0x03), (2, 221, 0x02), (9, 221, 0x02), (23, 221, 0x02), (40, 221, 0x03), ], // 211 [ (3, 211, 0x02), (6, 211, 0x02), (10, 211, 0x02), (15, 211, 0x02), (24, 211, 0x02), (31, 211, 0x02), (41, 211, 0x02), (56, 211, 0x03), (3, 212, 0x02), (6, 212, 0x02), (10, 212, 0x02), (15, 212, 0x02), (24, 212, 0x02), (31, 212, 0x02), (41, 212, 0x02), (56, 212, 0x03), ], // 212 [ (3, 214, 0x02), (6, 214, 0x02), (10, 214, 0x02), (15, 214, 0x02), (24, 214, 0x02), (31, 214, 0x02), (41, 214, 0x02), (56, 214, 0x03), (3, 221, 0x02), (6, 221, 0x02), (10, 221, 0x02), (15, 221, 0x02), (24, 221, 0x02), (31, 221, 0x02), (41, 221, 0x02), (56, 221, 0x03), ], // 213 [ (2, 222, 0x02), (9, 222, 0x02), (23, 222, 0x02), (40, 222, 0x03), (2, 223, 0x02), (9, 223, 0x02), (23, 223, 0x02), (40, 223, 0x03), (2, 241, 0x02), (9, 241, 0x02), (23, 241, 0x02), (40, 241, 0x03), (2, 244, 0x02), (9, 244, 0x02), (23, 244, 0x02), (40, 244, 0x03), ], // 214 [ (3, 222, 0x02), (6, 222, 0x02), (10, 222, 0x02), (15, 222, 0x02), (24, 222, 0x02), (31, 222, 0x02), (41, 222, 0x02), (56, 222, 0x03), (3, 223, 0x02), (6, 223, 0x02), (10, 223, 0x02), (15, 223, 0x02), (24, 223, 0x02), (31, 223, 0x02), (41, 223, 0x02), (56, 223, 0x03), ], // 215 [ (3, 241, 0x02), (6, 241, 0x02), (10, 241, 0x02), (15, 241, 0x02), (24, 241, 0x02), (31, 241, 0x02), (41, 241, 0x02), (56, 241, 0x03), (3, 244, 0x02), (6, 244, 0x02), (10, 244, 0x02), (15, 244, 0x02), (24, 244, 0x02), (31, 244, 0x02), (41, 244, 0x02), (56, 244, 0x03), ], // 216 [ (1, 245, 0x02), (22, 245, 0x03), (1, 246, 0x02), (22, 246, 0x03), (1, 247, 0x02), (22, 247, 0x03), (1, 248, 0x02), (22, 248, 0x03), (1, 250, 0x02), (22, 250, 0x03), (1, 251, 0x02), (22, 251, 0x03), (1, 252, 0x02), (22, 252, 0x03), (1, 253, 0x02), (22, 253, 0x03), ], // 217 [ (2, 245, 0x02), (9, 245, 0x02), (23, 245, 0x02), (40, 245, 0x03), (2, 246, 0x02), (9, 246, 0x02), (23, 246, 0x02), (40, 246, 0x03), (2, 247, 0x02), (9, 247, 0x02), (23, 247, 0x02), (40, 247, 0x03), (2, 248, 0x02), (9, 248, 0x02), (23, 248, 0x02), (40, 248, 0x03), ], // 218 [ (3, 245, 0x02), (6, 245, 0x02), (10, 245, 0x02), (15, 245, 0x02), (24, 245, 0x02), (31, 245, 0x02), (41, 245, 0x02), (56, 245, 0x03), (3, 246, 0x02), (6, 246, 0x02), (10, 246, 0x02), (15, 246, 0x02), (24, 246, 0x02), (31, 246, 0x02), (41, 246, 0x02), (56, 246, 0x03), ], // 219 [ (3, 247, 0x02), (6, 247, 0x02), (10, 247, 0x02), (15, 247, 0x02), (24, 247, 0x02), (31, 247, 0x02), (41, 247, 0x02), (56, 247, 0x03), (3, 248, 0x02), (6, 248, 0x02), (10, 248, 0x02), (15, 248, 0x02), (24, 248, 0x02), (31, 248, 0x02), (41, 248, 0x02), (56, 248, 0x03), ], // 220 [ (2, 250, 0x02), (9, 250, 0x02), (23, 250, 0x02), (40, 250, 0x03), (2, 251, 0x02), (9, 251, 0x02), (23, 251, 0x02), (40, 251, 0x03), (2, 252, 0x02), (9, 252, 0x02), (23, 252, 0x02), (40, 252, 0x03), (2, 253, 0x02), (9, 253, 0x02), (23, 253, 0x02), (40, 253, 0x03), ], // 221 [ (3, 250, 0x02), (6, 250, 0x02), (10, 250, 0x02), (15, 250, 0x02), (24, 250, 0x02), (31, 
250, 0x02), (41, 250, 0x02), (56, 250, 0x03), (3, 251, 0x02), (6, 251, 0x02), (10, 251, 0x02), (15, 251, 0x02), (24, 251, 0x02), (31, 251, 0x02), (41, 251, 0x02), (56, 251, 0x03), ], // 222 [ (3, 252, 0x02), (6, 252, 0x02), (10, 252, 0x02), (15, 252, 0x02), (24, 252, 0x02), (31, 252, 0x02), (41, 252, 0x02), (56, 252, 0x03), (3, 253, 0x02), (6, 253, 0x02), (10, 253, 0x02), (15, 253, 0x02), (24, 253, 0x02), (31, 253, 0x02), (41, 253, 0x02), (56, 253, 0x03), ], // 223 [ (0, 254, 0x02), (227, 0, 0x00), (229, 0, 0x00), (230, 0, 0x00), (233, 0, 0x00), (234, 0, 0x00), (236, 0, 0x00), (237, 0, 0x00), (241, 0, 0x00), (242, 0, 0x00), (244, 0, 0x00), (245, 0, 0x00), (248, 0, 0x00), (249, 0, 0x00), (251, 0, 0x00), (252, 0, 0x00), ], // 224 [ (1, 254, 0x02), (22, 254, 0x03), (0, 2, 0x02), (0, 3, 0x02), (0, 4, 0x02), (0, 5, 0x02), (0, 6, 0x02), (0, 7, 0x02), (0, 8, 0x02), (0, 11, 0x02), (0, 12, 0x02), (0, 14, 0x02), (0, 15, 0x02), (0, 16, 0x02), (0, 17, 0x02), (0, 18, 0x02), ], // 225 [ (2, 254, 0x02), (9, 254, 0x02), (23, 254, 0x02), (40, 254, 0x03), (1, 2, 0x02), (22, 2, 0x03), (1, 3, 0x02), (22, 3, 0x03), (1, 4, 0x02), (22, 4, 0x03), (1, 5, 0x02), (22, 5, 0x03), (1, 6, 0x02), (22, 6, 0x03), (1, 7, 0x02), (22, 7, 0x03), ], // 226 [ (3, 254, 0x02), (6, 254, 0x02), (10, 254, 0x02), (15, 254, 0x02), (24, 254, 0x02), (31, 254, 0x02), (41, 254, 0x02), (56, 254, 0x03), (2, 2, 0x02), (9, 2, 0x02), (23, 2, 0x02), (40, 2, 0x03), (2, 3, 0x02), (9, 3, 0x02), (23, 3, 0x02), (40, 3, 0x03), ], // 227 [ (3, 2, 0x02), (6, 2, 0x02), (10, 2, 0x02), (15, 2, 0x02), (24, 2, 0x02), (31, 2, 0x02), (41, 2, 0x02), (56, 2, 0x03), (3, 3, 0x02), (6, 3, 0x02), (10, 3, 0x02), (15, 3, 0x02), (24, 3, 0x02), (31, 3, 0x02), (41, 3, 0x02), (56, 3, 0x03), ], // 228 [ (2, 4, 0x02), (9, 4, 0x02), (23, 4, 0x02), (40, 4, 0x03), (2, 5, 0x02), (9, 5, 0x02), (23, 5, 0x02), (40, 5, 0x03), (2, 6, 0x02), (9, 6, 0x02), (23, 6, 0x02), (40, 6, 0x03), (2, 7, 0x02), (9, 7, 0x02), (23, 7, 0x02), (40, 7, 0x03), ], // 229 [ (3, 4, 0x02), (6, 4, 0x02), (10, 4, 0x02), (15, 4, 0x02), (24, 4, 0x02), (31, 4, 0x02), (41, 4, 0x02), (56, 4, 0x03), (3, 5, 0x02), (6, 5, 0x02), (10, 5, 0x02), (15, 5, 0x02), (24, 5, 0x02), (31, 5, 0x02), (41, 5, 0x02), (56, 5, 0x03), ], // 230 [ (3, 6, 0x02), (6, 6, 0x02), (10, 6, 0x02), (15, 6, 0x02), (24, 6, 0x02), (31, 6, 0x02), (41, 6, 0x02), (56, 6, 0x03), (3, 7, 0x02), (6, 7, 0x02), (10, 7, 0x02), (15, 7, 0x02), (24, 7, 0x02), (31, 7, 0x02), (41, 7, 0x02), (56, 7, 0x03), ], // 231 [ (1, 8, 0x02), (22, 8, 0x03), (1, 11, 0x02), (22, 11, 0x03), (1, 12, 0x02), (22, 12, 0x03), (1, 14, 0x02), (22, 14, 0x03), (1, 15, 0x02), (22, 15, 0x03), (1, 16, 0x02), (22, 16, 0x03), (1, 17, 0x02), (22, 17, 0x03), (1, 18, 0x02), (22, 18, 0x03), ], // 232 [ (2, 8, 0x02), (9, 8, 0x02), (23, 8, 0x02), (40, 8, 0x03), (2, 11, 0x02), (9, 11, 0x02), (23, 11, 0x02), (40, 11, 0x03), (2, 12, 0x02), (9, 12, 0x02), (23, 12, 0x02), (40, 12, 0x03), (2, 14, 0x02), (9, 14, 0x02), (23, 14, 0x02), (40, 14, 0x03), ], // 233 [ (3, 8, 0x02), (6, 8, 0x02), (10, 8, 0x02), (15, 8, 0x02), (24, 8, 0x02), (31, 8, 0x02), (41, 8, 0x02), (56, 8, 0x03), (3, 11, 0x02), (6, 11, 0x02), (10, 11, 0x02), (15, 11, 0x02), (24, 11, 0x02), (31, 11, 0x02), (41, 11, 0x02), (56, 11, 0x03), ], // 234 [ (3, 12, 0x02), (6, 12, 0x02), (10, 12, 0x02), (15, 12, 0x02), (24, 12, 0x02), (31, 12, 0x02), (41, 12, 0x02), (56, 12, 0x03), (3, 14, 0x02), (6, 14, 0x02), (10, 14, 0x02), (15, 14, 0x02), (24, 14, 0x02), (31, 14, 0x02), (41, 14, 0x02), (56, 14, 0x03), ], // 235 [ (2, 15, 0x02), (9, 15, 0x02), 
(23, 15, 0x02), (40, 15, 0x03), (2, 16, 0x02), (9, 16, 0x02), (23, 16, 0x02), (40, 16, 0x03), (2, 17, 0x02), (9, 17, 0x02), (23, 17, 0x02), (40, 17, 0x03), (2, 18, 0x02), (9, 18, 0x02), (23, 18, 0x02), (40, 18, 0x03), ], // 236 [ (3, 15, 0x02), (6, 15, 0x02), (10, 15, 0x02), (15, 15, 0x02), (24, 15, 0x02), (31, 15, 0x02), (41, 15, 0x02), (56, 15, 0x03), (3, 16, 0x02), (6, 16, 0x02), (10, 16, 0x02), (15, 16, 0x02), (24, 16, 0x02), (31, 16, 0x02), (41, 16, 0x02), (56, 16, 0x03), ], // 237 [ (3, 17, 0x02), (6, 17, 0x02), (10, 17, 0x02), (15, 17, 0x02), (24, 17, 0x02), (31, 17, 0x02), (41, 17, 0x02), (56, 17, 0x03), (3, 18, 0x02), (6, 18, 0x02), (10, 18, 0x02), (15, 18, 0x02), (24, 18, 0x02), (31, 18, 0x02), (41, 18, 0x02), (56, 18, 0x03), ], // 238 [ (0, 19, 0x02), (0, 20, 0x02), (0, 21, 0x02), (0, 23, 0x02), (0, 24, 0x02), (0, 25, 0x02), (0, 26, 0x02), (0, 27, 0x02), (0, 28, 0x02), (0, 29, 0x02), (0, 30, 0x02), (0, 31, 0x02), (0, 127, 0x02), (0, 220, 0x02), (0, 249, 0x02), (253, 0, 0x00), ], // 239 [ (1, 19, 0x02), (22, 19, 0x03), (1, 20, 0x02), (22, 20, 0x03), (1, 21, 0x02), (22, 21, 0x03), (1, 23, 0x02), (22, 23, 0x03), (1, 24, 0x02), (22, 24, 0x03), (1, 25, 0x02), (22, 25, 0x03), (1, 26, 0x02), (22, 26, 0x03), (1, 27, 0x02), (22, 27, 0x03), ], // 240 [ (2, 19, 0x02), (9, 19, 0x02), (23, 19, 0x02), (40, 19, 0x03), (2, 20, 0x02), (9, 20, 0x02), (23, 20, 0x02), (40, 20, 0x03), (2, 21, 0x02), (9, 21, 0x02), (23, 21, 0x02), (40, 21, 0x03), (2, 23, 0x02), (9, 23, 0x02), (23, 23, 0x02), (40, 23, 0x03), ], // 241 [ (3, 19, 0x02), (6, 19, 0x02), (10, 19, 0x02), (15, 19, 0x02), (24, 19, 0x02), (31, 19, 0x02), (41, 19, 0x02), (56, 19, 0x03), (3, 20, 0x02), (6, 20, 0x02), (10, 20, 0x02), (15, 20, 0x02), (24, 20, 0x02), (31, 20, 0x02), (41, 20, 0x02), (56, 20, 0x03), ], // 242 [ (3, 21, 0x02), (6, 21, 0x02), (10, 21, 0x02), (15, 21, 0x02), (24, 21, 0x02), (31, 21, 0x02), (41, 21, 0x02), (56, 21, 0x03), (3, 23, 0x02), (6, 23, 0x02), (10, 23, 0x02), (15, 23, 0x02), (24, 23, 0x02), (31, 23, 0x02), (41, 23, 0x02), (56, 23, 0x03), ], // 243 [ (2, 24, 0x02), (9, 24, 0x02), (23, 24, 0x02), (40, 24, 0x03), (2, 25, 0x02), (9, 25, 0x02), (23, 25, 0x02), (40, 25, 0x03), (2, 26, 0x02), (9, 26, 0x02), (23, 26, 0x02), (40, 26, 0x03), (2, 27, 0x02), (9, 27, 0x02), (23, 27, 0x02), (40, 27, 0x03), ], // 244 [ (3, 24, 0x02), (6, 24, 0x02), (10, 24, 0x02), (15, 24, 0x02), (24, 24, 0x02), (31, 24, 0x02), (41, 24, 0x02), (56, 24, 0x03), (3, 25, 0x02), (6, 25, 0x02), (10, 25, 0x02), (15, 25, 0x02), (24, 25, 0x02), (31, 25, 0x02), (41, 25, 0x02), (56, 25, 0x03), ], // 245 [ (3, 26, 0x02), (6, 26, 0x02), (10, 26, 0x02), (15, 26, 0x02), (24, 26, 0x02), (31, 26, 0x02), (41, 26, 0x02), (56, 26, 0x03), (3, 27, 0x02), (6, 27, 0x02), (10, 27, 0x02), (15, 27, 0x02), (24, 27, 0x02), (31, 27, 0x02), (41, 27, 0x02), (56, 27, 0x03), ], // 246 [ (1, 28, 0x02), (22, 28, 0x03), (1, 29, 0x02), (22, 29, 0x03), (1, 30, 0x02), (22, 30, 0x03), (1, 31, 0x02), (22, 31, 0x03), (1, 127, 0x02), (22, 127, 0x03), (1, 220, 0x02), (22, 220, 0x03), (1, 249, 0x02), (22, 249, 0x03), (254, 0, 0x00), (255, 0, 0x00), ], // 247 [ (2, 28, 0x02), (9, 28, 0x02), (23, 28, 0x02), (40, 28, 0x03), (2, 29, 0x02), (9, 29, 0x02), (23, 29, 0x02), (40, 29, 0x03), (2, 30, 0x02), (9, 30, 0x02), (23, 30, 0x02), (40, 30, 0x03), (2, 31, 0x02), (9, 31, 0x02), (23, 31, 0x02), (40, 31, 0x03), ], // 248 [ (3, 28, 0x02), (6, 28, 0x02), (10, 28, 0x02), (15, 28, 0x02), (24, 28, 0x02), (31, 28, 0x02), (41, 28, 0x02), (56, 28, 0x03), (3, 29, 0x02), (6, 29, 0x02), (10, 29, 0x02), (15, 
29, 0x02), (24, 29, 0x02), (31, 29, 0x02), (41, 29, 0x02), (56, 29, 0x03), ],
    // 249
    [ (3, 30, 0x02), (6, 30, 0x02), (10, 30, 0x02), (15, 30, 0x02), (24, 30, 0x02), (31, 30, 0x02), (41, 30, 0x02), (56, 30, 0x03), (3, 31, 0x02), (6, 31, 0x02), (10, 31, 0x02), (15, 31, 0x02), (24, 31, 0x02), (31, 31, 0x02), (41, 31, 0x02), (56, 31, 0x03), ],
    // 250
    [ (2, 127, 0x02), (9, 127, 0x02), (23, 127, 0x02), (40, 127, 0x03), (2, 220, 0x02), (9, 220, 0x02), (23, 220, 0x02), (40, 220, 0x03), (2, 249, 0x02), (9, 249, 0x02), (23, 249, 0x02), (40, 249, 0x03), (0, 10, 0x02), (0, 13, 0x02), (0, 22, 0x02), (0, 0, 0x04), ],
    // 251
    [ (3, 127, 0x02), (6, 127, 0x02), (10, 127, 0x02), (15, 127, 0x02), (24, 127, 0x02), (31, 127, 0x02), (41, 127, 0x02), (56, 127, 0x03), (3, 220, 0x02), (6, 220, 0x02), (10, 220, 0x02), (15, 220, 0x02), (24, 220, 0x02), (31, 220, 0x02), (41, 220, 0x02), (56, 220, 0x03), ],
    // 252
    [ (3, 249, 0x02), (6, 249, 0x02), (10, 249, 0x02), (15, 249, 0x02), (24, 249, 0x02), (31, 249, 0x02), (41, 249, 0x02), (56, 249, 0x03), (1, 10, 0x02), (22, 10, 0x03), (1, 13, 0x02), (22, 13, 0x03), (1, 22, 0x02), (22, 22, 0x03), (0, 0, 0x04), (0, 0, 0x05), ],
    // 253
    [ (2, 10, 0x02), (9, 10, 0x02), (23, 10, 0x02), (40, 10, 0x03), (2, 13, 0x02), (9, 13, 0x02), (23, 13, 0x02), (40, 13, 0x03), (2, 22, 0x02), (9, 22, 0x02), (23, 22, 0x02), (40, 22, 0x03), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x05), ],
    // 254
    [ (3, 10, 0x02), (6, 10, 0x02), (10, 10, 0x02), (15, 10, 0x02), (24, 10, 0x02), (31, 10, 0x02), (41, 10, 0x02), (56, 10, 0x03), (3, 13, 0x02), (6, 13, 0x02), (10, 13, 0x02), (15, 13, 0x02), (24, 13, 0x02), (31, 13, 0x02), (41, 13, 0x02), (56, 13, 0x03), ],
    // 255
    [ (3, 22, 0x02), (6, 22, 0x02), (10, 22, 0x02), (15, 22, 0x02), (24, 22, 0x02), (31, 22, 0x02), (41, 22, 0x02), (56, 22, 0x03), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x04), (0, 0, 0x05), ],
];
h2-0.1.26/src/hpack/mod.rs010066400017500001750000000003771347362176000133540ustar0000000000000000mod encoder;
mod decoder;
pub(crate) mod header;
mod huffman;
mod table;

#[cfg(test)]
mod test;

pub use self::decoder::{Decoder, DecoderError, NeedMore};
pub use self::encoder::{Encode, EncodeState, Encoder, EncoderError};
pub use self::header::Header;
h2-0.1.26/src/hpack/table.rs010066400017500001750000000572111346707370100136630ustar0000000000000000use super::Header;

use fnv::FnvHasher;
use http::header;
use http::method::Method;

use std::{cmp, mem, usize};
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};

/// HPACK encoder table
#[derive(Debug)]
pub struct Table {
    mask: usize,
    indices: Vec<Option<Pos>>,
    slots: VecDeque<Slot>,
    inserted: usize,
    // Size is in bytes
    size: usize,
    max_size: usize,
}

#[derive(Debug)]
pub enum Index {
    // The header is already fully indexed
    Indexed(usize, Header),
    // The name is indexed, but not the value
    Name(usize, Header),
    // The full header has been inserted into the table.
    Inserted(usize),
    // Only the value has been inserted (hpack table idx, slots idx)
    InsertedValue(usize, usize),
    // The header is not indexed by this table
    NotIndexed(Header),
}

#[derive(Debug)]
struct Slot {
    hash: HashValue,
    header: Header,
    next: Option<usize>,
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
struct Pos {
    index: usize,
    hash: HashValue,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct HashValue(usize);

const MAX_SIZE: usize = (1 << 16);
const DYN_OFFSET: usize = 62;

macro_rules!
probe_loop { ($probe_var: ident < $len: expr, $body: expr) => { debug_assert!($len > 0); loop { if $probe_var < $len { $body $probe_var += 1; } else { $probe_var = 0; } } }; } impl Table { pub fn new(max_size: usize, capacity: usize) -> Table { if capacity == 0 { Table { mask: 0, indices: vec![], slots: VecDeque::new(), inserted: 0, size: 0, max_size: max_size, } } else { let capacity = cmp::max(to_raw_capacity(capacity).next_power_of_two(), 8); Table { mask: capacity.wrapping_sub(1), indices: vec![None; capacity], slots: VecDeque::with_capacity(usable_capacity(capacity)), inserted: 0, size: 0, max_size: max_size, } } } #[inline] pub fn capacity(&self) -> usize { usable_capacity(self.indices.len()) } pub fn max_size(&self) -> usize { self.max_size } /// Gets the header stored in the table pub fn resolve<'a>(&'a self, index: &'a Index) -> &'a Header { use self::Index::*; match *index { Indexed(_, ref h) => h, Name(_, ref h) => h, Inserted(idx) => &self.slots[idx].header, InsertedValue(_, idx) => &self.slots[idx].header, NotIndexed(ref h) => h, } } pub fn resolve_idx(&self, index: &Index) -> usize { use self::Index::*; match *index { Indexed(idx, ..) => idx, Name(idx, ..) => idx, Inserted(idx) => idx + DYN_OFFSET, InsertedValue(_name_idx, slot_idx) => slot_idx + DYN_OFFSET, NotIndexed(_) => panic!("cannot resolve index"), } } /// Index the header in the HPACK table. pub fn index(&mut self, header: Header) -> Index { // Check the static table let statik = index_static(&header); // Don't index certain headers. This logic is borrowed from nghttp2. if header.skip_value_index() { // Right now, if this is true, the header name is always in the // static table. At some point in the future, this might not be true // and this logic will need to be updated. debug_assert!( statik.is_some(), "skip_value_index requires a static name", ); return Index::new(statik, header); } // If the header is already indexed by the static table, return that if let Some((n, true)) = statik { return Index::Indexed(n, header); } // Don't index large headers if header.len() * 4 > self.max_size * 3 { return Index::new(statik, header); } self.index_dynamic(header, statik) } fn index_dynamic(&mut self, header: Header, statik: Option<(usize, bool)>) -> Index { debug_assert!(self.assert_valid_state("one")); if header.len() + self.size < self.max_size || !header.is_sensitive() { // Only grow internal storage if needed self.reserve_one(); } if self.indices.is_empty() { // If `indices` is not empty, then it is impossible for all // `indices` entries to be `Some`. So, we only need to check for the // empty case. return Index::new(statik, header); } let hash = hash_header(&header); let desired_pos = desired_pos(self.mask, hash); let mut probe = desired_pos; let mut dist = 0; // Start at the ideal position, checking all slots probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { // The slot is already occupied, but check if it has a lower // displacement. 
let their_dist = probe_distance(self.mask, pos.hash, probe); let slot_idx = pos.index.wrapping_add(self.inserted); if their_dist < dist { // Index robinhood return self.index_vacant(header, hash, dist, probe, statik); } else if pos.hash == hash && self.slots[slot_idx].header.name() == header.name() { // Matching name, check values return self.index_occupied(header, hash, pos.index, statik.map(|(n, _)| n)); } } else { return self.index_vacant(header, hash, dist, probe, statik); } dist += 1; }); } fn index_occupied( &mut self, header: Header, hash: HashValue, mut index: usize, statik: Option, ) -> Index { debug_assert!(self.assert_valid_state("top")); // There already is a match for the given header name. Check if a value // matches. The header will also only be inserted if the table is not at // capacity. loop { // Compute the real index into the VecDeque let real_idx = index.wrapping_add(self.inserted); if self.slots[real_idx].header.value_eq(&header) { // We have a full match! return Index::Indexed(real_idx + DYN_OFFSET, header); } if let Some(next) = self.slots[real_idx].next { index = next; continue; } if header.is_sensitive() { // Should we assert this? // debug_assert!(statik.is_none()); return Index::Name(real_idx + DYN_OFFSET, header); } self.update_size(header.len(), Some(index)); // Insert the new header self.insert(header, hash); // Recompute real_idx as it just changed. let new_real_idx = index.wrapping_add(self.inserted); // The previous node in the linked list may have gotten evicted // while making room for this header. if new_real_idx < self.slots.len() { let idx = 0usize.wrapping_sub(self.inserted); self.slots[new_real_idx].next = Some(idx); } debug_assert!(self.assert_valid_state("bottom")); // Even if the previous header was evicted, we can still reference // it when inserting the new one... return if let Some(n) = statik { // If name is in static table, use it instead Index::InsertedValue(n, 0) } else { Index::InsertedValue(real_idx + DYN_OFFSET, 0) }; } } fn index_vacant( &mut self, header: Header, hash: HashValue, mut dist: usize, mut probe: usize, statik: Option<(usize, bool)>, ) -> Index { if header.is_sensitive() { return Index::new(statik, header); } debug_assert!(self.assert_valid_state("top")); debug_assert!(dist == 0 || self.indices[probe.wrapping_sub(1) & self.mask].is_some()); // Passing in `usize::MAX` for prev_idx since there is no previous // header in this case. 
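// `update_size` below charges the new header's length against `max_size` and
// evicts entries from the back of `slots` (the oldest ones) until the table
// fits its size budget again; it returns `true` when anything was evicted.
// In that case the probe position chosen above may no longer be ideal, so
// the loop walks `probe` back toward the desired bucket as long as doing so
// does not displace a better-placed entry.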
if self.update_size(header.len(), None) { while dist != 0 { let back = probe.wrapping_sub(1) & self.mask; if let Some(pos) = self.indices[back] { let their_dist = probe_distance(self.mask, pos.hash, back); if their_dist < (dist - 1) { probe = back; dist -= 1; } else { break; } } else { probe = back; dist -= 1; } } } debug_assert!(self.assert_valid_state("after update")); self.insert(header, hash); let pos_idx = 0usize.wrapping_sub(self.inserted); let prev = mem::replace( &mut self.indices[probe], Some(Pos { index: pos_idx, hash: hash, }), ); if let Some(mut prev) = prev { // Shift forward let mut probe = probe + 1; probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe as usize]; prev = match mem::replace(pos, Some(prev)) { Some(p) => p, None => break, }; }); } debug_assert!(self.assert_valid_state("bottom")); if let Some((n, _)) = statik { Index::InsertedValue(n, 0) } else { Index::Inserted(0) } } fn insert(&mut self, header: Header, hash: HashValue) { self.inserted = self.inserted.wrapping_add(1); self.slots.push_front(Slot { hash: hash, header: header, next: None, }); } pub fn resize(&mut self, size: usize) { self.max_size = size; if size == 0 { self.size = 0; for i in &mut self.indices { *i = None; } self.slots.clear(); self.inserted = 0; } else { self.converge(None); } } fn update_size(&mut self, len: usize, prev_idx: Option) -> bool { self.size += len; self.converge(prev_idx) } fn converge(&mut self, prev_idx: Option) -> bool { let mut ret = false; while self.size > self.max_size { ret = true; self.evict(prev_idx); } ret } fn evict(&mut self, prev_idx: Option) { let pos_idx = (self.slots.len() - 1).wrapping_sub(self.inserted); debug_assert!(!self.slots.is_empty()); debug_assert!(self.assert_valid_state("one")); // Remove the header let slot = self.slots.pop_back().unwrap(); let mut probe = desired_pos(self.mask, slot.hash); // Update the size self.size -= slot.header.len(); debug_assert_eq!( self.indices .iter() .filter_map(|p| *p) .filter(|p| p.index == pos_idx) .count(), 1 ); // Find the associated position probe_loop!(probe < self.indices.len(), { debug_assert!(!self.indices[probe].is_none()); let mut pos = self.indices[probe].unwrap(); if pos.index == pos_idx { if let Some(idx) = slot.next { pos.index = idx; self.indices[probe] = Some(pos); } else if Some(pos.index) == prev_idx { pos.index = 0usize.wrapping_sub(self.inserted + 1); self.indices[probe] = Some(pos); } else { self.indices[probe] = None; self.remove_phase_two(probe); } break; } }); debug_assert!(self.assert_valid_state("two")); } // Shifts all indices that were displaced by the header that has just been // removed. fn remove_phase_two(&mut self, probe: usize) { let mut last_probe = probe; let mut probe = probe + 1; probe_loop!(probe < self.indices.len(), { if let Some(pos) = self.indices[probe] { if probe_distance(self.mask, pos.hash, probe) > 0 { self.indices[last_probe] = self.indices[probe].take(); } else { break; } } else { break; } last_probe = probe; }); debug_assert!(self.assert_valid_state("two")); } fn reserve_one(&mut self) { let len = self.slots.len(); if len == self.capacity() { if len == 0 { let new_raw_cap = 8; self.mask = 8 - 1; self.indices = vec![None; new_raw_cap]; } else { let raw_cap = self.indices.len(); self.grow(raw_cap << 1); } } } #[inline] fn grow(&mut self, new_raw_cap: usize) { // This path can never be reached when handling the first allocation in // the map. 
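// `grow` doubles the raw bucket count (`reserve_one` passes `raw_cap << 1`)
// and rebuilds `indices`: it scans for the first entry that already sits in
// its ideal bucket and reinserts the old positions starting from there, so
// that `reinsert_entry_in_order` can drop each position into the first empty
// bucket it finds without stealing from better-placed entries.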
debug_assert!(self.assert_valid_state("top")); // find first ideally placed element -- start of cluster let mut first_ideal = 0; for (i, pos) in self.indices.iter().enumerate() { if let Some(pos) = *pos { if 0 == probe_distance(self.mask, pos.hash, i) { first_ideal = i; break; } } } // visit the entries in an order where we can simply reinsert them // into self.indices without any bucket stealing. let old_indices = mem::replace(&mut self.indices, vec![None; new_raw_cap]); self.mask = new_raw_cap.wrapping_sub(1); for &pos in &old_indices[first_ideal..] { self.reinsert_entry_in_order(pos); } for &pos in &old_indices[..first_ideal] { self.reinsert_entry_in_order(pos); } debug_assert!(self.assert_valid_state("bottom")); } fn reinsert_entry_in_order(&mut self, pos: Option) { if let Some(pos) = pos { // Find first empty bucket and insert there let mut probe = desired_pos(self.mask, pos.hash); probe_loop!(probe < self.indices.len(), { if self.indices[probe].is_none() { // empty bucket, insert here self.indices[probe] = Some(pos); return; } debug_assert!({ let them = self.indices[probe].unwrap(); let their_distance = probe_distance(self.mask, them.hash, probe); let our_distance = probe_distance(self.mask, pos.hash, probe); their_distance >= our_distance }); }); } } #[cfg(not(test))] fn assert_valid_state(&self, _: &'static str) -> bool { true } #[cfg(test)] fn assert_valid_state(&self, _msg: &'static str) -> bool { /* // Checks that the internal map structure is valid // // Ensure all hash codes in indices match the associated slot for pos in &self.indices { if let Some(pos) = *pos { let real_idx = pos.index.wrapping_add(self.inserted); if real_idx.wrapping_add(1) != 0 { assert!(real_idx < self.slots.len(), "out of index; real={}; len={}, msg={}", real_idx, self.slots.len(), msg); assert_eq!(pos.hash, self.slots[real_idx].hash, "index hash does not match slot; msg={}", msg); } } } // Every index is only available once for i in 0..self.indices.len() { if self.indices[i].is_none() { continue; } for j in i+1..self.indices.len() { assert_ne!(self.indices[i], self.indices[j], "duplicate indices; msg={}", msg); } } for (index, slot) in self.slots.iter().enumerate() { let mut indexed = None; // First, see if the slot is indexed for (i, pos) in self.indices.iter().enumerate() { if let Some(pos) = *pos { let real_idx = pos.index.wrapping_add(self.inserted); if real_idx == index { indexed = Some(i); // Already know that there is no dup, so break break; } } } if let Some(actual) = indexed { // Ensure that it is accessible.. let desired = desired_pos(self.mask, slot.hash); let mut probe = desired; let mut dist = 0; probe_loop!(probe < self.indices.len(), { assert!(self.indices[probe].is_some(), "unexpected empty slot; probe={}; hash={:?}; msg={}", probe, slot.hash, msg); let pos = self.indices[probe].unwrap(); let their_dist = probe_distance(self.mask, pos.hash, probe); let real_idx = pos.index.wrapping_add(self.inserted); if real_idx == index { break; } assert!(dist <= their_dist, "could not find entry; actual={}; desired={};" + "probe={}, dist={}; their_dist={}; index={}; msg={}", actual, desired, probe, dist, their_dist, index.wrapping_sub(self.inserted), msg); dist += 1; }); } else { // There is exactly one next link let cnt = self.slots.iter().map(|s| s.next) .filter(|n| *n == Some(index.wrapping_sub(self.inserted))) .count(); assert_eq!(1, cnt, "more than one node pointing here; msg={}", msg); } } */ // TODO: Ensure linked lists are correct: no cycles, etc... 
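// The block above is kept only as commented-out reference checks; in test
// builds this validator currently just reports success (the `true` below),
// and the `debug_assert!(self.assert_valid_state(..))` calls scattered
// through this file compile away entirely when debug assertions are disabled.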
true } } #[cfg(test)] impl Table { /// Returns the number of headers in the table pub fn len(&self) -> usize { self.slots.len() } /// Returns the table size pub fn size(&self) -> usize { self.size } } impl Index { fn new(v: Option<(usize, bool)>, e: Header) -> Index { match v { None => Index::NotIndexed(e), Some((n, true)) => Index::Indexed(n, e), Some((n, false)) => Index::Name(n, e), } } } #[inline] fn usable_capacity(cap: usize) -> usize { cap - cap / 4 } #[inline] fn to_raw_capacity(n: usize) -> usize { n + n / 3 } #[inline] fn desired_pos(mask: usize, hash: HashValue) -> usize { (hash.0 & mask) as usize } #[inline] fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { current.wrapping_sub(desired_pos(mask, hash)) & mask as usize } fn hash_header(header: &Header) -> HashValue { const MASK: u64 = (MAX_SIZE as u64) - 1; let mut h = FnvHasher::default(); header.name().hash(&mut h); HashValue((h.finish() & MASK) as usize) } /// Checks the static table for the header. If found, returns the index and a /// boolean representing if the value matched as well. fn index_static(header: &Header) -> Option<(usize, bool)> { match *header { Header::Field { ref name, ref value, } => match *name { header::ACCEPT_CHARSET => Some((15, false)), header::ACCEPT_ENCODING => if value == "gzip, deflate" { Some((16, true)) } else { Some((16, false)) }, header::ACCEPT_LANGUAGE => Some((17, false)), header::ACCEPT_RANGES => Some((18, false)), header::ACCEPT => Some((19, false)), header::ACCESS_CONTROL_ALLOW_ORIGIN => Some((20, false)), header::AGE => Some((21, false)), header::ALLOW => Some((22, false)), header::AUTHORIZATION => Some((23, false)), header::CACHE_CONTROL => Some((24, false)), header::CONTENT_DISPOSITION => Some((25, false)), header::CONTENT_ENCODING => Some((26, false)), header::CONTENT_LANGUAGE => Some((27, false)), header::CONTENT_LENGTH => Some((28, false)), header::CONTENT_LOCATION => Some((29, false)), header::CONTENT_RANGE => Some((30, false)), header::CONTENT_TYPE => Some((31, false)), header::COOKIE => Some((32, false)), header::DATE => Some((33, false)), header::ETAG => Some((34, false)), header::EXPECT => Some((35, false)), header::EXPIRES => Some((36, false)), header::FROM => Some((37, false)), header::HOST => Some((38, false)), header::IF_MATCH => Some((39, false)), header::IF_MODIFIED_SINCE => Some((40, false)), header::IF_NONE_MATCH => Some((41, false)), header::IF_RANGE => Some((42, false)), header::IF_UNMODIFIED_SINCE => Some((43, false)), header::LAST_MODIFIED => Some((44, false)), header::LINK => Some((45, false)), header::LOCATION => Some((46, false)), header::MAX_FORWARDS => Some((47, false)), header::PROXY_AUTHENTICATE => Some((48, false)), header::PROXY_AUTHORIZATION => Some((49, false)), header::RANGE => Some((50, false)), header::REFERER => Some((51, false)), header::REFRESH => Some((52, false)), header::RETRY_AFTER => Some((53, false)), header::SERVER => Some((54, false)), header::SET_COOKIE => Some((55, false)), header::STRICT_TRANSPORT_SECURITY => Some((56, false)), header::TRANSFER_ENCODING => Some((57, false)), header::USER_AGENT => Some((58, false)), header::VARY => Some((59, false)), header::VIA => Some((60, false)), header::WWW_AUTHENTICATE => Some((61, false)), _ => None, }, Header::Authority(_) => Some((1, false)), Header::Method(ref v) => match *v { Method::GET => Some((2, true)), Method::POST => Some((3, true)), _ => Some((2, false)), }, Header::Scheme(ref v) => match &**v { "http" => Some((6, true)), "https" => Some((7, true)), _ => Some((6, 
false)), },
        Header::Path(ref v) => match &**v {
            "/" => Some((4, true)),
            "/index.html" => Some((5, true)),
            _ => Some((4, false)),
        },
        Header::Status(ref v) => match u16::from(*v) {
            200 => Some((8, true)),
            204 => Some((9, true)),
            206 => Some((10, true)),
            304 => Some((11, true)),
            400 => Some((12, true)),
            404 => Some((13, true)),
            500 => Some((14, true)),
            _ => Some((8, false)),
        },
    }
}
h2-0.1.26/src/hpack/test/fixture.rs010066400017500001750000000652201351641525700152410ustar0000000000000000extern crate bytes;
extern crate hex;
extern crate serde_json;

use hpack::{Decoder, Encoder, Header};

use self::bytes::BytesMut;
use self::hex::FromHex;
use self::serde_json::Value;

use std::fs::File;
use std::io::Cursor;
use std::io::prelude::*;
use std::path::Path;
use std::str;

fn test_fixture(path: &Path) {
    let mut file = File::open(path).unwrap();
    let mut data = String::new();
    file.read_to_string(&mut data).unwrap();

    let story: Value = serde_json::from_str(&data).unwrap();
    test_story(story);
}

fn test_story(story: Value) {
    let story = story.as_object().unwrap();

    if let Some(cases) = story.get("cases") {
        let mut cases: Vec<_> = cases
            .as_array()
            .unwrap()
            .iter()
            .map(|case| {
                let case = case.as_object().unwrap();

                let size = case.get("header_table_size")
                    .map(|v| v.as_u64().unwrap() as usize);

                let wire = case.get("wire").unwrap().as_str().unwrap();
                let wire: Vec<u8> = FromHex::from_hex(wire.as_bytes()).unwrap();

                let expect: Vec<_> = case.get("headers")
                    .unwrap()
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|h| {
                        let h = h.as_object().unwrap();
                        let (name, val) = h.iter().next().unwrap();
                        (name.clone(), val.as_str().unwrap().to_string())
                    })
                    .collect();

                Case {
                    seqno: case.get("seqno").unwrap().as_u64().unwrap(),
                    wire: wire,
                    expect: expect,
                    header_table_size: size,
                }
            })
            .collect();

        cases.sort_by_key(|c| c.seqno);

        let mut decoder = Decoder::default();

        // First, check decoding against the fixtures
        for case in &cases {
            let mut expect = case.expect.clone();

            if let Some(size) = case.header_table_size {
                decoder.queue_size_update(size);
            }

            decoder
                .decode(&mut Cursor::new(&mut case.wire.clone().into()), |e| {
                    let (name, value) = expect.remove(0);
                    assert_eq!(name, key_str(&e));
                    assert_eq!(value, value_str(&e));
                })
                .unwrap();

            assert_eq!(0, expect.len());
        }

        let mut encoder = Encoder::default();
        let mut decoder = Decoder::default();

        // Now, encode the headers
        for case in &cases {
            let mut buf = BytesMut::with_capacity(64 * 1024);

            if let Some(size) = case.header_table_size {
                encoder.update_max_size(size);
                decoder.queue_size_update(size);
            }

            let mut input: Vec<_> = case.expect
                .iter()
                .map(|&(ref name, ref value)| {
                    Header::new(name.clone().into(), value.clone().into())
                        .unwrap()
                        .into()
                })
                .collect();

            encoder.encode(None, &mut input.clone().into_iter(), &mut buf);

            decoder
                .decode(&mut Cursor::new(&mut buf), |e| {
                    assert_eq!(e, input.remove(0).reify().unwrap());
                })
                .unwrap();

            assert_eq!(0, input.len());
        }
    }
}

struct Case {
    seqno: u64,
    wire: Vec<u8>,
    expect: Vec<(String, String)>,
    header_table_size: Option<usize>,
}

fn key_str(e: &Header) -> &str {
    match *e {
        Header::Field { ref name, .. } => name.as_str(),
        Header::Authority(..) => ":authority",
        Header::Method(..) => ":method",
        Header::Scheme(..) => ":scheme",
        Header::Path(..) => ":path",
        Header::Status(..) => ":status",
    }
}

fn value_str(e: &Header) -> &str {
    match *e {
        Header::Field {
            ref value, ..
} => value.to_str().unwrap(), Header::Authority(ref v) => &**v, Header::Method(ref m) => m.as_str(), Header::Scheme(ref v) => &**v, Header::Path(ref v) => &**v, Header::Status(ref v) => v.as_str(), } } macro_rules! fixture_mod { ($module:ident => { $( ($fn:ident, $path:expr); )+ }) => { mod $module { $( #[test] fn $fn() { let path = ::std::path::Path::new(env!("CARGO_MANIFEST_DIR")) .join("fixtures/hpack") .join($path); super::test_fixture(path.as_ref()); } )+ } } } fixture_mod!( haskell_http2_linear_huffman => { (story_00, "haskell-http2-linear-huffman/story_00.json"); (story_01, "haskell-http2-linear-huffman/story_01.json"); (story_02, "haskell-http2-linear-huffman/story_02.json"); (story_03, "haskell-http2-linear-huffman/story_03.json"); (story_04, "haskell-http2-linear-huffman/story_04.json"); (story_05, "haskell-http2-linear-huffman/story_05.json"); (story_06, "haskell-http2-linear-huffman/story_06.json"); (story_07, "haskell-http2-linear-huffman/story_07.json"); (story_08, "haskell-http2-linear-huffman/story_08.json"); (story_09, "haskell-http2-linear-huffman/story_09.json"); (story_10, "haskell-http2-linear-huffman/story_10.json"); (story_11, "haskell-http2-linear-huffman/story_11.json"); (story_12, "haskell-http2-linear-huffman/story_12.json"); (story_13, "haskell-http2-linear-huffman/story_13.json"); (story_14, "haskell-http2-linear-huffman/story_14.json"); (story_15, "haskell-http2-linear-huffman/story_15.json"); (story_16, "haskell-http2-linear-huffman/story_16.json"); (story_17, "haskell-http2-linear-huffman/story_17.json"); (story_18, "haskell-http2-linear-huffman/story_18.json"); (story_19, "haskell-http2-linear-huffman/story_19.json"); (story_20, "haskell-http2-linear-huffman/story_20.json"); (story_21, "haskell-http2-linear-huffman/story_21.json"); (story_22, "haskell-http2-linear-huffman/story_22.json"); (story_23, "haskell-http2-linear-huffman/story_23.json"); (story_24, "haskell-http2-linear-huffman/story_24.json"); (story_25, "haskell-http2-linear-huffman/story_25.json"); (story_26, "haskell-http2-linear-huffman/story_26.json"); (story_27, "haskell-http2-linear-huffman/story_27.json"); (story_28, "haskell-http2-linear-huffman/story_28.json"); (story_29, "haskell-http2-linear-huffman/story_29.json"); (story_30, "haskell-http2-linear-huffman/story_30.json"); (story_31, "haskell-http2-linear-huffman/story_31.json"); } ); fixture_mod!( python_hpack => { (story_00, "python-hpack/story_00.json"); (story_01, "python-hpack/story_01.json"); (story_02, "python-hpack/story_02.json"); (story_03, "python-hpack/story_03.json"); (story_04, "python-hpack/story_04.json"); (story_05, "python-hpack/story_05.json"); (story_06, "python-hpack/story_06.json"); (story_07, "python-hpack/story_07.json"); (story_08, "python-hpack/story_08.json"); (story_09, "python-hpack/story_09.json"); (story_10, "python-hpack/story_10.json"); (story_11, "python-hpack/story_11.json"); (story_12, "python-hpack/story_12.json"); (story_13, "python-hpack/story_13.json"); (story_14, "python-hpack/story_14.json"); (story_15, "python-hpack/story_15.json"); (story_16, "python-hpack/story_16.json"); (story_17, "python-hpack/story_17.json"); (story_18, "python-hpack/story_18.json"); (story_19, "python-hpack/story_19.json"); (story_20, "python-hpack/story_20.json"); (story_21, "python-hpack/story_21.json"); (story_22, "python-hpack/story_22.json"); (story_23, "python-hpack/story_23.json"); (story_24, "python-hpack/story_24.json"); (story_25, "python-hpack/story_25.json"); (story_26, "python-hpack/story_26.json"); 
(story_27, "python-hpack/story_27.json"); (story_28, "python-hpack/story_28.json"); (story_29, "python-hpack/story_29.json"); (story_30, "python-hpack/story_30.json"); (story_31, "python-hpack/story_31.json"); } ); fixture_mod!( nghttp2_16384_4096 => { (story_00, "nghttp2-16384-4096/story_00.json"); (story_01, "nghttp2-16384-4096/story_01.json"); (story_02, "nghttp2-16384-4096/story_02.json"); (story_03, "nghttp2-16384-4096/story_03.json"); (story_04, "nghttp2-16384-4096/story_04.json"); (story_05, "nghttp2-16384-4096/story_05.json"); (story_06, "nghttp2-16384-4096/story_06.json"); (story_07, "nghttp2-16384-4096/story_07.json"); (story_08, "nghttp2-16384-4096/story_08.json"); (story_09, "nghttp2-16384-4096/story_09.json"); (story_10, "nghttp2-16384-4096/story_10.json"); (story_11, "nghttp2-16384-4096/story_11.json"); (story_12, "nghttp2-16384-4096/story_12.json"); (story_13, "nghttp2-16384-4096/story_13.json"); (story_14, "nghttp2-16384-4096/story_14.json"); (story_15, "nghttp2-16384-4096/story_15.json"); (story_16, "nghttp2-16384-4096/story_16.json"); (story_17, "nghttp2-16384-4096/story_17.json"); (story_18, "nghttp2-16384-4096/story_18.json"); (story_19, "nghttp2-16384-4096/story_19.json"); (story_20, "nghttp2-16384-4096/story_20.json"); (story_21, "nghttp2-16384-4096/story_21.json"); (story_22, "nghttp2-16384-4096/story_22.json"); (story_23, "nghttp2-16384-4096/story_23.json"); (story_24, "nghttp2-16384-4096/story_24.json"); (story_25, "nghttp2-16384-4096/story_25.json"); (story_26, "nghttp2-16384-4096/story_26.json"); (story_27, "nghttp2-16384-4096/story_27.json"); (story_28, "nghttp2-16384-4096/story_28.json"); (story_29, "nghttp2-16384-4096/story_29.json"); (story_30, "nghttp2-16384-4096/story_30.json"); } ); fixture_mod!( node_http2_hpack => { (story_00, "node-http2-hpack/story_00.json"); (story_01, "node-http2-hpack/story_01.json"); (story_02, "node-http2-hpack/story_02.json"); (story_03, "node-http2-hpack/story_03.json"); (story_04, "node-http2-hpack/story_04.json"); (story_05, "node-http2-hpack/story_05.json"); (story_06, "node-http2-hpack/story_06.json"); (story_07, "node-http2-hpack/story_07.json"); (story_08, "node-http2-hpack/story_08.json"); (story_09, "node-http2-hpack/story_09.json"); (story_10, "node-http2-hpack/story_10.json"); (story_11, "node-http2-hpack/story_11.json"); (story_12, "node-http2-hpack/story_12.json"); (story_13, "node-http2-hpack/story_13.json"); (story_14, "node-http2-hpack/story_14.json"); (story_15, "node-http2-hpack/story_15.json"); (story_16, "node-http2-hpack/story_16.json"); (story_17, "node-http2-hpack/story_17.json"); (story_18, "node-http2-hpack/story_18.json"); (story_19, "node-http2-hpack/story_19.json"); (story_20, "node-http2-hpack/story_20.json"); (story_21, "node-http2-hpack/story_21.json"); (story_22, "node-http2-hpack/story_22.json"); (story_23, "node-http2-hpack/story_23.json"); (story_24, "node-http2-hpack/story_24.json"); (story_25, "node-http2-hpack/story_25.json"); (story_26, "node-http2-hpack/story_26.json"); (story_27, "node-http2-hpack/story_27.json"); (story_28, "node-http2-hpack/story_28.json"); (story_29, "node-http2-hpack/story_29.json"); (story_30, "node-http2-hpack/story_30.json"); (story_31, "node-http2-hpack/story_31.json"); } ); fixture_mod!( nghttp2_change_table_size => { (story_00, "nghttp2-change-table-size/story_00.json"); (story_01, "nghttp2-change-table-size/story_01.json"); (story_02, "nghttp2-change-table-size/story_02.json"); (story_03, "nghttp2-change-table-size/story_03.json"); (story_04, 
"nghttp2-change-table-size/story_04.json"); (story_05, "nghttp2-change-table-size/story_05.json"); (story_06, "nghttp2-change-table-size/story_06.json"); (story_07, "nghttp2-change-table-size/story_07.json"); (story_08, "nghttp2-change-table-size/story_08.json"); (story_09, "nghttp2-change-table-size/story_09.json"); (story_10, "nghttp2-change-table-size/story_10.json"); (story_11, "nghttp2-change-table-size/story_11.json"); (story_12, "nghttp2-change-table-size/story_12.json"); (story_13, "nghttp2-change-table-size/story_13.json"); (story_14, "nghttp2-change-table-size/story_14.json"); (story_15, "nghttp2-change-table-size/story_15.json"); (story_16, "nghttp2-change-table-size/story_16.json"); (story_17, "nghttp2-change-table-size/story_17.json"); (story_18, "nghttp2-change-table-size/story_18.json"); (story_19, "nghttp2-change-table-size/story_19.json"); (story_20, "nghttp2-change-table-size/story_20.json"); (story_21, "nghttp2-change-table-size/story_21.json"); (story_22, "nghttp2-change-table-size/story_22.json"); (story_23, "nghttp2-change-table-size/story_23.json"); (story_24, "nghttp2-change-table-size/story_24.json"); (story_25, "nghttp2-change-table-size/story_25.json"); (story_26, "nghttp2-change-table-size/story_26.json"); (story_27, "nghttp2-change-table-size/story_27.json"); (story_28, "nghttp2-change-table-size/story_28.json"); (story_29, "nghttp2-change-table-size/story_29.json"); (story_30, "nghttp2-change-table-size/story_30.json"); } ); fixture_mod!( haskell_http2_static_huffman => { (story_00, "haskell-http2-static-huffman/story_00.json"); (story_01, "haskell-http2-static-huffman/story_01.json"); (story_02, "haskell-http2-static-huffman/story_02.json"); (story_03, "haskell-http2-static-huffman/story_03.json"); (story_04, "haskell-http2-static-huffman/story_04.json"); (story_05, "haskell-http2-static-huffman/story_05.json"); (story_06, "haskell-http2-static-huffman/story_06.json"); (story_07, "haskell-http2-static-huffman/story_07.json"); (story_08, "haskell-http2-static-huffman/story_08.json"); (story_09, "haskell-http2-static-huffman/story_09.json"); (story_10, "haskell-http2-static-huffman/story_10.json"); (story_11, "haskell-http2-static-huffman/story_11.json"); (story_12, "haskell-http2-static-huffman/story_12.json"); (story_13, "haskell-http2-static-huffman/story_13.json"); (story_14, "haskell-http2-static-huffman/story_14.json"); (story_15, "haskell-http2-static-huffman/story_15.json"); (story_16, "haskell-http2-static-huffman/story_16.json"); (story_17, "haskell-http2-static-huffman/story_17.json"); (story_18, "haskell-http2-static-huffman/story_18.json"); (story_19, "haskell-http2-static-huffman/story_19.json"); (story_20, "haskell-http2-static-huffman/story_20.json"); (story_21, "haskell-http2-static-huffman/story_21.json"); (story_22, "haskell-http2-static-huffman/story_22.json"); (story_23, "haskell-http2-static-huffman/story_23.json"); (story_24, "haskell-http2-static-huffman/story_24.json"); (story_25, "haskell-http2-static-huffman/story_25.json"); (story_26, "haskell-http2-static-huffman/story_26.json"); (story_27, "haskell-http2-static-huffman/story_27.json"); (story_28, "haskell-http2-static-huffman/story_28.json"); (story_29, "haskell-http2-static-huffman/story_29.json"); (story_30, "haskell-http2-static-huffman/story_30.json"); (story_31, "haskell-http2-static-huffman/story_31.json"); } ); fixture_mod!( haskell_http2_naive_huffman => { (story_00, "haskell-http2-naive-huffman/story_00.json"); (story_01, "haskell-http2-naive-huffman/story_01.json"); 
(story_02, "haskell-http2-naive-huffman/story_02.json"); (story_03, "haskell-http2-naive-huffman/story_03.json"); (story_04, "haskell-http2-naive-huffman/story_04.json"); (story_05, "haskell-http2-naive-huffman/story_05.json"); (story_06, "haskell-http2-naive-huffman/story_06.json"); (story_07, "haskell-http2-naive-huffman/story_07.json"); (story_08, "haskell-http2-naive-huffman/story_08.json"); (story_09, "haskell-http2-naive-huffman/story_09.json"); (story_10, "haskell-http2-naive-huffman/story_10.json"); (story_11, "haskell-http2-naive-huffman/story_11.json"); (story_12, "haskell-http2-naive-huffman/story_12.json"); (story_13, "haskell-http2-naive-huffman/story_13.json"); (story_14, "haskell-http2-naive-huffman/story_14.json"); (story_15, "haskell-http2-naive-huffman/story_15.json"); (story_16, "haskell-http2-naive-huffman/story_16.json"); (story_17, "haskell-http2-naive-huffman/story_17.json"); (story_18, "haskell-http2-naive-huffman/story_18.json"); (story_19, "haskell-http2-naive-huffman/story_19.json"); (story_20, "haskell-http2-naive-huffman/story_20.json"); (story_21, "haskell-http2-naive-huffman/story_21.json"); (story_22, "haskell-http2-naive-huffman/story_22.json"); (story_23, "haskell-http2-naive-huffman/story_23.json"); (story_24, "haskell-http2-naive-huffman/story_24.json"); (story_25, "haskell-http2-naive-huffman/story_25.json"); (story_26, "haskell-http2-naive-huffman/story_26.json"); (story_27, "haskell-http2-naive-huffman/story_27.json"); (story_28, "haskell-http2-naive-huffman/story_28.json"); (story_29, "haskell-http2-naive-huffman/story_29.json"); (story_30, "haskell-http2-naive-huffman/story_30.json"); (story_31, "haskell-http2-naive-huffman/story_31.json"); } ); fixture_mod!( haskell_http2_naive => { (story_00, "haskell-http2-naive/story_00.json"); (story_01, "haskell-http2-naive/story_01.json"); (story_02, "haskell-http2-naive/story_02.json"); (story_03, "haskell-http2-naive/story_03.json"); (story_04, "haskell-http2-naive/story_04.json"); (story_05, "haskell-http2-naive/story_05.json"); (story_06, "haskell-http2-naive/story_06.json"); (story_07, "haskell-http2-naive/story_07.json"); (story_08, "haskell-http2-naive/story_08.json"); (story_09, "haskell-http2-naive/story_09.json"); (story_10, "haskell-http2-naive/story_10.json"); (story_11, "haskell-http2-naive/story_11.json"); (story_12, "haskell-http2-naive/story_12.json"); (story_13, "haskell-http2-naive/story_13.json"); (story_14, "haskell-http2-naive/story_14.json"); (story_15, "haskell-http2-naive/story_15.json"); (story_16, "haskell-http2-naive/story_16.json"); (story_17, "haskell-http2-naive/story_17.json"); (story_18, "haskell-http2-naive/story_18.json"); (story_19, "haskell-http2-naive/story_19.json"); (story_20, "haskell-http2-naive/story_20.json"); (story_21, "haskell-http2-naive/story_21.json"); (story_22, "haskell-http2-naive/story_22.json"); (story_23, "haskell-http2-naive/story_23.json"); (story_24, "haskell-http2-naive/story_24.json"); (story_25, "haskell-http2-naive/story_25.json"); (story_26, "haskell-http2-naive/story_26.json"); (story_27, "haskell-http2-naive/story_27.json"); (story_28, "haskell-http2-naive/story_28.json"); (story_29, "haskell-http2-naive/story_29.json"); (story_30, "haskell-http2-naive/story_30.json"); (story_31, "haskell-http2-naive/story_31.json"); } ); fixture_mod!( haskell_http2_static => { (story_00, "haskell-http2-static/story_00.json"); (story_01, "haskell-http2-static/story_01.json"); (story_02, "haskell-http2-static/story_02.json"); (story_03, 
"haskell-http2-static/story_03.json"); (story_04, "haskell-http2-static/story_04.json"); (story_05, "haskell-http2-static/story_05.json"); (story_06, "haskell-http2-static/story_06.json"); (story_07, "haskell-http2-static/story_07.json"); (story_08, "haskell-http2-static/story_08.json"); (story_09, "haskell-http2-static/story_09.json"); (story_10, "haskell-http2-static/story_10.json"); (story_11, "haskell-http2-static/story_11.json"); (story_12, "haskell-http2-static/story_12.json"); (story_13, "haskell-http2-static/story_13.json"); (story_14, "haskell-http2-static/story_14.json"); (story_15, "haskell-http2-static/story_15.json"); (story_16, "haskell-http2-static/story_16.json"); (story_17, "haskell-http2-static/story_17.json"); (story_18, "haskell-http2-static/story_18.json"); (story_19, "haskell-http2-static/story_19.json"); (story_20, "haskell-http2-static/story_20.json"); (story_21, "haskell-http2-static/story_21.json"); (story_22, "haskell-http2-static/story_22.json"); (story_23, "haskell-http2-static/story_23.json"); (story_24, "haskell-http2-static/story_24.json"); (story_25, "haskell-http2-static/story_25.json"); (story_26, "haskell-http2-static/story_26.json"); (story_27, "haskell-http2-static/story_27.json"); (story_28, "haskell-http2-static/story_28.json"); (story_29, "haskell-http2-static/story_29.json"); (story_30, "haskell-http2-static/story_30.json"); (story_31, "haskell-http2-static/story_31.json"); } ); fixture_mod!( nghttp2 => { (story_00, "nghttp2/story_00.json"); (story_01, "nghttp2/story_01.json"); (story_02, "nghttp2/story_02.json"); (story_03, "nghttp2/story_03.json"); (story_04, "nghttp2/story_04.json"); (story_05, "nghttp2/story_05.json"); (story_06, "nghttp2/story_06.json"); (story_07, "nghttp2/story_07.json"); (story_08, "nghttp2/story_08.json"); (story_09, "nghttp2/story_09.json"); (story_10, "nghttp2/story_10.json"); (story_11, "nghttp2/story_11.json"); (story_12, "nghttp2/story_12.json"); (story_13, "nghttp2/story_13.json"); (story_14, "nghttp2/story_14.json"); (story_15, "nghttp2/story_15.json"); (story_16, "nghttp2/story_16.json"); (story_17, "nghttp2/story_17.json"); (story_18, "nghttp2/story_18.json"); (story_19, "nghttp2/story_19.json"); (story_20, "nghttp2/story_20.json"); (story_21, "nghttp2/story_21.json"); (story_22, "nghttp2/story_22.json"); (story_23, "nghttp2/story_23.json"); (story_24, "nghttp2/story_24.json"); (story_25, "nghttp2/story_25.json"); (story_26, "nghttp2/story_26.json"); (story_27, "nghttp2/story_27.json"); (story_28, "nghttp2/story_28.json"); (story_29, "nghttp2/story_29.json"); (story_30, "nghttp2/story_30.json"); (story_31, "nghttp2/story_31.json"); } ); fixture_mod!( haskell_http2_linear => { (story_00, "haskell-http2-linear/story_00.json"); (story_01, "haskell-http2-linear/story_01.json"); (story_02, "haskell-http2-linear/story_02.json"); (story_03, "haskell-http2-linear/story_03.json"); (story_04, "haskell-http2-linear/story_04.json"); (story_05, "haskell-http2-linear/story_05.json"); (story_06, "haskell-http2-linear/story_06.json"); (story_07, "haskell-http2-linear/story_07.json"); (story_08, "haskell-http2-linear/story_08.json"); (story_09, "haskell-http2-linear/story_09.json"); (story_10, "haskell-http2-linear/story_10.json"); (story_11, "haskell-http2-linear/story_11.json"); (story_12, "haskell-http2-linear/story_12.json"); (story_13, "haskell-http2-linear/story_13.json"); (story_14, "haskell-http2-linear/story_14.json"); (story_15, "haskell-http2-linear/story_15.json"); (story_16, "haskell-http2-linear/story_16.json"); 
(story_17, "haskell-http2-linear/story_17.json"); (story_18, "haskell-http2-linear/story_18.json"); (story_19, "haskell-http2-linear/story_19.json"); (story_20, "haskell-http2-linear/story_20.json"); (story_21, "haskell-http2-linear/story_21.json"); (story_22, "haskell-http2-linear/story_22.json"); (story_23, "haskell-http2-linear/story_23.json"); (story_24, "haskell-http2-linear/story_24.json"); (story_25, "haskell-http2-linear/story_25.json"); (story_26, "haskell-http2-linear/story_26.json"); (story_27, "haskell-http2-linear/story_27.json"); (story_28, "haskell-http2-linear/story_28.json"); (story_29, "haskell-http2-linear/story_29.json"); (story_30, "haskell-http2-linear/story_30.json"); (story_31, "haskell-http2-linear/story_31.json"); } ); fixture_mod!( go_hpack => { (story_00, "go-hpack/story_00.json"); (story_01, "go-hpack/story_01.json"); (story_02, "go-hpack/story_02.json"); (story_03, "go-hpack/story_03.json"); (story_04, "go-hpack/story_04.json"); (story_05, "go-hpack/story_05.json"); (story_06, "go-hpack/story_06.json"); (story_07, "go-hpack/story_07.json"); (story_08, "go-hpack/story_08.json"); (story_09, "go-hpack/story_09.json"); (story_10, "go-hpack/story_10.json"); (story_11, "go-hpack/story_11.json"); (story_12, "go-hpack/story_12.json"); (story_13, "go-hpack/story_13.json"); (story_14, "go-hpack/story_14.json"); (story_15, "go-hpack/story_15.json"); (story_16, "go-hpack/story_16.json"); (story_17, "go-hpack/story_17.json"); (story_18, "go-hpack/story_18.json"); (story_19, "go-hpack/story_19.json"); (story_20, "go-hpack/story_20.json"); (story_21, "go-hpack/story_21.json"); (story_22, "go-hpack/story_22.json"); (story_23, "go-hpack/story_23.json"); (story_24, "go-hpack/story_24.json"); (story_25, "go-hpack/story_25.json"); (story_26, "go-hpack/story_26.json"); (story_27, "go-hpack/story_27.json"); (story_28, "go-hpack/story_28.json"); (story_29, "go-hpack/story_29.json"); (story_30, "go-hpack/story_30.json"); (story_31, "go-hpack/story_31.json"); } ); h2-0.1.26/src/hpack/test/fuzz.rs010066400017500001750000000274221351644257100145530ustar0000000000000000extern crate bytes; extern crate env_logger; extern crate quickcheck; extern crate rand; use hpack::{Decoder, Encode, Encoder, Header}; use http::header::{HeaderName, HeaderValue}; use self::bytes::{Bytes, BytesMut}; use self::quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use self::rand::{Rng, SeedableRng, StdRng}; use std::io::Cursor; const MAX_CHUNK: usize = 2 * 1024; #[test] fn hpack_fuzz() { let _ = env_logger::try_init(); fn prop(fuzz: FuzzHpack) -> TestResult { fuzz.run(); TestResult::from_bool(true) } QuickCheck::new() .tests(100) .quickcheck(prop as fn(FuzzHpack) -> TestResult) } #[derive(Debug, Clone)] struct FuzzHpack { // The magic seed that makes the test case reproducible seed: [usize; 4], // The set of headers to encode / decode frames: Vec, // The list of chunk sizes to do it in chunks: Vec, // Number of times reduced reduced: usize, } #[derive(Debug, Clone)] struct HeaderFrame { resizes: Vec, headers: Vec>>, } impl FuzzHpack { fn new(seed: [usize; 4]) -> FuzzHpack { // Seed the RNG let mut rng = StdRng::from_seed(&seed); // Generates a bunch of source headers let mut source: Vec>> = vec![]; for _ in 0..2000 { source.push(gen_header(&mut rng)); } // Actual test run headers let num: usize = rng.gen_range(40, 500); let mut frames: Vec = vec![]; let mut added = 0; let skew: i32 = rng.gen_range(1, 5); // Rough number of headers to add while added < num { let mut frame = HeaderFrame { resizes: vec![], 
headers: vec![], }; #[allow(warnings)] match rng.gen_range(0, 20) { 0 => { // Two resizes let high = rng.gen_range(128, MAX_CHUNK * 2); let low = rng.gen_range(0, high); frame.resizes.extend(&[low, high]); }, 1...3 => { frame.resizes.push(rng.gen_range(128, MAX_CHUNK * 2)); }, _ => {}, } let mut is_name_required = true; for _ in 0..rng.gen_range(1, (num - added) + 1) { let x: f64 = rng.gen_range(0.0, 1.0); let x = x.powi(skew); let i = (x * source.len() as f64) as usize; let header = &source[i]; match header { Header::Field { name: None, .. } => { if is_name_required { continue; } }, Header::Field { .. } => { is_name_required = false; }, _ => { // pseudos can't be followed by a header with no name is_name_required = true; } } frame.headers.push(header.clone()); added += 1; } frames.push(frame); } // Now, generate the buffer sizes used to encode let mut chunks = vec![]; for _ in 0..rng.gen_range(0, 100) { chunks.push(rng.gen_range(0, MAX_CHUNK)); } FuzzHpack { seed: seed, frames: frames, chunks: chunks, reduced: 0, } } fn run(self) { let mut chunks = self.chunks; let frames = self.frames; let mut expect = vec![]; let mut encoder = Encoder::default(); let mut decoder = Decoder::default(); for frame in frames { // build "expected" frames, such that decoding headers always // includes a name let mut prev_name = None; for header in &frame.headers { match header.clone().reify() { Ok(h) => { prev_name = match h { Header::Field { ref name, .. } => Some(name.clone()), _ => None, }; expect.push(h); }, Err(value) => { expect.push(Header::Field { name: prev_name.as_ref().cloned().expect("previous header name"), value, }); } } } let mut input = frame.headers.into_iter(); let mut index = None; let mut buf = BytesMut::with_capacity(chunks.pop().unwrap_or(MAX_CHUNK)); if let Some(max) = frame.resizes.iter().max() { decoder.queue_size_update(*max); } // Apply resizes for resize in &frame.resizes { encoder.update_max_size(*resize); } loop { match encoder.encode(index.take(), &mut input, &mut buf) { Encode::Full => break, Encode::Partial(i) => { index = Some(i); // Decode the chunk! decoder .decode(&mut Cursor::new(&mut buf), |h| { let e = expect.remove(0); assert_eq!(h, e); }) .expect("partial decode"); buf = BytesMut::with_capacity(chunks.pop().unwrap_or(MAX_CHUNK)); }, } } // Decode the chunk! 
decoder .decode(&mut Cursor::new(&mut buf), |h| { let e = expect.remove(0); assert_eq!(h, e); }) .expect("full decode"); } assert_eq!(0, expect.len()); } } impl Arbitrary for FuzzHpack { fn arbitrary(g: &mut G) -> Self { FuzzHpack::new(quickcheck::Rng::gen(g)) } } fn gen_header(g: &mut StdRng) -> Header> { use http::{Method, StatusCode}; if g.gen_weighted_bool(10) { match g.next_u32() % 5 { 0 => { let value = gen_string(g, 4, 20); Header::Authority(to_shared(value)) }, 1 => { let method = match g.next_u32() % 6 { 0 => Method::GET, 1 => Method::POST, 2 => Method::PUT, 3 => Method::PATCH, 4 => Method::DELETE, 5 => { let n: usize = g.gen_range(3, 7); let bytes: Vec = (0..n) .map(|_| g.choose(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap().clone()) .collect(); Method::from_bytes(&bytes).unwrap() }, _ => unreachable!(), }; Header::Method(method) }, 2 => { let value = match g.next_u32() % 2 { 0 => "http", 1 => "https", _ => unreachable!(), }; Header::Scheme(to_shared(value.to_string())) }, 3 => { let value = match g.next_u32() % 100 { 0 => "/".to_string(), 1 => "/index.html".to_string(), _ => gen_string(g, 2, 20), }; Header::Path(to_shared(value)) }, 4 => { let status = (g.gen::() % 500) + 100; Header::Status(StatusCode::from_u16(status).unwrap()) }, _ => unreachable!(), } } else { let name = if g.gen_weighted_bool(10) { None } else { Some(gen_header_name(g)) }; let mut value = gen_header_value(g); if g.gen_weighted_bool(30) { value.set_sensitive(true); } Header::Field { name, value, } } } fn gen_header_name(g: &mut StdRng) -> HeaderName { use http::header; if g.gen_weighted_bool(2) { g.choose(&[ header::ACCEPT, header::ACCEPT_CHARSET, header::ACCEPT_ENCODING, header::ACCEPT_LANGUAGE, header::ACCEPT_RANGES, header::ACCESS_CONTROL_ALLOW_CREDENTIALS, header::ACCESS_CONTROL_ALLOW_HEADERS, header::ACCESS_CONTROL_ALLOW_METHODS, header::ACCESS_CONTROL_ALLOW_ORIGIN, header::ACCESS_CONTROL_EXPOSE_HEADERS, header::ACCESS_CONTROL_MAX_AGE, header::ACCESS_CONTROL_REQUEST_HEADERS, header::ACCESS_CONTROL_REQUEST_METHOD, header::AGE, header::ALLOW, header::ALT_SVC, header::AUTHORIZATION, header::CACHE_CONTROL, header::CONNECTION, header::CONTENT_DISPOSITION, header::CONTENT_ENCODING, header::CONTENT_LANGUAGE, header::CONTENT_LENGTH, header::CONTENT_LOCATION, header::CONTENT_RANGE, header::CONTENT_SECURITY_POLICY, header::CONTENT_SECURITY_POLICY_REPORT_ONLY, header::CONTENT_TYPE, header::COOKIE, header::DNT, header::DATE, header::ETAG, header::EXPECT, header::EXPIRES, header::FORWARDED, header::FROM, header::HOST, header::IF_MATCH, header::IF_MODIFIED_SINCE, header::IF_NONE_MATCH, header::IF_RANGE, header::IF_UNMODIFIED_SINCE, header::LAST_MODIFIED, header::LINK, header::LOCATION, header::MAX_FORWARDS, header::ORIGIN, header::PRAGMA, header::PROXY_AUTHENTICATE, header::PROXY_AUTHORIZATION, header::PUBLIC_KEY_PINS, header::PUBLIC_KEY_PINS_REPORT_ONLY, header::RANGE, header::REFERER, header::REFERRER_POLICY, header::REFRESH, header::RETRY_AFTER, header::SERVER, header::SET_COOKIE, header::STRICT_TRANSPORT_SECURITY, header::TE, header::TRAILER, header::TRANSFER_ENCODING, header::USER_AGENT, header::UPGRADE, header::UPGRADE_INSECURE_REQUESTS, header::VARY, header::VIA, header::WARNING, header::WWW_AUTHENTICATE, header::X_CONTENT_TYPE_OPTIONS, header::X_DNS_PREFETCH_CONTROL, header::X_FRAME_OPTIONS, header::X_XSS_PROTECTION, ]).unwrap() .clone() } else { let value = gen_string(g, 1, 25); HeaderName::from_bytes(value.as_bytes()).unwrap() } } fn gen_header_value(g: &mut StdRng) -> HeaderValue { let value = gen_string(g, 0, 
70); HeaderValue::from_bytes(value.as_bytes()).unwrap() } fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { let bytes: Vec<_> = (min..max) .map(|_| { // Chars to pick from g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----") .unwrap() .clone() }) .collect(); String::from_utf8(bytes).unwrap() } fn to_shared(src: String) -> ::string::String { let b: Bytes = src.into(); unsafe { ::string::String::from_utf8_unchecked(b) } } h2-0.1.26/src/hpack/test/mod.rs010066400017500001750000000000271313175774300143270ustar0000000000000000mod fixture; mod fuzz; h2-0.1.26/src/lib.rs010066400017500001750000000111621351645762400122550ustar0000000000000000//! An asynchronous, HTTP/2.0 server and client implementation. //! //! This library implements the [HTTP/2.0] specification. The implementation is //! asynchronous, using [futures] as the basis for the API. The implementation //! is also decoupled from TCP or TLS details. The user must handle ALPN and //! HTTP/1.1 upgrades themselves. //! //! # Getting started //! //! Add the following to your `Cargo.toml` file: //! //! ```toml //! [dependencies] //! h2 = "0.1" //! ``` //! //! Next, add this to your crate: //! //! ```no_run //! extern crate h2; //! ``` //! //! # Layout //! //! The crate is split into [`client`] and [`server`] modules. Types that are //! common to both clients and servers are located at the root of the crate. //! //! See module level documentation for more details on how to use `h2`. //! //! # Handshake //! //! Both the client and the server require a connection to already be in a state //! ready to start the HTTP/2.0 handshake. This library does not provide //! facilities to do this. //! //! There are three ways to reach an appropriate state to start the HTTP/2.0 //! handshake. //! //! * Opening an HTTP/1.1 connection and performing an [upgrade]. //! * Opening a connection with TLS and use ALPN to negotiate the protocol. //! * Open a connection with prior knowledge, i.e. both the client and the //! server assume that the connection is immediately ready to start the //! HTTP/2.0 handshake once opened. //! //! Once the connection is ready to start the HTTP/2.0 handshake, it can be //! passed to [`server::handshake`] or [`client::handshake`]. At this point, the //! library will start the handshake process, which consists of: //! //! * The client sends the connection preface (a predefined sequence of 24 //! octets). //! * Both the client and the server sending a SETTINGS frame. //! //! See the [Starting HTTP/2] in the specification for more details. //! //! # Flow control //! //! [Flow control] is a fundamental feature of HTTP/2.0. The `h2` library //! exposes flow control to the user. //! //! An HTTP/2.0 client or server may not send unlimited data to the peer. When a //! stream is initiated, both the client and the server are provided with an //! initial window size for that stream. A window size is the number of bytes //! the endpoint can send to the peer. At any point in time, the peer may //! increase this window size by sending a `WINDOW_UPDATE` frame. Once a client //! or server has sent data filling the window for a stream, no further data may //! be sent on that stream until the peer increases the window. //! //! There is also a **connection level** window governing data sent across all //! streams. //! //! Managing flow control for inbound data is done through [`ReleaseCapacity`]. //! Managing flow control for outbound data is done through [`SendStream`]. See //! 
the struct level documentation for those two types for more details. //! //! [HTTP/2.0]: https://http2.github.io/ //! [futures]: https://docs.rs/futures/ //! [`client`]: client/index.html //! [`server`]: server/index.html //! [Flow control]: http://httpwg.org/specs/rfc7540.html#FlowControl //! [`ReleaseCapacity`]: struct.ReleaseCapacity.html //! [`SendStream`]: struct.SendStream.html //! [Starting HTTP/2]: http://httpwg.org/specs/rfc7540.html#starting //! [upgrade]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html #![doc(html_root_url = "https://docs.rs/h2/0.1.26")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #[macro_use] extern crate futures; #[macro_use] extern crate tokio_io; // HTTP types extern crate http; // Buffer utilities extern crate bytes; // Hash function used for HPACK encoding and tracking stream states. extern crate fnv; extern crate byteorder; extern crate slab; #[macro_use] extern crate log; extern crate string; extern crate indexmap; macro_rules! proto_err { (conn: $($msg:tt)+) => { debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; (stream: $($msg:tt)+) => { debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; } mod error; #[cfg_attr(feature = "unstable", allow(missing_docs))] mod codec; mod hpack; mod proto; #[cfg(not(feature = "unstable"))] mod frame; #[cfg(feature = "unstable")] #[allow(missing_docs)] pub mod frame; pub mod client; pub mod server; mod share; pub use error::{Error, Reason}; pub use share::{SendStream, StreamId, RecvStream, ReleaseCapacity, PingPong, Ping, Pong}; #[cfg(feature = "unstable")] pub use codec::{Codec, RecvError, SendError, UserError}; h2-0.1.26/src/proto/connection.rs010066400017500001750000000371731347426626300150230ustar0000000000000000use {client, frame, proto, server}; use codec::RecvError; use frame::{Reason, StreamId}; use frame::DEFAULT_INITIAL_WINDOW_SIZE; use proto::*; use bytes::{Bytes, IntoBuf}; use futures::Stream; use tokio_io::{AsyncRead, AsyncWrite}; use std::marker::PhantomData; use std::io; use std::time::Duration; /// An H2 connection #[derive(Debug)] pub(crate) struct Connection where P: Peer, { /// Tracks the connection level state transitions. state: State, /// An error to report back once complete. /// /// This exists separately from State in order to support /// graceful shutdown. error: Option, /// Read / write frame values codec: Codec>, /// Pending GOAWAY frames to write. go_away: GoAway, /// Ping/pong handler ping_pong: PingPong, /// Connection settings settings: Settings, /// Stream state handler streams: Streams, /// Client or server _phantom: PhantomData

<P>,
}

#[derive(Debug, Clone)]
pub(crate) struct Config {
    pub next_stream_id: StreamId,
    pub initial_max_send_streams: usize,
    pub reset_stream_duration: Duration,
    pub reset_stream_max: usize,
    pub settings: frame::Settings,
}

#[derive(Debug)]
enum State {
    /// Currently open in a sane state
    Open,

    /// The codec must be flushed
    Closing(Reason),

    /// In a closed state
    Closed(Reason),
}

impl<T, P, B> Connection<T, P, B>
where
    T: AsyncRead + AsyncWrite,
    P: Peer,
    B: IntoBuf,
{
    pub fn new(
        codec: Codec<T, Prioritized<B::Buf>>,
        config: Config,
    ) -> Connection<T, P, B> {
        let streams = Streams::new(streams::Config {
            local_init_window_sz: config.settings
                .initial_window_size()
                .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE),
            initial_max_send_streams: config.initial_max_send_streams,
            local_next_stream_id: config.next_stream_id,
            local_push_enabled: config.settings.is_push_enabled(),
            local_reset_duration: config.reset_stream_duration,
            local_reset_max: config.reset_stream_max,
            remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
            remote_max_initiated: config.settings
                .max_concurrent_streams()
                .map(|max| max as usize),
        });

        Connection {
            state: State::Open,
            error: None,
            codec: codec,
            go_away: GoAway::new(),
            ping_pong: PingPong::new(),
            settings: Settings::new(),
            streams: streams,
            _phantom: PhantomData,
        }
    }

    pub fn set_target_window_size(&mut self, size: WindowSize) {
        self.streams.set_target_connection_window_size(size);
    }

    /// Returns `Ready` when the connection is ready to receive a frame.
    ///
    /// Returns `RecvError` as this may raise errors that are caused by delayed
    /// processing of received frames.
    fn poll_ready(&mut self) -> Poll<(), RecvError> {
        // The order of these calls doesn't really matter too much
        try_ready!(self.ping_pong.send_pending_pong(&mut self.codec));
        try_ready!(self.ping_pong.send_pending_ping(&mut self.codec));
        try_ready!(
            self.settings
                .send_pending_ack(&mut self.codec, &mut self.streams)
        );
        try_ready!(self.streams.send_pending_refusal(&mut self.codec));

        Ok(().into())
    }

    /// Send any pending GOAWAY frames.
    ///
    /// This will return `Some(reason)` if the connection should be closed
    /// afterwards. If this is a graceful shutdown, this returns `None`.
    fn poll_go_away(&mut self) -> Poll<Option<Reason>, io::Error> {
        self.go_away.send_pending_go_away(&mut self.codec)
    }

    fn go_away(&mut self, id: StreamId, e: Reason) {
        let frame = frame::GoAway::new(id, e);
        self.streams.send_go_away(id);
        self.go_away.go_away(frame);
    }

    fn go_away_now(&mut self, e: Reason) {
        let last_processed_id = self.streams.last_processed_id();
        let frame = frame::GoAway::new(last_processed_id, e);
        self.go_away.go_away_now(frame);
    }

    pub fn go_away_from_user(&mut self, e: Reason) {
        let last_processed_id = self.streams.last_processed_id();
        let frame = frame::GoAway::new(last_processed_id, e);
        self.go_away.go_away_from_user(frame);

        // Notify all streams of the reason we're abruptly closing.
        self.streams.recv_err(&proto::Error::Proto(e));
    }

    fn take_error(&mut self, ours: Reason) -> Poll<(), proto::Error> {
        let reason = if let Some(theirs) = self.error.take() {
            match (ours, theirs) {
                // If either side reported an error, return that
                // to the user.
                (Reason::NO_ERROR, err) | (err, Reason::NO_ERROR) => err,
                // If both sides reported an error, give their
                // error back to the user. We assume our error
                // was a consequence of their error, and less
                // important.
(_, theirs) => theirs, } } else { ours }; if reason == Reason::NO_ERROR { Ok(().into()) } else { Err(proto::Error::Proto(reason)) } } /// Closes the connection by transitioning to a GOAWAY state /// iff there are no streams or references pub fn maybe_close_connection_if_no_streams(&mut self) { // If we poll() and realize that there are no streams or references // then we can close the connection by transitioning to GOAWAY if !self.streams.has_streams_or_other_references() { self.go_away_now(Reason::NO_ERROR); } } pub(crate) fn take_user_pings(&mut self) -> Option { self.ping_pong.take_user_pings() } /// Advances the internal state of the connection. pub fn poll(&mut self) -> Poll<(), proto::Error> { use codec::RecvError::*; loop { // TODO: probably clean up this glob of code match self.state { // When open, continue to poll a frame State::Open => { match self.poll2() { // The connection has shutdown normally Ok(Async::Ready(())) => self.state = State::Closing(Reason::NO_ERROR), // The connection is not ready to make progress Ok(Async::NotReady) => { // Ensure all window updates have been sent. // // This will also handle flushing `self.codec` try_ready!(self.streams.poll_complete(&mut self.codec)); if self.error.is_some() || self.go_away.should_close_on_idle() { if !self.streams.has_streams() { self.go_away_now(Reason::NO_ERROR); continue; } } return Ok(Async::NotReady); }, // Attempting to read a frame resulted in a connection level // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. Err(Connection(e)) => { debug!("Connection::poll; connection error={:?}", e); // We may have already sent a GOAWAY for this error, // if so, don't send another, just flush and close up. if let Some(reason) = self.go_away.going_away_reason() { if reason == e { trace!(" -> already going away"); self.state = State::Closing(e); continue; } } // Reset all active streams self.streams.recv_err(&e.into()); self.go_away_now(e); }, // Attempting to read a frame resulted in a stream level error. // This is handled by resetting the frame then trying to read // another frame. Err(Stream { id, reason, }) => { trace!("stream error; id={:?}; reason={:?}", id, reason); self.streams.send_reset(id, reason); }, // Attempting to read a frame resulted in an I/O error. All // active streams must be reset. // // TODO: Are I/O errors recoverable? Err(Io(e)) => { debug!("Connection::poll; IO error={:?}", e); let e = e.into(); // Reset all active streams self.streams.recv_err(&e); // Return the error return Err(e); }, } } State::Closing(reason) => { trace!("connection closing after flush"); // Flush/shutdown the codec try_ready!(self.codec.shutdown()); // Transition the state to error self.state = State::Closed(reason); }, State::Closed(reason) => return self.take_error(reason), } } } fn poll2(&mut self) -> Poll<(), RecvError> { use frame::Frame::*; // This happens outside of the loop to prevent needing to do a clock // check and then comparison of the queue possibly multiple times a // second (and thus, the clock wouldn't have changed enough to matter). 
self.clear_expired_reset_streams(); loop { // First, ensure that the `Connection` is able to receive a frame // // The order here matters: // - poll_go_away may buffer a graceful shutdown GOAWAY frame // - If it has, we've also added a PING to be sent in poll_ready if let Some(reason) = try_ready!(self.poll_go_away()) { if self.go_away.should_close_now() { if self.go_away.is_user_initiated() { // A user initiated abrupt shutdown shouldn't return // the same error back to the user. return Ok(Async::Ready(())); } else { return Err(RecvError::Connection(reason)); } } // Only NO_ERROR should be waiting for idle debug_assert_eq!(reason, Reason::NO_ERROR, "graceful GOAWAY should be NO_ERROR"); } try_ready!(self.poll_ready()); match try_ready!(self.codec.poll()) { Some(Headers(frame)) => { trace!("recv HEADERS; frame={:?}", frame); self.streams.recv_headers(frame)?; }, Some(Data(frame)) => { trace!("recv DATA; frame={:?}", frame); self.streams.recv_data(frame)?; }, Some(Reset(frame)) => { trace!("recv RST_STREAM; frame={:?}", frame); self.streams.recv_reset(frame)?; }, Some(PushPromise(frame)) => { trace!("recv PUSH_PROMISE; frame={:?}", frame); self.streams.recv_push_promise(frame)?; }, Some(Settings(frame)) => { trace!("recv SETTINGS; frame={:?}", frame); self.settings.recv_settings(frame); }, Some(GoAway(frame)) => { trace!("recv GOAWAY; frame={:?}", frame); // This should prevent starting new streams, // but should allow continuing to process current streams // until they are all EOS. Once they are, State should // transition to GoAway. self.streams.recv_go_away(&frame)?; self.error = Some(frame.reason()); }, Some(Ping(frame)) => { trace!("recv PING; frame={:?}", frame); let status = self.ping_pong.recv_ping(frame); if status.is_shutdown() { assert!( self.go_away.is_going_away(), "received unexpected shutdown ping" ); let last_processed_id = self.streams.last_processed_id(); self.go_away(last_processed_id, Reason::NO_ERROR); } }, Some(WindowUpdate(frame)) => { trace!("recv WINDOW_UPDATE; frame={:?}", frame); self.streams.recv_window_update(frame)?; }, Some(Priority(frame)) => { trace!("recv PRIORITY; frame={:?}", frame); // TODO: handle }, None => { trace!("codec closed"); self.streams.recv_eof(false) .ok().expect("mutex poisoned"); return Ok(Async::Ready(())); }, } } } fn clear_expired_reset_streams(&mut self) { self.streams.clear_expired_reset_streams(); } } impl Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, { pub(crate) fn streams(&self) -> &Streams { &self.streams } } impl Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, { pub fn next_incoming(&mut self) -> Option> { self.streams.next_incoming() } // Graceful shutdown only makes sense for server peers. pub fn go_away_gracefully(&mut self) { if self.go_away.is_going_away() { // No reason to start a new one. return; } // According to http://httpwg.org/specs/rfc7540.html#GOAWAY: // // > A server that is attempting to gracefully shut down a connection // > SHOULD send an initial GOAWAY frame with the last stream // > identifier set to 2^31-1 and a NO_ERROR code. This signals to the // > client that a shutdown is imminent and that initiating further // > requests is prohibited. After allowing time for any in-flight // > stream creation (at least one round-trip time), the server can // > send another GOAWAY frame with an updated last stream identifier. // > This ensures that a connection can be cleanly shut down without // > losing requests. 
self.go_away(StreamId::MAX, Reason::NO_ERROR); // We take the advice of waiting 1 RTT literally, and wait // for a pong before proceeding. self.ping_pong.ping_shutdown(); } } impl Drop for Connection where P: Peer, B: IntoBuf, { fn drop(&mut self) { // Ignore errors as this indicates that the mutex is poisoned. let _ = self.streams.recv_eof(true); } } h2-0.1.26/src/proto/error.rs010066400017500001750000000022511316550207500137710ustar0000000000000000use codec::{RecvError, SendError}; use frame::Reason; use std::io; /// Either an H2 reason or an I/O error #[derive(Debug)] pub enum Error { Proto(Reason), Io(io::Error), } impl Error { /// Clone the error for internal purposes. /// /// `io::Error` is not `Clone`, so we only copy the `ErrorKind`. pub(super) fn shallow_clone(&self) -> Error { match *self { Error::Proto(reason) => Error::Proto(reason), Error::Io(ref io) => Error::Io(io::Error::from(io.kind())), } } } impl From for Error { fn from(src: Reason) -> Self { Error::Proto(src) } } impl From for Error { fn from(src: io::Error) -> Self { Error::Io(src) } } impl From for RecvError { fn from(src: Error) -> RecvError { match src { Error::Proto(reason) => RecvError::Connection(reason), Error::Io(e) => RecvError::Io(e), } } } impl From for SendError { fn from(src: Error) -> SendError { match src { Error::Proto(reason) => SendError::Connection(reason), Error::Io(e) => SendError::Io(e), } } } h2-0.1.26/src/proto/go_away.rs010066400017500001750000000110371345170144600142710ustar0000000000000000use codec::Codec; use frame::{self, Reason, StreamId}; use bytes::Buf; use futures::{Async, Poll}; use std::io; use tokio_io::AsyncWrite; /// Manages our sending of GOAWAY frames. #[derive(Debug)] pub(super) struct GoAway { /// Whether the connection should close now, or wait until idle. close_now: bool, /// Records if we've sent any GOAWAY before. going_away: Option, /// Whether the user started the GOAWAY by calling `abrupt_shutdown`. is_user_initiated: bool, /// A GOAWAY frame that must be buffered in the Codec immediately. pending: Option, } /// Keeps a memory of any GOAWAY frames we've sent before. /// /// This looks very similar to a `frame::GoAway`, but is a separate type. Why? /// Mostly for documentation purposes. This type is to record status. If it /// were a `frame::GoAway`, it might appear like we eventually wanted to /// serialize it. We **only** want to be able to look up these fields at a /// later time. /// /// (Technically, `frame::GoAway` should gain an opaque_debug_data field as /// well, and we wouldn't want to save that here to accidentally dump in logs, /// or waste struct space.) #[derive(Debug)] struct GoingAway { /// Stores the highest stream ID of a GOAWAY that has been sent. /// /// It's illegal to send a subsequent GOAWAY with a higher ID. last_processed_id: StreamId, /// Records the error code of any GOAWAY frame sent. reason: Reason, } impl GoAway { pub fn new() -> Self { GoAway { close_now: false, going_away: None, is_user_initiated: false, pending: None, } } /// Enqueue a GOAWAY frame to be written. /// /// The connection is expected to continue to run until idle. 
pub fn go_away(&mut self, f: frame::GoAway) { if let Some(ref going_away) = self.going_away { assert!( f.last_stream_id() <= going_away.last_processed_id, "GOAWAY stream IDs shouldn't be higher; \ last_processed_id = {:?}, f.last_stream_id() = {:?}", going_away.last_processed_id, f.last_stream_id(), ); } self.going_away = Some(GoingAway { last_processed_id: f.last_stream_id(), reason: f.reason(), }); self.pending = Some(f); } pub fn go_away_now(&mut self, f: frame::GoAway) { self.close_now = true; if let Some(ref going_away) = self.going_away { // Prevent sending the same GOAWAY twice. if going_away.last_processed_id == f.last_stream_id() && going_away.reason == f.reason() { return; } } self.go_away(f); } pub fn go_away_from_user(&mut self, f: frame::GoAway) { self.is_user_initiated = true; self.go_away_now(f); } /// Return if a GOAWAY has ever been scheduled. pub fn is_going_away(&self) -> bool { self.going_away.is_some() } pub fn is_user_initiated(&self) -> bool { self.is_user_initiated } /// Return the last Reason we've sent. pub fn going_away_reason(&self) -> Option { self.going_away .as_ref() .map(|g| g.reason) } /// Returns if the connection should close now, or wait until idle. pub fn should_close_now(&self) -> bool { self.pending.is_none() && self.close_now } /// Returns if the connection should be closed when idle. pub fn should_close_on_idle(&self) -> bool { !self.close_now && self.going_away .as_ref() .map(|g| g.last_processed_id != StreamId::MAX) .unwrap_or(false) } /// Try to write a pending GOAWAY frame to the buffer. /// /// If a frame is written, the `Reason` of the GOAWAY is returned. pub fn send_pending_go_away(&mut self, dst: &mut Codec) -> Poll, io::Error> where T: AsyncWrite, B: Buf, { if let Some(frame) = self.pending.take() { if !dst.poll_ready()?.is_ready() { self.pending = Some(frame); return Ok(Async::NotReady); } let reason = frame.reason(); dst.buffer(frame.into()) .ok() .expect("invalid GOAWAY frame"); return Ok(Async::Ready(Some(reason))); } else if self.should_close_now() { return Ok(Async::Ready(self.going_away_reason())); } Ok(Async::Ready(None)) } } h2-0.1.26/src/proto/mod.rs010066400017500001750000000015501347426626300134310ustar0000000000000000mod connection; mod error; mod go_away; mod peer; mod ping_pong; mod settings; mod streams; pub(crate) use self::connection::{Config, Connection}; pub(crate) use self::error::Error; pub(crate) use self::peer::{Peer, Dyn as DynPeer}; pub(crate) use self::ping_pong::UserPings; pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams}; pub(crate) use self::streams::{PollReset, Prioritized, Open}; use codec::Codec; use self::go_away::GoAway; use self::ping_pong::PingPong; use self::settings::Settings; use frame::{self, Frame}; use futures::{task, Async, Poll}; use futures::task::Task; use bytes::Buf; use tokio_io::AsyncWrite; pub type PingPayload = [u8; 8]; pub type WindowSize = u32; // Constants pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; h2-0.1.26/src/proto/peer.rs010066400017500001750000000047221350200143700135670ustar0000000000000000use codec::RecvError; use error::Reason; use frame::{Pseudo, StreamId}; use proto::Open; use http::{HeaderMap, Request, Response}; use std::fmt; /// Either a Client or a Server pub(crate) trait Peer { /// Message type polled from the transport type Poll: fmt::Debug; fn dyn() -> Dyn; fn is_server() -> bool; fn convert_poll_message( pseudo: Pseudo, fields: HeaderMap, 
stream_id: StreamId ) -> Result; fn is_local_init(id: StreamId) -> bool { assert!(!id.is_zero()); Self::is_server() == id.is_server_initiated() } } /// A dynamic representation of `Peer`. /// /// This is used internally to avoid incurring a generic on all internal types. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub(crate) enum Dyn { Client, Server, } #[derive(Debug)] pub enum PollMessage { Client(Response<()>), Server(Request<()>), } // ===== impl Dyn ===== impl Dyn { pub fn is_server(&self) -> bool { *self == Dyn::Server } pub fn is_local_init(&self, id: StreamId) -> bool { assert!(!id.is_zero()); self.is_server() == id.is_server_initiated() } pub fn convert_poll_message( &self, pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId ) -> Result { if self.is_server() { ::server::Peer::convert_poll_message(pseudo, fields, stream_id) .map(PollMessage::Server) } else { ::client::Peer::convert_poll_message(pseudo, fields, stream_id) .map(PollMessage::Client) } } /// Returns true if the remote peer can initiate a stream with the given ID. pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), RecvError> { if self.is_server() { // Ensure that the ID is a valid client initiated ID if mode.is_push_promise() || !id.is_client_initiated() { proto_err!(conn: "cannot open stream {:?} - not client initiated", id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } Ok(()) } else { // Ensure that the ID is a valid server initiated ID if !mode.is_push_promise() || !id.is_server_initiated() { proto_err!(conn: "cannot open stream {:?} - not server initiated", id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } Ok(()) } } } h2-0.1.26/src/proto/ping_pong.rs010066400017500001750000000172101343264346100146230ustar0000000000000000use codec::Codec; use frame::Ping; use proto::{self, PingPayload}; use bytes::Buf; use futures::{Async, Poll}; use futures::task::AtomicTask; use std::io; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::AsyncWrite; /// Acknowledges ping requests from the remote. #[derive(Debug)] pub(crate) struct PingPong { pending_ping: Option, pending_pong: Option, user_pings: Option, } #[derive(Debug)] pub(crate) struct UserPings(Arc); #[derive(Debug)] struct UserPingsRx(Arc); #[derive(Debug)] struct UserPingsInner { state: AtomicUsize, /// Task to wake up the main `Connection`. ping_task: AtomicTask, /// Task to wake up `share::PingPong::poll_pong`. pong_task: AtomicTask, } #[derive(Debug)] struct PendingPing { payload: PingPayload, sent: bool, } /// Status returned from `PingPong::recv_ping`. #[derive(Debug)] pub(crate) enum ReceivedPing { MustAck, Unknown, Shutdown, } /// No user ping pending. const USER_STATE_EMPTY: usize = 0; /// User has called `send_ping`, but PING hasn't been written yet. const USER_STATE_PENDING_PING: usize = 1; /// User PING has been written, waiting for PONG. const USER_STATE_PENDING_PONG: usize = 2; /// We've received user PONG, waiting for user to `poll_pong`. const USER_STATE_RECEIVED_PONG: usize = 3; /// The connection is closed. const USER_STATE_CLOSED: usize = 4; // ===== impl PingPong ===== impl PingPong { pub(crate) fn new() -> Self { PingPong { pending_ping: None, pending_pong: None, user_pings: None, } } /// Can only be called once. If called a second time, returns `None`. 
pub(crate) fn take_user_pings(&mut self) -> Option { if self.user_pings.is_some() { return None; } let user_pings = Arc::new(UserPingsInner { state: AtomicUsize::new(USER_STATE_EMPTY), ping_task: AtomicTask::new(), pong_task: AtomicTask::new(), }); self.user_pings = Some(UserPingsRx(user_pings.clone())); Some(UserPings(user_pings)) } pub(crate) fn ping_shutdown(&mut self) { assert!(self.pending_ping.is_none()); self.pending_ping = Some(PendingPing { payload: Ping::SHUTDOWN, sent: false, }); } /// Process a ping pub(crate) fn recv_ping(&mut self, ping: Ping) -> ReceivedPing { // The caller should always check that `send_pongs` returns ready before // calling `recv_ping`. assert!(self.pending_pong.is_none()); if ping.is_ack() { if let Some(pending) = self.pending_ping.take() { if &pending.payload == ping.payload() { assert_eq!( &pending.payload, &Ping::SHUTDOWN, "pending_ping should be for shutdown", ); trace!("recv PING SHUTDOWN ack"); return ReceivedPing::Shutdown; } // if not the payload we expected, put it back. self.pending_ping = Some(pending); } if let Some(ref users) = self.user_pings { if ping.payload() == &Ping::USER && users.receive_pong() { trace!("recv PING USER ack"); return ReceivedPing::Unknown; } } // else we were acked a ping we didn't send? // The spec doesn't require us to do anything about this, // so for resiliency, just ignore it for now. warn!("recv PING ack that we never sent: {:?}", ping); ReceivedPing::Unknown } else { // Save the ping's payload to be sent as an acknowledgement. self.pending_pong = Some(ping.into_payload()); ReceivedPing::MustAck } } /// Send any pending pongs. pub(crate) fn send_pending_pong(&mut self, dst: &mut Codec) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { if let Some(pong) = self.pending_pong.take() { if !dst.poll_ready()?.is_ready() { self.pending_pong = Some(pong); return Ok(Async::NotReady); } dst.buffer(Ping::pong(pong).into()) .expect("invalid pong frame"); } Ok(Async::Ready(())) } /// Send any pending pings. pub(crate) fn send_pending_ping(&mut self, dst: &mut Codec) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { if let Some(ref mut ping) = self.pending_ping { if !ping.sent { if !dst.poll_ready()?.is_ready() { return Ok(Async::NotReady); } dst.buffer(Ping::new(ping.payload).into()) .expect("invalid ping frame"); ping.sent = true; } } else if let Some(ref users) = self.user_pings { if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING { if !dst.poll_ready()?.is_ready() { return Ok(Async::NotReady); } dst.buffer(Ping::new(Ping::USER).into()) .expect("invalid ping frame"); users.0.state.store(USER_STATE_PENDING_PONG, Ordering::Release); } else { users.0.ping_task.register(); } } Ok(Async::Ready(())) } } impl ReceivedPing { pub(crate) fn is_shutdown(&self) -> bool { match *self { ReceivedPing::Shutdown => true, _ => false, } } } // ===== impl UserPings ===== impl UserPings { pub(crate) fn send_ping(&self) -> Result<(), Option> { let prev = self.0.state.compare_and_swap( USER_STATE_EMPTY, // current USER_STATE_PENDING_PING, // new Ordering::AcqRel, ); match prev { USER_STATE_EMPTY => { self.0.ping_task.notify(); Ok(()) }, USER_STATE_CLOSED => { Err(Some(broken_pipe().into())) } _ => { // Was already pending, user error! Err(None) } } } pub(crate) fn poll_pong(&self) -> Poll<(), proto::Error> { // Must register before checking state, in case state were to change // before we could register, and then the ping would just be lost. 
self.0.pong_task.register(); let prev = self.0.state.compare_and_swap( USER_STATE_RECEIVED_PONG, // current USER_STATE_EMPTY, // new Ordering::AcqRel, ); match prev { USER_STATE_RECEIVED_PONG => Ok(Async::Ready(())), USER_STATE_CLOSED => Err(broken_pipe().into()), _ => Ok(Async::NotReady), } } } // ===== impl UserPingsRx ===== impl UserPingsRx { fn receive_pong(&self) -> bool { let prev = self.0.state.compare_and_swap( USER_STATE_PENDING_PONG, // current USER_STATE_RECEIVED_PONG, // new Ordering::AcqRel, ); if prev == USER_STATE_PENDING_PONG { self.0.pong_task.notify(); true } else { false } } } impl Drop for UserPingsRx { fn drop(&mut self) { self.0.state.store(USER_STATE_CLOSED, Ordering::Release); self.0.pong_task.notify(); } } fn broken_pipe() -> io::Error { io::ErrorKind::BrokenPipe.into() } h2-0.1.26/src/proto/settings.rs010066400017500001750000000036011351644257100145040ustar0000000000000000use codec::RecvError; use frame; use proto::*; #[derive(Debug)] pub(crate) struct Settings { /// Received SETTINGS frame pending processing. The ACK must be written to /// the socket first then the settings applied **before** receiving any /// further frames. pending: Option, } impl Settings { pub fn new() -> Self { Settings { pending: None, } } pub fn recv_settings(&mut self, frame: frame::Settings) { if frame.is_ack() { debug!("received remote settings ack"); // TODO: handle acks } else { assert!(self.pending.is_none()); self.pending = Some(frame); } } pub fn send_pending_ack( &mut self, dst: &mut Codec, streams: &mut Streams, ) -> Poll<(), RecvError> where T: AsyncWrite, B: Buf, C: Buf, P: Peer, { trace!("send_pending_ack; pending={:?}", self.pending); if let Some(ref settings) = self.pending { if !dst.poll_ready()?.is_ready() { trace!("failed to send ACK"); return Ok(Async::NotReady); } // Create an ACK settings frame let frame = frame::Settings::ack(); // Buffer the settings frame dst.buffer(frame.into()) .ok() .expect("invalid settings frame"); trace!("ACK sent; applying settings"); if let Some(val) = settings.max_frame_size() { dst.set_max_send_frame_size(val as usize); } if let Some(val) = settings.header_table_size() { dst.set_max_send_header_table_size(val as usize); } streams.apply_remote_settings(settings)?; } self.pending = None; Ok(().into()) } } h2-0.1.26/src/proto/streams/buffer.rs010066400017500001750000000050141330607744600155750ustar0000000000000000use slab::Slab; /// Buffers frames for multiple streams. #[derive(Debug)] pub struct Buffer { slab: Slab>, } /// A sequence of frames in a `Buffer` #[derive(Debug)] pub struct Deque { indices: Option, } /// Tracks the head & tail for a sequence of frames in a `Buffer`. 
#[derive(Debug, Default, Copy, Clone)] struct Indices { head: usize, tail: usize, } #[derive(Debug)] struct Slot { value: T, next: Option, } impl Buffer { pub fn new() -> Self { Buffer { slab: Slab::new(), } } } impl Deque { pub fn new() -> Self { Deque { indices: None, } } pub fn is_empty(&self) -> bool { self.indices.is_none() } pub fn push_back(&mut self, buf: &mut Buffer, value: T) { let key = buf.slab.insert(Slot { value, next: None, }); match self.indices { Some(ref mut idxs) => { buf.slab[idxs.tail].next = Some(key); idxs.tail = key; }, None => { self.indices = Some(Indices { head: key, tail: key, }); }, } } pub fn push_front(&mut self, buf: &mut Buffer, value: T) { let key = buf.slab.insert(Slot { value, next: None, }); match self.indices { Some(ref mut idxs) => { buf.slab[key].next = Some(idxs.head); idxs.head = key; }, None => { self.indices = Some(Indices { head: key, tail: key, }); }, } } pub fn pop_front(&mut self, buf: &mut Buffer) -> Option { match self.indices { Some(mut idxs) => { let mut slot = buf.slab.remove(idxs.head); if idxs.head == idxs.tail { assert!(slot.next.is_none()); self.indices = None; } else { idxs.head = slot.next.take().unwrap(); self.indices = Some(idxs); } return Some(slot.value); }, None => None, } } pub fn peek_front<'a, T>(&self, buf: &'a Buffer) -> Option<&'a T> { match self.indices { Some(idxs) => Some(&buf.slab[idxs.head].value), None => None, } } } h2-0.1.26/src/proto/streams/counts.rs010066400017500001750000000135621347526040400156410ustar0000000000000000use super::*; use std::usize; #[derive(Debug)] pub(super) struct Counts { /// Acting as a client or server. This allows us to track which values to /// inc / dec. peer: peer::Dyn, /// Maximum number of locally initiated streams max_send_streams: usize, /// Current number of remote initiated streams num_send_streams: usize, /// Maximum number of remote initiated streams max_recv_streams: usize, /// Current number of locally initiated streams num_recv_streams: usize, /// Maximum number of pending locally reset streams max_reset_streams: usize, /// Current number of pending locally reset streams num_reset_streams: usize, } impl Counts { /// Create a new `Counts` using the provided configuration values. pub fn new(peer: peer::Dyn, config: &Config) -> Self { Counts { peer, max_send_streams: config.initial_max_send_streams, num_send_streams: 0, max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX), num_recv_streams: 0, max_reset_streams: config.local_reset_max, num_reset_streams: 0, } } /// Returns the current peer pub fn peer(&self) -> peer::Dyn { self.peer } pub fn has_streams(&self) -> bool { self.num_send_streams != 0 || self.num_recv_streams != 0 } /// Returns true if the receive stream concurrency can be incremented pub fn can_inc_num_recv_streams(&self) -> bool { self.max_recv_streams > self.num_recv_streams } /// Increments the number of concurrent receive streams. /// /// # Panics /// /// Panics on failure as this should have been validated before hand. pub fn inc_num_recv_streams(&mut self, stream: &mut store::Ptr) { assert!(self.can_inc_num_recv_streams()); assert!(!stream.is_counted); // Increment the number of remote initiated streams self.num_recv_streams += 1; stream.is_counted = true; } /// Returns true if the send stream concurrency can be incremented pub fn can_inc_num_send_streams(&self) -> bool { self.max_send_streams > self.num_send_streams } /// Increments the number of concurrent send streams. 
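    ///
    /// Mirrors `inc_num_recv_streams` above: the stream is marked `is_counted`
    /// so that `transition_after` can later hand it to `dec_num_streams`, which
    /// picks the send or receive counter based on `peer.is_local_init(stream.id)`.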
/// /// # Panics /// /// Panics on failure as this should have been validated before hand. pub fn inc_num_send_streams(&mut self, stream: &mut store::Ptr) { assert!(self.can_inc_num_send_streams()); assert!(!stream.is_counted); // Increment the number of remote initiated streams self.num_send_streams += 1; stream.is_counted = true; } /// Returns true if the number of pending reset streams can be incremented. pub fn can_inc_num_reset_streams(&self) -> bool { self.max_reset_streams > self.num_reset_streams } /// Increments the number of pending reset streams. /// /// # Panics /// /// Panics on failure as this should have been validated before hand. pub fn inc_num_reset_streams(&mut self) { assert!(self.can_inc_num_reset_streams()); self.num_reset_streams += 1; } pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { if let Some(val) = settings.max_concurrent_streams() { self.max_send_streams = val as usize; } } /// Run a block of code that could potentially transition a stream's state. /// /// If the stream state transitions to closed, this function will perform /// all necessary cleanup. /// /// TODO: Is this function still needed? pub fn transition(&mut self, mut stream: store::Ptr, f: F) -> U where F: FnOnce(&mut Self, &mut store::Ptr) -> U, { // TODO: Does this need to be computed before performing the action? let is_pending_reset = stream.is_pending_reset_expiration(); // Run the action let ret = f(self, &mut stream); self.transition_after(stream, is_pending_reset); ret } // TODO: move this to macro? pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { trace!("transition_after; stream={:?}; state={:?}; is_closed={:?}; \ pending_send_empty={:?}; buffered_send_data={}; \ num_recv={}; num_send={}", stream.id, stream.state, stream.is_closed(), stream.pending_send.is_empty(), stream.buffered_send_data, self.num_recv_streams, self.num_send_streams); if stream.is_closed() { if !stream.is_pending_reset_expiration() { stream.unlink(); if is_reset_counted { self.dec_num_reset_streams(); } } if stream.is_counted { trace!("dec_num_streams; stream={:?}", stream.id); // Decrement the number of active streams. self.dec_num_streams(&mut stream); } } // Release the stream if it requires releasing if stream.is_released() { stream.remove(); } } fn dec_num_streams(&mut self, stream: &mut store::Ptr) { assert!(stream.is_counted); if self.peer.is_local_init(stream.id) { assert!(self.num_send_streams > 0); self.num_send_streams -= 1; stream.is_counted = false; } else { assert!(self.num_recv_streams > 0); self.num_recv_streams -= 1; stream.is_counted = false; } } fn dec_num_reset_streams(&mut self) { assert!(self.num_reset_streams > 0); self.num_reset_streams -= 1; } } impl Drop for Counts { fn drop(&mut self) { use std::thread; if !thread::panicking() { debug_assert!(!self.has_streams()); } } } h2-0.1.26/src/proto/streams/flow_control.rs010066400017500001750000000162141347426626300170420ustar0000000000000000use frame::Reason; use proto::{WindowSize, MAX_WINDOW_SIZE}; use std::fmt; // We don't want to send WINDOW_UPDATE frames for tiny changes, but instead // aggregate them when the changes are significant. Many implementations do // this by keeping a "ratio" of the update version the allowed window size. // // While some may wish to represent this ratio as percentage, using a f32, // we skip having to deal with float math and stick to integers. To do so, // the "ratio" is represented by 2 i32s, split into the numerator and // denominator. 
For example, a 50% ratio is simply represented as 1/2. // // An example applying this ratio: If a stream has an allowed window size of // 100 bytes, WINDOW_UPDATE frames are scheduled when the unclaimed change // becomes greater than 1/2, or 50 bytes. const UNCLAIMED_NUMERATOR: i32 = 1; const UNCLAIMED_DENOMINATOR: i32 = 2; #[test] fn sanity_unclaimed_ratio() { assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR); assert!(UNCLAIMED_NUMERATOR >= 0); assert!(UNCLAIMED_DENOMINATOR > 0); } #[derive(Copy, Clone, Debug)] pub struct FlowControl { /// Window the peer knows about. /// /// This can go negative if a SETTINGS_INITIAL_WINDOW_SIZE is received. /// /// For example, say the peer sends a request and uses 32kb of the window. /// We send a SETTINGS_INITIAL_WINDOW_SIZE of 16kb. The peer has to adjust /// its understanding of the capacity of the window, and that would be: /// /// ```notrust /// default (64kb) - used (32kb) - settings_diff (64kb - 16kb): -16kb /// ``` window_size: Window, /// Window that we know about. /// /// This can go negative if a user declares a smaller target window than /// the peer knows about. available: Window, } impl FlowControl { pub fn new() -> FlowControl { FlowControl { window_size: Window(0), available: Window(0), } } /// Returns the window size as known by the peer pub fn window_size(&self) -> WindowSize { self.window_size.as_size() } /// Returns the window size available to the consumer pub fn available(&self) -> Window { self.available } /// Returns true if there is unavailable window capacity pub fn has_unavailable(&self) -> bool { if self.window_size < 0 { return false; } self.window_size > self.available } pub fn claim_capacity(&mut self, capacity: WindowSize) { self.available -= capacity; } pub fn assign_capacity(&mut self, capacity: WindowSize) { self.available += capacity; } /// If a WINDOW_UPDATE frame should be sent, returns a positive number /// representing the increment to be used. /// /// If there is no available bytes to be reclaimed, or the number of /// available bytes does not reach the threshold, this returns `None`. /// /// This represents pending outbound WINDOW_UPDATE frames. pub fn unclaimed_capacity(&self) -> Option { let available = self.available; if self.window_size >= available { return None; } let unclaimed = available.0 - self.window_size.0; let threshold = self.window_size.0 / UNCLAIMED_DENOMINATOR * UNCLAIMED_NUMERATOR; if unclaimed < threshold { None } else { Some(unclaimed as WindowSize) } } /// Increase the window size. /// /// This is called after receiving a WINDOW_UPDATE frame pub fn inc_window(&mut self, sz: WindowSize) -> Result<(), Reason> { let (val, overflow) = self.window_size.0.overflowing_add(sz as i32); if overflow { return Err(Reason::FLOW_CONTROL_ERROR); } if val > MAX_WINDOW_SIZE as i32 { return Err(Reason::FLOW_CONTROL_ERROR); } trace!( "inc_window; sz={}; old={}; new={}", sz, self.window_size, val ); self.window_size = Window(val); Ok(()) } /// Decrement the window size. /// /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_window(&mut self, sz: WindowSize) { trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, self.available ); // This should not be able to overflow `window_size` from the bottom. self.window_size -= sz; } /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. 
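    ///
    /// Both `window_size` (what the peer believes remains) and `available`
    /// (what has been handed out locally) shrink by `sz`, so the unclaimed
    /// difference between them is unchanged by sending data alone.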
pub fn send_data(&mut self, sz: WindowSize) { trace!( "send_data; sz={}; window={}; available={}", sz, self.window_size, self.available ); // Ensure that the argument is correct assert!(sz <= self.window_size); // Update values self.window_size -= sz; self.available -= sz; } } /// The current capacity of a flow-controlled Window. /// /// This number can go negative when either side has used a certain amount /// of capacity when the other side advertises a reduction in size. /// /// This type tries to centralize the knowledge of addition and subtraction /// to this capacity, instead of having integer casts throughout the source. #[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] pub struct Window(i32); impl Window { pub fn as_size(&self) -> WindowSize { if self.0 < 0 { 0 } else { self.0 as WindowSize } } pub fn checked_size(&self) -> WindowSize { assert!(self.0 >= 0, "negative Window"); self.0 as WindowSize } } impl PartialEq for Window { fn eq(&self, other: &WindowSize) -> bool { if self.0 < 0 { false } else { (self.0 as WindowSize).eq(other) } } } impl PartialEq for WindowSize { fn eq(&self, other: &Window) -> bool { other.eq(self) } } impl PartialOrd for Window { fn partial_cmp(&self, other: &WindowSize) -> Option<::std::cmp::Ordering> { if self.0 < 0 { Some(::std::cmp::Ordering::Less) } else { (self.0 as WindowSize).partial_cmp(other) } } } impl PartialOrd for WindowSize { fn partial_cmp(&self, other: &Window) -> Option<::std::cmp::Ordering> { if other.0 < 0 { Some(::std::cmp::Ordering::Greater) } else { self.partial_cmp(&(other.0 as WindowSize)) } } } impl ::std::ops::SubAssign for Window { fn sub_assign(&mut self, other: WindowSize) { self.0 -= other as i32; } } impl ::std::ops::Add for Window { type Output = Self; fn add(self, other: WindowSize) -> Self::Output { Window(self.0 + other as i32) } } impl ::std::ops::AddAssign for Window { fn add_assign(&mut self, other: WindowSize) { self.0 += other as i32; } } impl fmt::Display for Window { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, f) } } h2-0.1.26/src/proto/streams/mod.rs010066400017500001750000000032021347403352700150760ustar0000000000000000mod buffer; mod counts; mod flow_control; mod prioritize; mod recv; mod send; mod state; mod store; mod stream; mod streams; pub(crate) use self::prioritize::Prioritized; pub(crate) use self::recv::Open; pub(crate) use self::send::PollReset; pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams}; use self::buffer::Buffer; use self::counts::Counts; use self::flow_control::FlowControl; use self::prioritize::Prioritize; use self::recv::Recv; use self::send::Send; use self::state::State; use self::store::Store; use self::stream::Stream; use frame::{StreamId, StreamIdOverflow}; use proto::*; use bytes::Bytes; use std::time::Duration; #[derive(Debug)] pub struct Config { /// Initial window size of locally initiated streams pub local_init_window_sz: WindowSize, /// Initial maximum number of locally initiated streams. /// After receiving a Settings frame from the remote peer, /// the connection will overwrite this value with the /// MAX_CONCURRENT_STREAMS specified in the frame. 
pub initial_max_send_streams: usize, /// The stream ID to start the next local stream with pub local_next_stream_id: StreamId, /// If the local peer is willing to receive push promises pub local_push_enabled: bool, /// How long a locally reset stream should ignore frames pub local_reset_duration: Duration, /// Maximum number of locally reset streams to keep at a time pub local_reset_max: usize, /// Initial window size of remote initiated streams pub remote_init_window_sz: WindowSize, /// Maximum number of remote initiated streams pub remote_max_initiated: Option, } h2-0.1.26/src/proto/streams/prioritize.rs010066400017500001750000000771761347432741600165470ustar0000000000000000use super::*; use super::store::Resolve; use frame::{Reason, StreamId}; use codec::UserError; use codec::UserError::*; use bytes::buf::Take; use std::{cmp, fmt, mem}; use std::io; /// # Warning /// /// Queued streams are ordered by stream ID, as we need to ensure that /// lower-numbered streams are sent headers before higher-numbered ones. /// This is because "idle" stream IDs – those which have been initiated but /// have yet to receive frames – will be implicitly closed on receipt of a /// frame on a higher stream ID. If these queues was not ordered by stream /// IDs, some mechanism would be necessary to ensure that the lowest-numberedh] /// idle stream is opened first. #[derive(Debug)] pub(super) struct Prioritize { /// Queue of streams waiting for socket capacity to send a frame. pending_send: store::Queue, /// Queue of streams waiting for window capacity to produce data. pending_capacity: store::Queue, /// Streams waiting for capacity due to max concurrency /// /// The `SendRequest` handle is `Clone`. This enables initiating requests /// from many tasks. However, offering this capability while supporting /// backpressure at some level is tricky. If there are many `SendRequest` /// handles and a single stream becomes available, which handle gets /// assigned that stream? Maybe that handle is no longer ready to send a /// request. /// /// The strategy used is to allow each `SendRequest` handle one buffered /// request. A `SendRequest` handle is ready to send a request if it has no /// associated buffered requests. This is the same strategy as `mpsc` in the /// futures library. pending_open: store::Queue, /// Connection level flow control governing sent data flow: FlowControl, /// Stream ID of the last stream opened. last_opened_id: StreamId, /// What `DATA` frame is currently being sent in the codec. in_flight_data_frame: InFlightData, } #[derive(Debug, Eq, PartialEq)] enum InFlightData { /// There is no `DATA` frame in flight. Nothing, /// There is a `DATA` frame in flight belonging to the given stream. DataFrame(store::Key), /// There was a `DATA` frame, but the stream's queue was since cleared. 
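    ///
    /// `clear_queue` switches to this variant when the owning stream is torn
    /// down while its frame is still in the codec; `reclaim_frame` then drops
    /// the returned chunk instead of re-queuing it.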
Drop, } pub(crate) struct Prioritized { // The buffer inner: Take, end_of_stream: bool, // The stream that this is associated with stream: store::Key, } // ===== impl Prioritize ===== impl Prioritize { pub fn new(config: &Config) -> Prioritize { let mut flow = FlowControl::new(); flow.inc_window(config.remote_init_window_sz) .ok() .expect("invalid initial window size"); flow.assign_capacity(config.remote_init_window_sz); trace!("Prioritize::new; flow={:?}", flow); Prioritize { pending_send: store::Queue::new(), pending_capacity: store::Queue::new(), pending_open: store::Queue::new(), flow: flow, last_opened_id: StreamId::ZERO, in_flight_data_frame: InFlightData::Nothing, } } /// Queue a frame to be sent to the remote pub fn queue_frame( &mut self, frame: Frame, buffer: &mut Buffer>, stream: &mut store::Ptr, task: &mut Option, ) { // Queue the frame in the buffer stream.pending_send.push_back(buffer, frame); self.schedule_send(stream, task); } pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { // If the stream is waiting to be opened, nothing more to do. if !stream.is_pending_open { trace!("schedule_send; {:?}", stream.id); // Queue the stream self.pending_send.push(stream); // Notify the connection. if let Some(task) = task.take() { task.notify(); } } } pub fn queue_open(&mut self, stream: &mut store::Ptr) { self.pending_open.push(stream); } /// Send a data frame pub fn send_data( &mut self, frame: frame::Data, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> where B: Buf, { let sz = frame.payload().remaining(); if sz > MAX_WINDOW_SIZE as usize { return Err(UserError::PayloadTooBig); } let sz = sz as WindowSize; if !stream.state.is_send_streaming() { if stream.state.is_closed() { return Err(InactiveStreamId); } else { return Err(UnexpectedFrameType); } } // Update the buffered data counter stream.buffered_send_data += sz; trace!( "send_data; sz={}; buffered={}; requested={}", sz, stream.buffered_send_data, stream.requested_send_capacity ); // Implicitly request more send capacity if not enough has been // requested yet. if stream.requested_send_capacity < stream.buffered_send_data { // Update the target requested capacity stream.requested_send_capacity = stream.buffered_send_data; self.try_assign_capacity(stream); } if frame.is_end_stream() { stream.state.send_close(); self.reserve_capacity(0, stream, counts); } trace!( "send_data (2); available={}; buffered={}", stream.send_flow.available(), stream.buffered_send_data ); // The `stream.buffered_send_data == 0` check is here so that, if a zero // length data frame is queued to the front (there is no previously // queued data), it gets sent out immediately even if there is no // available send window. // // Sending out zero length data frames can be done to signal // end-of-stream. // if stream.send_flow.available() > 0 || stream.buffered_send_data == 0 { // The stream currently has capacity to send the data frame, so // queue it up and notify the connection task. self.queue_frame(frame.into(), buffer, stream, task); } else { // The stream has no capacity to send the frame now, save it but // don't notify the connection task. Once additional capacity // becomes available, the frame will be flushed. 
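            //
            // (Capacity later arrives via `recv_stream_window_update` or
            // `recv_connection_window_update`; both paths reach
            // `try_assign_capacity`, which re-queues streams holding buffered
            // data onto `pending_send`.)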
stream .pending_send .push_back(buffer, frame.into()); } Ok(()) } /// Request capacity to send data pub fn reserve_capacity( &mut self, capacity: WindowSize, stream: &mut store::Ptr, counts: &mut Counts) { trace!( "reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", stream.id, capacity, capacity + stream.buffered_send_data, stream.requested_send_capacity ); // Actual capacity is `capacity` + the current amount of buffered data. // If it were less, then we could never send out the buffered data. let capacity = capacity + stream.buffered_send_data; if capacity == stream.requested_send_capacity { // Nothing to do } else if capacity < stream.requested_send_capacity { // Update the target requested capacity stream.requested_send_capacity = capacity; // Currently available capacity assigned to the stream let available = stream.send_flow.available().as_size(); // If the stream has more assigned capacity than requested, reclaim // some for the connection if available > capacity { let diff = available - capacity; stream.send_flow.claim_capacity(diff); self.assign_connection_capacity(diff, stream, counts); } } else { // If trying to *add* capacity, but the stream send side is closed, // there's nothing to be done. if stream.state.is_send_closed() { return; } // Update the target requested capacity stream.requested_send_capacity = capacity; // Try to assign additional capacity to the stream. If none is // currently available, the stream will be queued to receive some // when more becomes available. self.try_assign_capacity(stream); } } pub fn recv_stream_window_update( &mut self, inc: WindowSize, stream: &mut store::Ptr, ) -> Result<(), Reason> { trace!( "recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", stream.id, stream.state, inc, stream.send_flow ); if stream.state.is_send_closed() && stream.buffered_send_data == 0 { // We can't send any data, so don't bother doing anything else. return Ok(()); } // Update the stream level flow control. 
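        // (`inc_window` rejects increments that overflow the i32 window or
        // push it past MAX_WINDOW_SIZE, surfacing FLOW_CONTROL_ERROR as
        // required by RFC 7540, Section 6.9.1.)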
stream.send_flow.inc_window(inc)?; // If the stream is waiting on additional capacity, then this will // assign it (if available on the connection) and notify the producer self.try_assign_capacity(stream); Ok(()) } pub fn recv_connection_window_update( &mut self, inc: WindowSize, store: &mut Store, counts: &mut Counts, ) -> Result<(), Reason> { // Update the connection's window self.flow.inc_window(inc)?; self.assign_connection_capacity(inc, store, counts); Ok(()) } /// Reclaim all capacity assigned to the stream and re-assign it to the /// connection pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { let available = stream.send_flow.available().as_size(); stream.send_flow.claim_capacity(available); // Re-assign all capacity to the connection self.assign_connection_capacity(available, stream, counts); } /// Reclaim just reserved capacity, not buffered capacity, and re-assign /// it to the connection pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { // only reclaim requested capacity that isn't already buffered if stream.requested_send_capacity > stream.buffered_send_data { let reserved = stream.requested_send_capacity - stream.buffered_send_data; stream.send_flow.claim_capacity(reserved); self.assign_connection_capacity(reserved, stream, counts); } } pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_capacity.pop(store) { counts.transition(stream, |_, stream| { trace!("clear_pending_capacity; stream={:?}", stream.id); }) } } pub fn assign_connection_capacity( &mut self, inc: WindowSize, store: &mut R, counts: &mut Counts) where R: Resolve, { trace!("assign_connection_capacity; inc={}", inc); self.flow.assign_capacity(inc); // Assign newly acquired capacity to streams pending capacity. while self.flow.available() > 0 { let stream = match self.pending_capacity.pop(store) { Some(stream) => stream, None => return, }; // Streams pending capacity may have been reset before capacity // became available. In that case, the stream won't want any // capacity, and so we shouldn't "transition" on it, but just evict // it and continue the loop. if !(stream.state.is_send_streaming() || stream.buffered_send_data > 0) { continue; } counts.transition(stream, |_, mut stream| { // Try to assign capacity to the stream. This will also re-queue the // stream if there isn't enough connection level capacity to fulfill // the capacity request. self.try_assign_capacity(&mut stream); }) } } /// Request capacity to send data fn try_assign_capacity(&mut self, stream: &mut store::Ptr) { let total_requested = stream.requested_send_capacity; // Total requested should never go below actual assigned // (Note: the window size can go lower than assigned) debug_assert!(total_requested >= stream.send_flow.available()); // The amount of additional capacity that the stream requests. // Don't assign more than the window has available! 
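        //
        // Worked example (illustrative numbers): with requested=100 bytes,
        // available=20 and a stream window of 60, `additional` comes out as
        // min(100 - 20, 60 - 20) = 40; the remaining 40 bytes of the request
        // stay unassigned until the peer widens the stream window.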
let additional = cmp::min( total_requested - stream.send_flow.available().as_size(), // Can't assign more than what is available stream.send_flow.window_size() - stream.send_flow.available().as_size(), ); trace!( "try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}", stream.id, total_requested, additional, stream.buffered_send_data, stream.send_flow.window_size(), self.flow.available() ); if additional == 0 { // Nothing more to do return; } // If the stream has requested capacity, then it must be in the // streaming state (more data could be sent) or there is buffered data // waiting to be sent. debug_assert!( stream.state.is_send_streaming() || stream.buffered_send_data > 0, "state={:?}", stream.state ); // The amount of currently available capacity on the connection let conn_available = self.flow.available().as_size(); // First check if capacity is immediately available if conn_available > 0 { // The amount of capacity to assign to the stream // TODO: Should prioritization factor into this? let assign = cmp::min(conn_available, additional); trace!( " assigning; stream={:?}, capacity={}", stream.id, assign, ); // Assign the capacity to the stream stream.assign_capacity(assign); // Claim the capacity from the connection self.flow.claim_capacity(assign); } trace!( "try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}", stream.send_flow.available(), stream.requested_send_capacity, stream.buffered_send_data, stream.send_flow.has_unavailable() ); if stream.send_flow.available() < stream.requested_send_capacity { if stream.send_flow.has_unavailable() { // The stream requires additional capacity and the stream's // window has available capacity, but the connection window // does not. // // In this case, the stream needs to be queued up for when the // connection has more capacity. self.pending_capacity.push(stream); } } // If data is buffered and the stream is not pending open, then // schedule the stream for execution // // Why do we not push into pending_send when the stream is in pending_open? // // We allow users to call send_request() which schedules a stream to be pending_open // if there is no room according to the concurrency limit (max_send_streams), and we // also allow data to be buffered for send with send_data() if there is no capacity for // the stream to send the data, which attempts to place the stream in pending_send. // If the stream is not open, we don't want the stream to be scheduled for // execution (pending_send). Note that if the stream is in pending_open, it will be // pushed to pending_send when there is room for an open stream. if stream.buffered_send_data > 0 && !stream.is_pending_open { // TODO: This assertion isn't *exactly* correct. There can still be // buffered send data while the stream's pending send queue is // empty. This can happen when a large data frame is in the process // of being **partially** sent. Once the window has been sent, the // data frame will be returned to the prioritization layer to be // re-scheduled. // // That said, it would be nice to figure out how to make this // assertion correctly. 
// // debug_assert!(!stream.pending_send.is_empty()); self.pending_send.push(stream); } } pub fn poll_complete( &mut self, buffer: &mut Buffer>, store: &mut Store, counts: &mut Counts, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { // Ensure codec is ready try_ready!(dst.poll_ready()); // Reclaim any frame that has previously been written self.reclaim_frame(buffer, store, dst); // The max frame length let max_frame_len = dst.max_send_frame_size(); trace!("poll_complete"); loop { self.schedule_pending_open(store, counts); match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { trace!("writing frame={:?}", frame); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); if let Frame::Data(ref frame) = frame { self.in_flight_data_frame = InFlightData::DataFrame(frame.payload().stream); } dst.buffer(frame).ok().expect("invalid frame"); // Ensure the codec is ready to try the loop again. try_ready!(dst.poll_ready()); // Because, always try to reclaim... self.reclaim_frame(buffer, store, dst); }, None => { // Try to flush the codec. try_ready!(dst.flush()); // This might release a data frame... if !self.reclaim_frame(buffer, store, dst) { return Ok(().into()); } // No need to poll ready as poll_complete() does this for // us... }, } } } /// Tries to reclaim a pending data frame from the codec. /// /// Returns true if a frame was reclaimed. /// /// When a data frame is written to the codec, it may not be written in its /// entirety (large chunks are split up into potentially many data frames). /// In this case, the stream needs to be reprioritized. fn reclaim_frame( &mut self, buffer: &mut Buffer>, store: &mut Store, dst: &mut Codec>, ) -> bool where B: Buf, { trace!("try reclaim frame"); // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { trace!( " -> reclaimed; frame={:?}; sz={}", frame, frame.payload().inner.get_ref().remaining() ); let mut eos = false; let key = frame.payload().stream; match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), InFlightData::Drop => { trace!("not reclaiming frame for cancelled stream"); return false; } InFlightData::DataFrame(k) => { debug_assert_eq!(k, key); } } let mut frame = frame.map(|prioritized| { // TODO: Ensure fully written eos = prioritized.end_of_stream; prioritized.inner.into_inner() }); if frame.payload().has_remaining() { let mut stream = store.resolve(key); if eos { frame.set_end_stream(true); } self.push_back_frame(frame.into(), buffer, &mut stream); return true; } } false } /// Push the frame to the front of the stream's deque, scheduling the /// stream if needed. fn push_back_frame(&mut self, frame: Frame, buffer: &mut Buffer>, stream: &mut store::Ptr) { // Push the frame to the front of the stream's deque stream.pending_send.push_front(buffer, frame); // If needed, schedule the sender if stream.send_flow.available() > 0 { debug_assert!(!stream.pending_send.is_empty()); self.pending_send.push(stream); } } pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { trace!("clear_queue; stream={:?}", stream.id); // TODO: make this more efficient? 
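        // (Each `pop_front` releases the frame's slot in the shared slab, so
        // the drain below is linear in the number of frames still queued for
        // this stream.)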
while let Some(frame) = stream.pending_send.pop_front(buffer) { trace!("dropping; frame={:?}", frame); } stream.buffered_send_data = 0; stream.requested_send_capacity = 0; if let InFlightData::DataFrame(key) = self.in_flight_data_frame { if stream.key() == key { // This stream could get cleaned up now - don't allow the buffered frame to get reclaimed. self.in_flight_data_frame = InFlightData::Drop; } } } pub fn clear_pending_send(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_send.pop(store) { let is_pending_reset = stream.is_pending_reset_expiration(); counts.transition_after(stream, is_pending_reset); } } pub fn clear_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_open.pop(store) { let is_pending_reset = stream.is_pending_reset_expiration(); counts.transition_after(stream, is_pending_reset); } } fn pop_frame( &mut self, buffer: &mut Buffer>, store: &mut Store, max_len: usize, counts: &mut Counts, ) -> Option>> where B: Buf, { trace!("pop_frame"); loop { match self.pending_send.pop(store) { Some(mut stream) => { trace!("pop_frame; stream={:?}; stream.state={:?}", stream.id, stream.state); // It's possible that this stream, besides having data to send, // is also queued to send a reset, and thus is already in the queue // to wait for "some time" after a reset. // // To be safe, we just always ask the stream. let is_pending_reset = stream.is_pending_reset_expiration(); trace!(" --> stream={:?}; is_pending_reset={:?};", stream.id, is_pending_reset); let frame = match stream.pending_send.pop_front(buffer) { Some(Frame::Data(mut frame)) => { // Get the amount of capacity remaining for stream's // window. let stream_capacity = stream.send_flow.available(); let sz = frame.payload().remaining(); trace!( " --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \ available={}; requested={}; buffered={};", frame.stream_id(), sz, frame.is_end_stream(), stream_capacity, stream.send_flow.available(), stream.requested_send_capacity, stream.buffered_send_data, ); // Zero length data frames always have capacity to // be sent. if sz > 0 && stream_capacity == 0 { trace!( " --> stream capacity is 0; requested={}", stream.requested_send_capacity ); // Ensure that the stream is waiting for // connection level capacity // // TODO: uncomment // debug_assert!(stream.is_pending_send_capacity); // The stream has no more capacity, this can // happen if the remote reduced the stream // window. In this case, we need to buffer the // frame and wait for a window update... stream .pending_send .push_front(buffer, frame.into()); continue; } // Only send up to the max frame length let len = cmp::min(sz, max_len); // Only send up to the stream's window capacity let len = cmp::min(len, stream_capacity.as_size() as usize) as WindowSize; // There *must* be be enough connection level // capacity at this point. debug_assert!(len <= self.flow.window_size()); trace!(" --> sending data frame; len={}", len); // Update the flow control trace!(" -- updating stream flow --"); stream.send_flow.send_data(len); // Decrement the stream's buffered data counter debug_assert!(stream.buffered_send_data >= len); stream.buffered_send_data -= len; stream.requested_send_capacity -= len; // Assign the capacity back to the connection that // was just consumed from the stream in the previous // line. 
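                            // Net effect on the connection flow controller:
                            // `assign_capacity` restores the `available` this
                            // stream had claimed, `send_data` then spends it
                            // again along with `window_size`, leaving
                            // `available` unchanged and `window_size` reduced
                            // by `len`.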
self.flow.assign_capacity(len); trace!(" -- updating connection flow --"); self.flow.send_data(len); // Wrap the frame's data payload to ensure that the // correct amount of data gets written. let eos = frame.is_end_stream(); let len = len as usize; if frame.payload().remaining() > len { frame.set_end_stream(false); } Frame::Data(frame.map(|buf| { Prioritized { inner: buf.take(len), end_of_stream: eos, stream: stream.key(), } })) }, Some(frame) => frame.map(|_| unreachable!( "Frame::map closure will only be called \ on DATA frames." ) ), None => { if let Some(reason) = stream.state.get_scheduled_reset() { stream.state.set_reset(reason); let frame = frame::Reset::new(stream.id, reason); Frame::Reset(frame) } else { // If the stream receives a RESET from the peer, it may have // had data buffered to be sent, but all the frames are cleared // in clear_queue(). Instead of doing O(N) traversal through queue // to remove, lets just ignore the stream here. trace!("removing dangling stream from pending_send"); // Since this should only happen as a consequence of `clear_queue`, // we must be in a closed state of some kind. debug_assert!(stream.state.is_closed()); counts.transition_after(stream, is_pending_reset); continue; } } }; trace!("pop_frame; frame={:?}", frame); if cfg!(debug_assertions) && stream.state.is_idle() { debug_assert!(stream.id > self.last_opened_id); self.last_opened_id = stream.id; } if !stream.pending_send.is_empty() || stream.state.is_scheduled_reset() { // TODO: Only requeue the sender IF it is ready to send // the next frame. i.e. don't requeue it if the next // frame is a data frame and the stream does not have // any more capacity. self.pending_send.push(&mut stream); } counts.transition_after(stream, is_pending_reset); return Some(frame); }, None => return None, } } } fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { trace!("schedule_pending_open"); // check for any pending open streams while counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); self.pending_send.push(&mut stream); stream.notify_send(); } else { return; } } } } // ===== impl Prioritized ===== impl Buf for Prioritized where B: Buf, { fn remaining(&self) -> usize { self.inner.remaining() } fn bytes(&self) -> &[u8] { self.inner.bytes() } fn advance(&mut self, cnt: usize) { self.inner.advance(cnt) } } impl fmt::Debug for Prioritized { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Prioritized") .field("remaining", &self.inner.get_ref().remaining()) .field("end_of_stream", &self.end_of_stream) .field("stream", &self.stream) .finish() } } h2-0.1.26/src/proto/streams/recv.rs010066400017500001750000001071421350546774000152710ustar0000000000000000use super::*; use {frame, proto}; use codec::{RecvError, UserError}; use frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE}; use http::{HeaderMap, Response, Request, Method}; use std::io; use std::time::{Duration, Instant}; #[derive(Debug)] pub(super) struct Recv { /// Initial window size of remote initiated streams init_window_sz: WindowSize, /// Connection level flow control governing received data flow: FlowControl, /// Amount of connection window capacity currently used by outstanding streams. 
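    ///
    /// Grows in `consume_connection_window` as DATA frames arrive and shrinks
    /// in `release_connection_capacity` once the user hands capacity back.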
in_flight_data: WindowSize, /// The lowest stream ID that is still idle next_stream_id: Result, /// The stream ID of the last processed stream last_processed_id: StreamId, /// Any streams with a higher ID are ignored. /// /// This starts as MAX, but is lowered when a GOAWAY is received. /// /// > After sending a GOAWAY frame, the sender can discard frames for /// > streams initiated by the receiver with identifiers higher than /// > the identified last stream. max_stream_id: StreamId, /// Streams that have pending window updates pending_window_updates: store::Queue, /// New streams to be accepted pending_accept: store::Queue, /// Locally reset streams that should be reaped when they expire pending_reset_expired: store::Queue, /// How long locally reset streams should ignore received frames reset_duration: Duration, /// Holds frames that are waiting to be read buffer: Buffer, /// Refused StreamId, this represents a frame that must be sent out. refused: Option, /// If push promises are allowed to be recevied. is_push_enabled: bool, } #[derive(Debug)] pub(super) enum Event { Headers(peer::PollMessage), Data(Bytes), Trailers(HeaderMap), } #[derive(Debug)] pub(super) enum RecvHeaderBlockError { Oversize(T), State(RecvError), } #[derive(Debug)] pub(crate) enum Open { PushPromise, Headers, } #[derive(Debug, Clone, Copy)] struct Indices { head: store::Key, tail: store::Key, } impl Recv { pub fn new(peer: peer::Dyn, config: &Config) -> Self { let next_stream_id = if peer.is_server() { 1 } else { 2 }; let mut flow = FlowControl::new(); // connections always have the default window size, regardless of // settings flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE) .expect("invalid initial remote window size"); flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE); Recv { init_window_sz: config.local_init_window_sz, flow: flow, in_flight_data: 0 as WindowSize, next_stream_id: Ok(next_stream_id.into()), pending_window_updates: store::Queue::new(), last_processed_id: StreamId::ZERO, max_stream_id: StreamId::MAX, pending_accept: store::Queue::new(), pending_reset_expired: store::Queue::new(), reset_duration: config.local_reset_duration, buffer: Buffer::new(), refused: None, is_push_enabled: config.local_push_enabled, } } /// Returns the initial receive window size pub fn init_window_sz(&self) -> WindowSize { self.init_window_sz } /// Returns the ID of the last processed stream pub fn last_processed_id(&self) -> StreamId { self.last_processed_id } /// Update state reflecting a new, remotely opened stream /// /// Returns the stream state if successful. `None` if refused pub fn open( &mut self, id: StreamId, mode: Open, counts: &mut Counts, ) -> Result, RecvError> { assert!(self.refused.is_none()); counts.peer().ensure_can_open(id, mode)?; let next_id = self.next_stream_id()?; if id < next_id { proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } self.next_stream_id = id.next_id(); if !counts.can_inc_num_recv_streams() { self.refused = Some(id); return Ok(None); } Ok(Some(id)) } /// Transition the stream state based on receiving headers /// /// The caller ensures that the frame represents headers and not trailers. 
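    ///
    /// Returns `RecvHeaderBlockError::Oversize` when the decoded header block
    /// exceeded SETTINGS_MAX_HEADER_LIST_SIZE; for a server receiving an
    /// initial request it carries a ready-made 431 response (see the
    /// `is_over_size` branch below).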
pub fn recv_headers( &mut self, frame: frame::Headers, stream: &mut store::Ptr, counts: &mut Counts, ) -> Result<(), RecvHeaderBlockError>> { trace!("opening stream; init_window={}", self.init_window_sz); let is_initial = stream.state.recv_open(frame.is_end_stream())?; if is_initial { // TODO: be smarter about this logic if frame.stream_id() > self.last_processed_id { self.last_processed_id = frame.stream_id(); } // Increment the number of concurrent streams counts.inc_num_recv_streams(stream); } if !stream.content_length.is_head() { use super::stream::ContentLength; use http::header; if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) { let content_length = match parse_u64(content_length.as_bytes()) { Ok(v) => v, Err(()) => { proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); return Err(RecvError::Stream { id: stream.id, reason: Reason::PROTOCOL_ERROR, }.into()) }, }; stream.content_length = ContentLength::Remaining(content_length); } } if frame.is_over_size() { // A frame is over size if the decoded header block was bigger than // SETTINGS_MAX_HEADER_LIST_SIZE. // // > A server that receives a larger header block than it is willing // > to handle can send an HTTP 431 (Request Header Fields Too // > Large) status code [RFC6585]. A client can discard responses // > that it cannot process. // // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. debug!( "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \ recv_headers: frame is over size; stream={:?}", stream.id ); return if counts.peer().is_server() && is_initial { let mut res = frame::Headers::new( stream.id, frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE), HeaderMap::new() ); res.set_end_stream(); Err(RecvHeaderBlockError::Oversize(Some(res))) } else { Err(RecvHeaderBlockError::Oversize(None)) }; } let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); let message = counts.peer().convert_poll_message(pseudo, fields, stream_id)?; // Push the frame onto the stream's recv buffer stream .pending_recv .push_back(&mut self.buffer, Event::Headers(message)); stream.notify_recv(); // Only servers can receive a headers frame that initiates the stream. // This is verified in `Streams` before calling this function. if counts.peer().is_server() { self.pending_accept.push(stream); } Ok(()) } /// Called by the server to get the request /// /// TODO: Should this fn return `Result`? pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { use super::peer::PollMessage::*; match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Headers(Server(request))) => request, _ => panic!(), } } /// Called by the client to get pushed response pub fn poll_pushed( &mut self, stream: &mut store::Ptr ) -> Poll, store::Key)>, proto::Error> { use super::peer::PollMessage::*; let mut ppp = stream.pending_push_promises.take(); let pushed = ppp.pop(stream.store_mut()).map( |mut pushed| match pushed.pending_recv.pop_front(&mut self.buffer) { Some(Event::Headers(Server(headers))) => Async::Ready(Some((headers, pushed.key()))), // When frames are pushed into the queue, it is verified that // the first frame is a HEADERS frame. 
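                // (That invariant is established by `recv_push_promise` below,
                // which seeds every promised stream with an initial
                // `Event::Headers`.)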
_ => panic!("Headers not set on pushed stream") } ); stream.pending_push_promises = ppp; if let Some(p) = pushed { Ok(p) } else { let is_open = stream.state.ensure_recv_open()?; if is_open { stream.recv_task = Some(task::current()); Ok(Async::NotReady) } else { Ok(Async::Ready(None)) } } } /// Called by the client to get the response pub fn poll_response( &mut self, stream: &mut store::Ptr, ) -> Poll, proto::Error> { use super::peer::PollMessage::*; // If the buffer is not empty, then the first frame must be a HEADERS // frame or the user violated the contract. match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Headers(Client(response))) => Ok(response.into()), Some(_) => panic!("poll_response called after response returned"), None => { stream.state.ensure_recv_open()?; stream.recv_task = Some(task::current()); Ok(Async::NotReady) }, } } /// Transition the stream based on receiving trailers pub fn recv_trailers( &mut self, frame: frame::Headers, stream: &mut store::Ptr, ) -> Result<(), RecvError> { // Transition the state stream.state.recv_close()?; if stream.ensure_content_length_zero().is_err() { proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};", stream.id); return Err(RecvError::Stream { id: stream.id, reason: Reason::PROTOCOL_ERROR, }); } let trailers = frame.into_fields(); // Push the frame onto the stream's recv buffer stream .pending_recv .push_back(&mut self.buffer, Event::Trailers(trailers)); stream.notify_recv(); Ok(()) } /// Releases capacity of the connection pub fn release_connection_capacity( &mut self, capacity: WindowSize, task: &mut Option, ) { trace!( "release_connection_capacity; size={}, connection in_flight_data={}", capacity, self.in_flight_data, ); // Decrement in-flight data self.in_flight_data -= capacity; // Assign capacity to connection self.flow.assign_capacity(capacity); if self.flow.unclaimed_capacity().is_some() { if let Some(task) = task.take() { task.notify(); } } } /// Releases capacity back to the connection & stream pub fn release_capacity( &mut self, capacity: WindowSize, stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { trace!("release_capacity; size={}", capacity); if capacity > stream.in_flight_recv_data { return Err(UserError::ReleaseCapacityTooBig); } self.release_connection_capacity(capacity, task); // Decrement in-flight data stream.in_flight_recv_data -= capacity; // Assign capacity to stream stream.recv_flow.assign_capacity(capacity); if stream.recv_flow.unclaimed_capacity().is_some() { // Queue the stream for sending the WINDOW_UPDATE frame. self.pending_window_updates.push(stream); if let Some(task) = task.take() { task.notify(); } } Ok(()) } /// Release any unclaimed capacity for a closed stream. pub fn release_closed_capacity( &mut self, stream: &mut store::Ptr, task: &mut Option, ) { debug_assert_eq!(stream.ref_count, 0); if stream.in_flight_recv_data == 0 { return; } trace!( "auto-release closed stream ({:?}) capacity: {:?}", stream.id, stream.in_flight_recv_data, ); self.release_connection_capacity( stream.in_flight_recv_data, task, ); stream.in_flight_recv_data = 0; self.clear_recv_buffer(stream); } /// Set the "target" connection window size. /// /// By default, all new connections start with 64kb of window size. As /// streams used and release capacity, we will send WINDOW_UPDATEs for the /// connection to bring it back up to the initial "target". 
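    /// (That default is DEFAULT_INITIAL_WINDOW_SIZE, 65,535 bytes, assigned to
    /// the connection flow controller in `Recv::new` above.)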
/// /// Setting a target means that we will try to tell the peer about /// WINDOW_UPDATEs so the peer knows it has about `target` window to use /// for the whole connection. /// /// The `task` is an optional parked task for the `Connection` that might /// be blocked on needing more window capacity. pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, self.flow.available(), self.in_flight_data, ); // The current target connection window is our `available` plus any // in-flight data reserved by streams. // // Update the flow controller with the difference between the new // target and the current target. let current = (self.flow.available() + self.in_flight_data).checked_size(); if target > current { self.flow.assign_capacity(target - current); } else { self.flow.claim_capacity(current - target); } // If changing the target capacity means we gained a bunch of capacity, // enough that we went over the update threshold, then schedule sending // a connection WINDOW_UPDATE. if self.flow.unclaimed_capacity().is_some() { if let Some(task) = task.take() { task.notify(); } } } pub fn body_is_empty(&self, stream: &store::Ptr) -> bool { if !stream.state.is_recv_closed() { return false; } stream .pending_recv .peek_front(&self.buffer) .map(|event| !event.is_data()) .unwrap_or(true) } pub fn is_end_stream(&self, stream: &store::Ptr) -> bool { if !stream.state.is_recv_closed() { return false; } stream .pending_recv .is_empty() } pub fn recv_data( &mut self, frame: frame::Data, stream: &mut store::Ptr, ) -> Result<(), RecvError> { let sz = frame.payload().len(); // This should have been enforced at the codec::FramedRead layer, so // this is just a sanity check. assert!(sz <= MAX_WINDOW_SIZE as usize); let sz = sz as WindowSize; let is_ignoring_frame = stream.state.is_local_reset(); if !is_ignoring_frame && !stream.state.is_recv_streaming() { // TODO: There are cases where this can be a stream error of // STREAM_CLOSED instead... // Receiving a DATA frame when not expecting one is a protocol // error. proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } trace!( "recv_data; size={}; connection={}; stream={}", sz, self.flow.window_size(), stream.recv_flow.window_size() ); if is_ignoring_frame { trace!( "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); return self.ignore_data(sz); } // Ensure that there is enough capacity on the connection before acting // on the stream. self.consume_connection_window(sz)?; if stream.recv_flow.window_size() < sz { // http://httpwg.org/specs/rfc7540.html#WINDOW_UPDATE // > A receiver MAY respond with a stream error (Section 5.4.2) or // > connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR if // > it is unable to accept a frame. // // So, for violating the **stream** window, we can send either a // stream or connection error. We've opted to send a stream // error. 
return Err(RecvError::Stream { id: stream.id, reason: Reason::FLOW_CONTROL_ERROR, }); } if stream.dec_content_length(frame.payload().len()).is_err() { proto_err!(stream: "recv_data: content-length overflow; stream={:?}; len={:?}", stream.id, frame.payload().len(), ); return Err(RecvError::Stream { id: stream.id, reason: Reason::PROTOCOL_ERROR, }); } if frame.is_end_stream() { if stream.ensure_content_length_zero().is_err() { proto_err!(stream: "recv_data: content-length underflow; stream={:?}; len={:?}", stream.id, frame.payload().len(), ); return Err(RecvError::Stream { id: stream.id, reason: Reason::PROTOCOL_ERROR, }); } if stream.state.recv_close().is_err() { proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } } // Update stream level flow control stream.recv_flow.send_data(sz); // Track the data as in-flight stream.in_flight_recv_data += sz; let event = Event::Data(frame.into_payload()); // Push the frame onto the recv buffer stream.pending_recv.push_back(&mut self.buffer, event); stream.notify_recv(); Ok(()) } pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), RecvError> { // Ensure that there is enough capacity on the connection... self.consume_connection_window(sz)?; // Since we are ignoring this frame, // we aren't returning the frame to the user. That means they // have no way to release the capacity back to the connection. So // we have to release it automatically. // // This call doesn't send a WINDOW_UPDATE immediately, just marks // the capacity as available to be reclaimed. When the available // capacity meets a threshold, a WINDOW_UPDATE is then sent. self.release_connection_capacity(sz, &mut None); return Ok(()); } pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> { if self.flow.window_size() < sz { debug!( "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", self.flow.window_size(), sz, ); return Err(RecvError::Connection(Reason::FLOW_CONTROL_ERROR)); } // Update connection level flow control self.flow.send_data(sz); // Track the data as in-flight self.in_flight_data += sz; Ok(()) } pub fn recv_push_promise( &mut self, frame: frame::PushPromise, stream: &mut store::Ptr, ) -> Result<(), RecvError> { stream.state.reserve_remote()?; if frame.is_over_size() { // A frame is over size if the decoded header block was bigger than // SETTINGS_MAX_HEADER_LIST_SIZE. // // > A server that receives a larger header block than it is willing // > to handle can send an HTTP 431 (Request Header Fields Too // > Large) status code [RFC6585]. A client can discard responses // > that it cannot process. // // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. 
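            // (Unlike `recv_headers`, there is no response to send on a
            // promised stream, so only the REFUSED_STREAM stream error below
            // applies here.)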
debug!( "stream error REFUSED_STREAM -- recv_push_promise: \ headers frame is over size; promised_id={:?};", frame.promised_id(), ); return Err(RecvError::Stream { id: frame.promised_id(), reason: Reason::REFUSED_STREAM, }); } let promised_id = frame.promised_id(); use http::header; let (pseudo, fields) = frame.into_parts(); let req = ::server::Peer::convert_poll_message(pseudo, fields, promised_id)?; // The spec has some requirements for promised request headers // [https://httpwg.org/specs/rfc7540.html#PushRequests] // A promised request "that indicates the presence of a request body // MUST reset the promised stream with a stream error" if let Some(content_length) = req.headers().get(header::CONTENT_LENGTH) { match parse_u64(content_length.as_bytes()) { Ok(0) => {}, otherwise => { proto_err!(stream: "recv_push_promise; promised request has content-length {:?}; promised_id={:?}", otherwise, promised_id, ); return Err(RecvError::Stream { id: promised_id, reason: Reason::PROTOCOL_ERROR, }); }, } } // "The server MUST include a method in the :method pseudo-header field // that is safe and cacheable" if !Self::safe_and_cacheable(req.method()) { proto_err!( stream: "recv_push_promise: method {} is not safe and cacheable; promised_id={:?}", req.method(), promised_id, ); return Err(RecvError::Stream { id: promised_id, reason: Reason::PROTOCOL_ERROR, }); } use super::peer::PollMessage::*; stream.pending_recv.push_back(&mut self.buffer, Event::Headers(Server(req))); stream.notify_recv(); Ok(()) } fn safe_and_cacheable(method: &Method) -> bool { // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods return method == Method::GET || method == Method::HEAD; } /// Ensures that `id` is not in the `Idle` state. pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { if let Ok(next) = self.next_stream_id { if id >= next { debug!("stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id); return Err(Reason::PROTOCOL_ERROR); } } // if next_stream_id is overflowed, that's ok. Ok(()) } /// Handle remote sending an explicit RST_STREAM. pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { // Notify the stream stream.state.recv_reset(frame.reason(), stream.is_pending_send); stream.notify_send(); stream.notify_recv(); } /// Handle a received error pub fn recv_err(&mut self, err: &proto::Error, stream: &mut Stream) { // Receive an error stream.state.recv_err(err); // If a receiver is waiting, notify it stream.notify_send(); stream.notify_recv(); } pub fn go_away(&mut self, last_processed_id: StreamId) { assert!(self.max_stream_id >= last_processed_id); self.max_stream_id = last_processed_id; } pub fn recv_eof(&mut self, stream: &mut Stream) { stream.state.recv_eof(); stream.notify_send(); stream.notify_recv(); } pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) { while let Some(_) = stream.pending_recv.pop_front(&mut self.buffer) { // drop it } } /// Get the max ID of streams we can receive. /// /// This gets lowered if we send a GOAWAY frame. 
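    /// (`go_away` above records the last stream ID advertised in that GOAWAY
    /// frame here.)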
pub fn max_stream_id(&self) -> StreamId { self.max_stream_id } pub fn next_stream_id(&self) -> Result { if let Ok(id) = self.next_stream_id { Ok(id) } else { Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) } } pub fn may_have_created_stream(&self, id: StreamId) -> bool { if let Ok(next_id) = self.next_stream_id { // Peer::is_local_init should have been called beforehand debug_assert_eq!( id.is_server_initiated(), next_id.is_server_initiated(), ); id < next_id } else { true } } /// Returns true if the remote peer can reserve a stream with the given ID. pub fn ensure_can_reserve(&self) -> Result<(), RecvError> { if !self.is_push_enabled { proto_err!(conn: "recv_push_promise: push is disabled"); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } Ok(()) } /// Add a locally reset stream to queue to be eventually reaped. pub fn enqueue_reset_expiration( &mut self, stream: &mut store::Ptr, counts: &mut Counts, ) { if !stream.state.is_local_reset() || stream.is_pending_reset_expiration() { return; } trace!("enqueue_reset_expiration; {:?}", stream.id); if !counts.can_inc_num_reset_streams() { // try to evict 1 stream if possible // if max allow is 0, this won't be able to evict, // and then we'll just bail after if let Some(evicted) = self.pending_reset_expired.pop(stream.store_mut()) { counts.transition_after(evicted, true); } } if counts.can_inc_num_reset_streams() { counts.inc_num_reset_streams(); self.pending_reset_expired.push(stream); } } /// Send any pending refusals. pub fn send_pending_refusal( &mut self, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { if let Some(stream_id) = self.refused { try_ready!(dst.poll_ready()); // Create the RST_STREAM frame let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM); // Buffer the frame dst.buffer(frame.into()) .ok() .expect("invalid RST_STREAM frame"); } self.refused = None; Ok(Async::Ready(())) } pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { let now = Instant::now(); let reset_duration = self.reset_duration; while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); now - reset_at > reset_duration }) { counts.transition_after(stream, true); } } pub fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) { self.clear_stream_window_update_queue(store, counts); self.clear_all_reset_streams(store, counts); if clear_pending_accept { self.clear_all_pending_accept(store, counts); } } fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_window_updates.pop(store) { counts.transition(stream, |_, stream| { trace!("clear_stream_window_update_queue; stream={:?}", stream.id); }) } } /// Called on EOF fn clear_all_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_reset_expired.pop(store) { counts.transition_after(stream, true); } } fn clear_all_pending_accept(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_accept.pop(store) { counts.transition_after(stream, false); } } pub fn poll_complete( &mut self, store: &mut Store, counts: &mut Counts, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { // Send any pending connection level window updates try_ready!(self.send_connection_window_update(dst)); // Send any pending stream level window updates 
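        // Stream level updates are flushed after the connection level update
        // above; each queued stream gets at most one WINDOW_UPDATE per pass,
        // and the loop yields as soon as the codec runs out of write
        // capacity.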
try_ready!(self.send_stream_window_updates(store, counts, dst)); Ok(().into()) } /// Send connection level window update fn send_connection_window_update( &mut self, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { if let Some(incr) = self.flow.unclaimed_capacity() { let frame = frame::WindowUpdate::new(StreamId::zero(), incr); // Ensure the codec has capacity try_ready!(dst.poll_ready()); // Buffer the WINDOW_UPDATE frame dst.buffer(frame.into()) .ok() .expect("invalid WINDOW_UPDATE frame"); // Update flow control self.flow .inc_window(incr) .ok() .expect("unexpected flow control state"); } Ok(().into()) } /// Send stream level window update pub fn send_stream_window_updates( &mut self, store: &mut Store, counts: &mut Counts, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { loop { // Ensure the codec has capacity try_ready!(dst.poll_ready()); // Get the next stream let stream = match self.pending_window_updates.pop(store) { Some(stream) => stream, None => return Ok(().into()), }; counts.transition(stream, |_, stream| { trace!("pending_window_updates -- pop; stream={:?}", stream.id); debug_assert!(!stream.is_pending_window_update); if !stream.state.is_recv_streaming() { // No need to send window updates on the stream if the stream is // no longer receiving data. // // TODO: is this correct? We could possibly send a window // update on a ReservedRemote stream if we already know // we want to stream the data faster... return; } // TODO: de-dup if let Some(incr) = stream.recv_flow.unclaimed_capacity() { // Create the WINDOW_UPDATE frame let frame = frame::WindowUpdate::new(stream.id, incr); // Buffer it dst.buffer(frame.into()) .ok() .expect("invalid WINDOW_UPDATE frame"); // Update flow control stream .recv_flow .inc_window(incr) .ok() .expect("unexpected flow control state"); } }) } } pub fn next_incoming(&mut self, store: &mut Store) -> Option { self.pending_accept.pop(store).map(|ptr| ptr.key()) } pub fn poll_data(&mut self, stream: &mut Stream) -> Poll, proto::Error> { // TODO: Return error when the stream is reset match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Data(payload)) => Ok(Some(payload).into()), Some(event) => { // Frame is trailer stream.pending_recv.push_front(&mut self.buffer, event); // Notify the recv task. This is done just in case // `poll_trailers` was called. // // It is very likely that `notify_recv` will just be a no-op (as // the task will be None), so this isn't really much of a // performance concern. It also means we don't have to track // state to see if `poll_trailers` was called before `poll_data` // returned `None`. stream.notify_recv(); // No more data frames Ok(None.into()) }, None => self.schedule_recv(stream), } } pub fn poll_trailers( &mut self, stream: &mut Stream, ) -> Poll, proto::Error> { match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Trailers(trailers)) => Ok(Some(trailers).into()), Some(event) => { // Frame is not trailers.. not ready to poll trailers yet. stream.pending_recv.push_front(&mut self.buffer, event); Ok(Async::NotReady) }, None => self.schedule_recv(stream), } } fn schedule_recv(&mut self, stream: &mut Stream) -> Poll, proto::Error> { if stream.state.ensure_recv_open()? { // Request to get notified once more frames arrive stream.recv_task = Some(task::current()); Ok(Async::NotReady) } else { // No more frames will be received Ok(None.into()) } } } // ===== impl Event ===== impl Event { fn is_data(&self) -> bool { match *self { Event::Data(..) 
=> true, _ => false, } } } // ===== impl Open ===== impl Open { pub fn is_push_promise(&self) -> bool { use self::Open::*; match *self { PushPromise => true, _ => false, } } } // ===== impl RecvHeaderBlockError ===== impl From for RecvHeaderBlockError { fn from(err: RecvError) -> Self { RecvHeaderBlockError::State(err) } } // ===== util ===== fn parse_u64(src: &[u8]) -> Result { if src.len() > 19 { // At danger for overflow... return Err(()); } let mut ret = 0; for &d in src { if d < b'0' || d > b'9' { return Err(()); } ret *= 10; ret += (d - b'0') as u64; } Ok(ret) } h2-0.1.26/src/proto/streams/send.rs010066400017500001750000000347451350546774000152730ustar0000000000000000use codec::{RecvError, UserError}; use frame::{self, Reason}; use super::{ store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize, }; use bytes::Buf; use http; use futures::{Async, Poll}; use futures::task::Task; use tokio_io::AsyncWrite; use std::io; /// Manages state transitions related to outbound frames. #[derive(Debug)] pub(super) struct Send { /// Stream identifier to use for next initialized stream. next_stream_id: Result, /// Initial window size of locally initiated streams init_window_sz: WindowSize, /// Prioritization layer prioritize: Prioritize, } /// A value to detect which public API has called `poll_reset`. #[derive(Debug)] pub(crate) enum PollReset { AwaitingHeaders, Streaming, } impl Send { /// Create a new `Send` pub fn new(config: &Config) -> Self { Send { init_window_sz: config.remote_init_window_sz, next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), } } /// Returns the initial send window size pub fn init_window_sz(&self) -> WindowSize { self.init_window_sz } pub fn open(&mut self) -> Result { let stream_id = self.ensure_next_stream_id()?; self.next_stream_id = stream_id.next_id(); Ok(stream_id) } pub fn send_headers( &mut self, frame: frame::Headers, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> { trace!( "send_headers; frame={:?}; init_window={:?}", frame, self.init_window_sz ); // 8.1.2.2. 
Connection-Specific Header Fields if frame.fields().contains_key(http::header::CONNECTION) || frame.fields().contains_key(http::header::TRANSFER_ENCODING) || frame.fields().contains_key(http::header::UPGRADE) || frame.fields().contains_key("keep-alive") || frame.fields().contains_key("proxy-connection") { debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } else if let Some(te) = frame.fields().get(http::header::TE) { if te != "trailers" { debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } } if frame.has_too_big_field() { return Err(UserError::HeaderTooBig); } let end_stream = frame.is_end_stream(); // Update the state stream.state.send_open(end_stream)?; if counts.peer().is_local_init(frame.stream_id()) { if counts.can_inc_num_send_streams() { counts.inc_num_send_streams(stream); } else { self.prioritize.queue_open(stream); } } // Queue the frame for sending self.prioritize.queue_frame(frame.into(), buffer, stream, task); Ok(()) } /// Send an explicit RST_STREAM frame pub fn send_reset( &mut self, reason: Reason, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) { let is_reset = stream.state.is_reset(); let is_closed = stream.state.is_closed(); let is_empty = stream.pending_send.is_empty(); trace!( "send_reset(..., reason={:?}, stream={:?}, ..., \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ state={:?} \ ", reason, stream.id, is_reset, is_closed, is_empty, stream.state ); if is_reset { // Don't double reset trace!( " -> not sending RST_STREAM ({:?} is already reset)", stream.id ); return; } // Transition the state to reset no matter what. stream.state.set_reset(reason); // If closed AND the send queue is flushed, then the stream cannot be // reset explicitly, either. Implicit resets can still be queued. if is_closed && is_empty { trace!( " -> not sending explicit RST_STREAM ({:?} was closed \ and send queue was flushed)", stream.id ); return; } // Clear all pending outbound frames. // Note that we don't call `self.recv_err` because we want to enqueue // the reset frame before transitioning the stream inside // `reclaim_all_capacity`. self.prioritize.clear_queue(buffer, stream); let frame = frame::Reset::new(stream.id, reason); trace!("send_reset -- queueing; frame={:?}", frame); self.prioritize.queue_frame(frame.into(), buffer, stream, task); self.prioritize.reclaim_all_capacity(stream, counts); } pub fn schedule_implicit_reset( &mut self, stream: &mut store::Ptr, reason: Reason, counts: &mut Counts, task: &mut Option, ) { if stream.state.is_closed() { // Stream is already closed, nothing more to do return; } stream.state.set_scheduled_reset(reason); self.prioritize.reclaim_reserved_capacity(stream, counts); self.prioritize.schedule_send(stream, task); } pub fn send_data( &mut self, frame: frame::Data, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> where B: Buf, { self.prioritize.send_data(frame, buffer, stream, counts, task) } pub fn send_trailers( &mut self, frame: frame::Headers, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> { // TODO: Should this logic be moved into state.rs? 
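        // Trailers are only legal while the send half is still streaming,
        // i.e. after the initial HEADERS frame and before END_STREAM. As a
        // rough sketch, the frame sequence this method completes is:
        //
        //   HEADERS              (initial request/response headers)
        //   DATA ...             (zero or more)
        //   HEADERS + END_STREAM (the trailers queued below)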
if !stream.state.is_send_streaming() { return Err(UserError::UnexpectedFrameType); } if frame.has_too_big_field() { return Err(UserError::HeaderTooBig); } stream.state.send_close(); trace!("send_trailers -- queuing; frame={:?}", frame); self.prioritize.queue_frame(frame.into(), buffer, stream, task); // Release any excess capacity self.prioritize.reserve_capacity(0, stream, counts); Ok(()) } pub fn poll_complete( &mut self, buffer: &mut Buffer>, store: &mut Store, counts: &mut Counts, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, B: Buf, { self.prioritize.poll_complete(buffer, store, counts, dst) } /// Request capacity to send data pub fn reserve_capacity( &mut self, capacity: WindowSize, stream: &mut store::Ptr, counts: &mut Counts) { self.prioritize.reserve_capacity(capacity, stream, counts) } pub fn poll_capacity( &mut self, stream: &mut store::Ptr, ) -> Poll, UserError> { if !stream.state.is_send_streaming() { return Ok(Async::Ready(None)); } if !stream.send_capacity_inc { stream.wait_send(); return Ok(Async::NotReady); } stream.send_capacity_inc = false; Ok(Async::Ready(Some(self.capacity(stream)))) } /// Current available stream send capacity pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { let available = stream.send_flow.available().as_size(); let buffered = stream.buffered_send_data; if available <= buffered { 0 } else { available - buffered } } pub fn poll_reset( &self, stream: &mut Stream, mode: PollReset, ) -> Poll { match stream.state.ensure_reason(mode)? { Some(reason) => Ok(reason.into()), None => { stream.wait_send(); Ok(Async::NotReady) }, } } pub fn recv_connection_window_update( &mut self, frame: frame::WindowUpdate, store: &mut Store, counts: &mut Counts, ) -> Result<(), Reason> { self.prioritize .recv_connection_window_update(frame.size_increment(), store, counts) } pub fn recv_stream_window_update( &mut self, sz: WindowSize, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, task: &mut Option, ) -> Result<(), Reason> { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { debug!("recv_stream_window_update !!; err={:?}", e); self.send_reset( Reason::FLOW_CONTROL_ERROR.into(), buffer, stream, counts, task); return Err(e); } Ok(()) } pub fn recv_err( &mut self, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, ) { // Clear all pending outbound frames self.prioritize.clear_queue(buffer, stream); self.prioritize.reclaim_all_capacity(stream, counts); } pub fn apply_remote_settings( &mut self, settings: &frame::Settings, buffer: &mut Buffer>, store: &mut Store, counts: &mut Counts, task: &mut Option, ) -> Result<(), RecvError> { // Applies an update to the remote endpoint's initial window size. // // Per RFC 7540 §6.9.2: // // In addition to changing the flow-control window for streams that are // not yet active, a SETTINGS frame can alter the initial flow-control // window size for streams with active flow-control windows (that is, // streams in the "open" or "half-closed (remote)" state). When the // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust // the size of all stream flow-control windows that it maintains by the // difference between the new value and the old value. // // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available // space in a flow-control window to become negative. 
A sender MUST // track the negative flow-control window and MUST NOT send new // flow-controlled frames until it receives WINDOW_UPDATE frames that // cause the flow-control window to become positive. if let Some(val) = settings.initial_window_size() { let old_val = self.init_window_sz; self.init_window_sz = val; if val < old_val { // We must decrease the (remote) window on every open stream. let dec = old_val - val; trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; store.for_each(|mut stream| { let stream = &mut *stream; stream.send_flow.dec_window(dec); // It's possible that decreasing the window causes // `window_size` (the stream-specific window) to fall below // `available` (the portion of the connection-level window // that we have allocated to the stream). // In this case, we should take that excess allocation away // and reassign it to other streams. let window_size = stream.send_flow.window_size(); let available = stream.send_flow.available().as_size(); let reclaimed = if available > window_size { // Drop down to `window_size`. let reclaim = available - window_size; stream.send_flow.claim_capacity(reclaim); total_reclaimed += reclaim; reclaim } else { 0 }; trace!( "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", stream.id, dec, reclaimed, stream.send_flow ); // TODO: Should this notify the producer when the capacity // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. Ok::<_, RecvError>(()) })?; self.prioritize .assign_connection_capacity(total_reclaimed, store, counts); } else if val > old_val { let inc = val - old_val; store.for_each(|mut stream| { self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) .map_err(RecvError::Connection) })?; } } Ok(()) } pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) { self.prioritize.clear_pending_capacity(store, counts); self.prioritize.clear_pending_send(store, counts); self.prioritize.clear_pending_open(store, counts); } pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { if let Ok(next) = self.next_stream_id { if id >= next { return Err(Reason::PROTOCOL_ERROR); } } // if next_stream_id is overflowed, that's ok. Ok(()) } pub fn ensure_next_stream_id(&self) -> Result { self.next_stream_id.map_err(|_| UserError::OverflowedStreamId) } pub fn may_have_created_stream(&self, id: StreamId) -> bool { if let Ok(next_id) = self.next_stream_id { // Peer::is_local_init should have been called beforehand debug_assert_eq!( id.is_server_initiated(), next_id.is_server_initiated(), ); id < next_id } else { true } } } h2-0.1.26/src/proto/streams/state.rs010066400017500001750000000343101350500452700154340ustar0000000000000000use std::io; use codec::{RecvError, UserError}; use codec::UserError::*; use frame::Reason; use proto::{self, PollReset}; use self::Inner::*; use self::Peer::*; /// Represents the state of an H2 stream /// /// ```not_rust /// +--------+ /// send PP | | recv PP /// ,--------| idle |--------. /// / | | \ /// v +--------+ v /// +----------+ | +----------+ /// | | | send H / | | /// ,------| reserved | | recv H | reserved |------. /// | | (local) | | | (remote) | | /// | +----------+ v +----------+ | /// | | +--------+ | | /// | | recv ES | | send ES | | /// | send H | ,-------| open |-------. 
| recv H | /// | | / | | \ | | /// | v v +--------+ v v | /// | +----------+ | +----------+ | /// | | half | | | half | | /// | | closed | | send R / | closed | | /// | | (remote) | | recv R | (local) | | /// | +----------+ | +----------+ | /// | | | | | /// | | send ES / | recv ES / | | /// | | send R / v send R / | | /// | | recv R +--------+ recv R | | /// | send R / `----------->| |<-----------' send R / | /// | recv R | closed | recv R | /// `----------------------->| |<----------------------' /// +--------+ /// /// send: endpoint sends this frame /// recv: endpoint receives this frame /// /// H: HEADERS frame (with implied CONTINUATIONs) /// PP: PUSH_PROMISE frame (with implied CONTINUATIONs) /// ES: END_STREAM flag /// R: RST_STREAM frame /// ``` #[derive(Debug, Clone)] pub struct State { inner: Inner, } #[derive(Debug, Clone, Copy)] enum Inner { Idle, // TODO: these states shouldn't count against concurrency limits: //ReservedLocal, ReservedRemote, Open { local: Peer, remote: Peer }, HalfClosedLocal(Peer), // TODO: explicitly name this value HalfClosedRemote(Peer), Closed(Cause), } #[derive(Debug, Copy, Clone)] enum Peer { AwaitingHeaders, Streaming, } #[derive(Debug, Copy, Clone)] enum Cause { EndStream, Proto(Reason), LocallyReset(Reason), Io, /// This indicates to the connection that a reset frame must be sent out /// once the send queue has been flushed. /// /// Examples of when this could happen: /// - User drops all references to a stream, so we want to CANCEL the it. /// - Header block size was too large, so we want to REFUSE, possibly /// after sending a 431 response frame. Scheduled(Reason), } impl State { /// Opens the send-half of a stream if it is not already open. pub fn send_open(&mut self, eos: bool) -> Result<(), UserError> { let local = Streaming; self.inner = match self.inner { Idle => if eos { HalfClosedLocal(AwaitingHeaders) } else { Open { local, remote: AwaitingHeaders, } }, Open { local: AwaitingHeaders, remote, } => if eos { HalfClosedLocal(remote) } else { Open { local, remote, } }, HalfClosedRemote(AwaitingHeaders) => if eos { Closed(Cause::EndStream) } else { HalfClosedRemote(local) }, _ => { // All other transitions result in a protocol error return Err(UnexpectedFrameType); }, }; return Ok(()); } /// Opens the receive-half of the stream when a HEADERS frame is received. /// /// Returns true if this transitions the state to Open. 
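    /// Note that the return value is also `true` when leaving
    /// `ReservedRemote` (a pushed stream receiving its HEADERS), since that
    /// is likewise the first HEADERS frame received for the stream.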
pub fn recv_open(&mut self, eos: bool) -> Result { let remote = Streaming; let mut initial = false; self.inner = match self.inner { Idle => { initial = true; if eos { HalfClosedRemote(AwaitingHeaders) } else { Open { local: AwaitingHeaders, remote, } } }, ReservedRemote => { initial = true; if eos { Closed(Cause::EndStream) } else { HalfClosedLocal(Streaming) } }, Open { local, remote: AwaitingHeaders, } => if eos { HalfClosedRemote(local) } else { Open { local, remote, } }, HalfClosedLocal(AwaitingHeaders) => if eos { Closed(Cause::EndStream) } else { HalfClosedLocal(remote) }, state => { // All other transitions result in a protocol error proto_err!(conn: "recv_open: in unexpected state {:?}", state); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); }, }; return Ok(initial); } /// Transition from Idle -> ReservedRemote pub fn reserve_remote(&mut self) -> Result<(), RecvError> { match self.inner { Idle => { self.inner = ReservedRemote; Ok(()) }, state => { proto_err!(conn: "reserve_remote: in unexpected state {:?}", state); Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) } } } /// Indicates that the remote side will not send more data to the local. pub fn recv_close(&mut self) -> Result<(), RecvError> { match self.inner { Open { local, .. } => { // The remote side will continue to receive data. trace!("recv_close: Open => HalfClosedRemote({:?})", local); self.inner = HalfClosedRemote(local); Ok(()) }, HalfClosedLocal(..) => { trace!("recv_close: HalfClosedLocal => Closed"); self.inner = Closed(Cause::EndStream); Ok(()) }, state => { proto_err!(conn: "recv_close: in unexpected state {:?}", state); Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) } } } /// The remote explicitly sent a RST_STREAM. /// /// # Arguments /// - `reason`: the reason field of the received RST_STREAM frame. /// - `queued`: true if this stream has frames in the pending send queue. pub fn recv_reset(&mut self, reason: Reason, queued: bool) { match self.inner { // If the stream is already in a `Closed` state, do nothing, // provided that there are no frames still in the send queue. Closed(..) if !queued => {}, // A notionally `Closed` stream may still have queued frames in // the following cases: // // - if the cause is `Cause::Scheduled(..)` (i.e. we have not // actually closed the stream yet). // - if the cause is `Cause::EndStream`: we transition to this // state when an EOS frame is *enqueued* (so that it's invalid // to enqueue more frames), not when the EOS frame is *sent*; // therefore, there may still be frames ahead of the EOS frame // in the send queue. // // In either of these cases, we want to overwrite the stream's // previous state with the received RST_STREAM, so that the queue // will be cleared by `Prioritize::pop_frame`. state => { trace!( "recv_reset; reason={:?}; state={:?}; queued={:?}", reason, state, queued ); self.inner = Closed(Cause::Proto(reason)); }, } } /// We noticed a protocol error. pub fn recv_err(&mut self, err: &proto::Error) { use proto::Error::*; match self.inner { Closed(..) => {}, _ => { trace!("recv_err; err={:?}", err); self.inner = Closed(match *err { Proto(reason) => Cause::LocallyReset(reason), Io(..) => Cause::Io, }); }, } } pub fn recv_eof(&mut self) { match self.inner { Closed(..) => {}, s => { trace!("recv_eof; state={:?}", s); self.inner = Closed(Cause::Io); } } } /// Indicates that the local side will not send more data to the local. pub fn send_close(&mut self) { match self.inner { Open { remote, .. 
} => { // The remote side will continue to receive data. trace!("send_close: Open => HalfClosedLocal({:?})", remote); self.inner = HalfClosedLocal(remote); }, HalfClosedRemote(..) => { trace!("send_close: HalfClosedRemote => Closed"); self.inner = Closed(Cause::EndStream); }, state => panic!("send_close: unexpected state {:?}", state), } } /// Set the stream state to reset locally. pub fn set_reset(&mut self, reason: Reason) { self.inner = Closed(Cause::LocallyReset(reason)); } /// Set the stream state to a scheduled reset. pub fn set_scheduled_reset(&mut self, reason: Reason) { debug_assert!(!self.is_closed()); self.inner = Closed(Cause::Scheduled(reason)); } pub fn get_scheduled_reset(&self) -> Option { match self.inner { Closed(Cause::Scheduled(reason)) => Some(reason), _ => None, } } pub fn is_scheduled_reset(&self) -> bool { match self.inner { Closed(Cause::Scheduled(..)) => true, _ => false, } } pub fn is_local_reset(&self) -> bool { match self.inner { Closed(Cause::LocallyReset(_)) => true, Closed(Cause::Scheduled(..)) => true, _ => false, } } /// Returns true if the stream is already reset. pub fn is_reset(&self) -> bool { match self.inner { Closed(Cause::EndStream) => false, Closed(_) => true, _ => false, } } pub fn is_send_streaming(&self) -> bool { match self.inner { Open { local: Streaming, .. } => true, HalfClosedRemote(Streaming) => true, _ => false, } } /// Returns true when the stream is in a state to receive headers pub fn is_recv_headers(&self) -> bool { match self.inner { Idle => true, Open { remote: AwaitingHeaders, .. } => true, HalfClosedLocal(AwaitingHeaders) => true, ReservedRemote => true, _ => false, } } pub fn is_recv_streaming(&self) -> bool { match self.inner { Open { remote: Streaming, .. } => true, HalfClosedLocal(Streaming) => true, _ => false, } } pub fn is_closed(&self) -> bool { match self.inner { Closed(_) => true, _ => false, } } pub fn is_recv_closed(&self) -> bool { match self.inner { Closed(..) | HalfClosedRemote(..) => true, _ => false, } } pub fn is_send_closed(&self) -> bool { match self.inner { Closed(..) | HalfClosedLocal(..) | ReservedRemote => true, _ => false, } } pub fn is_idle(&self) -> bool { match self.inner { Idle => true, _ => false, } } pub fn ensure_recv_open(&self) -> Result { // TODO: Is this correct? match self.inner { Closed(Cause::Proto(reason)) | Closed(Cause::LocallyReset(reason)) | Closed(Cause::Scheduled(reason)) => Err(proto::Error::Proto(reason)), Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into())), Closed(Cause::EndStream) | HalfClosedRemote(..) => Ok(false), _ => Ok(true), } } /// Returns a reason if the stream has been reset. pub(super) fn ensure_reason(&self, mode: PollReset) -> Result, ::Error> { match self.inner { Closed(Cause::Proto(reason)) | Closed(Cause::LocallyReset(reason)) | Closed(Cause::Scheduled(reason)) => Ok(Some(reason)), Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into()).into()), Open { local: Streaming, .. 
} | HalfClosedRemote(Streaming) => match mode { PollReset::AwaitingHeaders => { Err(UserError::PollResetAfterSendResponse.into()) }, PollReset::Streaming => Ok(None), }, _ => Ok(None), } } } impl Default for State { fn default() -> State { State { inner: Inner::Idle, } } } impl Default for Peer { fn default() -> Self { AwaitingHeaders } } h2-0.1.26/src/proto/streams/store.rs010066400017500001750000000225231350500452700154530ustar0000000000000000use super::*; use slab; use indexmap::{self, IndexMap}; use std::fmt; use std::marker::PhantomData; use std::ops; /// Storage for streams #[derive(Debug)] pub(super) struct Store { slab: slab::Slab, ids: IndexMap, } /// "Pointer" to an entry in the store pub(super) struct Ptr<'a> { key: Key, store: &'a mut Store, } /// References an entry in the store. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) struct Key { index: SlabIndex, /// Keep the stream ID in the key as an ABA guard, since slab indices /// could be re-used with a new stream. stream_id: StreamId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] struct SlabIndex(usize); #[derive(Debug)] pub(super) struct Queue { indices: Option, _p: PhantomData, } pub(super) trait Next { fn next(stream: &Stream) -> Option; fn set_next(stream: &mut Stream, key: Option); fn take_next(stream: &mut Stream) -> Option; fn is_queued(stream: &Stream) -> bool; fn set_queued(stream: &mut Stream, val: bool); } /// A linked list #[derive(Debug, Clone, Copy)] struct Indices { pub head: Key, pub tail: Key, } pub(super) enum Entry<'a> { Occupied(OccupiedEntry<'a>), Vacant(VacantEntry<'a>), } pub(super) struct OccupiedEntry<'a> { ids: indexmap::map::OccupiedEntry<'a, StreamId, SlabIndex>, } pub(super) struct VacantEntry<'a> { ids: indexmap::map::VacantEntry<'a, StreamId, SlabIndex>, slab: &'a mut slab::Slab, } pub(super) trait Resolve { fn resolve(&mut self, key: Key) -> Ptr; } // ===== impl Store ===== impl Store { pub fn new() -> Self { Store { slab: slab::Slab::new(), ids: IndexMap::new(), } } pub fn find_mut(&mut self, id: &StreamId) -> Option { let index = match self.ids.get(id) { Some(key) => *key, None => return None, }; Some(Ptr { key: Key { index, stream_id: *id, }, store: self, }) } pub fn insert(&mut self, id: StreamId, val: Stream) -> Ptr { let index = SlabIndex(self.slab.insert(val)); assert!(self.ids.insert(id, index).is_none()); Ptr { key: Key { index, stream_id: id, }, store: self, } } pub fn find_entry(&mut self, id: StreamId) -> Entry { use self::indexmap::map::Entry::*; match self.ids.entry(id) { Occupied(e) => Entry::Occupied(OccupiedEntry { ids: e, }), Vacant(e) => Entry::Vacant(VacantEntry { ids: e, slab: &mut self.slab, }), } } pub fn for_each(&mut self, mut f: F) -> Result<(), E> where F: FnMut(Ptr) -> Result<(), E>, { let mut len = self.ids.len(); let mut i = 0; while i < len { // Get the key by index, this makes the borrow checker happy let (stream_id, index) = { let entry = self.ids.get_index(i).unwrap(); (*entry.0, *entry.1) }; f(Ptr { key: Key { index, stream_id, }, store: self, })?; // TODO: This logic probably could be better... 
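            // If `f` removed the entry we just visited from `ids`, the map is
            // now one entry shorter and the entry (if any) sitting at slot `i`
            // has not been visited yet, so `i` is deliberately left alone;
            // otherwise advance to the next slot.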
let new_len = self.ids.len(); if new_len < len { debug_assert!(new_len == len - 1); len -= 1; } else { i += 1; } } Ok(()) } } impl Resolve for Store { fn resolve(&mut self, key: Key) -> Ptr { Ptr { key: key, store: self, } } } impl ops::Index for Store { type Output = Stream; fn index(&self, key: Key) -> &Self::Output { self.slab .get(key.index.0) .filter(|s| s.id == key.stream_id) .unwrap_or_else(|| { panic!("dangling store key for stream_id={:?}", key.stream_id); }) } } impl ops::IndexMut for Store { fn index_mut(&mut self, key: Key) -> &mut Self::Output { self.slab .get_mut(key.index.0) .filter(|s| s.id == key.stream_id) .unwrap_or_else(|| { panic!("dangling store key for stream_id={:?}", key.stream_id); }) } } impl Store { #[cfg(feature = "unstable")] pub fn num_active_streams(&self) -> usize { self.ids.len() } #[cfg(feature = "unstable")] pub fn num_wired_streams(&self) -> usize { self.slab.len() } } impl Drop for Store { fn drop(&mut self) { use std::thread; if !thread::panicking() { debug_assert!(self.slab.is_empty()); } } } // ===== impl Queue ===== impl Queue where N: Next, { pub fn new() -> Self { Queue { indices: None, _p: PhantomData, } } pub fn take(&mut self) -> Self { Queue { indices: self.indices.take(), _p: PhantomData, } } /// Queue the stream. /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { trace!("Queue::push"); if N::is_queued(stream) { trace!(" -> already queued"); return false; } N::set_queued(stream, true); // The next pointer shouldn't be set debug_assert!(N::next(stream).is_none()); // Queue the stream match self.indices { Some(ref mut idxs) => { trace!(" -> existing entries"); // Update the current tail node to point to `stream` let key = stream.key(); N::set_next(&mut stream.resolve(idxs.tail), Some(key)); // Update the tail pointer idxs.tail = stream.key(); }, None => { trace!(" -> first entry"); self.indices = Some(store::Indices { head: stream.key(), tail: stream.key(), }); }, } true } pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option> where R: Resolve, { if let Some(mut idxs) = self.indices { let mut stream = store.resolve(idxs.head); if idxs.head == idxs.tail { assert!(N::next(&*stream).is_none()); self.indices = None; } else { idxs.head = N::take_next(&mut *stream).unwrap(); self.indices = Some(idxs); } debug_assert!(N::is_queued(&*stream)); N::set_queued(&mut *stream, false); return Some(stream); } None } pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option> where R: Resolve, F: Fn(&Stream) -> bool, { if let Some(idxs) = self.indices { let should_pop = f(&store.resolve(idxs.head)); if should_pop { return self.pop(store); } } None } } // ===== impl Ptr ===== impl<'a> Ptr<'a> { /// Returns the Key associated with the stream pub fn key(&self) -> Key { self.key } pub fn store_mut(&mut self) -> &mut Store { &mut self.store } /// Remove the stream from the store pub fn remove(self) -> StreamId { // The stream must have been unlinked before this point debug_assert!(!self.store.ids.contains_key(&self.key.stream_id)); // Remove the stream state let stream = self.store.slab.remove(self.key.index.0); assert_eq!(stream.id, self.key.stream_id); stream.id } /// Remove the StreamId -> stream state association. /// /// This will effectively remove the stream as far as the H2 protocol is /// concerned. 
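    /// The stream's state itself stays in the slab until `remove` is called;
    /// `unlink` only drops the `StreamId` -> slab-index association, so the
    /// stream can no longer be looked up by ID.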
pub fn unlink(&mut self) { let id = self.key.stream_id; self.store.ids.remove(&id); } } impl<'a> Resolve for Ptr<'a> { fn resolve(&mut self, key: Key) -> Ptr { Ptr { key: key, store: &mut *self.store, } } } impl<'a> ops::Deref for Ptr<'a> { type Target = Stream; fn deref(&self) -> &Stream { &self.store[self.key] } } impl<'a> ops::DerefMut for Ptr<'a> { fn deref_mut(&mut self) -> &mut Stream { &mut self.store[self.key] } } impl<'a> fmt::Debug for Ptr<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(fmt) } } // ===== impl OccupiedEntry ===== impl<'a> OccupiedEntry<'a> { pub fn key(&self) -> Key { let stream_id = *self.ids.key(); let index = *self.ids.get(); Key { index, stream_id, } } } // ===== impl VacantEntry ===== impl<'a> VacantEntry<'a> { pub fn insert(self, value: Stream) -> Key { // Insert the value in the slab let stream_id = value.id; let index = SlabIndex(self.slab.insert(value)); // Insert the handle in the ID map self.ids.insert(index); Key { index, stream_id, } } } h2-0.1.26/src/proto/streams/stream.rs010066400017500001750000000320671347526041500156240ustar0000000000000000use super::*; use std::time::Instant; use std::usize; /// Tracks Stream related state /// /// # Reference counting /// /// There can be a number of outstanding handles to a single Stream. These are /// tracked using reference counting. The `ref_count` field represents the /// number of outstanding userspace handles that can reach this stream. /// /// It's important to note that when the stream is placed in an internal queue /// (such as an accept queue), this is **not** tracked by a reference count. /// Thus, `ref_count` can be zero and the stream still has to be kept around. #[derive(Debug)] pub(super) struct Stream { /// The h2 stream identifier pub id: StreamId, /// Current state of the stream pub state: State, /// Set to `true` when the stream is counted against the connection's max /// concurrent streams. pub is_counted: bool, /// Number of outstanding handles pointing to this stream pub ref_count: usize, // ===== Fields related to sending ===== /// Next node in the accept linked list pub next_pending_send: Option, /// Set to true when the stream is pending accept pub is_pending_send: bool, /// Send data flow control pub send_flow: FlowControl, /// Amount of send capacity that has been requested, but not yet allocated. pub requested_send_capacity: WindowSize, /// Amount of data buffered at the prioritization layer. /// TODO: Technically this could be greater than the window size... pub buffered_send_data: WindowSize, /// Task tracking additional send capacity (i.e. window updates). send_task: Option, /// Frames pending for this stream being sent to the socket pub pending_send: buffer::Deque, /// Next node in the linked list of streams waiting for additional /// connection level capacity. 
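    /// (Like the other `next_*` fields on `Stream`, this is an intrusive link
    /// used by `store::Queue`, which lets a stream sit in several queues at
    /// once without any per-queue allocation.)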
pub next_pending_send_capacity: Option, /// True if the stream is waiting for outbound connection capacity pub is_pending_send_capacity: bool, /// Set to true when the send capacity has been incremented pub send_capacity_inc: bool, /// Next node in the open linked list pub next_open: Option, /// Set to true when the stream is pending to be opened pub is_pending_open: bool, // ===== Fields related to receiving ===== /// Next node in the accept linked list pub next_pending_accept: Option, /// Set to true when the stream is pending accept pub is_pending_accept: bool, /// Receive data flow control pub recv_flow: FlowControl, pub in_flight_recv_data: WindowSize, /// Next node in the linked list of streams waiting to send window updates. pub next_window_update: Option, /// True if the stream is waiting to send a window update pub is_pending_window_update: bool, /// The time when this stream may have been locally reset. pub reset_at: Option, /// Next node in list of reset streams that should expire eventually pub next_reset_expire: Option, /// Frames pending for this stream to read pub pending_recv: buffer::Deque, /// Task tracking receiving frames pub recv_task: Option, /// The stream's pending push promises pub pending_push_promises: store::Queue, /// Validate content-length headers pub content_length: ContentLength, } /// State related to validating a stream's content-length #[derive(Debug)] pub enum ContentLength { Omitted, Head, Remaining(u64), } #[derive(Debug)] pub(super) struct NextAccept; #[derive(Debug)] pub(super) struct NextSend; #[derive(Debug)] pub(super) struct NextSendCapacity; #[derive(Debug)] pub(super) struct NextWindowUpdate; #[derive(Debug)] pub(super) struct NextOpen; #[derive(Debug)] pub(super) struct NextResetExpire; impl Stream { pub fn new( id: StreamId, init_send_window: WindowSize, init_recv_window: WindowSize, ) -> Stream { let mut send_flow = FlowControl::new(); let mut recv_flow = FlowControl::new(); recv_flow .inc_window(init_recv_window) .ok() .expect("invalid initial receive window"); recv_flow.assign_capacity(init_recv_window); send_flow .inc_window(init_send_window) .ok() .expect("invalid initial send window size"); Stream { id, state: State::default(), ref_count: 0, is_counted: false, // ===== Fields related to sending ===== next_pending_send: None, is_pending_send: false, send_flow: send_flow, requested_send_capacity: 0, buffered_send_data: 0, send_task: None, pending_send: buffer::Deque::new(), is_pending_send_capacity: false, next_pending_send_capacity: None, send_capacity_inc: false, is_pending_open: false, next_open: None, // ===== Fields related to receiving ===== next_pending_accept: None, is_pending_accept: false, recv_flow: recv_flow, in_flight_recv_data: 0, next_window_update: None, is_pending_window_update: false, reset_at: None, next_reset_expire: None, pending_recv: buffer::Deque::new(), recv_task: None, pending_push_promises: store::Queue::new(), content_length: ContentLength::Omitted, } } /// Increment the stream's ref count pub fn ref_inc(&mut self) { assert!(self.ref_count < usize::MAX); self.ref_count += 1; } /// Decrements the stream's ref count pub fn ref_dec(&mut self) { assert!(self.ref_count > 0); self.ref_count -= 1; } /// Returns true if stream is currently being held for some time because of /// a local reset. pub fn is_pending_reset_expiration(&self) -> bool { self.reset_at.is_some() } /// Returns true if the stream is closed pub fn is_closed(&self) -> bool { // The state has fully transitioned to closed. 
self.state.is_closed() && // Because outbound frames transition the stream state before being // buffered, we have to ensure that all frames have been flushed. self.pending_send.is_empty() && // Sometimes large data frames are sent out in chunks. After a chunk // of the frame is sent, the remainder is pushed back onto the send // queue to be rescheduled. // // Checking for additional buffered data lets us catch this case. self.buffered_send_data == 0 } /// Returns true if the stream is no longer in use pub fn is_released(&self) -> bool { // The stream is closed and fully flushed self.is_closed() && // There are no more outstanding references to the stream self.ref_count == 0 && // The stream is not in any queue !self.is_pending_send && !self.is_pending_send_capacity && !self.is_pending_accept && !self.is_pending_window_update && !self.is_pending_open && !self.reset_at.is_some() } /// Returns true when the consumer of the stream has dropped all handles /// (indicating no further interest in the stream) and the stream state is /// not actually closed. /// /// In this case, a reset should be sent. pub fn is_canceled_interest(&self) -> bool { self.ref_count == 0 && !self.state.is_closed() } pub fn assign_capacity(&mut self, capacity: WindowSize) { debug_assert!(capacity > 0); self.send_capacity_inc = true; self.send_flow.assign_capacity(capacity); trace!(" assigned capacity to stream; available={}; buffered={}; id={:?}", self.send_flow.available(), self.buffered_send_data, self.id); // Only notify if the capacity exceeds the amount of buffered data if self.send_flow.available() > self.buffered_send_data { trace!(" notifying task"); self.notify_send(); } } /// Returns `Err` when the decrement cannot be completed due to overflow. pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { match self.content_length { ContentLength::Remaining(ref mut rem) => match rem.checked_sub(len as u64) { Some(val) => *rem = val, None => return Err(()), }, ContentLength::Head => return Err(()), _ => {}, } Ok(()) } pub fn ensure_content_length_zero(&self) -> Result<(), ()> { match self.content_length { ContentLength::Remaining(0) => Ok(()), ContentLength::Remaining(_) => Err(()), _ => Ok(()), } } pub fn notify_send(&mut self) { if let Some(task) = self.send_task.take() { task.notify(); } } pub fn wait_send(&mut self) { self.send_task = Some(task::current()); } pub fn notify_recv(&mut self) { if let Some(task) = self.recv_task.take() { task.notify(); } } } impl store::Next for NextAccept { fn next(stream: &Stream) -> Option { stream.next_pending_accept } fn set_next(stream: &mut Stream, key: Option) { stream.next_pending_accept = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_pending_accept.take() } fn is_queued(stream: &Stream) -> bool { stream.is_pending_accept } fn set_queued(stream: &mut Stream, val: bool) { stream.is_pending_accept = val; } } impl store::Next for NextSend { fn next(stream: &Stream) -> Option { stream.next_pending_send } fn set_next(stream: &mut Stream, key: Option) { stream.next_pending_send = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_pending_send.take() } fn is_queued(stream: &Stream) -> bool { stream.is_pending_send } fn set_queued(stream: &mut Stream, val: bool) { if val { // ensure that stream is not queued for being opened // if it's being put into queue for sending data debug_assert_eq!(stream.is_pending_open, false); } stream.is_pending_send = val; } } impl store::Next for NextSendCapacity { fn next(stream: &Stream) -> Option { 
stream.next_pending_send_capacity } fn set_next(stream: &mut Stream, key: Option) { stream.next_pending_send_capacity = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_pending_send_capacity.take() } fn is_queued(stream: &Stream) -> bool { stream.is_pending_send_capacity } fn set_queued(stream: &mut Stream, val: bool) { stream.is_pending_send_capacity = val; } } impl store::Next for NextWindowUpdate { fn next(stream: &Stream) -> Option { stream.next_window_update } fn set_next(stream: &mut Stream, key: Option) { stream.next_window_update = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_window_update.take() } fn is_queued(stream: &Stream) -> bool { stream.is_pending_window_update } fn set_queued(stream: &mut Stream, val: bool) { stream.is_pending_window_update = val; } } impl store::Next for NextOpen { fn next(stream: &Stream) -> Option { stream.next_open } fn set_next(stream: &mut Stream, key: Option) { stream.next_open = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_open.take() } fn is_queued(stream: &Stream) -> bool { stream.is_pending_open } fn set_queued(stream: &mut Stream, val: bool) { if val { // ensure that stream is not queued for being sent // if it's being put into queue for opening the stream debug_assert_eq!(stream.is_pending_send, false); } stream.is_pending_open = val; } } impl store::Next for NextResetExpire { fn next(stream: &Stream) -> Option { stream.next_reset_expire } fn set_next(stream: &mut Stream, key: Option) { stream.next_reset_expire = key; } fn take_next(stream: &mut Stream) -> Option { stream.next_reset_expire.take() } fn is_queued(stream: &Stream) -> bool { stream.reset_at.is_some() } fn set_queued(stream: &mut Stream, val: bool) { if val { stream.reset_at = Some(Instant::now()); } else { stream.reset_at = None; } } } // ===== impl ContentLength ===== impl ContentLength { pub fn is_head(&self) -> bool { match *self { ContentLength::Head => true, _ => false, } } } h2-0.1.26/src/proto/streams/streams.rs010066400017500001750000001254331350546774000160130ustar0000000000000000use {client, proto, server}; use codec::{Codec, RecvError, SendError, UserError}; use frame::{self, Frame, Reason}; use proto::{peer, Peer, Open, WindowSize}; use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; use super::recv::RecvHeaderBlockError; use super::store::{self, Entry, Resolve, Store}; use bytes::{Buf, Bytes}; use futures::{task, Async, Poll}; use http::{HeaderMap, Request, Response}; use tokio_io::AsyncWrite; use std::{fmt, io}; use std::sync::{Arc, Mutex}; #[derive(Debug)] pub(crate) struct Streams where P: Peer, { /// Holds most of the connection and stream related state for processing /// HTTP/2.0 frames associated with streams. inner: Arc>, /// This is the queue of frames to be written to the wire. This is split out /// to avoid requiring a `B` generic on all public API types even if `B` is /// not technically required. /// /// Currently, splitting this out requires a second `Arc` + `Mutex`. /// However, it should be possible to avoid this duplication with a little /// bit of unsafe code. This optimization has been postponed until it has /// been shown to be necessary. send_buffer: Arc>, _p: ::std::marker::PhantomData
<P>
, } /// Reference to the stream state #[derive(Debug)] pub(crate) struct StreamRef { opaque: OpaqueStreamRef, send_buffer: Arc>, } /// Reference to the stream state that hides the send data chunk generic pub(crate) struct OpaqueStreamRef { inner: Arc>, key: store::Key, } /// Fields needed to manage state related to managing the set of streams. This /// is mostly split out to make ownership happy. /// /// TODO: better name #[derive(Debug)] struct Inner { /// Tracks send & recv stream concurrency. counts: Counts, /// Connection level state and performs actions on streams actions: Actions, /// Stores stream state store: Store, /// The number of stream refs to this shared state. refs: usize, } #[derive(Debug)] struct Actions { /// Manages state transitions initiated by receiving frames recv: Recv, /// Manages state transitions initiated by sending frames send: Send, /// Task that calls `poll_complete`. task: Option, /// If the connection errors, a copy is kept for any StreamRefs. conn_error: Option, } /// Contains the buffer of frames to be written to the wire. #[derive(Debug)] struct SendBuffer { inner: Mutex>>, } // ===== impl Streams ===== impl Streams where B: Buf, P: Peer, { pub fn new(config: Config) -> Self { let peer = P::dyn(); Streams { inner: Arc::new(Mutex::new(Inner { counts: Counts::new(peer, &config), actions: Actions { recv: Recv::new(peer, &config), send: Send::new(&config), task: None, conn_error: None, }, store: Store::new(), refs: 1, })), send_buffer: Arc::new(SendBuffer::new()), _p: ::std::marker::PhantomData, } } pub fn set_target_connection_window_size(&mut self, size: WindowSize) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; me.actions .recv .set_target_connection_window(size, &mut me.actions.task) } /// Process inbound headers pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { let id = frame.stream_id(); let mut me = self.inner.lock().unwrap(); let me = &mut *me; // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id()); return Ok(()); } let key = match me.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { // Client: it's possible to send a request, and then send // a RST_STREAM while the response HEADERS were in transit. // // Server: we can't reset a stream before having received // the request headers, so don't allow. if !P::is_server() { // This may be response headers for a stream we've already // forgotten about... if me.actions.may_have_forgotten_stream::
<P>
(id) { debug!( "recv_headers for old stream={:?}, sending STREAM_CLOSED", id, ); return Err(RecvError::Stream { id, reason: Reason::STREAM_CLOSED, }); } } match me.actions.recv.open(id, Open::Headers, &mut me.counts)? { Some(stream_id) => { let stream = Stream::new( stream_id, me.actions.send.init_window_sz(), me.actions.recv.init_window_sz(), ); e.insert(stream) }, None => return Ok(()), } }, }; let stream = me.store.resolve(key); if stream.state.is_local_reset() { // Locally reset streams must ignore frames "for some time". // This is because the remote may have sent trailers before // receiving the RST_STREAM frame. trace!("recv_headers; ignoring trailers on {:?}", stream.id); return Ok(()); } let actions = &mut me.actions; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { trace!( "recv_headers; stream={:?}; state={:?}", stream.id, stream.state ); let res = if stream.state.is_recv_headers() { match actions.recv.recv_headers(frame, stream, counts) { Ok(()) => Ok(()), Err(RecvHeaderBlockError::Oversize(resp)) => { if let Some(resp) = resp { let sent = actions.send.send_headers( resp, send_buffer, stream, counts, &mut actions.task); debug_assert!(sent.is_ok(), "oversize response should not fail"); actions.send.schedule_implicit_reset( stream, Reason::REFUSED_STREAM, counts, &mut actions.task); actions.recv.enqueue_reset_expiration(stream, counts); Ok(()) } else { Err(RecvError::Stream { id: stream.id, reason: Reason::REFUSED_STREAM, }) } }, Err(RecvHeaderBlockError::State(err)) => Err(err), } } else { if !frame.is_end_stream() { // Receiving trailers that don't set EOS is a "malformed" // message. Malformed messages are a stream error. proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id); return Err(RecvError::Stream { id: stream.id, reason: Reason::PROTOCOL_ERROR, }); } actions.recv.recv_trailers(frame, stream) }; actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) }) } pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let id = frame.stream_id(); let stream = match me.store.find_mut(&id) { Some(stream) => stream, None => { // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id()); return Ok(()); } if me.actions.may_have_forgotten_stream::
<P>
(id) { debug!( "recv_data for old stream={:?}, sending STREAM_CLOSED", id, ); let sz = frame.payload().len(); // This should have been enforced at the codec::FramedRead layer, so // this is just a sanity check. assert!(sz <= super::MAX_WINDOW_SIZE as usize); let sz = sz as WindowSize; me.actions.recv.ignore_data(sz)?; return Err(RecvError::Stream { id, reason: Reason::STREAM_CLOSED, }); } proto_err!(conn: "recv_data: stream not found; id={:?}", id); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); }, }; let actions = &mut me.actions; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { let sz = frame.payload().len(); let res = actions.recv.recv_data(frame, stream); // Any stream error after receiving a DATA frame means // we won't give the data to the user, and so they can't // release the capacity. We do it automatically. if let Err(RecvError::Stream { .. }) = res { actions.recv.release_connection_capacity(sz as WindowSize, &mut None); } actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) }) } pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let id = frame.stream_id(); if id.is_zero() { proto_err!(conn: "recv_reset: invalid stream ID 0"); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id()); return Ok(()); } let stream = match me.store.find_mut(&id) { Some(stream) => stream, None => { // TODO: Are there other error cases? me.actions .ensure_not_idle(me.counts.peer(), id) .map_err(RecvError::Connection)?; return Ok(()); }, }; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; let actions = &mut me.actions; me.counts.transition(stream, |counts, stream| { actions.recv.recv_reset(frame, stream); actions.send.recv_err(send_buffer, stream, counts); assert!(stream.state.is_closed()); Ok(()) }) } /// Handle a received error and return the ID of the last processed stream. pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let actions = &mut me.actions; let counts = &mut me.counts; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; let last_processed_id = actions.recv.last_processed_id(); me.store .for_each(|stream| { counts.transition(stream, |counts, stream| { actions.recv.recv_err(err, &mut *stream); actions.send.recv_err(send_buffer, stream, counts); Ok::<_, ()>(()) }) }) .unwrap(); actions.conn_error = Some(err.shallow_clone()); last_processed_id } pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let actions = &mut me.actions; let counts = &mut me.counts; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; let last_stream_id = frame.last_stream_id(); let err = frame.reason().into(); if last_stream_id > actions.recv.max_stream_id() { // The remote endpoint sent a `GOAWAY` frame indicating a stream // that we never sent, or that we have already terminated on account // of previous `GOAWAY` frame. In either case, that is illegal. 
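            // A concrete instance of the second case:
            //
            //   peer: GOAWAY { last_stream_id: 5 }
            //   peer: GOAWAY { last_stream_id: 9 }   <- rejected here
            //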
// (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase // the value they send in the last stream identifier, since the // peers might already have retried unprocessed requests on another // connection.") proto_err!(conn: "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})", last_stream_id, actions.recv.max_stream_id(), ); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } actions.recv.go_away(last_stream_id); me.store .for_each(|stream| if stream.id > last_stream_id { counts.transition(stream, |counts, stream| { actions.recv.recv_err(&err, &mut *stream); actions.send.recv_err(send_buffer, stream, counts); Ok::<_, ()>(()) }) } else { Ok::<_, ()>(()) }) .unwrap(); actions.conn_error = Some(err); Ok(()) } pub fn last_processed_id(&self) -> StreamId { self.inner.lock().unwrap().actions.recv.last_processed_id() } pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { let id = frame.stream_id(); let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; if id.is_zero() { me.actions .send .recv_connection_window_update(frame, &mut me.store, &mut me.counts) .map_err(RecvError::Connection)?; } else { // The remote may send window updates for streams that the local now // considers closed. It's ok... if let Some(mut stream) = me.store.find_mut(&id) { // This result is ignored as there is nothing to do when there // is an error. The stream is reset by the function on error and // the error is informational. let _ = me.actions.send.recv_stream_window_update( frame.size_increment(), send_buffer, &mut stream, &mut me.counts, &mut me.actions.task, ); } else { me.actions .ensure_not_idle(me.counts.peer(), id) .map_err(RecvError::Connection)?; } } Ok(()) } pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let id = frame.stream_id(); let promised_id = frame.promised_id(); // First, ensure that the initiating stream is still in a valid state. let parent_key = match me.store.find_mut(&id) { Some(stream) => { // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id()); return Ok(()); } // The stream must be receive open stream.state.ensure_recv_open()?; stream.key() } None => { proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) }, }; // TODO: Streams in the reserved states do not count towards the concurrency // limit. However, it seems like there should be a cap otherwise this // could grow in memory indefinitely. // Ensure that we can reserve streams me.actions.recv.ensure_can_reserve()?; // Next, open the stream. // // If `None` is returned, then the stream is being refused. There is no // further work to be done. if me.actions.recv.open(promised_id, Open::PushPromise, &mut me.counts)?.is_none() { return Ok(()); } // Try to handle the frame and create a corresponding key for the pushed stream // this requires a bit of indirection to make the borrow checker happy. 
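        // Roughly, the happy path below is:
        //
        //   1. Insert a new `Stream` for `promised_id` into the store.
        //   2. Let `Recv::recv_push_promise` validate the promised request.
        //   3. Queue the new stream on the parent's `pending_push_promises`
        //      and notify the parent's recv task so the push becomes visible
        //      to the user.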
let child_key: Option = { // Create state for the stream let stream = me.store.insert(promised_id, { Stream::new( promised_id, me.actions.send.init_window_sz(), me.actions.recv.init_window_sz()) }); let actions = &mut me.actions; me.counts.transition(stream, |counts, stream| { let stream_valid = actions.recv.recv_push_promise(frame, stream); match stream_valid { Ok(()) => Ok(Some(stream.key())), _ => { let mut send_buffer = self.send_buffer.inner.lock().unwrap(); actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, stream_valid) .map(|()| None) } } })? }; // If we're successful, push the headers and stream... if let Some(child) = child_key { let mut ppp = me.store[parent_key].pending_push_promises.take(); ppp.push(&mut me.store.resolve(child)); let parent = &mut me.store.resolve(parent_key); parent.pending_push_promises = ppp; parent.notify_recv(); }; Ok(()) } pub fn next_incoming(&mut self) -> Option> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let key = me.actions.recv.next_incoming(&mut me.store); // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding // the lock, so it can't. me.refs += 1; key.map(|key| { let stream = &mut me.store.resolve(key); trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state); StreamRef { opaque: OpaqueStreamRef::new(self.inner.clone(), stream), send_buffer: self.send_buffer.clone(), } }) } pub fn send_pending_refusal( &mut self, dst: &mut Codec>, ) -> Poll<(), io::Error> where T: AsyncWrite, { let mut me = self.inner.lock().unwrap(); let me = &mut *me; me.actions.recv.send_pending_refusal(dst) } pub fn clear_expired_reset_streams(&mut self) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; me.actions.recv.clear_expired_reset_streams(&mut me.store, &mut me.counts); } pub fn poll_complete(&mut self, dst: &mut Codec>) -> Poll<(), io::Error> where T: AsyncWrite, { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; // Send WINDOW_UPDATE frames first // // TODO: It would probably be better to interleave updates w/ data // frames. try_ready!(me.actions.recv.poll_complete(&mut me.store, &mut me.counts, dst)); // Send any other pending frames try_ready!(me.actions.send.poll_complete( send_buffer, &mut me.store, &mut me.counts, dst )); // Nothing else to do, track the task me.actions.task = Some(task::current()); Ok(().into()) } pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.apply_remote_settings(frame); me.actions.send.apply_remote_settings( frame, send_buffer, &mut me.store, &mut me.counts, &mut me.actions.task) } pub fn send_request( &mut self, request: Request<()>, end_of_stream: bool, pending: Option<&OpaqueStreamRef>, ) -> Result, SendError> { use http::Method; use super::stream::ContentLength; // TODO: There is a hazard with assigning a stream ID before the // prioritize layer. If prioritization reorders new streams, this // implicitly closes the earlier stream IDs. 
// // See: hyperium/h2#11 let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.actions.ensure_no_conn_error()?; me.actions.send.ensure_next_stream_id()?; // The `pending` argument is provided by the `Client`, and holds // a store `Key` of a `Stream` that may have been not been opened // yet. // // If that stream is still pending, the Client isn't allowed to // queue up another pending stream. They should use `poll_ready`. if let Some(stream) = pending { if me.store.resolve(stream.key).is_pending_open { return Err(UserError::Rejected.into()); } } if me.counts.peer().is_server() { // Servers cannot open streams. PushPromise must first be reserved. return Err(UserError::UnexpectedFrameType.into()); } let stream_id = me.actions.send.open()?; let mut stream = Stream::new( stream_id, me.actions.send.init_window_sz(), me.actions.recv.init_window_sz(), ); if *request.method() == Method::HEAD { stream.content_length = ContentLength::Head; } // Convert the message let headers = client::Peer::convert_send_message( stream_id, request, end_of_stream)?; let mut stream = me.store.insert(stream.id, stream); let sent = me.actions.send.send_headers( headers, send_buffer, &mut stream, &mut me.counts, &mut me.actions.task, ); // send_headers can return a UserError, if it does, // we should forget about this stream. if let Err(err) = sent { stream.unlink(); stream.remove(); return Err(err.into()); } // Given that the stream has been initialized, it should not be in the // closed state. debug_assert!(!stream.state.is_closed()); // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding // the lock, so it can't. me.refs += 1; Ok(StreamRef { opaque: OpaqueStreamRef::new( self.inner.clone(), &mut stream, ), send_buffer: self.send_buffer.clone(), }) } pub fn send_reset(&mut self, id: StreamId, reason: Reason) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let key = match me.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { let stream = Stream::new(id, 0, 0); e.insert(stream) }, }; let stream = me.store.resolve(key); let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.actions.send_reset(stream, reason, &mut me.counts, send_buffer); } pub fn send_go_away(&mut self, last_processed_id: StreamId) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let actions = &mut me.actions; actions.recv.go_away(last_processed_id); } } impl Streams where B: Buf, { pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), ::Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; me.actions.ensure_no_conn_error()?; me.actions.send.ensure_next_stream_id()?; if let Some(pending) = pending { let mut stream = me.store.resolve(pending.key); trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); if stream.is_pending_open { stream.wait_send(); return Ok(Async::NotReady); } } Ok(().into()) } } impl Streams where P: Peer, { /// This function is safe to call multiple times. /// /// A `Result` is returned to avoid panicking if the mutex is poisoned. 
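/// (Roughly: every remaining stream is transitioned to an errored state as if
/// the peer had gone away, the send side is torn down, and all pending queues
/// are cleared, so user handles observe a broken-pipe style connection error.)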
pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { let mut me = self.inner.lock().map_err(|_| ())?; let me = &mut *me; let actions = &mut me.actions; let counts = &mut me.counts; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; if actions.conn_error.is_none() { actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); } trace!("Streams::recv_eof"); me.store .for_each(|stream| { counts.transition(stream, |counts, stream| { actions.recv.recv_eof(stream); // This handles resetting send state associated with the // stream actions.send.recv_err(send_buffer, stream, counts); Ok::<_, ()>(()) }) }) .expect("recv_eof"); actions.clear_queues(clear_pending_accept, &mut me.store, counts); Ok(()) } #[cfg(feature = "unstable")] pub fn num_active_streams(&self) -> usize { let me = self.inner.lock().unwrap(); me.store.num_active_streams() } pub fn has_streams(&self) -> bool { let me = self.inner.lock().unwrap(); me.counts.has_streams() } pub fn has_streams_or_other_references(&self) -> bool { let me = self.inner.lock().unwrap(); me.counts.has_streams() || me.refs > 1 } #[cfg(feature = "unstable")] pub fn num_wired_streams(&self) -> usize { let me = self.inner.lock().unwrap(); me.store.num_wired_streams() } } // no derive because we don't need B and P to be Clone. impl Clone for Streams where P: Peer, { fn clone(&self) -> Self { self.inner.lock().unwrap().refs += 1; Streams { inner: self.inner.clone(), send_buffer: self.send_buffer.clone(), _p: ::std::marker::PhantomData, } } } impl Drop for Streams where P: Peer, { fn drop(&mut self) { let _ = self.inner.lock().map(|mut inner| inner.refs -= 1); } } // ===== impl StreamRef ===== impl StreamRef { pub fn send_data(&mut self, data: B, end_stream: bool) -> Result<(), UserError> where B: Buf, { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.opaque.key); let actions = &mut me.actions; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { // Create the data frame let mut frame = frame::Data::new(stream.id, data); frame.set_end_stream(end_stream); // Send the data frame actions.send.send_data( frame, send_buffer, stream, counts, &mut actions.task) }) } pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), UserError> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.opaque.key); let actions = &mut me.actions; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { // Create the trailers frame let frame = frame::Headers::trailers(stream.id, trailers); // Send the trailers frame actions.send.send_trailers( frame, send_buffer, stream, counts, &mut actions.task) }) } pub fn send_reset(&mut self, reason: Reason) { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.opaque.key); let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.actions.send_reset(stream, reason, &mut me.counts, send_buffer); } pub fn send_response( &mut self, response: Response<()>, end_of_stream: bool, ) -> Result<(), UserError> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.opaque.key); let actions = &mut me.actions; let mut send_buffer = 
self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream); actions.send.send_headers( frame, send_buffer, stream, counts, &mut actions.task) }) } /// Called by the server after the stream is accepted. Given that clients /// initialize streams by sending HEADERS, the request will always be /// available. /// /// # Panics /// /// This function panics if the request isn't present. pub fn take_request(&self) -> Request<()> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.opaque.key); me.actions.recv.take_request(&mut stream) } /// Called by a client to see if the current stream is pending open pub fn is_pending_open(&self) -> bool { let mut me = self.opaque.inner.lock().unwrap(); me.store.resolve(self.opaque.key).is_pending_open } /// Request capacity to send data pub fn reserve_capacity(&mut self, capacity: WindowSize) { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.opaque.key); me.actions.send.reserve_capacity(capacity, &mut stream, &mut me.counts) } /// Returns the stream's current send capacity. pub fn capacity(&self) -> WindowSize { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.opaque.key); me.actions.send.capacity(&mut stream) } /// Request to be notified when the stream's capacity increases pub fn poll_capacity(&mut self) -> Poll, UserError> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.opaque.key); me.actions.send.poll_capacity(&mut stream) } /// Request to be notified for if a `RST_STREAM` is received for this stream. pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.opaque.key); me.actions.send.poll_reset(&mut stream, mode) .map_err(From::from) } pub fn clone_to_opaque(&self) -> OpaqueStreamRef where B: 'static, { self.opaque.clone() } pub fn stream_id(&self) -> StreamId { self.opaque.stream_id() } } impl Clone for StreamRef { fn clone(&self) -> Self { StreamRef { opaque: self.opaque.clone(), send_buffer: self.send_buffer.clone(), } } } // ===== impl OpaqueStreamRef ===== impl OpaqueStreamRef { fn new(inner: Arc>, stream: &mut store::Ptr) -> OpaqueStreamRef { stream.ref_inc(); OpaqueStreamRef { inner, key: stream.key() } } /// Called by a client to check for a received response. pub fn poll_response(&mut self) -> Poll, proto::Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); me.actions.recv.poll_response(&mut stream) } /// Called by a client to check for a pushed request. 
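/// (Yields the promised request together with an `OpaqueStreamRef` for the
/// pushed stream, or `None` once no further pushes will arrive.)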
pub fn poll_pushed( &mut self ) -> Poll, OpaqueStreamRef)>, proto::Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let res = { let mut stream = me.store.resolve(self.key); try_ready!(me.actions.recv.poll_pushed(&mut stream)) }; Ok(Async::Ready(res.map(|(h, key)| { me.refs += 1; let opaque_ref = OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key)); (h, opaque_ref) }))) } pub fn body_is_empty(&self) -> bool { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.key); me.actions.recv.body_is_empty(&stream) } pub fn is_end_stream(&self) -> bool { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let stream = me.store.resolve(self.key); me.actions.recv.is_end_stream(&stream) } pub fn poll_data(&mut self) -> Poll, proto::Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); me.actions.recv.poll_data(&mut stream) } pub fn poll_trailers(&mut self) -> Poll, proto::Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); me.actions.recv.poll_trailers(&mut stream) } /// Releases recv capacity back to the peer. This may result in sending /// WINDOW_UPDATE frames on both the stream and connection. pub fn release_capacity(&mut self, capacity: WindowSize) -> Result<(), UserError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); me.actions .recv .release_capacity(capacity, &mut stream, &mut me.actions.task) } pub(crate) fn clear_recv_buffer(&mut self) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); me.actions .recv .clear_recv_buffer(&mut stream); } pub fn stream_id(&self) -> StreamId { self.inner.lock() .unwrap() .store[self.key] .id } } impl fmt::Debug for OpaqueStreamRef { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use std::sync::TryLockError::*; match self.inner.try_lock() { Ok(me) => { let stream = &me.store[self.key]; fmt.debug_struct("OpaqueStreamRef") .field("stream_id", &stream.id) .field("ref_count", &stream.ref_count) .finish() }, Err(Poisoned(_)) => { fmt.debug_struct("OpaqueStreamRef") .field("inner", &"") .finish() } Err(WouldBlock) => { fmt.debug_struct("OpaqueStreamRef") .field("inner", &"") .finish() } } } } impl Clone for OpaqueStreamRef { fn clone(&self) -> Self { // Increment the ref count let mut inner = self.inner.lock().unwrap(); inner.store.resolve(self.key).ref_inc(); inner.refs += 1; OpaqueStreamRef { inner: self.inner.clone(), key: self.key.clone(), } } } impl Drop for OpaqueStreamRef { fn drop(&mut self) { drop_stream_ref(&self.inner, self.key); } } // TODO: Move back in fn above fn drop_stream_ref(inner: &Mutex, key: store::Key) { let mut me = match inner.lock() { Ok(inner) => inner, Err(_) => if ::std::thread::panicking() { trace!("StreamRef::drop; mutex poisoned"); return; } else { panic!("StreamRef::drop; mutex poisoned"); }, }; let me = &mut *me; me.refs -= 1; let mut stream = me.store.resolve(key); trace!("drop_stream_ref; stream={:?}", stream); // decrement the stream's ref count by 1. 
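// (What follows, in short: the count is decremented; if this was the last
// reference to an already-closed stream, the connection task is woken so it
// can finish up. The stream is then transitioned: a reset is scheduled if the
// user simply lost interest (`maybe_cancel`), and, once no references remain,
// any unclaimed recv window is handed back to the connection and push
// promises that can no longer be reached are cancelled as well.)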
stream.ref_dec(); let actions = &mut me.actions; // If the stream is not referenced and it is already // closed (does not have to go through logic below // of canceling the stream), we should notify the task // (connection) so that it can close properly if stream.ref_count == 0 && stream.is_closed() { if let Some(task) = actions.task.take() { task.notify(); } } me.counts.transition(stream, |counts, stream| { maybe_cancel(stream, actions, counts); if stream.ref_count == 0 { // Release any recv window back to connection, no one can access // it anymore. actions.recv.release_closed_capacity(stream, &mut actions.task); // We won't be able to reach our push promises anymore let mut ppp = stream.pending_push_promises.take(); while let Some(promise) = ppp.pop(stream.store_mut()) { counts.transition(promise, |counts, stream| { maybe_cancel(stream, actions, counts); }); } } }); } fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) { if stream.is_canceled_interest() { actions.send.schedule_implicit_reset( stream, Reason::CANCEL, counts, &mut actions.task); actions.recv.enqueue_reset_expiration(stream, counts); } } // ===== impl SendBuffer ===== impl SendBuffer { fn new() -> Self { let inner = Mutex::new(Buffer::new()); SendBuffer { inner } } } // ===== impl Actions ===== impl Actions { fn send_reset( &mut self, stream: store::Ptr, reason: Reason, counts: &mut Counts, send_buffer: &mut Buffer>, ) { counts.transition(stream, |counts, stream| { self.send.send_reset( reason, send_buffer, stream, counts, &mut self.task); self.recv.enqueue_reset_expiration(stream, counts); // if a RecvStream is parked, ensure it's notified stream.notify_recv(); }); } fn reset_on_recv_stream_err( &mut self, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, res: Result<(), RecvError>, ) -> Result<(), RecvError> { if let Err(RecvError::Stream { reason, .. }) = res { // Reset the stream. self.send.send_reset(reason, buffer, stream, counts, &mut self.task); Ok(()) } else { res } } fn ensure_not_idle(&mut self, peer: peer::Dyn, id: StreamId) -> Result<(), Reason> { if peer.is_local_init(id) { self.send.ensure_not_idle(id) } else { self.recv.ensure_not_idle(id) } } fn ensure_no_conn_error(&self) -> Result<(), proto::Error> { if let Some(ref err) = self.conn_error { Err(err.shallow_clone()) } else { Ok(()) } } /// Check if we possibly could have processed and since forgotten this stream. /// /// If we send a RST_STREAM for a stream, we will eventually "forget" about /// the stream to free up memory. It's possible that the remote peer had /// frames in-flight, and by the time we receive them, our own state is /// gone. We *could* tear everything down by sending a GOAWAY, but it /// is more likely to be latency/memory constraints that caused this, /// and not a bad actor. So be less catastrophic, the spec allows /// us to send another RST_STREAM of STREAM_CLOSED. fn may_have_forgotten_stream(&self, id: StreamId) -> bool { if id.is_zero() { return false; } if P::is_local_init(id) { self.send.may_have_created_stream(id) } else { self.recv.may_have_created_stream(id) } } fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) { self.recv.clear_queues(clear_pending_accept, store, counts); self.send.clear_queues(store, counts); } } h2-0.1.26/src/server.rs010066400017500001750000001302071351644257100130120ustar0000000000000000//! Server implementation of the HTTP/2.0 protocol. //! //! # Getting started //! //! 
Running an HTTP/2.0 server requires the caller to manage accepting the //! connections as well as getting the connections to a state that is ready to //! begin the HTTP/2.0 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpListener`] to accept //! connections, but usually it means using either ALPN or HTTP/1.1 protocol //! upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], //! which will begin the [HTTP/2.0 handshake]. This returns a future that //! completes once the handshake process is performed and HTTP/2.0 streams may //! be received. //! //! [`handshake`] uses default configuration values. There are a number of //! settings that can be changed by using [`Builder`] instead. //! //! # Inbound streams //! //! The [`Connection`] instance is used to accept inbound HTTP/2.0 streams. It //! does this by implementing [`futures::Stream`]. When a new stream is //! received, a call to [`Connection::poll`] will return `(request, response)`. //! The `request` handle (of type [`http::Request`]) contains the //! HTTP request head as well as provides a way to receive the inbound data //! stream and the trailers. The `response` handle (of type [`SendStream`]) //! allows responding to the request, stream the response payload, send //! trailers, and send push promises. //! //! The send ([`SendStream`]) and receive ([`RecvStream`]) halves of the stream //! can be operated independently. //! //! # Managing the connection //! //! The [`Connection`] instance is used to manage connection state. The caller //! is required to call either [`Connection::poll`] or //! [`Connection::poll_close`] in order to advance the connection state. Simply //! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the //! connection state is advanced. //! //! It is not required to call **both** [`Connection::poll`] and //! [`Connection::poll_close`]. If the caller is ready to accept a new stream, //! then only [`Connection::poll`] should be called. When the caller **does //! not** want to accept a new stream, [`Connection::poll_close`] should be //! called. //! //! The [`Connection`] instance should only be dropped once //! [`Connection::poll_close`] returns `Ready`. Once [`Connection::poll`] //! returns `Ready(None)`, there will no longer be any more inbound streams. At //! this point, only [`Connection::poll_close`] should be called. //! //! # Shutting down the server //! //! Graceful shutdown of the server is [not yet //! implemented](https://github.com/hyperium/h2/issues/69). //! //! # Example //! //! A basic HTTP/2.0 server example that runs over TCP and assumes [prior //! knowledge], i.e. both the client and the server assume that the TCP socket //! will use the HTTP/2.0 protocol without prior negotiation. //! //! ```rust //! extern crate futures; //! extern crate h2; //! extern crate http; //! extern crate tokio; //! //! use futures::{Future, Stream}; //! # use futures::future::ok; //! use h2::server; //! use http::{Response, StatusCode}; //! use tokio::net::TcpListener; //! //! pub fn main () { //! let addr = "127.0.0.1:5928".parse().unwrap(); //! let listener = TcpListener::bind(&addr,).unwrap(); //! //! tokio::run({ //! // Accept all incoming TCP connections. //! listener.incoming().for_each(move |socket| { //! // Spawn a new task to process each connection. //! tokio::spawn({ //! // Start the HTTP/2.0 connection handshake //! server::handshake(socket) //! .and_then(|h2| { //! 
// Accept all inbound HTTP/2.0 streams sent over the //! // connection. //! h2.for_each(|(request, mut respond)| { //! println!("Received request: {:?}", request); //! //! // Build a response with no body //! let response = Response::builder() //! .status(StatusCode::OK) //! .body(()) //! .unwrap(); //! //! // Send the response back to the client //! respond.send_response(response, true) //! .unwrap(); //! //! Ok(()) //! }) //! }) //! .map_err(|e| panic!("unexpected error = {:?}", e)) //! }); //! //! Ok(()) //! }) //! .map_err(|e| panic!("failed to run HTTP/2.0 server: {:?}", e)) //! # .select(ok(())).map(|_|()).map_err(|_|()) //! }); //! } //! ``` //! //! [prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http //! [`handshake`]: fn.handshake.html //! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! [`Connection`]: struct.Connection.html //! [`Connection::poll`]: struct.Connection.html#method.poll //! [`Connection::poll_close`]: struct.Connection.html#method.poll_close //! [`futures::Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html //! [`http::Request`]: ../struct.RecvStream.html //! [`RecvStream`]: ../struct.RecvStream.html //! [`SendStream`]: ../struct.SendStream.html //! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html use {SendStream, RecvStream, ReleaseCapacity, PingPong}; use codec::{Codec, RecvError}; use frame::{self, Pseudo, Reason, Settings, StreamId}; use proto::{self, Config, Prioritized}; use bytes::{Buf, Bytes, IntoBuf}; use futures::{self, Async, Future, Poll}; use http::{HeaderMap, Request, Response}; use std::{convert, fmt, io, mem}; use std::time::Duration; use tokio_io::{AsyncRead, AsyncWrite}; /// In progress HTTP/2.0 connection handshake future. /// /// This type implements `Future`, yielding a `Connection` instance once the /// handshake has completed. /// /// The handshake is completed once the connection preface is fully received /// from the client **and** the initial settings frame is sent to the client. /// /// The handshake future does not wait for the initial settings frame from the /// client. /// /// See [module] level docs for more details. /// /// [module]: index.html #[must_use = "futures do nothing unless polled"] pub struct Handshake { /// The config to pass to Connection::new after handshake succeeds. builder: Builder, /// The current state of the handshake. state: Handshaking } /// Accepts inbound HTTP/2.0 streams on a connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and /// implements the HTTP/2.0 server logic for that connection. It is responsible /// for receiving inbound streams initiated by the client as well as driving the /// internal state forward. /// /// `Connection` values are created by calling [`handshake`]. Once a /// `Connection` value is obtained, the caller must call [`poll`] or /// [`poll_close`] in order to drive the internal connection state forward. 
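/// For illustration, a response body can also be streamed through the
/// [`SendStream`] returned by `send_response` (a sketch only; error handling
/// is elided and the literal body is arbitrary):
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate http;
/// # extern crate tokio_io;
/// # use futures::{Future, Stream};
/// # use h2::server;
/// # use http::{Response, StatusCode};
/// # use tokio_io::*;
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) {
/// server::handshake(my_io)
///     .and_then(|server| {
///         server.for_each(|(_request, mut respond)| {
///             let response = Response::builder()
///                 .status(StatusCode::OK)
///                 .body(())
///                 .unwrap();
///
///             // Keep the stream open so the body can be streamed afterwards.
///             let mut send = respond.send_response(response, false)?;
///
///             // Stream a chunk of the body, then end the stream.
///             send.send_data("hello world".into(), true)?;
///             Ok(())
///         })
///     })
/// # .wait().unwrap();
/// # }
/// # pub fn main() {}
/// ```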
/// /// See [module level] documentation for more details /// /// [module level]: index.html /// [`handshake`]: struct.Connection.html#method.handshake /// [`poll`]: struct.Connection.html#method.poll /// [`poll_close`]: struct.Connection.html#method.poll_close /// /// # Examples /// /// ``` /// # extern crate futures; /// # extern crate h2; /// # extern crate tokio_io; /// # use futures::{Future, Stream}; /// # use tokio_io::*; /// # use h2::server; /// # use h2::server::*; /// # /// # fn doc(my_io: T) { /// server::handshake(my_io) /// .and_then(|server| { /// server.for_each(|(request, respond)| { /// // Process the request and send the response back to the client /// // using `respond`. /// # Ok(()) /// }) /// }) /// # .wait().unwrap(); /// # } /// # /// # pub fn main() {} /// ``` #[must_use = "streams do nothing unless polled"] pub struct Connection { connection: proto::Connection, } /// Builds server connections with custom configuration values. /// /// Methods can be chained in order to set the configuration values. /// /// The server is constructed by calling [`handshake`] and passing the I/O /// handle that will back the HTTP/2.0 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. /// /// See function level documentation for details on the various server /// configuration settings. /// /// [`Builder::new`]: struct.Builder.html#method.new /// [`handshake`]: struct.Builder.html#method.handshake /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` #[derive(Clone, Debug)] pub struct Builder { /// Time to keep locally reset streams around before reaping. reset_stream_duration: Duration, /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, /// Initial `Settings` frame to send as part of the handshake. settings: Settings, /// Initial target window size for new connections. initial_target_connection_window_size: Option, } /// Send a response back to the client /// /// A `SendResponse` instance is provided when receiving a request and is used /// to send the associated response back to the client. It is also used to /// explicitly reset the stream with a custom reason. /// /// It will also be used to initiate push promises linked with the associated /// stream. This is [not yet /// implemented](https://github.com/hyperium/h2/issues/185). /// /// If the `SendResponse` instance is dropped without sending a response, then /// the HTTP/2.0 stream will be reset. /// /// See [module] level docs for more details. /// /// [module]: index.html #[derive(Debug)] pub struct SendResponse { inner: proto::StreamRef, } /// Stages of an in-progress handshake. enum Handshaking { /// State 1. Connection is flushing pending SETTINGS frame. Flushing(Flush>), /// State 2. Connection is waiting for the client preface. ReadingPreface(ReadPreface>), /// Dummy state for `mem::replace`. 
Empty, } /// Flush a Sink struct Flush { codec: Option>, } /// Read the client connection preface struct ReadPreface { codec: Option>, pos: usize, } #[derive(Debug)] pub(crate) struct Peer; const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// Creates a new configured HTTP/2.0 server with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence /// the [HTTP/2.0 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the /// HTTP/2.0 handshake has been completed. The returned [`Connection`] /// instance will be using default configuration values. Use [`Builder`] to /// customize the configuration values used by a [`Connection`] instance. /// /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// /// # Examples /// /// ``` /// # extern crate futures; /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use futures::*; /// # use h2::server; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # { /// server::handshake(my_io) /// .and_then(|connection| { /// // The HTTP/2.0 handshake has completed, now use `connection` to /// // accept inbound HTTP/2.0 streams. /// # Ok(()) /// }) /// # .wait().unwrap(); /// # } /// # /// # pub fn main() {} /// ``` pub fn handshake(io: T) -> Handshake where T: AsyncRead + AsyncWrite, { Builder::new().handshake(io) } // ===== impl Connection ===== impl Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, { fn handshake2(io: T, builder: Builder) -> Handshake { // Create the codec. let mut codec = Codec::new(io); if let Some(max) = builder.settings.max_frame_size() { codec.set_max_recv_frame_size(max as usize); } if let Some(max) = builder.settings.max_header_list_size() { codec.set_max_recv_header_list_size(max as usize); } // Send initial settings frame. codec .buffer(builder.settings.clone().into()) .expect("invalid SETTINGS frame"); // Create the handshake future. let state = Handshaking::from(codec); Handshake { builder, state } } /// Sets the target window size for the whole connection. /// /// If `size` is greater than the current value, then a `WINDOW_UPDATE` /// frame will be immediately sent to the remote, increasing the connection /// level window by `size - current_value`. /// /// If `size` is less than the current value, nothing will happen /// immediately. However, as window capacity is released by /// [`ReleaseCapacity`] instances, no `WINDOW_UPDATE` frames will be sent /// out until the number of "in flight" bytes drops below `size`. /// /// The default value is 65,535. /// /// See [`ReleaseCapacity`] documentation for more details. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// [library level]: ../index.html#flow-control pub fn set_target_window_size(&mut self, size: u32) { assert!(size <= proto::MAX_WINDOW_SIZE); self.connection.set_target_window_size(size); } /// Returns `Ready` when the underlying connection has closed. /// /// If any new inbound streams are received during a call to `poll_close`, /// they will be queued and returned on the next call to [`poll`]. /// /// This function will advance the internal connection state, driving /// progress on all the other handles (e.g. [`RecvStream`] and [`SendStream`]). /// /// See [here](index.html#managing-the-connection) for more details. 
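/// A rough sketch of shutting a connection down gracefully (illustrative
/// only; the bindings are arbitrary and error handling is elided):
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use futures::{future, Future};
/// # use h2::server;
/// # use tokio_io::*;
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) {
/// server::handshake(my_io)
///     .and_then(|mut connection| {
///         // Ask the client to stop opening new streams...
///         connection.graceful_shutdown();
///         // ...then keep driving the connection until it has fully closed.
///         future::poll_fn(move || connection.poll_close())
///     })
/// # .wait().unwrap();
/// # }
/// # pub fn main() {}
/// ```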
/// /// [`poll`]: struct.Connection.html#method.poll /// [`RecvStream`]: ../struct.RecvStream.html /// [`SendStream`]: ../struct.SendStream.html pub fn poll_close(&mut self) -> Poll<(), ::Error> { self.connection.poll().map_err(Into::into) } #[deprecated(note="use abrupt_shutdown or graceful_shutdown instead", since="0.1.4")] #[doc(hidden)] pub fn close_connection(&mut self) { self.graceful_shutdown(); } /// Sets the connection to a GOAWAY state. /// /// Does not terminate the connection. Must continue being polled to close /// connection. /// /// After flushing the GOAWAY frame, the connection is closed. Any /// outstanding streams do not prevent the connection from closing. This /// should usually be reserved for shutting down when something bad /// external to `h2` has happened, and open streams cannot be properly /// handled. /// /// For graceful shutdowns, see [`graceful_shutdown`](Connection::graceful_shutdown). pub fn abrupt_shutdown(&mut self, reason: Reason) { self.connection.go_away_from_user(reason); } /// Starts a [graceful shutdown][1] process. /// /// Must continue being polled to close connection. /// /// It's possible to receive more requests after calling this method, since /// they might have been in-flight from the client already. After about /// 1 RTT, no new requests should be accepted. Once all active streams /// have completed, the connection is closed. /// /// [1]: http://httpwg.org/specs/rfc7540.html#GOAWAY pub fn graceful_shutdown(&mut self) { self.connection.go_away_gracefully(); } /// Takes a `PingPong` instance from the connection. /// /// # Note /// /// This may only be called once. Calling multiple times will return `None`. pub fn ping_pong(&mut self) -> Option { self.connection .take_user_pings() .map(PingPong::new) } } impl futures::Stream for Connection where T: AsyncRead + AsyncWrite, B: IntoBuf, B::Buf: 'static, { type Item = (Request, SendResponse); type Error = ::Error; fn poll(&mut self) -> Poll, ::Error> { // Always try to advance the internal state. Getting NotReady also is // needed to allow this function to return NotReady. match self.poll_close()? { Async::Ready(_) => { // If the socket is closed, don't return anything // TODO: drop any pending streams return Ok(None.into()); }, _ => {}, } if let Some(inner) = self.connection.next_incoming() { trace!("received incoming"); let (head, _) = inner.take_request().into_parts(); let body = RecvStream::new(ReleaseCapacity::new(inner.clone_to_opaque())); let request = Request::from_parts(head, body); let respond = SendResponse { inner }; return Ok(Some((request, respond)).into()); } Ok(Async::NotReady) } } impl fmt::Debug for Connection where T: fmt::Debug, B: fmt::Debug + IntoBuf, B::Buf: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Connection") .field("connection", &self.connection) .finish() } } // ===== impl Builder ===== impl Builder { /// Returns a new server builder instance initialized with default /// configuration values. /// /// Configuration methods can be chained on the return value. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. 
/// let server_fut = Builder::new() /// .initial_window_size(1_000_000) /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn new() -> Builder { Builder { reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, settings: Settings::default(), initial_target_connection_window_size: None, } } /// Indicates the initial window size (in octets) for stream-level /// flow control for received data. /// /// The initial window of a stream is used as part of flow control. For more /// details, see [`ReleaseCapacity`]. /// /// The default value is 65,535. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn initial_window_size(&mut self, size: u32) -> &mut Self { self.settings.set_initial_window_size(Some(size)); self } /// Indicates the initial window size (in octets) for connection-level flow control /// for received data. /// /// The initial window of a connection is used as part of flow control. For more details, /// see [`ReleaseCapacity`]. /// /// The default value is 65,535. /// /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .initial_connection_window_size(1_000_000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { self.initial_target_connection_window_size = Some(size); self } /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the /// configured server is able to accept. /// /// The sender may send data frames that are **smaller** than this value, /// but any data larger than `max` will be broken up into multiple `DATA` /// frames. /// /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .max_frame_size(1_000_000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` /// /// # Panics /// /// This function panics if `max` is not within the legal range specified /// above. pub fn max_frame_size(&mut self, max: u32) -> &mut Self { self.settings.set_max_frame_size(Some(max)); self } /// Sets the max size of received header frames. /// /// This advisory setting informs a peer of the maximum size of header list /// that the sender is prepared to accept, in octets. 
The value is based on /// the uncompressed size of header fields, including the length of the name /// and value in octets plus an overhead of 32 octets for each header field. /// /// This setting is also used to limit the maximum amount of data that is /// buffered to decode HEADERS frames. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # -> Handshake<T> /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .max_header_list_size(16 * 1024) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { self.settings.set_max_header_list_size(Some(max)); self } // TODO: fn header_table_size // It's a little trickier to add since we need to only enforce it // once the client has ACKed the settings... /// Sets the maximum number of concurrent streams. /// /// The maximum concurrent streams setting only controls the maximum number /// of streams that can be initiated by the remote peer. In other words, /// when this setting is set to 100, this does not limit the number of /// concurrent streams that can be created by the caller. /// /// It is recommended that this value be no smaller than 100, so as to not /// unnecessarily limit parallelism. However, any value is legal, including /// 0. If `max` is set to 0, then the remote will not be permitted to /// initiate streams. /// /// Note that streams in the reserved state, i.e., push promises that have /// been reserved but the stream has not started, do not count against this /// setting. /// /// Also note that if the remote *does* exceed the value set here, it is not /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # -> Handshake<T> /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .max_concurrent_streams(1000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { self.settings.set_max_concurrent_streams(Some(max)); self } /// Sets the maximum number of concurrent locally reset streams. /// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance /// before completing the stream, the HTTP/2.0 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// /// In order to satisfy the specification, internal state must be maintained /// to implement the behavior. This state grows linearly with the number of /// streams that are locally reset. /// /// The `max_concurrent_reset_streams` setting configures an upper /// bound on the amount of state that is maintained. When this max value is /// reached, the oldest reset stream is purged from memory.
/// /// Once the stream has been fully purged from memory, any additional frames /// received for that stream will result in a connection level protocol /// error, forcing the connection to terminate. /// /// The default value is 10. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .max_concurrent_reset_streams(1000) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { self.reset_stream_max = max; self } /// Sets the maximum number of concurrent locally reset streams. /// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance /// before completing the stream, the HTTP/2.0 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// /// In order to satisfy the specification, internal state must be maintained /// to implement the behavior. This state grows linearly with the number of /// streams that are locally reset. /// /// The `reset_stream_duration` setting configures the max amount of time /// this state will be maintained in memory. Once the duration elapses, the /// stream state is purged from memory. /// /// Once the stream has been fully purged from memory, any additional frames /// received for that stream will result in a connection level protocol /// error, forcing the connection to terminate. /// /// The default value is 30 seconds. /// /// # Examples /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # use std::time::Duration; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { self.reset_stream_duration = dur; self } /// Creates a new configured HTTP/2.0 server backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence /// the [HTTP/2.0 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the /// HTTP/2.0 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [Outbound data type]: ../index.html#outbound-data-type. /// /// # Examples /// /// Basic usage: /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc(my_io: T) /// # -> Handshake /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut = Builder::new() /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` /// /// Configures the send-payload data type. 
In this case, the outbound data /// type will be `&'static [u8]`. /// /// ``` /// # extern crate h2; /// # extern crate tokio_io; /// # use tokio_io::*; /// # use h2::server::*; /// # /// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # -> Handshake<T, &'static [u8]> /// # { /// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // handshake. /// let server_fut: Handshake<_, &'static [u8]> = Builder::new() /// .handshake(my_io); /// # server_fut /// # } /// # /// # pub fn main() {} /// ``` pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B> where T: AsyncRead + AsyncWrite, B: IntoBuf, B::Buf: 'static, { Connection::handshake2(io, self.clone()) } } impl Default for Builder { fn default() -> Builder { Builder::new() } } // ===== impl SendResponse ===== impl<B: IntoBuf> SendResponse<B> { /// Send a response to a client request. /// /// On success, a [`SendStream`] instance is returned. This instance can be /// used to stream the response body and send trailers. /// /// If a body or trailers will be sent on the returned [`SendStream`] /// instance, then `end_of_stream` must be set to `false` when calling this /// function. /// /// The [`SendResponse`] instance is already associated with a received /// request. This function may only be called once per instance and only if /// [`send_reset`] has not been previously called. /// /// [`SendResponse`]: # /// [`SendStream`]: ../struct.SendStream.html /// [`send_reset`]: #method.send_reset pub fn send_response( &mut self, response: Response<()>, end_of_stream: bool, ) -> Result<SendStream<B>, ::Error> { self.inner .send_response(response, end_of_stream) .map(|_| SendStream::new(self.inner.clone())) .map_err(Into::into) } /// Send a stream reset to the peer. /// /// This essentially cancels the stream, including any inbound or outbound /// data streams. /// /// If this function is called before [`send_response`], a call to /// [`send_response`] will result in an error. /// /// If this function is called while a [`SendStream`] instance is active, /// any further use of the instance will result in an error. /// /// This function should only be called once. /// /// [`send_response`]: #method.send_response /// [`SendStream`]: ../struct.SendStream.html pub fn send_reset(&mut self, reason: Reason) { self.inner.send_reset(reason) } /// Polls to be notified when the client resets this stream. /// /// If the stream is still open, this returns `Ok(Async::NotReady)`, and /// registers the task to be notified if a `RST_STREAM` is received. /// /// If a `RST_STREAM` frame is received for this stream, calling this /// method will yield the `Reason` for the reset. /// /// # Error /// /// Calling this method after having called `send_response` will return /// a user error. pub fn poll_reset(&mut self) -> Poll<Reason, ::Error> { self.inner.poll_reset(proto::PollReset::AwaitingHeaders) } /// Returns the stream ID of the response stream. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> ::StreamId { ::StreamId::from_internal(self.inner.stream_id()) } // TODO: Support reserving push promises.
} // ===== impl Flush ===== impl Flush { fn new(codec: Codec) -> Self { Flush { codec: Some(codec), } } } impl Future for Flush where T: AsyncWrite, B: Buf, { type Item = Codec; type Error = ::Error; fn poll(&mut self) -> Poll { // Flush the codec try_ready!(self.codec.as_mut().unwrap().flush()); // Return the codec Ok(Async::Ready(self.codec.take().unwrap())) } } impl ReadPreface { fn new(codec: Codec) -> Self { ReadPreface { codec: Some(codec), pos: 0, } } fn inner_mut(&mut self) -> &mut T { self.codec.as_mut().unwrap().get_mut() } } impl Future for ReadPreface where T: AsyncRead, B: Buf, { type Item = Codec; type Error = ::Error; fn poll(&mut self) -> Poll { let mut buf = [0; 24]; let mut rem = PREFACE.len() - self.pos; while rem > 0 { let n = try_nb!(self.inner_mut().read(&mut buf[..rem])); if n == 0 { return Err(io::Error::new( io::ErrorKind::ConnectionReset, "connection closed unexpectedly", ).into()); } if PREFACE[self.pos..self.pos + n] != buf[..n] { proto_err!(conn: "read_preface: invalid preface"); // TODO: Should this just write the GO_AWAY frame directly? return Err(Reason::PROTOCOL_ERROR.into()); } self.pos += n; rem -= n; // TODO test } Ok(Async::Ready(self.codec.take().unwrap())) } } // ===== impl Handshake ===== impl Future for Handshake where T: AsyncRead + AsyncWrite, B: IntoBuf, { type Item = Connection; type Error = ::Error; fn poll(&mut self) -> Poll { trace!("Handshake::poll(); state={:?};", self.state); use server::Handshaking::*; self.state = if let Flushing(ref mut flush) = self.state { // We're currently flushing a pending SETTINGS frame. Poll the // flush future, and, if it's completed, advance our state to wait // for the client preface. let codec = match flush.poll()? { Async::NotReady => { trace!("Handshake::poll(); flush.poll()=NotReady"); return Ok(Async::NotReady); }, Async::Ready(flushed) => { trace!("Handshake::poll(); flush.poll()=Ready"); flushed } }; Handshaking::from(ReadPreface::new(codec)) } else { // Otherwise, we haven't actually advanced the state, but we have // to replace it with itself, because we have to return a value. // (note that the assignment to `self.state` has to be outside of // the `if let` block above in order to placate the borrow checker). mem::replace(&mut self.state, Handshaking::Empty) }; let poll = if let ReadingPreface(ref mut read) = self.state { // We're now waiting for the client preface. Poll the `ReadPreface` // future. If it has completed, we will create a `Connection` handle // for the connection. read.poll() // Actually creating the `Connection` has to occur outside of this // `if let` block, because we've borrowed `self` mutably in order // to poll the state and won't be able to borrow the SETTINGS frame // as well until we release the borrow for `poll()`. 
} else { unreachable!("Handshake::poll() state was not advanced completely!") }; let server = poll?.map(|codec| { let connection = proto::Connection::new(codec, Config { next_stream_id: 2.into(), // Server does not need to locally initiate any streams initial_max_send_streams: 0, reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, settings: self.builder.settings.clone(), }); trace!("Handshake::poll(); connection established!"); let mut c = Connection { connection }; if let Some(sz) = self.builder.initial_target_connection_window_size { c.set_target_window_size(sz); } c }); Ok(server) } } impl fmt::Debug for Handshake where T: AsyncRead + AsyncWrite + fmt::Debug, B: fmt::Debug + IntoBuf, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "server::Handshake") } } impl Peer { pub fn convert_send_message( id: StreamId, response: Response<()>, end_of_stream: bool) -> frame::Headers { use http::response::Parts; // Extract the components of the HTTP request let ( Parts { status, headers, .. }, _, ) = response.into_parts(); // Build the set pseudo header set. All requests will include `method` // and `path`. let pseudo = Pseudo::response(status); // Create the HEADERS frame let mut frame = frame::Headers::new(id, pseudo, headers); if end_of_stream { frame.set_end_stream() } frame } } impl proto::Peer for Peer { type Poll = Request<()>; fn is_server() -> bool { true } fn dyn() -> proto::DynPeer { proto::DynPeer::Server } fn convert_poll_message( pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId ) -> Result { use http::{uri, Version}; let mut b = Request::builder(); macro_rules! malformed { ($($arg:tt)*) => {{ debug!($($arg)*); return Err(RecvError::Stream { id: stream_id, reason: Reason::PROTOCOL_ERROR, }); }} }; b.version(Version::HTTP_2); if let Some(method) = pseudo.method { b.method(method); } else { malformed!("malformed headers: missing method"); } // Specifying :status for a request is a protocol error if pseudo.status.is_some() { trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } // Convert the URI let mut parts = uri::Parts::default(); // A request translated from HTTP/1 must not include the :authority // header if let Some(authority) = pseudo.authority { let maybe_authority = uri::Authority::from_shared(authority.clone().into_inner()); parts.authority = Some(maybe_authority.or_else(|why| malformed!( "malformed headers: malformed authority ({:?}): {}", authority, why, ))?); } // A :scheme is always required. if let Some(scheme) = pseudo.scheme { let maybe_scheme = uri::Scheme::from_shared(scheme.clone().into_inner()); let scheme = maybe_scheme.or_else(|why| malformed!( "malformed headers: malformed scheme ({:?}): {}", scheme, why, ))?; // It's not possible to build an `Uri` from a scheme and path. So, // after validating is was a valid scheme, we just have to drop it // if there isn't an :authority. 
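// (Concretely: for a request carrying a :scheme and a :path such as
// `/index.html` but no :authority pseudo-header, the scheme is discarded and
// the resulting `Uri` is path-only, because `http::Uri` cannot represent a
// scheme together with a path but without an authority.)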
if parts.authority.is_some() { parts.scheme = Some(scheme); } } else { malformed!("malformed headers: missing scheme"); } if let Some(path) = pseudo.path { // This cannot be empty if path.is_empty() { malformed!("malformed headers: missing path"); } let maybe_path = uri::PathAndQuery::from_shared(path.clone().into_inner()); parts.path_and_query = Some(maybe_path.or_else(|why| malformed!( "malformed headers: malformed path ({:?}): {}", path, why, ))?); } b.uri(parts); let mut request = match b.body(()) { Ok(request) => request, Err(e) => { // TODO: Should there be more specialized handling for different // kinds of errors proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id); return Err(RecvError::Stream { id: stream_id, reason: Reason::PROTOCOL_ERROR, }); }, }; *request.headers_mut() = fields; Ok(request) } } // ===== impl Handshaking ===== impl fmt::Debug for Handshaking where B: IntoBuf { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { Handshaking::Flushing(_) => write!(f, "Handshaking::Flushing(_)"), Handshaking::ReadingPreface(_) => write!(f, "Handshaking::ReadingPreface(_)"), Handshaking::Empty => write!(f, "Handshaking::Empty"), } } } impl convert::From>> for Handshaking where T: AsyncRead + AsyncWrite, B: IntoBuf, { #[inline] fn from(flush: Flush>) -> Self { Handshaking::Flushing(flush) } } impl convert::From>> for Handshaking where T: AsyncRead + AsyncWrite, B: IntoBuf, { #[inline] fn from(read: ReadPreface>) -> Self { Handshaking::ReadingPreface(read) } } impl convert::From>> for Handshaking where T: AsyncRead + AsyncWrite, B: IntoBuf, { #[inline] fn from(codec: Codec>) -> Self { Handshaking::from(Flush::new(codec)) } } h2-0.1.26/src/share.rs010066400017500001750000000544731347562215500126220ustar0000000000000000use codec::UserError; use frame::Reason; use proto::{self, WindowSize}; use bytes::{Bytes, IntoBuf}; use futures::{self, Poll, Async}; use http::{HeaderMap}; use std::fmt; /// Sends the body stream and trailers to the remote peer. /// /// # Overview /// /// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the /// HTTP/2.0 message header has been sent sent. It is used to stream the message /// body and send the message trailers. See method level documentation for more /// details. /// /// The `SendStream` instance is also used to manage outbound flow control. /// /// If a `SendStream` is dropped without explicitly closing the send stream, a /// `RST_STREAM` frame will be sent. This essentially cancels the request / /// response exchange. /// /// The ways to explicitly close the send stream are: /// /// * Set `end_of_stream` to true when calling [`send_request`], /// [`send_response`], or [`send_data`]. /// * Send trailers with [`send_trailers`]. /// * Explicitly reset the stream with [`send_reset`]. /// /// # Flow control /// /// In HTTP/2.0, data cannot be sent to the remote peer unless there is /// available window capacity on both the stream and the connection. When a data /// frame is sent, both the stream window and the connection window are /// decremented. When the stream level window reaches zero, no further data can /// be sent on that stream. When the connection level window reaches zero, no /// further data can be sent on any stream for that connection. /// /// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE` /// frames. These frames increment the windows. See the [specification] for more /// details on the principles of HTTP/2.0 flow control. 
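/// As an illustration of the reserve/poll/send lifecycle described in the
/// rest of this section, a poll-style sketch (the name `poll_send` and its
/// signature are illustrative only; a real caller would also handle trailers
/// and repeated wakeups):
///
/// ```
/// # extern crate bytes;
/// # extern crate futures;
/// # extern crate h2;
/// # use bytes::Bytes;
/// # use futures::{Async, Poll};
/// # use h2::SendStream;
/// # fn poll_send(send: &mut SendStream<Bytes>, data: Bytes) -> Poll<(), h2::Error> {
/// // Signal how much we intend to send.
/// send.reserve_capacity(data.len());
///
/// // Once some capacity is assigned to the stream, use it immediately.
/// match send.poll_capacity()? {
///     Async::Ready(Some(available)) => {
///         let chunk = data.slice_to(available.min(data.len()));
///         send.send_data(chunk, false)?;
///         Ok(Async::Ready(()))
///     }
///     _ => Ok(Async::NotReady),
/// }
/// # }
/// # pub fn main() {}
/// ```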
/// /// The implications for sending data are that the caller **should** ensure that /// both the stream and the connection has available window capacity before /// loading the data to send into memory. The `SendStream` instance provides the /// necessary APIs to perform this logic. This, however, is not an obligation. /// If the caller attempts to send data on a stream when there is no available /// window capacity, the library will buffer the data until capacity becomes /// available, at which point the buffer will be flushed to the connection. /// /// **NOTE**: There is no bound on the amount of data that the library will /// buffer. If you are sending large amounts of data, you really should hook /// into the flow control lifecycle. Otherwise, you risk using up significant /// amounts of memory. /// /// To hook into the flow control lifecycle, the caller signals to the library /// that it intends to send data by calling [`reserve_capacity`], specifying the /// amount of data, in octets, that the caller intends to send. After this, /// `poll_capacity` is used to be notified when the requested capacity is /// assigned to the stream. Once [`poll_capacity`] returns `Ready` with the number /// of octets available to the stream, the caller is able to actually send the /// data using [`send_data`]. /// /// Because there is also a connection level window that applies to **all** /// streams on a connection, when capacity is assigned to a stream (indicated by /// `poll_capacity` returning `Ready`), this capacity is reserved on the /// connection and will **not** be assigned to any other stream. If data is /// never written to the stream, that capacity is effectively lost to other /// streams and this introduces the risk of deadlocking a connection. /// /// To avoid throttling data on a connection, the caller should not reserve /// capacity until ready to send data and once any capacity is assigned to the /// stream, the caller should immediately send data consuming this capacity. /// There is no guarantee as to when the full capacity requested will become /// available. For example, if the caller requests 64 KB of data and 512 bytes /// become available, the caller should immediately send 512 bytes of data. /// /// See [`reserve_capacity`] documentation for more details. /// /// [`SendRequest`]: client/struct.SendRequest.html /// [`SendResponse`]: server/struct.SendResponse.html /// [specification]: http://httpwg.org/specs/rfc7540.html#FlowControl /// [`reserve_capacity`]: #method.reserve_capacity /// [`poll_capacity`]: #method.poll_capacity /// [`send_data`]: #method.send_data /// [`send_request`]: client/struct.SendRequest.html#method.send_request /// [`send_response`]: server/struct.SendResponse.html#method.send_response /// [`send_data`]: #method.send_data /// [`send_trailers`]: #method.send_trailers /// [`send_reset`]: #method.send_reset #[derive(Debug)] pub struct SendStream { inner: proto::StreamRef, } /// A stream identifier, as described in [Section 5.1.1] of RFC 7540. /// /// Streams are identified with an unsigned 31-bit integer. Streams /// initiated by a client MUST use odd-numbered stream identifiers; those /// initiated by the server MUST use even-numbered stream identifiers. A /// stream identifier of zero (0x0) is used for connection control /// messages; the stream identifier of zero cannot be used to establish a /// new stream. 
/// /// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct StreamId(u32); /// Receives the body stream and trailers from the remote peer. /// /// A `RecvStream` is provided by [`client::ResponseFuture`] and /// [`server::Connection`] with the received HTTP/2.0 message head (the response /// and request head respectively). /// /// A `RecvStream` instance is used to receive the streaming message body and /// any trailers from the remote peer. It is also used to manage inbound flow /// control. /// /// See method level documentation for more details on receiving data. See /// [`ReleaseCapacity`] for more details on inbound flow control. /// /// Note that this type implements [`Stream`], yielding the received data frames. /// When this implementation is used, the capacity is immediately released when /// the data is yielded. It is recommended to only use this API when the data /// will not be retained in memory for extended periods of time. /// /// [`client::ResponseFuture`]: client/struct.ResponseFuture.html /// [`server::Connection`]: server/struct.Connection.html /// [`ReleaseCapacity`]: struct.ReleaseCapacity.html /// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html #[must_use = "streams do nothing unless polled"] pub struct RecvStream { inner: ReleaseCapacity, } /// A handle to release window capacity to a remote stream. /// /// This type allows the caller to manage inbound data [flow control]. The /// caller is expected to call [`release_capacity`] after dropping data frames. /// /// # Overview /// /// Each stream has a window size. This window size is the maximum amount of /// inbound data that can be in-flight. In-flight data is defined as data that /// has been received, but not yet released. /// /// When a stream is created, the window size is set to the connection's initial /// window size value. When a data frame is received, the window size is then /// decremented by size of the data frame before the data is provided to the /// caller. As the caller finishes using the data, [`release_capacity`] must be /// called. This will then increment the window size again, allowing the peer to /// send more data. /// /// There is also a connection level window as well as the stream level window. /// Received data counts against the connection level window as well and calls /// to [`release_capacity`] will also increment the connection level window. /// /// # Sending `WINDOW_UPDATE` frames /// /// `WINDOW_UPDATE` frames will not be sent out for **every** call to /// `release_capacity`, as this would end up slowing down the protocol. Instead, /// `h2` waits until the window size is increased to a certain threshold and /// then sends out a single `WINDOW_UPDATE` frame representing all the calls to /// `release_capacity` since the last `WINDOW_UPDATE` frame. /// /// This essentially batches window updating. /// /// # Scenarios /// /// Following is a basic scenario with an HTTP/2.0 connection containing a /// single active stream. /// /// * A new stream is activated. The receive window is initialized to 1024 (the /// value of the initial window size for this connection). /// * A `DATA` frame is received containing a payload of 400 bytes. /// * The receive window size is reduced to 424 bytes. /// * [`release_capacity`] is called with 200. /// * The receive window size is now 624 bytes. The peer may send no more than /// this. /// * A `DATA` frame is received with a payload of 624 bytes. 
/// * The window size is now 0 bytes. The peer may not send any more data. /// * [`release_capacity`] is called with 1024. /// * The receive window size is now 1024 bytes. The peer may now send more /// data. /// /// [flow control]: ../index.html#flow-control /// [`release_capacity`]: struct.ReleaseCapacity.html#method.release_capacity #[derive(Debug)] pub struct ReleaseCapacity { inner: proto::OpaqueStreamRef, } /// A handle to send and receive PING frames with the peer. // NOT Clone on purpose pub struct PingPong { inner: proto::UserPings, } /// Sent via [`PingPong`][] to send a PING frame to a peer. /// /// [`PingPong`]: struct.PingPong.html pub struct Ping { _p: (), } /// Received via [`PingPong`][] when a peer acknowledges a [`Ping`][]. /// /// [`PingPong`]: struct.PingPong.html /// [`Ping`]: struct.Ping.html pub struct Pong { _p: (), } // ===== impl SendStream ===== impl SendStream { pub(crate) fn new(inner: proto::StreamRef) -> Self { SendStream { inner } } /// Requests capacity to send data. /// /// This function is used to express intent to send data. This requests /// connection level capacity. Once the capacity is available, it is /// assigned to the stream and not reused by other streams. /// /// This function may be called repeatedly. The `capacity` argument is the /// **total** amount of requested capacity. Sequential calls to /// `reserve_capacity` are *not* additive. Given the following: /// /// ```rust /// # use h2::*; /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { /// send_stream.reserve_capacity(100); /// send_stream.reserve_capacity(200); /// # } /// ``` /// /// After the second call to `reserve_capacity`, the *total* requested /// capacity will be 200. /// /// `reserve_capacity` is also used to cancel previous capacity requests. /// Given the following: /// /// ```rust /// # use h2::*; /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { /// send_stream.reserve_capacity(100); /// send_stream.reserve_capacity(0); /// # } /// ``` /// /// After the second call to `reserve_capacity`, the *total* requested /// capacity will be 0, i.e. there is no requested capacity for the stream. /// /// If `reserve_capacity` is called with a lower value than the amount of /// capacity **currently** assigned to the stream, this capacity will be /// returned to the connection to be re-assigned to other streams. /// /// Also, the amount of capacity that is reserved gets decremented as data /// is sent. For example: /// /// ```rust /// # use h2::*; /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { /// send_stream.reserve_capacity(100); /// /// let capacity = send_stream.poll_capacity(); /// // capacity == 5; /// /// send_stream.send_data(b"hello", false).unwrap(); /// // At this point, the total amount of requested capacity is 95 bytes. /// /// // Calling `reserve_capacity` with `100` again essentially requests an /// // additional 5 bytes. /// send_stream.reserve_capacity(100); /// # } /// ``` /// /// See [Flow control](struct.SendStream.html#flow-control) for an overview /// of how send flow control works. pub fn reserve_capacity(&mut self, capacity: usize) { // TODO: Check for overflow self.inner.reserve_capacity(capacity as WindowSize) } /// Returns the stream's current send capacity. /// /// This allows the caller to check the current amount of available capacity /// before sending data. pub fn capacity(&self) -> usize { self.inner.capacity() as usize } /// Requests to be notified when the stream's capacity increases. 
/// /// Before calling this, capacity should be requested with /// `reserve_capacity`. Once capacity is requested, the connection will /// assign capacity to the stream **as it becomes available**. There is no /// guarantee as to when and in what increments capacity gets assigned to /// the stream. /// /// To get notified when the available capacity increases, the caller calls /// `poll_capacity`, which returns `Ready(Some(n))` when `n` has been /// increased by the connection. Note that `n` here represents the **total** /// amount of assigned capacity at that point in time. It is also possible /// that `n` is lower than the previous call if, since then, the caller has /// sent data. pub fn poll_capacity(&mut self) -> Poll, ::Error> { let res = try_ready!(self.inner.poll_capacity()); Ok(Async::Ready(res.map(|v| v as usize))) } /// Sends a single data frame to the remote peer. /// /// This function may be called repeatedly as long as `end_of_stream` is set /// to `false`. Setting `end_of_stream` to `true` sets the end stream flag /// on the data frame. Any further calls to `send_data` or `send_trailers` /// will return an [`Error`]. /// /// `send_data` can be called without reserving capacity. In this case, the /// data is buffered and the capacity is implicitly requested. Once the /// capacity becomes available, the data is flushed to the connection. /// However, this buffering is unbounded. As such, sending large amounts of /// data without reserving capacity before hand could result in large /// amounts of data being buffered in memory. /// /// [`Error`]: struct.Error.html pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), ::Error> { self.inner .send_data(data.into_buf(), end_of_stream) .map_err(Into::into) } /// Sends trailers to the remote peer. /// /// Sending trailers implicitly closes the send stream. Once the send stream /// is closed, no more data can be sent. pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), ::Error> { self.inner.send_trailers(trailers).map_err(Into::into) } /// Resets the stream. /// /// This cancels the request / response exchange. If the response has not /// yet been received, the associated `ResponseFuture` will return an /// [`Error`] to reflect the canceled exchange. /// /// [`Error`]: struct.Error.html pub fn send_reset(&mut self, reason: Reason) { self.inner.send_reset(reason) } /// Polls to be notified when the client resets this stream. /// /// If stream is still open, this returns `Ok(Async::NotReady)`, and /// registers the task to be notified if a `RST_STREAM` is received. /// /// If a `RST_STREAM` frame is received for this stream, calling this /// method will yield the `Reason` for the reset. /// /// # Error /// /// If connection sees an error, this returns that error instead of a /// `Reason`. pub fn poll_reset(&mut self) -> Poll { self.inner.poll_reset(proto::PollReset::Streaming) } /// Returns the stream ID of this `SendStream`. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> StreamId { StreamId::from_internal(self.inner.stream_id()) } } // ===== impl StreamId ===== impl StreamId { pub(crate) fn from_internal(id: ::frame::StreamId) -> Self { StreamId(id.into()) } } // ===== impl RecvStream ===== impl RecvStream { pub(crate) fn new(inner: ReleaseCapacity) -> Self { RecvStream { inner } } #[deprecated(since = "0.0.0")] #[doc(hidden)] pub fn is_empty(&self) -> bool { // If the recv side is closed and the receive queue is empty, the body is empty. 
self.inner.inner.body_is_empty() } /// Returns true if the receive half has reached the end of stream. /// /// A return value of `true` means that calls to `poll` and `poll_trailers` /// will both return `None`. pub fn is_end_stream(&self) -> bool { self.inner.inner.is_end_stream() } /// Get a mutable reference to this streams `ReleaseCapacity`. /// /// It can be used immediately, or cloned to be used later. pub fn release_capacity(&mut self) -> &mut ReleaseCapacity { &mut self.inner } /// Returns received trailers. pub fn poll_trailers(&mut self) -> Poll, ::Error> { self.inner.inner.poll_trailers().map_err(Into::into) } /// Returns the stream ID of this stream. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> StreamId { self.inner.stream_id() } } impl futures::Stream for RecvStream { type Item = Bytes; type Error = ::Error; fn poll(&mut self) -> Poll, Self::Error> { self.inner.inner.poll_data().map_err(Into::into) } } impl fmt::Debug for RecvStream { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("RecvStream") .field("inner", &self.inner) .finish() } } impl Drop for RecvStream { fn drop(&mut self) { // Eagerly clear any received DATA frames now, since its no longer // possible to retrieve them. However, this will be called // again once *all* stream refs have been dropped, since // this won't send a RST_STREAM frame, in case the user wishes to // still *send* DATA. self.inner.inner.clear_recv_buffer(); } } // ===== impl ReleaseCapacity ===== impl ReleaseCapacity { pub(crate) fn new(inner: proto::OpaqueStreamRef) -> Self { ReleaseCapacity { inner } } /// Returns the stream ID of the stream whose capacity will /// be released by this `ReleaseCapacity`. /// /// # Panics /// /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> StreamId { StreamId::from_internal(self.inner.stream_id()) } /// Release window capacity back to remote stream. /// /// This releases capacity back to the stream level and the connection level /// windows. Both window sizes will be increased by `sz`. /// /// See [struct level] documentation for more details. /// /// # Panics /// /// This function panics if increasing the receive window size by `sz` would /// result in a window size greater than the target window size set by /// [`set_target_window_size`]. In other words, the caller cannot release /// more capacity than data has been received. If 1024 bytes of data have /// been received, at most 1024 bytes can be released. /// /// [struct level]: # /// [`set_target_window_size`]: server/struct.Server.html#method.set_target_window_size pub fn release_capacity(&mut self, sz: usize) -> Result<(), ::Error> { if sz > proto::MAX_WINDOW_SIZE as usize { return Err(UserError::ReleaseCapacityTooBig.into()); } self.inner .release_capacity(sz as proto::WindowSize) .map_err(Into::into) } } impl Clone for ReleaseCapacity { fn clone(&self) -> Self { let inner = self.inner.clone(); ReleaseCapacity { inner } } } // ===== impl PingPong ===== impl PingPong { pub(crate) fn new(inner: proto::UserPings) -> Self { PingPong { inner, } } /// Send a `PING` frame to the peer. /// /// Only one ping can be pending at a time, so trying to send while /// a pong has not be received means this will return a user error. /// /// # Example /// /// ``` /// # fn doc(mut ping_pong: h2::PingPong) { /// // let mut ping_pong = ... 
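/// // `Ping::opaque()` builds a PING whose 8-byte payload is chosen by the
/// // library; only one ping may be outstanding at a time, so calling
/// // `send_ping` again before the pong arrives returns a user error.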
/// ping_pong /// .send_ping(h2::Ping::opaque()) /// .unwrap(); /// # } /// ``` pub fn send_ping(&mut self, ping: Ping) -> Result<(), ::Error> { // Passing a `Ping` here is just to be forwards-compatible with // eventually allowing choosing a ping payload. For now, we can // just drop it. drop(ping); self.inner .send_ping() .map_err(|err| match err { Some(err) => err.into(), None => UserError::SendPingWhilePending.into() }) } /// Polls for the acknowledgement of a previously [sent][] `PING` frame. /// /// # Example /// /// ``` /// # extern crate futures; /// # extern crate h2; /// # use futures::Future; /// # fn doc(mut ping_pong: h2::PingPong) { /// // let mut ping_pong = ... /// /// // First, send a PING. /// ping_pong /// .send_ping(h2::Ping::opaque()) /// .unwrap(); /// /// // And then wait for the PONG. /// futures::future::poll_fn(move || { /// ping_pong.poll_pong() /// }).wait().unwrap(); /// # } /// # fn main() {} /// ``` /// /// [sent]: struct.PingPong.html#method.send_ping pub fn poll_pong(&mut self) -> Poll { try_ready!(self.inner.poll_pong()); Ok(Async::Ready(Pong { _p: (), })) } } impl fmt::Debug for PingPong { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("PingPong") .finish() } } // ===== impl Ping ===== impl Ping { /// Creates a new opaque `Ping` to be sent via a [`PingPong`][]. /// /// The payload is "opaque", such that it shouldn't be depended on. /// /// [`PingPong`]: struct.PingPong.html pub fn opaque() -> Ping { Ping { _p: (), } } } impl fmt::Debug for Ping { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Ping") .finish() } } // ===== impl Pong ===== impl fmt::Debug for Pong { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Pong") .finish() } } h2-0.1.26/.cargo_vcs_info.json0000644000000001120000000000000114640ustar00{ "git": { "sha1": "1eecedf37c3f78550ca509967deb5c9bea485f7b" } } h2-0.1.26/Cargo.lock0000644000001104760000000000000074560ustar00# This file is automatically @generated by Cargo. # It is not intended for manual editing. 
[[package]] name = "arrayvec" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "atty" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "base64" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bitflags" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bytes" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "cc" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cfg-if" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-deque" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-epoch" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-utils" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "dtoa" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "env_logger" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fnv" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "fuchsia-zircon" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ 
"bitflags 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fuchsia-zircon-sys" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "futures" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "h2" version = "0.1.26" dependencies = [ "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.43 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "string 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-rustls 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", "webpki-roots 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hex" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "http" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "humantime" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "indexmap" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "iovec" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itoa" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "kernel32-sys" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "lazy_static" version = 
"1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lazycell" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" version = "0.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "log" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "memoffset" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "mio" version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "mio-uds" version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miow" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "net2" version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "nodrop" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "num_cpus" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "quick-error" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "quickcheck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand" version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand_core" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand_core" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "redox_syscall" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "redox_termios" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ring" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustls" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", "sct 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "safemem" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "same-file" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "scopeguard" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "sct" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde" version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde_json" version 
= "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "slab" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "string" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "termcolor" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "termion" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-current-thread 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-fs 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-tcp 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-threadpool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-udp 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-codec" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-current-thread" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-executor" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-fs" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-threadpool 
0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-io" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-reactor" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-rustls" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-tcp" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-threadpool" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-timer" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-udp" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-uds" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-reactor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "untrusted" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "walkdir" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "webpki" version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "webpki-roots" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "winapi" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "winapi-build" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wincolor" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ws2_32-sys" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [metadata] "checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef" "checksum atty 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6609a866dd1a1b2d0ee1362195bf3e4f6438abb2d80120b83b1e1f4fb6476dd0" "checksum base64 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9263aa6a38da271eec5c91a83ce1e800f093c8535788d403d626d8d5c3f8f007" "checksum bitflags 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1b2bf7093258c32e0825b635948de528a5949799dcd61bef39534c8aab95870c" "checksum byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "73b5bdfe7ee3ad0b99c9801d58807a9dbc9e09196365b0203853b99889ab3c87" "checksum bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0ce55bd354b095246fc34caf4e9e242f5297a7fd938b090cadfea6eee614aa62" "checksum cc 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "38fb45eeb2c9216a6700cf675b418d6c26ee15b55a3700970112da9fedfb8694" "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" "checksum crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3486aefc4c0487b9cb52372c97df0a48b8c249514af1ee99703bf70d2f2ceda1" "checksum crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "30fecfcac6abfef8771151f8be4abc9e4edc112c2bcb233314cafde2680536e9" "checksum crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "677d453a17e8bd2b913fa38e8b9cf04bcdbb5be790aa294f2389661d72036015" "checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab" "checksum env_logger 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "00c45cec4cde3daac5f036c74098b4956151525cdf360cff5ee0092c98823e54" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" "checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c" "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum http 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "24f58e8c2d8e886055c3ead7b28793e1455270b5fb39650984c224bc538ba581" "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" "checksum indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220" "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" "checksum itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c069bbec61e1ca5a596166e55dfe4773ff745c3d16b700013bcaff9a6df2c682" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" 
"checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1" "checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef" "checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" "checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f" "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" "checksum mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "6d771e3ef92d58a8da8df7d6976bfca9371ed1de6619d9d5a5ce5b1f29b85bfe" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)" = "9044faf1413a1057267be51b5afba8eb1090bd2231c693664aa1db716fe1eae0" "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2" "checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30" "checksum quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eda5fe9b71976e62bc81b781206aaa076401769b2143379d3eb2118388babac4" "checksum quickcheck 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "02c2411d418cea2364325b18a205664f9ef8252e06b2e911db97c0b0d98b1406" "checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1" "checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5" "checksum rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e464cd887e869cddcae8792a4ee31d23c7edd516700695608f5b98c67ee0131c" "checksum rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1961a422c4d189dfb50ffa9320bf1f2a9bd54ecb92792fb9477f99a1045f3372" "checksum rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0905b6b7079ec73b314d4c748701f6931eb79fd97c668caa3f1899b22b32c6db" "checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd" "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" "checksum ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2c4db68a2e35f3497146b7e4563df7d4773a2433230c5e4b448328e31740458a" "checksum rustls 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab72e4883a4fc9fd5cd462a51c55d79f6a7b5c9483e8d73a2b7bca0b18430bcd" "checksum safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e27a8b19b835f7aea908818e871f5cc3a5a186550c30773be987e155e8163d8f" "checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" "checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" "checksum sct 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4540aed8d71a5de961a8902cf356e28122bd62695eb5be1c214f84d8704097c" "checksum serde 1.0.43 (registry+https://github.com/rust-lang/crates.io-index)" = "0c855d888276f20d140223bd06515e5bf1647fd6d02593cb5792466d9a8ec2d0" "checksum serde_json 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "8c6c4e049dc657a99e394bd85c22acbf97356feeec6dbf44150f2dcf79fb3118" "checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d" "checksum string 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0bbfb8937e38e34c3444ff00afb28b0811d9554f15c5ad64d12b0308d1d1995" "checksum termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "adc4587ead41bf016f11af03e55a624c06568b5a19db4e90fde573d805074f83" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6e93c78d23cc61aa245a8acd2c4a79c4d7fa7fb5c3ca90d5737029f043a84895" "checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" "checksum tokio-current-thread 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f90fcd90952f0a496d438a976afba8e5c205fb12123f813d8ab3aa1c8436638c" "checksum tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c117b6cf86bb730aab4834f10df96e4dd586eff2c3c27d3781348da49e255bde" "checksum tokio-fs 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "60ae25f6b17d25116d2cba342083abe5255d3c2c79cb21ea11aa049c53bf7c75" "checksum tokio-io 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "7392fe0a70d5ce0c882c4778116c519bd5dbaa8a7c3ae3d04578b3afafdcda21" "checksum tokio-reactor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3cedc8e5af5131dc3423ffa4f877cce78ad25259a9a62de0613735a13ebc64b" "checksum tokio-rustls 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94c0ba72dd900bf306bbf18c3cdb1dd526e8b5744439934fb2dfe3326d2caab9" "checksum tokio-tcp 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ec9b094851aadd2caf83ba3ad8e8c4ce65a42104f7b94d9e6550023f0407853f" "checksum tokio-threadpool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3929aee321c9220ed838ed6c3928be7f9b69986b0e3c22c972a66dbf8a298c68" "checksum tokio-timer 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "3a52f00c97fedb6d535d27f65cccb7181c8dd4c6edc3eda9ea93f6d45d05168e" "checksum tokio-udp 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "137bda266504893ac4774e0ec4c2108f7ccdbcb7ac8dced6305fe9e4e0b5041a" "checksum tokio-uds 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "df195376b43508f01570bacc73e13a1de0854dc59e79d1ec09913e8db6dd2a70" "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" "checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = 
"bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff" "checksum webpki 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "17d7967316d8411ca3b01821ee6c332bde138ba4363becdb492f12e514daa17f" "checksum webpki-roots 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "edbd75d6abf044ef0c9d7ec92b9e8c518bcd93a15bb7bd9a92239e035248fc17" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eeb06499a3a4d44302791052df005d5232b927ed1a9658146d842165c4de7767" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"