hyper-util-0.1.10/.cargo_vcs_info.json0000644000000001360000000000100132150ustar { "git": { "sha1": "a63603772ee1bc98957cf86eb3a904ed2357ba36" }, "path_in_vcs": "" }hyper-util-0.1.10/.github/workflows/CI.yml000064400000000000000000000055741046102023000164330ustar 00000000000000name: CI on: pull_request: push: branches: - master env: RUST_BACKTRACE: 1 jobs: ci-pass: name: CI is green runs-on: ubuntu-latest needs: - style - test - msrv - miri - features - semver - doc steps: - run: exit 0 style: name: Check Style runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: components: rustfmt - run: cargo fmt --all --check test: name: Test ${{ matrix.rust }} on ${{ matrix.os }} needs: [style] strategy: matrix: rust: - stable - beta - nightly os: - ubuntu-latest - windows-latest - macos-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - run: cargo test --all-features msrv: name: Check MSRV (${{ matrix.rust }}) needs: [style] strategy: matrix: rust: [ 1.63 ] # keep in sync with 'rust-version' in Cargo.toml runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - name: Pin some dependencies for MSRV run: | cargo update cargo update --package tokio --precise 1.38.1 cargo update --package tokio-util --precise 0.7.11 - run: cargo check --features full miri: name: Test with Miri needs: [style] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly with: components: miri - name: Test env: # Can't enable tcp feature since Miri does not support the tokio runtime MIRIFLAGS: "-Zmiri-disable-isolation" run: cargo miri test --all-features features: name: features needs: [style] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: 
dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack - run: cargo hack --no-dev-deps check --feature-powerset --depth 2 semver: name: semver runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Check semver uses: obi1kenobi/cargo-semver-checks-action@v2 with: feature-group: only-explicit-features features: full release-type: minor doc: name: Build docs needs: [style, test] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly - run: cargo rustdoc -- --cfg docsrs -D rustdoc::broken-intra-doc-links hyper-util-0.1.10/.gitignore000064400000000000000000000005001046102023000137700ustar 00000000000000# Generated by Cargo # will have compiled files and executables /target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk hyper-util-0.1.10/CHANGELOG.md000064400000000000000000000051361046102023000136230ustar 00000000000000# 0.1.10 (2024-10-28) - Add `http2_max_header_list_size(num)` option to legacy client builder. - Add `set_tcp_user_timeout(dur)` option to legacy `HttpConnector`. # 0.1.9 (2024-09-24) - Add support for `client::legacy` DNS resolvers to set non-zero ports on returned addresses. - Fix `client::legacy` wrongly retrying pooled connections that were created successfully but failed immediately after, resulting in a retry loop. # 0.1.8 (2024-09-09) - Add `server::conn::auto::upgrade::downcast()` for use with auto connection upgrades. # 0.1.7 (2024-08-06) - Add `Connected::poison()` to `legacy` client, a port from hyper v0.14.x. - Add `Error::connect_info()` to `legacy` client, a port from hyper v0.14.x. # 0.1.6 (2024-07-01) - Add support for AIX operating system to `legacy` client. - Fix `legacy` client to better use dying pooled connections. 
# 0.1.5 (2024-05-28) - Add `server::graceful::GracefulShutdown` helper to coordinate over many connections. - Add `server::conn::auto::Connection::into_owned()` to unlink lifetime from `Builder`. - Allow `service` module to be available with only `service` feature enabled. # 0.1.4 (2024-05-24) - Add `initial_max_send_streams()` to `legacy` client builder - Add `max_pending_accept_reset_streams()` to `legacy` client builder - Add `max_headers(usize)` to `auto` server builder - Add `http1_onl()` and `http2_only()` to `auto` server builder - Add connection capturing API to `legacy` client - Add `impl Connection for TokioIo` - Fix graceful shutdown hanging on reading the HTTP version # 0.1.3 (2024-01-31) ### Added - Add `Error::is_connect()` which returns true if error came from client `Connect`. - Add timer support to `legacy` pool. - Add support to enable http1/http2 parts of `auto::Builder` individually. ### Fixed - Fix `auto` connection so it can handle requests shorter than the h2 preface. - Fix `legacy::Client` to no longer error when keep-alive is diabled. # 0.1.2 (2023-12-20) ### Added - Add `graceful_shutdown()` method to `auto` connections. - Add `rt::TokioTimer` type that implements `hyper::rt::Timer`. - Add `service::TowerToHyperService` adapter, allowing using `tower::Service`s as a `hyper::service::Service`. - Implement `Clone` for `auto::Builder`. - Exports `legacy::{Builder, ResponseFuture}`. ### Fixed - Enable HTTP/1 upgrades on the `legacy::Client`. - Prevent divide by zero if DNS returns 0 addresses. # 0.1.1 (2023-11-17) ### Added - Make `server-auto` enable the `server` feature. ### Fixed - Reduce `Send` bounds requirements for `auto` connections. - Docs: enable all features when generating. # 0.1.0 (2023-11-16) Initial release. hyper-util-0.1.10/Cargo.lock0000644000000555610000000000100112040ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "addr2line" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "async-stream" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", "pin-project-lite", ] [[package]] name = "async-stream-impl" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "bitflags" version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "env_logger" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures-channel" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = 
"futures-sink" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-core", "futures-task", "pin-project-lite", "pin-utils", ] [[package]] name = "gimli" version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "h2" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", "http", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "hermit-abi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "http" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ 
"bytes", "http", ] [[package]] name = "http-body-util" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" dependencies = [ "bytes", "futures-util", "http", "http-body", "pin-project-lite", ] [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" dependencies = [ "bytes", "futures-channel", "futures-util", "h2", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "smallvec", "tokio", "want", ] [[package]] name = "hyper-util" version = "0.1.10" dependencies = [ "bytes", "futures-channel", "futures-util", "http", "http-body", "http-body-util", "hyper", "pin-project-lite", "pnet_datalink", "pretty_env_logger", "socket2", "tokio", "tokio-test", "tower-service", "tracing", ] [[package]] name = "indexmap" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "ipnetwork" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e" dependencies = [ "serde", ] [[package]] name = 
"is-terminal" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi", "rustix", "windows-sys 0.52.0", ] [[package]] name = "itoa" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "libc" version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "linux-raw-sys" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", "windows-sys 0.48.0", ] [[package]] name = "no-std-net" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" [[package]] name = "object" version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "pin-project-lite" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pnet_base" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" dependencies = [ "ipnetwork", "libc", "pnet_base", "pnet_sys", "winapi", ] [[package]] name = "pnet_sys" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" dependencies = [ "libc", "winapi", ] [[package]] name = "pretty_env_logger" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c" dependencies = [ "env_logger", "log", ] [[package]] name = "proc-macro2" version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "regex" version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustix" version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "serde" version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "signal-hook-registry" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", 
] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", ] [[package]] name = "syn" version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "tokio" version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-stream" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-test" 
version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" dependencies = [ "async-stream", "bytes", "futures-core", "tokio", "tokio-stream", ] [[package]] name = "tokio-util" version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.0", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ "windows_aarch64_gnullvm 0.52.0", "windows_aarch64_msvc 0.52.0", "windows_i686_gnu 0.52.0", "windows_i686_msvc 0.52.0", "windows_x86_64_gnu 0.52.0", "windows_x86_64_gnullvm 0.52.0", "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" 
[[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" hyper-util-0.1.10/Cargo.toml0000644000000067260000000000100112260ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.63" name = "hyper-util" version = "0.1.10" authors = ["Sean McArthur "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "hyper utilities" homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper-util" readme = "README.md" keywords = [ "http", "hyper", "hyperium", ] categories = [ "network-programming", "web-programming::http-client", "web-programming::http-server", ] license = "MIT" repository = "https://github.com/hyperium/hyper-util" [package.metadata.docs.rs] features = ["full"] rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "hyper_util" path = "src/lib.rs" [[example]] name = "client" path = "examples/client.rs" required-features = [ "client-legacy", "http1", "tokio", ] [[example]] name = "server" path = "examples/server.rs" required-features = [ "server", "http1", "tokio", ] [[example]] name = "server_graceful" path = "examples/server_graceful.rs" required-features = [ "tokio", "server-graceful", "server-auto", ] [[test]] name = "legacy_client" path = "tests/legacy_client.rs" [dependencies.bytes] version = "1.7.1" [dependencies.futures-channel] version = "0.3" optional = true [dependencies.futures-util] version = "0.3.16" default-features = false [dependencies.http] version = "1.0" [dependencies.http-body] version = "1.0.0" [dependencies.hyper] version = "1.4.0" [dependencies.pin-project-lite] version = "0.2.4" [dependencies.socket2] version = "0.5" features = ["all"] optional = true [dependencies.tokio] version = "1" optional = true default-features = false [dependencies.tower-service] version = "0.3" optional = true [dependencies.tracing] version = "0.1" features = ["std"] optional = true default-features = false [dev-dependencies.bytes] version = "1" [dev-dependencies.http-body-util] version = "0.1.0" [dev-dependencies.hyper] version = "1.4.0" features = ["full"] [dev-dependencies.pretty_env_logger] version = "0.5" 
[dev-dependencies.tokio] version = "1" features = [ "macros", "test-util", "signal", ] [dev-dependencies.tokio-test] version = "0.4" [features] __internal_happy_eyeballs_tests = [] client = [ "hyper/client", "dep:tracing", "dep:futures-channel", "dep:tower-service", ] client-legacy = [ "client", "dep:socket2", "tokio/sync", ] default = [] full = [ "client", "client-legacy", "server", "server-auto", "server-graceful", "service", "http1", "http2", "tokio", ] http1 = ["hyper/http1"] http2 = ["hyper/http2"] server = ["hyper/server"] server-auto = [ "server", "http1", "http2", ] server-graceful = [ "server", "tokio/sync", "futures-util/alloc", ] service = ["dep:tower-service"] tokio = [ "dep:tokio", "tokio/net", "tokio/rt", "tokio/time", ] [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink] version = "0.35.0" hyper-util-0.1.10/Cargo.toml.orig000064400000000000000000000045041046102023000146770ustar 00000000000000[package] name = "hyper-util" version = "0.1.10" description = "hyper utilities" readme = "README.md" homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper-util" repository = "https://github.com/hyperium/hyper-util" license = "MIT" authors = ["Sean McArthur "] keywords = ["http", "hyper", "hyperium"] categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] edition = "2021" rust-version = "1.63" [package.metadata.docs.rs] features = ["full"] rustdoc-args = ["--cfg", "docsrs"] [dependencies] hyper = "1.4.0" futures-util = { version = "0.3.16", default-features = false } http = "1.0" http-body = "1.0.0" bytes = "1.7.1" pin-project-lite = "0.2.4" futures-channel = { version = "0.3", optional = true } socket2 = { version = "0.5", optional = true, features = ["all"] } tracing = { version = "0.1", default-features = false, features = ["std"], optional = true } tokio = { version = "1", optional = true, default-features = false } tower-service = { version = "0.3", optional 
= true } [dev-dependencies] hyper = { version = "1.4.0", features = ["full"] } bytes = "1" http-body-util = "0.1.0" tokio = { version = "1", features = ["macros", "test-util", "signal"] } tokio-test = "0.4" pretty_env_logger = "0.5" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies] pnet_datalink = "0.35.0" [features] default = [] # Shorthand to enable everything full = [ "client", "client-legacy", "server", "server-auto", "server-graceful", "service", "http1", "http2", "tokio", ] client = ["hyper/client", "dep:tracing", "dep:futures-channel", "dep:tower-service"] client-legacy = ["client", "dep:socket2", "tokio/sync"] server = ["hyper/server"] server-auto = ["server", "http1", "http2"] server-graceful = ["server", "tokio/sync", "futures-util/alloc"] service = ["dep:tower-service"] http1 = ["hyper/http1"] http2 = ["hyper/http2"] tokio = ["dep:tokio", "tokio/net", "tokio/rt", "tokio/time"] # internal features used in CI __internal_happy_eyeballs_tests = [] [[example]] name = "client" required-features = ["client-legacy", "http1", "tokio"] [[example]] name = "server" required-features = ["server", "http1", "tokio"] [[example]] name = "server_graceful" required-features = ["tokio", "server-graceful", "server-auto"] hyper-util-0.1.10/LICENSE000064400000000000000000000020411046102023000130070ustar 00000000000000Copyright (c) 2023 Sean McArthur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. hyper-util-0.1.10/README.md000064400000000000000000000006641046102023000132720ustar 00000000000000# hyper-util [![crates.io](https://img.shields.io/crates/v/hyper-util.svg)](https://crates.io/crates/hyper-util) [![Released API docs](https://docs.rs/hyper-util/badge.svg)](https://docs.rs/hyper-util) [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) A collection of utilities to do common things with [hyper](https://hyper.rs). ## License This project is licensed under the [MIT license](./LICENSE). hyper-util-0.1.10/examples/client.rs000064400000000000000000000020561046102023000154520ustar 00000000000000use std::env; use http_body_util::Empty; use hyper::Request; use hyper_util::client::legacy::{connect::HttpConnector, Client}; #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let url = match env::args().nth(1) { Some(url) => url, None => { eprintln!("Usage: client "); return Ok(()); } }; // HTTPS requires picking a TLS implementation, so give a better // warning if the user tries to request an 'https' URL. 
let url = url.parse::()?; if url.scheme_str() != Some("http") { eprintln!("This example only works with 'http' URLs."); return Ok(()); } let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(HttpConnector::new()); let req = Request::builder() .uri(url) .body(Empty::::new())?; let resp = client.request(req).await?; eprintln!("{:?} {:?}", resp.version(), resp.status()); eprintln!("{:#?}", resp.headers()); Ok(()) } hyper-util-0.1.10/examples/server.rs000064400000000000000000000053421046102023000155030ustar 00000000000000//! This example runs a server that responds to any request with "Hello, world!" use std::{convert::Infallible, error::Error}; use bytes::Bytes; use http::{header::CONTENT_TYPE, Request, Response}; use http_body_util::{combinators::BoxBody, BodyExt, Full}; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, server::conn::auto::Builder, }; use tokio::{net::TcpListener, task::JoinSet}; /// Function from an incoming request to an outgoing response /// /// This function gets turned into a [`hyper::service::Service`] later via /// [`service_fn`]. Instead of doing this, you could also write a type that /// implements [`hyper::service::Service`] directly and pass that in place of /// writing a function like this and calling [`service_fn`]. /// /// This function could use [`Full`] as the body type directly since that's /// the only type that can be returned in this case, but this uses [`BoxBody`] /// anyway for demonstration purposes, since this is what's usually used when /// writing a more complex webserver library. 
async fn handle_request( _request: Request, ) -> Result>, Infallible> { let response = Response::builder() .header(CONTENT_TYPE, "text/plain") .body(Full::new(Bytes::from("Hello, world!\n")).boxed()) .expect("values provided to the builder should be valid"); Ok(response) } #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let listen_addr = "127.0.0.1:8000"; let tcp_listener = TcpListener::bind(listen_addr).await?; println!("listening on http://{listen_addr}"); let mut join_set = JoinSet::new(); loop { let (stream, addr) = match tcp_listener.accept().await { Ok(x) => x, Err(e) => { eprintln!("failed to accept connection: {e}"); continue; } }; let serve_connection = async move { println!("handling a request from {addr}"); let result = Builder::new(TokioExecutor::new()) .serve_connection(TokioIo::new(stream), service_fn(handle_request)) .await; if let Err(e) = result { eprintln!("error serving {addr}: {e}"); } println!("handled a request from {addr}"); }; join_set.spawn(serve_connection); } // If you add a method for breaking the above loop (i.e. graceful shutdown), // then you may also want to wait for all existing connections to finish // being served before terminating the program, which can be done like this: // // while let Some(_) = join_set.join_next().await {} } hyper-util-0.1.10/examples/server_graceful.rs000064400000000000000000000044141046102023000173520ustar 00000000000000use bytes::Bytes; use std::convert::Infallible; use std::pin::pin; use std::time::Duration; use tokio::net::TcpListener; #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let listener = TcpListener::bind("127.0.0.1:8080").await?; let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); let graceful = hyper_util::server::graceful::GracefulShutdown::new(); let mut ctrl_c = pin!(tokio::signal::ctrl_c()); loop { tokio::select! 
{ conn = listener.accept() => { let (stream, peer_addr) = match conn { Ok(conn) => conn, Err(e) => { eprintln!("accept error: {}", e); tokio::time::sleep(Duration::from_secs(1)).await; continue; } }; eprintln!("incomming connection accepted: {}", peer_addr); let stream = hyper_util::rt::TokioIo::new(Box::pin(stream)); let conn = server.serve_connection_with_upgrades(stream, hyper::service::service_fn(|_| async move { tokio::time::sleep(Duration::from_secs(5)).await; // emulate slow request let body = http_body_util::Full::::from("Hello World!".to_owned()); Ok::<_, Infallible>(http::Response::new(body)) })); let conn = graceful.watch(conn.into_owned()); tokio::spawn(async move { if let Err(err) = conn.await { eprintln!("connection error: {}", err); } eprintln!("connection dropped: {}", peer_addr); }); }, _ = ctrl_c.as_mut() => { drop(listener); eprintln!("Ctrl-C received, starting shutdown"); break; } } } tokio::select! { _ = graceful.shutdown() => { eprintln!("Gracefully shutdown!"); }, _ = tokio::time::sleep(Duration::from_secs(10)) => { eprintln!("Waited 10 seconds for graceful shutdown, aborting..."); } } Ok(()) } hyper-util-0.1.10/src/client/client.rs000064400000000000000000000061251046102023000157020ustar 00000000000000use hyper::{Request, Response}; use tower::{Service, MakeService}; use super::connect::Connect; use super::pool; pub struct Client { // Hi there. So, let's take a 0.14.x hyper::Client, and build up its layers // here. We don't need to fully expose the layers to start with, but that // is the end goal. // // Client = MakeSvcAsService< // SetHost< // Http1RequestTarget< // DelayedRelease< // ConnectingPool // > // > // > // > make_svc: M, } // We might change this... :shrug: type PoolKey = hyper::Uri; struct ConnectingPool { connector: C, pool: P, } struct PoolableSvc(S); /// A marker to identify what version a pooled connection is. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[allow(dead_code)] pub enum Ver { Auto, Http2, } // ===== impl Client ===== impl Client where M: MakeService< hyper::Uri, Request<()>, Response = Response<()>, Error = E, MakeError = E, >, //M: Service, //M::Response: Service, Response = Response>, { pub async fn request(&mut self, req: Request<()>) -> Result, E> { let mut svc = self.make_svc.make_service(req.uri().clone()).await?; svc.call(req).await } } impl Client where M: MakeService< hyper::Uri, Request<()>, Response = Response<()>, Error = E, MakeError = E, >, //M: Service, //M::Response: Service, Response = Response>, { } // ===== impl ConnectingPool ===== impl ConnectingPool where C: Connect, C::_Svc: Unpin + Send + 'static, { async fn connection_for(&self, target: PoolKey) -> Result, PoolKey>, ()> { todo!() } } impl pool::Poolable for PoolableSvc where S: Unpin + Send + 'static, { fn is_open(&self) -> bool { /* match self.tx { PoolTx::Http1(ref tx) => tx.is_ready(), #[cfg(feature = "http2")] PoolTx::Http2(ref tx) => tx.is_ready(), } */ true } fn reserve(self) -> pool::Reservation { /* match self.tx { PoolTx::Http1(tx) => Reservation::Unique(PoolClient { conn_info: self.conn_info, tx: PoolTx::Http1(tx), }), #[cfg(feature = "http2")] PoolTx::Http2(tx) => { let b = PoolClient { conn_info: self.conn_info.clone(), tx: PoolTx::Http2(tx.clone()), }; let a = PoolClient { conn_info: self.conn_info, tx: PoolTx::Http2(tx), }; Reservation::Shared(a, b) } } */ pool::Reservation::Unique(self) } fn can_share(&self) -> bool { false //self.is_http2() } } hyper-util-0.1.10/src/client/legacy/client.rs000064400000000000000000001606141046102023000171520ustar 00000000000000//! The legacy HTTP Client from 0.14.x //! //! This `Client` will eventually be deconstructed into more composable parts. //! For now, to enable people to use hyper 1.0 quicker, this `Client` exists //! in much the same way it did in hyper 0.14. 
use std::error::Error as StdError; use std::fmt; use std::future::Future; use std::pin::Pin; use std::task::{self, Poll}; use std::time::Duration; use futures_util::future::{self, Either, FutureExt, TryFutureExt}; use http::uri::Scheme; use hyper::client::conn::TrySendError as ConnTrySendError; use hyper::header::{HeaderValue, HOST}; use hyper::rt::Timer; use hyper::{body::Body, Method, Request, Response, Uri, Version}; use tracing::{debug, trace, warn}; use super::connect::capture::CaptureConnectionExtension; #[cfg(feature = "tokio")] use super::connect::HttpConnector; use super::connect::{Alpn, Connect, Connected, Connection}; use super::pool::{self, Ver}; use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper}; type BoxSendFuture = Pin + Send>>; /// A Client to make outgoing HTTP requests. /// /// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The /// underlying connection pool will be reused. #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { config: Config, connector: C, exec: Exec, #[cfg(feature = "http1")] h1_builder: hyper::client::conn::http1::Builder, #[cfg(feature = "http2")] h2_builder: hyper::client::conn::http2::Builder, pool: pool::Pool, PoolKey>, } #[derive(Clone, Copy, Debug)] struct Config { retry_canceled_requests: bool, set_host: bool, ver: Ver, } /// Client errors pub struct Error { kind: ErrorKind, source: Option>, #[cfg(any(feature = "http1", feature = "http2"))] connect_info: Option, } #[derive(Debug)] enum ErrorKind { Canceled, ChannelClosed, Connect, UserUnsupportedRequestMethod, UserUnsupportedVersion, UserAbsoluteUriRequired, SendRequest, } macro_rules! e { ($kind:ident) => { Error { kind: ErrorKind::$kind, source: None, connect_info: None, } }; ($kind:ident, $src:expr) => { Error { kind: ErrorKind::$kind, source: Some($src.into()), connect_info: None, } }; } // We might change this... 
:shrug: type PoolKey = (http::uri::Scheme, http::uri::Authority); enum TrySendError { Retryable { error: Error, req: Request, connection_reused: bool, }, Nope(Error), } /// A `Future` that will resolve to an HTTP Response. /// /// This is returned by `Client::request` (and `Client::get`). #[must_use = "futures do nothing unless polled"] pub struct ResponseFuture { inner: SyncWrapper< Pin, Error>> + Send>>, >, } // ===== impl Client ===== impl Client<(), ()> { /// Create a builder to configure a new `Client`. /// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use std::time::Duration; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// /// let client = Client::builder(TokioExecutor::new()) /// .pool_idle_timeout(Duration::from_secs(30)) /// .http2_only(true) /// .build_http(); /// # let infer: Client<_, http_body_util::Full> = client; /// # drop(infer); /// # } /// # fn main() {} /// ``` pub fn builder(executor: E) -> Builder where E: hyper::rt::Executor + Send + Sync + Clone + 'static, { Builder::new(executor) } } impl Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { /// Send a `GET` request to the supplied `Uri`. /// /// # Note /// /// This requires that the `Body` type have a `Default` implementation. /// It *should* return an "empty" version of itself, such that /// `Body::is_end_stream` is `true`. 
/// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use hyper::Uri; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// use bytes::Bytes; /// use http_body_util::Full; /// /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); /// /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); /// # } /// # fn main() {} /// ``` pub fn get(&self, uri: Uri) -> ResponseFuture where B: Default, { let body = B::default(); if !body.is_end_stream() { warn!("default Body used for get() does not return true for is_end_stream"); } let mut req = Request::new(body); *req.uri_mut() = uri; self.request(req) } /// Send a constructed `Request` using this `Client`. /// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use hyper::{Method, Request}; /// use hyper_util::client::legacy::Client; /// use http_body_util::Full; /// use hyper_util::rt::TokioExecutor; /// use bytes::Bytes; /// /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); /// /// let req: Request> = Request::builder() /// .method(Method::POST) /// .uri("http://httpbin.org/post") /// .body(Full::from("Hallo!")) /// .expect("request builder"); /// /// let future = client.request(req); /// # } /// # fn main() {} /// ``` pub fn request(&self, mut req: Request) -> ResponseFuture { let is_http_connect = req.method() == Method::CONNECT; match req.version() { Version::HTTP_11 => (), Version::HTTP_10 => { if is_http_connect { warn!("CONNECT is not allowed for HTTP/1.0"); return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod))); } } Version::HTTP_2 => (), // completely unsupported HTTP version (like HTTP/0.9)! 
other => return ResponseFuture::error_version(other), }; let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { Ok(s) => s, Err(err) => { return ResponseFuture::new(future::err(err)); } }; ResponseFuture::new(self.clone().send_request(req, pool_key)) } async fn send_request( self, mut req: Request, pool_key: PoolKey, ) -> Result, Error> { let uri = req.uri().clone(); loop { req = match self.try_send_request(req, pool_key.clone()).await { Ok(resp) => return Ok(resp), Err(TrySendError::Nope(err)) => return Err(err), Err(TrySendError::Retryable { mut req, error, connection_reused, }) => { if !self.config.retry_canceled_requests || !connection_reused { // if client disabled, don't retry // a fresh connection means we definitely can't retry return Err(error); } trace!( "unstarted request canceled, trying again (reason={:?})", error ); *req.uri_mut() = uri.clone(); req } } } } async fn try_send_request( &self, mut req: Request, pool_key: PoolKey, ) -> Result, TrySendError> { let mut pooled = self .connection_for(pool_key) .await // `connection_for` already retries checkout errors, so if // it returns an error, there's not much else to retry .map_err(TrySendError::Nope)?; req.extensions_mut() .get_mut::() .map(|conn| conn.set(&pooled.conn_info)); if pooled.is_http1() { if req.version() == Version::HTTP_2 { warn!("Connection is HTTP/1, but request requires HTTP/2"); return Err(TrySendError::Nope( e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), )); } if self.config.set_host { let uri = req.uri().clone(); req.headers_mut().entry(HOST).or_insert_with(|| { let hostname = uri.host().expect("authority implies host"); if let Some(port) = get_non_default_port(&uri) { let s = format!("{}:{}", hostname, port); HeaderValue::from_str(&s) } else { HeaderValue::from_str(hostname) } .expect("uri host is valid header value") }); } // CONNECT always sends authority-form, so check it first... 
if req.method() == Method::CONNECT { authority_form(req.uri_mut()); } else if pooled.conn_info.is_proxied { absolute_form(req.uri_mut()); } else { origin_form(req.uri_mut()); } } else if req.method() == Method::CONNECT { authority_form(req.uri_mut()); } let mut res = match pooled.try_send_request(req).await { Ok(res) => res, Err(mut err) => { return if let Some(req) = err.take_message() { Err(TrySendError::Retryable { connection_reused: pooled.is_reused(), error: e!(Canceled, err.into_error()) .with_connect_info(pooled.conn_info.clone()), req, }) } else { Err(TrySendError::Nope( e!(SendRequest, err.into_error()) .with_connect_info(pooled.conn_info.clone()), )) } } }; // If the Connector included 'extra' info, add to Response... if let Some(extra) = &pooled.conn_info.extra { extra.set(res.extensions_mut()); } // As of futures@0.1.21, there is a race condition in the mpsc // channel, such that sending when the receiver is closing can // result in the message being stuck inside the queue. It won't // ever notify until the Sender side is dropped. // // To counteract this, we must check if our senders 'want' channel // has been closed after having tried to send. If so, error out... if pooled.is_closed() { return Ok(res); } // If pooled is HTTP/2, we can toss this reference immediately. // // when pooled is dropped, it will try to insert back into the // pool. To delay that, spawn a future that completes once the // sender is ready again. // // This *should* only be once the related `Connection` has polled // for a new request to start. // // It won't be ready if there is a body to stream. 
if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { drop(pooled); } else if !res.body().is_end_stream() { //let (delayed_tx, delayed_rx) = oneshot::channel::<()>(); //res.body_mut().delayed_eof(delayed_rx); let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { // At this point, `pooled` is dropped, and had a chance // to insert into the pool (if conn was idle) //drop(delayed_tx); }); self.exec.execute(on_idle); } else { // There's no body to delay, but the connection isn't // ready yet. Only re-insert when it's ready let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); self.exec.execute(on_idle); } Ok(res) } async fn connection_for( &self, pool_key: PoolKey, ) -> Result, PoolKey>, Error> { loop { match self.one_connection_for(pool_key.clone()).await { Ok(pooled) => return Ok(pooled), Err(ClientConnectError::Normal(err)) => return Err(err), Err(ClientConnectError::CheckoutIsClosed(reason)) => { if !self.config.retry_canceled_requests { return Err(e!(Connect, reason)); } trace!( "unstarted request canceled, trying again (reason={:?})", reason, ); continue; } }; } } async fn one_connection_for( &self, pool_key: PoolKey, ) -> Result, PoolKey>, ClientConnectError> { // Return a single connection if pooling is not enabled if !self.pool.is_enabled() { return self .connect_to(pool_key) .await .map_err(ClientConnectError::Normal); } // This actually races 2 different futures to try to get a ready // connection the fastest, and to reduce connection churn. // // - If the pool has an idle connection waiting, that's used // immediately. // - Otherwise, the Connector is asked to start connecting to // the destination Uri. // - Meanwhile, the pool Checkout is watching to see if any other // request finishes and tries to insert an idle connection. 
// - If a new connection is started, but the Checkout wins after // (an idle connection became available first), the started // connection future is spawned into the runtime to complete, // and then be inserted into the pool as an idle connection. let checkout = self.pool.checkout(pool_key.clone()); let connect = self.connect_to(pool_key); let is_ver_h2 = self.config.ver == Ver::Http2; // The order of the `select` is depended on below... match future::select(checkout, connect).await { // Checkout won, connect future may have been started or not. // // If it has, let it finish and insert back into the pool, // so as to not waste the socket... Either::Left((Ok(checked_out), connecting)) => { // This depends on the `select` above having the correct // order, such that if the checkout future were ready // immediately, the connect future will never have been // started. // // If it *wasn't* ready yet, then the connect future will // have been started... if connecting.started() { let bg = connecting .map_err(|err| { trace!("background connect error: {}", err); }) .map(|_pooled| { // dropping here should just place it in // the Pool for us... }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... self.exec.execute(bg); } Ok(checked_out) } // Connect won, checkout can just be dropped. Either::Right((Ok(connected), _checkout)) => Ok(connected), // Either checkout or connect could get canceled: // // 1. Connect is canceled if this is HTTP/2 and there is // an outstanding HTTP/2 connecting task. // 2. Checkout is canceled if the pool cannot deliver an // idle connection reliably. // // In both cases, we should just wait for the other future. 
Either::Left((Err(err), connecting)) => { if err.is_canceled() { connecting.await.map_err(ClientConnectError::Normal) } else { Err(ClientConnectError::Normal(e!(Connect, err))) } } Either::Right((Err(err), checkout)) => { if err.is_canceled() { checkout.await.map_err(move |err| { if is_ver_h2 && err.is_canceled() { ClientConnectError::CheckoutIsClosed(err) } else { ClientConnectError::Normal(e!(Connect, err)) } }) } else { Err(ClientConnectError::Normal(err)) } } } } #[cfg(any(feature = "http1", feature = "http2"))] fn connect_to( &self, pool_key: PoolKey, ) -> impl Lazy, PoolKey>, Error>> + Send + Unpin { let executor = self.exec.clone(); let pool = self.pool.clone(); #[cfg(feature = "http1")] let h1_builder = self.h1_builder.clone(); #[cfg(feature = "http2")] let h2_builder = self.h2_builder.clone(); let ver = self.config.ver; let is_ver_h2 = ver == Ver::Http2; let connector = self.connector.clone(); let dst = domain_as_uri(pool_key.clone()); hyper_lazy(move || { // Try to take a "connecting lock". // // If the pool_key is for HTTP/2, and there is already a // connection being established, then this can't take a // second lock. The "connect_to" future is Canceled. let connecting = match pool.connecting(&pool_key, ver) { Some(lock) => lock, None => { let canceled = e!(Canceled); // TODO //crate::Error::new_canceled().with("HTTP/2 connection in progress"); return Either::Right(future::err(canceled)); } }; Either::Left( connector .connect(super::connect::sealed::Internal, dst) .map_err(|src| e!(Connect, src)) .and_then(move |io| { let connected = io.connected(); // If ALPN is h2 and we aren't http2_only already, // then we need to convert our pool checkout into // a single HTTP2 one. let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { match connecting.alpn_h2(&pool) { Some(lock) => { trace!("ALPN negotiated h2, updating pool"); lock } None => { // Another connection has already upgraded, // the pool checkout should finish up for us. 
let canceled = e!(Canceled, "ALPN upgraded to HTTP/2"); return Either::Right(future::err(canceled)); } } } else { connecting }; #[cfg_attr(not(feature = "http2"), allow(unused))] let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; Either::Left(Box::pin(async move { let tx = if is_h2 { #[cfg(feature = "http2")] { let (mut tx, conn) = h2_builder.handshake(io).await.map_err(Error::tx)?; trace!( "http2 handshake complete, spawning background dispatcher task" ); executor.execute( conn.map_err(|e| debug!("client connection error: {}", e)) .map(|_| ()), ); // Wait for 'conn' to ready up before we // declare this tx as usable tx.ready().await.map_err(Error::tx)?; PoolTx::Http2(tx) } #[cfg(not(feature = "http2"))] panic!("http2 feature is not enabled"); } else { #[cfg(feature = "http1")] { let (mut tx, conn) = h1_builder.handshake(io).await.map_err(Error::tx)?; trace!( "http1 handshake complete, spawning background dispatcher task" ); executor.execute( conn.with_upgrades() .map_err(|e| debug!("client connection error: {}", e)) .map(|_| ()), ); // Wait for 'conn' to ready up before we // declare this tx as usable tx.ready().await.map_err(Error::tx)?; PoolTx::Http1(tx) } #[cfg(not(feature = "http1"))] { panic!("http1 feature is not enabled"); } }; Ok(pool.pooled( connecting, PoolClient { conn_info: connected, tx, }, )) })) }), ) }) } } impl tower_service::Service> for Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { type Response = Response; type Error = Error; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { self.request(req) } } impl tower_service::Service> for &'_ Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { type Response = Response; type Error = Error; type Future = ResponseFuture; fn poll_ready(&mut 
self, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { self.request(req) } } impl Clone for Client { fn clone(&self) -> Client { Client { config: self.config, exec: self.exec.clone(), #[cfg(feature = "http1")] h1_builder: self.h1_builder.clone(), #[cfg(feature = "http2")] h2_builder: self.h2_builder.clone(), connector: self.connector.clone(), pool: self.pool.clone(), } } } impl fmt::Debug for Client { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Client").finish() } } // ===== impl ResponseFuture ===== impl ResponseFuture { fn new(value: F) -> Self where F: Future, Error>> + Send + 'static, { Self { inner: SyncWrapper::new(Box::pin(value)), } } fn error_version(ver: Version) -> Self { warn!("Request has unsupported version \"{:?}\"", ver); ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion)))) } } impl fmt::Debug for ResponseFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Future") } } impl Future for ResponseFuture { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.inner.get_mut().as_mut().poll(cx) } } // ===== impl PoolClient ===== // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] struct PoolClient { conn_info: Connected, tx: PoolTx, } enum PoolTx { #[cfg(feature = "http1")] Http1(hyper::client::conn::http1::SendRequest), #[cfg(feature = "http2")] Http2(hyper::client::conn::http2::SendRequest), } impl PoolClient { fn poll_ready( &mut self, #[allow(unused_variables)] cx: &mut task::Context<'_>, ) -> Poll> { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), #[cfg(feature = "http2")] PoolTx::Http2(_) => Poll::Ready(Ok(())), } } fn is_http1(&self) -> bool { !self.is_http2() } fn is_http2(&self) -> bool { match self.tx { #[cfg(feature = "http1")] 
PoolTx::Http1(_) => false, #[cfg(feature = "http2")] PoolTx::Http2(_) => true, } } fn is_poisoned(&self) -> bool { self.conn_info.poisoned.poisoned() } fn is_ready(&self) -> bool { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref tx) => tx.is_ready(), #[cfg(feature = "http2")] PoolTx::Http2(ref tx) => tx.is_ready(), } } fn is_closed(&self) -> bool { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref tx) => tx.is_closed(), #[cfg(feature = "http2")] PoolTx::Http2(ref tx) => tx.is_closed(), } } } impl PoolClient { fn try_send_request( &mut self, req: Request, ) -> impl Future, ConnTrySendError>>> where B: Send, { #[cfg(all(feature = "http1", feature = "http2"))] return match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), #[cfg(feature = "http2")] PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)), }; #[cfg(feature = "http1")] #[cfg(not(feature = "http2"))] return match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => tx.try_send_request(req), }; #[cfg(not(feature = "http1"))] #[cfg(feature = "http2")] return match self.tx { #[cfg(feature = "http2")] PoolTx::Http2(ref mut tx) => tx.try_send_request(req), }; } } impl pool::Poolable for PoolClient where B: Send + 'static, { fn is_open(&self) -> bool { !self.is_poisoned() && self.is_ready() } fn reserve(self) -> pool::Reservation { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { conn_info: self.conn_info, tx: PoolTx::Http1(tx), }), #[cfg(feature = "http2")] PoolTx::Http2(tx) => { let b = PoolClient { conn_info: self.conn_info.clone(), tx: PoolTx::Http2(tx.clone()), }; let a = PoolClient { conn_info: self.conn_info, tx: PoolTx::Http2(tx), }; pool::Reservation::Shared(a, b) } } } fn can_share(&self) -> bool { self.is_http2() } } enum ClientConnectError { Normal(Error), CheckoutIsClosed(pool::Error), } fn origin_form(uri: &mut Uri) { let path = match 
uri.path_and_query() { Some(path) if path.as_str() != "/" => { let mut parts = ::http::uri::Parts::default(); parts.path_and_query = Some(path.clone()); Uri::from_parts(parts).expect("path is valid uri") } _none_or_just_slash => { debug_assert!(Uri::default() == "/"); Uri::default() } }; *uri = path } fn absolute_form(uri: &mut Uri) { debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); debug_assert!( uri.authority().is_some(), "absolute_form needs an authority" ); // If the URI is to HTTPS, and the connector claimed to be a proxy, // then it *should* have tunneled, and so we don't want to send // absolute-form in that case. if uri.scheme() == Some(&Scheme::HTTPS) { origin_form(uri); } } fn authority_form(uri: &mut Uri) { if let Some(path) = uri.path_and_query() { // `https://hyper.rs` would parse with `/` path, don't // annoy people about that... if path != "/" { warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); } } *uri = match uri.authority() { Some(auth) => { let mut parts = ::http::uri::Parts::default(); parts.authority = Some(auth.clone()); Uri::from_parts(parts).expect("authority is valid") } None => { unreachable!("authority_form with relative uri"); } }; } fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result { let uri_clone = uri.clone(); match (uri_clone.scheme(), uri_clone.authority()) { (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), (None, Some(auth)) if is_http_connect => { let scheme = match auth.port_u16() { Some(443) => { set_scheme(uri, Scheme::HTTPS); Scheme::HTTPS } _ => { set_scheme(uri, Scheme::HTTP); Scheme::HTTP } }; Ok((scheme, auth.clone())) } _ => { debug!("Client requires absolute-form URIs, received: {:?}", uri); Err(e!(UserAbsoluteUriRequired)) } } } fn domain_as_uri((scheme, auth): PoolKey) -> Uri { http::uri::Builder::new() .scheme(scheme) .authority(auth) .path_and_query("/") .build() .expect("domain is valid Uri") } fn set_scheme(uri: &mut Uri, scheme: Scheme) { 
debug_assert!( uri.scheme().is_none(), "set_scheme expects no existing scheme" ); let old = std::mem::take(uri); let mut parts: ::http::uri::Parts = old.into(); parts.scheme = Some(scheme); parts.path_and_query = Some("/".parse().expect("slash is a valid path")); *uri = Uri::from_parts(parts).expect("scheme is valid"); } fn get_non_default_port(uri: &Uri) -> Option> { match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { (Some(443), true) => None, (Some(80), false) => None, _ => uri.port(), } } fn is_schema_secure(uri: &Uri) -> bool { uri.scheme_str() .map(|scheme_str| matches!(scheme_str, "wss" | "https")) .unwrap_or_default() } /// A builder to configure a new [`Client`](Client). /// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use std::time::Duration; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// /// let client = Client::builder(TokioExecutor::new()) /// .pool_idle_timeout(Duration::from_secs(30)) /// .http2_only(true) /// .build_http(); /// # let infer: Client<_, http_body_util::Full> = client; /// # drop(infer); /// # } /// # fn main() {} /// ``` #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] #[derive(Clone)] pub struct Builder { client_config: Config, exec: Exec, #[cfg(feature = "http1")] h1_builder: hyper::client::conn::http1::Builder, #[cfg(feature = "http2")] h2_builder: hyper::client::conn::http2::Builder, pool_config: pool::Config, pool_timer: Option, } impl Builder { /// Construct a new Builder. 
pub fn new<E>(executor: E) -> Self
where
    E: hyper::rt::Executor<BoxSendFuture> + Send + Sync + Clone + 'static,
{
    let exec = Exec::new(executor);
    Self {
        client_config: Config {
            retry_canceled_requests: true,
            set_host: true,
            ver: Ver::Auto,
        },
        exec: exec.clone(),
        #[cfg(feature = "http1")]
        h1_builder: hyper::client::conn::http1::Builder::new(),
        #[cfg(feature = "http2")]
        h2_builder: hyper::client::conn::http2::Builder::new(exec),
        pool_config: pool::Config {
            // Idle pooled connections are kept for 90s by default; a pool
            // timer must be supplied (see `pool_timer`) for this to fire.
            idle_timeout: Some(Duration::from_secs(90)),
            max_idle_per_host: usize::MAX,
        },
        pool_timer: None,
    }
}

/// Set an optional timeout for idle sockets being kept-alive.
/// A `Timer` is required for this to take effect. See `Builder::pool_timer`.
///
/// Pass `None` to disable timeout.
///
/// Default is 90 seconds.
///
/// # Example
///
/// ```
/// # #[cfg(feature = "tokio")]
/// # fn run () {
/// use std::time::Duration;
/// use hyper_util::client::legacy::Client;
/// use hyper_util::rt::{TokioExecutor, TokioTimer};
///
/// let client = Client::builder(TokioExecutor::new())
///     .pool_idle_timeout(Duration::from_secs(30))
///     .pool_timer(TokioTimer::new())
///     .build_http();
///
/// # let infer: Client<_, http_body_util::Full<bytes::Bytes>> = client;
/// # }
/// # fn main() {}
/// ```
pub fn pool_idle_timeout<D>(&mut self, val: D) -> &mut Self
where
    D: Into<Option<Duration>>,
{
    self.pool_config.idle_timeout = val.into();
    self
}

#[doc(hidden)]
#[deprecated(note = "renamed to `pool_max_idle_per_host`")]
pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
    self.pool_config.max_idle_per_host = max_idle;
    self
}

/// Sets the maximum idle connection per host allowed in the pool.
///
/// Default is `usize::MAX` (no limit).
pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
    self.pool_config.max_idle_per_host = max_idle;
    self
}

// HTTP/1 options

/// Sets the exact size of the read buffer to *always* use.
///
/// Note that setting this option unsets the `http1_max_buf_size` option.
///
/// Default is an adaptive read buffer.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
    self.h1_builder.read_buf_exact_size(Some(sz));
    self
}

/// Set the maximum buffer size for the connection.
///
/// Default is ~400kb.
///
/// Note that setting this option unsets the `http1_read_exact_buf_size` option.
///
/// # Panics
///
/// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
    self.h1_builder.max_buf_size(max);
    self
}

/// Set whether HTTP/1 connections will accept spaces between header names
/// and the colon that follow them in responses.
///
/// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
/// parsing.
///
/// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
/// to say about it:
///
/// > No whitespace is allowed between the header field-name and colon. In
/// > the past, differences in the handling of such whitespace have led to
/// > security vulnerabilities in request routing and response handling. A
/// > server MUST reject any received request message that contains
/// > whitespace between a header field-name and colon with a response code
/// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
/// > response message before forwarding the message downstream.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
///
/// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self {
    self.h1_builder
        .allow_spaces_after_header_name_in_responses(val);
    self
}

/// Set whether HTTP/1 connections will accept obsolete line folding for
/// header values.
/// /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has /// to say about it: /// /// > A server that receives an obs-fold in a request message that is not /// > within a message/http container MUST either reject the message by /// > sending a 400 (Bad Request), preferably with a representation /// > explaining that obsolete line folding is unacceptable, or replace /// > each received obs-fold with one or more SP octets prior to /// > interpreting the field value or forwarding the message downstream. /// /// > A proxy or gateway that receives an obs-fold in a response message /// > that is not within a message/http container MUST either discard the /// > message and replace it with a 502 (Bad Gateway) response, preferably /// > with a representation explaining that unacceptable line folding was /// > received, or replace each received obs-fold with one or more SP /// > octets prior to interpreting the field value or forwarding the /// > message downstream. /// /// > A user agent that receives an obs-fold in a response message that is /// > not within a message/http container MUST replace each received /// > obs-fold with one or more SP octets prior to interpreting the field /// > value. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { self.h1_builder .allow_obsolete_multiline_headers_in_responses(val); self } /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. /// /// This mimics the behaviour of major browsers. You probably don't want this. 
/// You should only want this if you are implementing a proxy whose main /// purpose is to sit in front of browsers whose users access arbitrary content /// which may be malformed, and they expect everything that works without /// the proxy to keep working with the proxy. /// /// This option will prevent Hyper's client from returning an error encountered /// when parsing a header, except if the error was caused by the character NUL /// (ASCII code 0), as Chrome specifically always reject those. /// /// The ignorable errors are: /// * empty header names; /// * characters that are not allowed in header names, except for `\0` and `\r`; /// * when `allow_spaces_after_header_name_in_responses` is not enabled, /// spaces and tabs between the header name and the colon; /// * missing colon between header name and colon; /// * characters that are not allowed in header values except for `\0` and `\r`. /// /// If an ignorable error is encountered, the parser tries to find the next /// line in the input to resume parsing the rest of the headers. An error /// will be emitted nonetheless if it finds `\0` or a lone `\r` while /// looking for the next line. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder { self.h1_builder.ignore_invalid_headers_in_responses(val); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force hyper to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. 
In this mode hyper will try to guess which /// mode to use #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { self.h1_builder.writev(enabled); self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { self.h1_builder.title_case_headers(val); self } /// Set whether to support preserving original header cases. /// /// Currently, this will record the original cases received, and store them /// in a private extension on the `Response`. It will also look for and use /// such an extension in any provided `Request`. /// /// Since the relevant extension is still private, there is no way to /// interact with the original cases. The only effect this can have now is /// to forward the cases in a proxy-like fashion. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { self.h1_builder.preserve_header_case(val); self } /// Set the maximum number of headers. /// /// When a response is received, the parser will reserve a buffer to store headers for optimal /// performance. /// /// If client receives more headers than the buffer size, the error "message header too large" /// is returned. /// /// The headers is allocated on the stack by default, which has higher performance. After /// setting this value, headers will be allocated in heap memory, that is, heap memory /// allocation will occur for each response, and there will be a performance drop of about 5%. /// /// Note that this setting does not affect HTTP/2. /// /// Default is 100. 
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_max_headers(&mut self, val: usize) -> &mut Self {
    self.h1_builder.max_headers(val);
    self
}

/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http09_responses(&mut self, val: bool) -> &mut Self {
    self.h1_builder.http09_responses(val);
    self
}

/// Set whether the connection **must** use HTTP/2.
///
/// The destination must either allow HTTP2 Prior Knowledge, or the
/// `Connect` should be configured to use ALPN to upgrade to `h2`
/// as part of the connection process. This will not make the `Client`
/// utilize ALPN by itself.
///
/// Note that setting this to true prevents HTTP/1 from being allowed.
///
/// Default is false.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_only(&mut self, val: bool) -> &mut Self {
    self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
    self
}

/// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
///
/// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2).
/// As of v0.4.0, it is 20.
///
/// See <https://github.com/hyperium/hyper/issues/2877> for more information.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_pending_accept_reset_streams(
    &mut self,
    max: impl Into<Option<usize>>,
) -> &mut Self {
    self.h2_builder.max_pending_accept_reset_streams(max.into());
    self
}

/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
    self.h2_builder.initial_stream_window_size(sz.into());
    self
}

/// Sets the max connection-level flow control for HTTP2
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_initial_connection_window_size(
    &mut self,
    sz: impl Into<Option<u32>>,
) -> &mut Self {
    self.h2_builder.initial_connection_window_size(sz.into());
    self
}

/// Sets the initial maximum of locally initiated (send) streams.
///
/// This value will be overwritten by the value included in the initial
/// SETTINGS frame received from the peer as part of a [connection preface].
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
///
/// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_initial_max_send_streams(
    &mut self,
    initial: impl Into<Option<usize>>,
) -> &mut Self {
    self.h2_builder.initial_max_send_streams(initial);
    self
}

/// Sets whether to use an adaptive flow control.
///
/// Enabling this will override the limits set in
/// `http2_initial_stream_window_size` and
/// `http2_initial_connection_window_size`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
    self.h2_builder.adaptive_window(enabled);
    self
}

/// Sets the maximum frame size to use for HTTP2.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { self.h2_builder.max_frame_size(sz); self } /// Sets the max size of received header frames for HTTP2. /// /// Default is currently 16KB, but can change. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self { self.h2_builder.max_header_list_size(max); self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. #[cfg(feature = "tokio")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval( &mut self, interval: impl Into>, ) -> &mut Self { self.h2_builder.keep_alive_interval(interval); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. /// /// Default is 20 seconds. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. #[cfg(feature = "tokio")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout(timeout); self } /// Sets whether HTTP2 keep-alive should apply while the connection is idle. /// /// If disabled, keep-alive pings are only sent while there are open /// request/responses streams. If enabled, pings are also sent when no /// streams are active. Does nothing if `http2_keep_alive_interval` is /// disabled. /// /// Default is `false`. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. 
#[cfg(feature = "tokio")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
    self.h2_builder.keep_alive_while_idle(enabled);
    self
}

/// Sets the maximum number of HTTP2 concurrent locally reset streams.
///
/// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
/// details.
///
/// The default value is determined by the `h2` crate.
///
/// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
    self.h2_builder.max_concurrent_reset_streams(max);
    self
}

/// Provide a timer to be used for h2
///
/// See the documentation of [`h2::client::Builder::timer`] for more
/// details.
///
/// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer
pub fn timer<M>(&mut self, timer: M) -> &mut Self
where
    M: Timer + Send + Sync + 'static,
{
    // Only HTTP/2 currently needs a connection-level timer; without the
    // feature the timer is accepted but unused.
    #[cfg(feature = "http2")]
    self.h2_builder.timer(timer);
    self
}

/// Provide a timer to be used for timeouts and intervals in connection pools.
pub fn pool_timer<M>(&mut self, timer: M) -> &mut Self
where
    M: Timer + Clone + Send + Sync + 'static,
{
    self.pool_timer = Some(timer::Timer::new(timer.clone()));
    self
}

/// Set the maximum write buffer size for each HTTP/2 stream.
///
/// Default is currently 1MB, but may change.
///
/// # Panics
///
/// The value must be no larger than `u32::MAX`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
    self.h2_builder.max_send_buf_size(max);
    self
}

/// Set whether to retry requests that get disrupted before ever starting
/// to write.
///
/// This means a request that is queued, and gets given an idle, reused
/// connection, and then encounters an error immediately as the idle
/// connection was found to be unusable.
///
/// When this is set to `false`, the related `ResponseFuture` would instead
/// resolve to an `Error::Cancel`.
///
/// Default is `true`.
#[inline]
pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
    self.client_config.retry_canceled_requests = val;
    self
}

/// Set whether to automatically add the `Host` header to requests.
///
/// If true, and a request does not include a `Host` header, one will be
/// added automatically, derived from the authority of the `Uri`.
///
/// Default is `true`.
#[inline]
pub fn set_host(&mut self, val: bool) -> &mut Self {
    self.client_config.set_host = val;
    self
}

/// Build a client with this configuration and the default `HttpConnector`.
#[cfg(feature = "tokio")]
pub fn build_http<B>(&self) -> Client<HttpConnector, B>
where
    B: Body + Send,
    B::Data: Send,
{
    let mut connector = HttpConnector::new();
    // Keep TCP keepalive in sync with the pool's idle timeout so that the
    // OS doesn't kill sockets the pool still considers usable.
    if self.pool_config.is_enabled() {
        connector.set_keepalive(self.pool_config.idle_timeout);
    }
    self.build(connector)
}

/// Combine the configuration of this builder with a connector to create a `Client`.
pub fn build<C, B>(&self, connector: C) -> Client<C, B>
where
    C: Connect + Clone,
    B: Body + Send,
    B::Data: Send,
{
    let exec = self.exec.clone();
    let timer = self.pool_timer.clone();
    Client {
        config: self.client_config,
        exec: exec.clone(),
        #[cfg(feature = "http1")]
        h1_builder: self.h1_builder.clone(),
        #[cfg(feature = "http2")]
        h2_builder: self.h2_builder.clone(),
        connector,
        pool: pool::Pool::new(self.pool_config, exec, timer),
    }
}
}

impl fmt::Debug for Builder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Connection builders are intentionally omitted; only the
        // client- and pool-level configuration is interesting here.
        f.debug_struct("Builder")
            .field("client_config", &self.client_config)
            .field("pool_config", &self.pool_config)
            .finish()
    }
}

// ==== impl Error ====

impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut f = f.debug_tuple("hyper_util::client::legacy::Error");
        f.field(&self.kind);
        if let Some(ref cause) = self.source {
            f.field(cause);
        }
        f.finish()
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "client error ({:?})", self.kind)
    }
}

impl StdError for Error {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        self.source.as_ref().map(|e| &**e as _)
    }
}

impl Error {
    /// Returns true if this was an error from `Connect`.
    pub fn is_connect(&self) -> bool {
        matches!(self.kind, ErrorKind::Connect)
    }

    /// Returns the info of the client connection on which this error occurred.
#[cfg(any(feature = "http1", feature = "http2"))] pub fn connect_info(&self) -> Option<&Connected> { self.connect_info.as_ref() } #[cfg(any(feature = "http1", feature = "http2"))] fn with_connect_info(self, connect_info: Connected) -> Self { Self { connect_info: Some(connect_info), ..self } } fn is_canceled(&self) -> bool { matches!(self.kind, ErrorKind::Canceled) } fn tx(src: hyper::Error) -> Self { e!(SendRequest, src) } fn closed(src: hyper::Error) -> Self { e!(ChannelClosed, src) } } hyper-util-0.1.10/src/client/legacy/connect/capture.rs000064400000000000000000000142121046102023000207600ustar 00000000000000use std::{ops::Deref, sync::Arc}; use http::Request; use tokio::sync::watch; use super::Connected; /// [`CaptureConnection`] allows callers to capture [`Connected`] information /// /// To capture a connection for a request, use [`capture_connection`]. #[derive(Debug, Clone)] pub struct CaptureConnection { rx: watch::Receiver>, } /// Capture the connection for a given request /// /// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. /// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon /// as the connection is established. /// /// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. /// /// # Examples /// /// **Synchronous access**: /// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been /// established. This is ideal for situations where you are certain the connection has already /// been established (e.g. after the response future has already completed). /// ```rust /// use hyper_util::client::legacy::connect::capture_connection; /// let mut request = http::Request::builder() /// .uri("http://foo.com") /// .body(()) /// .unwrap(); /// /// let captured_connection = capture_connection(&mut request); /// // some time later after the request has been sent... 
/// let connection_info = captured_connection.connection_metadata(); /// println!("we are connected! {:?}", connection_info.as_ref()); /// ``` /// /// **Asynchronous access**: /// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the /// connection is available. /// /// ```rust /// # #[cfg(feature = "tokio")] /// # async fn example() { /// use hyper_util::client::legacy::connect::capture_connection; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// use bytes::Bytes; /// use http_body_util::Empty; /// let mut request = http::Request::builder() /// .uri("http://foo.com") /// .body(Empty::::new()) /// .unwrap(); /// /// let mut captured = capture_connection(&mut request); /// tokio::task::spawn(async move { /// let connection_info = captured.wait_for_connection_metadata().await; /// println!("we are connected! {:?}", connection_info.as_ref()); /// }); /// /// let client = Client::builder(TokioExecutor::new()).build_http(); /// client.request(request).await.expect("request failed"); /// # } /// ``` pub fn capture_connection(request: &mut Request) -> CaptureConnection { let (tx, rx) = CaptureConnection::new(); request.extensions_mut().insert(tx); rx } /// TxSide for [`CaptureConnection`] /// /// This is inserted into `Extensions` to allow Hyper to back channel connection info #[derive(Clone)] pub(crate) struct CaptureConnectionExtension { tx: Arc>>, } impl CaptureConnectionExtension { pub(crate) fn set(&self, connected: &Connected) { self.tx.send_replace(Some(connected.clone())); } } impl CaptureConnection { /// Internal API to create the tx and rx half of [`CaptureConnection`] pub(crate) fn new() -> (CaptureConnectionExtension, Self) { let (tx, rx) = watch::channel(None); ( CaptureConnectionExtension { tx: Arc::new(tx) }, CaptureConnection { rx }, ) } /// Retrieve the connection metadata, if available pub fn connection_metadata(&self) -> impl Deref> + '_ { self.rx.borrow() } /// Wait 
for the connection to be established /// /// If a connection was established, this will always return `Some(...)`. If the request never /// successfully connected (e.g. DNS resolution failure), this method will never return. pub async fn wait_for_connection_metadata( &mut self, ) -> impl Deref> + '_ { if self.rx.borrow().is_some() { return self.rx.borrow(); } let _ = self.rx.changed().await; self.rx.borrow() } } #[cfg(all(test, not(miri)))] mod test { use super::*; #[test] fn test_sync_capture_connection() { let (tx, rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); tx.set(&Connected::new().proxy(true)); assert_eq!( rx.connection_metadata() .as_ref() .expect("connected should be set") .is_proxied(), true ); // ensure it can be called multiple times assert_eq!( rx.connection_metadata() .as_ref() .expect("connected should be set") .is_proxied(), true ); } #[tokio::test] async fn async_capture_connection() { let (tx, mut rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); let test_task = tokio::spawn(async move { assert_eq!( rx.wait_for_connection_metadata() .await .as_ref() .expect("connection should be set") .is_proxied(), true ); // can be awaited multiple times assert!( rx.wait_for_connection_metadata().await.is_some(), "should be awaitable multiple times" ); assert_eq!(rx.connection_metadata().is_some(), true); }); // can't be finished, we haven't set the connection yet assert_eq!(test_task.is_finished(), false); tx.set(&Connected::new().proxy(true)); assert!(test_task.await.is_ok()); } #[tokio::test] async fn capture_connection_sender_side_dropped() { let (tx, mut rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); drop(tx); assert!(rx.wait_for_connection_metadata().await.is_none()); } } 
hyper-util-0.1.10/src/client/legacy/connect/dns.rs000064400000000000000000000241761046102023000201130ustar 00000000000000//! DNS Resolution used by the `HttpConnector`. //! //! This module contains: //! //! - A [`GaiResolver`](GaiResolver) that is the default resolver for the //! `HttpConnector`. //! - The `Name` type used as an argument to custom resolvers. //! //! # Resolvers are `Service`s //! //! A resolver is just a //! `Service>`. //! //! A simple resolver that ignores the name and always returns a specific //! address: //! //! ```rust,ignore //! use std::{convert::Infallible, iter, net::SocketAddr}; //! //! let resolver = tower::service_fn(|_name| async { //! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) //! }); //! ``` use std::error::Error; use std::future::Future; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; use std::pin::Pin; use std::str::FromStr; use std::task::{self, Poll}; use std::{fmt, io, vec}; use tokio::task::JoinHandle; use tower_service::Service; use tracing::debug_span; pub(super) use self::sealed::Resolve; /// A domain name to resolve into IP addresses. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Name { host: Box, } /// A resolver using blocking `getaddrinfo` calls in a threadpool. #[derive(Clone)] pub struct GaiResolver { _priv: (), } /// An iterator of IP addresses returned from `getaddrinfo`. pub struct GaiAddrs { inner: SocketAddrs, } /// A future to resolve a name returned by `GaiResolver`. pub struct GaiFuture { inner: JoinHandle>, } impl Name { pub(super) fn new(host: Box) -> Name { Name { host } } /// View the hostname as a string slice. 
pub fn as_str(&self) -> &str { &self.host } } impl fmt::Debug for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.host, f) } } impl fmt::Display for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.host, f) } } impl FromStr for Name { type Err = InvalidNameError; fn from_str(host: &str) -> Result { // Possibly add validation later Ok(Name::new(host.into())) } } /// Error indicating a given string was not a valid domain name. #[derive(Debug)] pub struct InvalidNameError(()); impl fmt::Display for InvalidNameError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Not a valid domain name") } } impl Error for InvalidNameError {} impl GaiResolver { /// Construct a new `GaiResolver`. pub fn new() -> Self { GaiResolver { _priv: () } } } impl Service for GaiResolver { type Response = GaiAddrs; type Error = io::Error; type Future = GaiFuture; fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, name: Name) -> Self::Future { let span = debug_span!("resolve", host = %name.host); let blocking = tokio::task::spawn_blocking(move || { let _enter = span.enter(); (&*name.host, 0) .to_socket_addrs() .map(|i| SocketAddrs { iter: i }) }); GaiFuture { inner: blocking } } } impl fmt::Debug for GaiResolver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiResolver") } } impl Future for GaiFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { Pin::new(&mut self.inner).poll(cx).map(|res| match res { Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), Ok(Err(err)) => Err(err), Err(join_err) => { if join_err.is_cancelled() { Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) } else { panic!("gai background task failed: {:?}", join_err) } } }) } } impl fmt::Debug for GaiFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiFuture") } } impl 
Drop for GaiFuture { fn drop(&mut self) { self.inner.abort(); } } impl Iterator for GaiAddrs { type Item = SocketAddr; fn next(&mut self) -> Option { self.inner.next() } } impl fmt::Debug for GaiAddrs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiAddrs") } } pub(super) struct SocketAddrs { iter: vec::IntoIter, } impl SocketAddrs { pub(super) fn new(addrs: Vec) -> Self { SocketAddrs { iter: addrs.into_iter(), } } pub(super) fn try_parse(host: &str, port: u16) -> Option { if let Ok(addr) = host.parse::() { let addr = SocketAddrV4::new(addr, port); return Some(SocketAddrs { iter: vec![SocketAddr::V4(addr)].into_iter(), }); } if let Ok(addr) = host.parse::() { let addr = SocketAddrV6::new(addr, port, 0, 0); return Some(SocketAddrs { iter: vec![SocketAddr::V6(addr)].into_iter(), }); } None } #[inline] fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { SocketAddrs::new(self.iter.filter(predicate).collect()) } pub(super) fn split_by_preference( self, local_addr_ipv4: Option, local_addr_ipv6: Option, ) -> (SocketAddrs, SocketAddrs) { match (local_addr_ipv4, local_addr_ipv6) { (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), _ => { let preferring_v6 = self .iter .as_slice() .first() .map(SocketAddr::is_ipv6) .unwrap_or(false); let (preferred, fallback) = self .iter .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) } } } pub(super) fn is_empty(&self) -> bool { self.iter.as_slice().is_empty() } pub(super) fn len(&self) -> usize { self.iter.as_slice().len() } } impl Iterator for SocketAddrs { type Item = SocketAddr; #[inline] fn next(&mut self) -> Option { self.iter.next() } } mod sealed { use std::future::Future; use std::task::{self, Poll}; use super::{Name, SocketAddr}; use tower_service::Service; // "Trait alias" for `Service` pub trait Resolve { 
type Addrs: Iterator; type Error: Into>; type Future: Future>; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; fn resolve(&mut self, name: Name) -> Self::Future; } impl Resolve for S where S: Service, S::Response: Iterator, S::Error: Into>, { type Addrs = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { Service::poll_ready(self, cx) } fn resolve(&mut self, name: Name) -> Self::Future { Service::call(self, name) } } } pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result where R: Resolve, { futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; resolver.resolve(name).await } #[cfg(test)] mod tests { use super::*; use std::net::{Ipv4Addr, Ipv6Addr}; #[test] fn test_ip_addrs_split_by_preference() { let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); let v4_addr = (ip_v4, 80).into(); let v6_addr = (ip_v6, 80).into(); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(Some(ip_v4), Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(Some(ip_v4), Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } 
.split_by_preference(Some(ip_v4), None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.is_empty()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.is_empty()); } #[test] fn test_name_from_str() { const DOMAIN: &str = "test.example.com"; let name = Name::from_str(DOMAIN).expect("Should be a valid domain"); assert_eq!(name.as_str(), DOMAIN); assert_eq!(name.to_string(), DOMAIN); } } hyper-util-0.1.10/src/client/legacy/connect/http.rs000064400000000000000000001233041046102023000202770ustar 00000000000000use std::error::Error as StdError; use std::fmt; use std::future::Future; use std::io; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::pin::Pin; use std::sync::Arc; use std::task::{self, Poll}; use std::time::Duration; use futures_util::future::Either; use http::uri::{Scheme, Uri}; use pin_project_lite::pin_project; use socket2::TcpKeepalive; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; use tracing::{debug, trace, warn}; use super::dns::{self, resolve, GaiResolver, Resolve}; use super::{Connected, Connection}; use crate::rt::TokioIo; /// A connector for the `http` scheme. /// /// Performs DNS resolution in a thread pool, and then connects over TCP. /// /// # Note /// /// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes /// transport information such as the remote socket address used. #[derive(Clone)] pub struct HttpConnector { config: Arc, resolver: R, } /// Extra information about the transport when an HttpConnector is used. 
///
/// # Example
///
/// ```
/// # fn doc(res: http::Response<()>) {
/// use hyper_util::client::legacy::connect::HttpInfo;
///
/// // res = http::Response
/// res
///     .extensions()
///     .get::<HttpInfo>()
///     .map(|info| {
///         println!("remote addr = {}", info.remote_addr());
///     });
/// # }
/// ```
///
/// # Note
///
/// If a different connector is used besides [`HttpConnector`](HttpConnector),
/// this value will not exist in the extensions. Consult that specific
/// connector to see what "extra" information it might provide to responses.
#[derive(Clone, Debug)]
pub struct HttpInfo {
    remote_addr: SocketAddr,
    local_addr: SocketAddr,
}

/// Shared connector configuration; cloned-on-write via `Arc::make_mut`.
#[derive(Clone)]
struct Config {
    connect_timeout: Option<Duration>,
    enforce_http: bool,
    happy_eyeballs_timeout: Option<Duration>,
    tcp_keepalive_config: TcpKeepaliveConfig,
    local_address_ipv4: Option<Ipv4Addr>,
    local_address_ipv6: Option<Ipv6Addr>,
    nodelay: bool,
    reuse_address: bool,
    send_buffer_size: Option<usize>,
    recv_buffer_size: Option<usize>,
    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
    interface: Option<String>,
    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
    tcp_user_timeout: Option<Duration>,
}

/// TCP keepalive knobs; all `None` means keepalive is left disabled.
#[derive(Default, Debug, Clone, Copy)]
struct TcpKeepaliveConfig {
    time: Option<Duration>,
    interval: Option<Duration>,
    retries: Option<u32>,
}

impl TcpKeepaliveConfig {
    /// Converts into a `socket2::TcpKeepalive` if there is any keep alive configuration.
    fn into_tcpkeepalive(self) -> Option<TcpKeepalive> {
        // `dirty` tracks whether any platform-supported option was actually
        // applied; if none were, return `None` so keepalive stays off.
        let mut dirty = false;
        let mut ka = TcpKeepalive::new();
        if let Some(time) = self.time {
            ka = ka.with_time(time);
            dirty = true
        }
        if let Some(interval) = self.interval {
            ka = Self::ka_with_interval(ka, interval, &mut dirty)
        };
        if let Some(retries) = self.retries {
            ka = Self::ka_with_retries(ka, retries, &mut dirty)
        };
        if dirty {
            Some(ka)
        } else {
            None
        }
    }

    #[cfg(not(any(
        target_os = "aix",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "solaris"
    )))]
    fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive {
        *dirty = true;
        ka.with_interval(interval)
    }

    #[cfg(any(
        target_os = "aix",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "solaris"
    ))]
    fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
        ka // no-op as keepalive interval is not supported on this platform
    }

    #[cfg(not(any(
        target_os = "aix",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "solaris",
        target_os = "windows"
    )))]
    fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive {
        *dirty = true;
        ka.with_retries(retries)
    }

    #[cfg(any(
        target_os = "aix",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "solaris",
        target_os = "windows"
    ))]
    fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive {
        ka // no-op as keepalive retries is not supported on this platform
    }
}

// ===== impl HttpConnector =====

impl HttpConnector {
    /// Construct a new HttpConnector.
    pub fn new() -> HttpConnector {
        HttpConnector::new_with_resolver(GaiResolver::new())
    }
}

impl<R> HttpConnector<R> {
    /// Construct a new HttpConnector.
    ///
    /// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups.
pub fn new_with_resolver(resolver: R) -> HttpConnector { HttpConnector { config: Arc::new(Config { connect_timeout: None, enforce_http: true, happy_eyeballs_timeout: Some(Duration::from_millis(300)), tcp_keepalive_config: TcpKeepaliveConfig::default(), local_address_ipv4: None, local_address_ipv6: None, nodelay: false, reuse_address: false, send_buffer_size: None, recv_buffer_size: None, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] interface: None, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] tcp_user_timeout: None, }), resolver, } } /// Option to enforce all `Uri`s have the `http` scheme. /// /// Enabled by default. #[inline] pub fn enforce_http(&mut self, is_enforced: bool) { self.config_mut().enforce_http = is_enforced; } /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration /// to remain idle before sending TCP keepalive probes. /// /// If `None`, keepalive is disabled. /// /// Default is `None`. #[inline] pub fn set_keepalive(&mut self, time: Option) { self.config_mut().tcp_keepalive_config.time = time; } /// Set the duration between two successive TCP keepalive retransmissions, /// if acknowledgement to the previous keepalive transmission is not received. #[inline] pub fn set_keepalive_interval(&mut self, interval: Option) { self.config_mut().tcp_keepalive_config.interval = interval; } /// Set the number of retransmissions to be carried out before declaring that remote end is not available. #[inline] pub fn set_keepalive_retries(&mut self, retries: Option) { self.config_mut().tcp_keepalive_config.retries = retries; } /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. /// /// Default is `false`. #[inline] pub fn set_nodelay(&mut self, nodelay: bool) { self.config_mut().nodelay = nodelay; } /// Sets the value of the SO_SNDBUF option on the socket. 
#[inline] pub fn set_send_buffer_size(&mut self, size: Option) { self.config_mut().send_buffer_size = size; } /// Sets the value of the SO_RCVBUF option on the socket. #[inline] pub fn set_recv_buffer_size(&mut self, size: Option) { self.config_mut().recv_buffer_size = size; } /// Set that all sockets are bound to the configured address before connection. /// /// If `None`, the sockets will not be bound. /// /// Default is `None`. #[inline] pub fn set_local_address(&mut self, addr: Option) { let (v4, v6) = match addr { Some(IpAddr::V4(a)) => (Some(a), None), Some(IpAddr::V6(a)) => (None, Some(a)), _ => (None, None), }; let cfg = self.config_mut(); cfg.local_address_ipv4 = v4; cfg.local_address_ipv6 = v6; } /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's /// preferences) before connection. #[inline] pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { let cfg = self.config_mut(); cfg.local_address_ipv4 = Some(addr_ipv4); cfg.local_address_ipv6 = Some(addr_ipv6); } /// Set the connect timeout. /// /// If a domain resolves to multiple IP addresses, the timeout will be /// evenly divided across them. /// /// Default is `None`. #[inline] pub fn set_connect_timeout(&mut self, dur: Option) { self.config_mut().connect_timeout = dur; } /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. /// /// If hostname resolves to both IPv4 and IPv6 addresses and connection /// cannot be established using preferred address family before timeout /// elapses, then connector will in parallel attempt connection using other /// address family. /// /// If `None`, parallel connection attempts are disabled. /// /// Default is 300 milliseconds. /// /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 #[inline] pub fn set_happy_eyeballs_timeout(&mut self, dur: Option) { self.config_mut().happy_eyeballs_timeout = dur; } /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. 
/// /// Default is `false`. #[inline] pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self { self.config_mut().reuse_address = reuse_address; self } /// Sets the value for the `SO_BINDTODEVICE` option on this socket. /// /// If a socket is bound to an interface, only packets received from that particular /// interface are processed by the socket. Note that this only works for some socket /// types, particularly AF_INET sockets. /// /// On Linux it can be used to specify a [VRF], but the binary needs /// to either have `CAP_NET_RAW` or to be run as root. /// /// This function is only available on Android、Fuchsia and Linux. /// /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[inline] pub fn set_interface>(&mut self, interface: S) -> &mut Self { self.config_mut().interface = Some(interface.into()); self } /// Sets the value of the TCP_USER_TIMEOUT option on the socket. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[inline] pub fn set_tcp_user_timeout(&mut self, time: Option) { self.config_mut().tcp_user_timeout = time; } // private fn config_mut(&mut self) -> &mut Config { // If the are HttpConnector clones, this will clone the inner // config. So mutating the config won't ever affect previous // clones. Arc::make_mut(&mut self.config) } } static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http"; static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing"; static INVALID_MISSING_HOST: &str = "invalid URL, host is missing"; // R: Debug required for now to allow adding it to debug output later... 
impl fmt::Debug for HttpConnector { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("HttpConnector").finish() } } impl tower_service::Service for HttpConnector where R: Resolve + Clone + Send + Sync + 'static, R::Future: Send, { type Response = TokioIo; type Error = ConnectError; type Future = HttpConnecting; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { futures_util::ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?; Poll::Ready(Ok(())) } fn call(&mut self, dst: Uri) -> Self::Future { let mut self_ = self.clone(); HttpConnecting { fut: Box::pin(async move { self_.call_async(dst).await }), _marker: PhantomData, } } } fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { trace!( "Http::connect; scheme={:?}, host={:?}, port={:?}", dst.scheme(), dst.host(), dst.port(), ); if config.enforce_http { if dst.scheme() != Some(&Scheme::HTTP) { return Err(ConnectError { msg: INVALID_NOT_HTTP.into(), cause: None, }); } } else if dst.scheme().is_none() { return Err(ConnectError { msg: INVALID_MISSING_SCHEME.into(), cause: None, }); } let host = match dst.host() { Some(s) => s, None => { return Err(ConnectError { msg: INVALID_MISSING_HOST.into(), cause: None, }) } }; let port = match dst.port() { Some(port) => port.as_u16(), None => { if dst.scheme() == Some(&Scheme::HTTPS) { 443 } else { 80 } } }; Ok((host, port)) } impl HttpConnector where R: Resolve, { async fn call_async(&mut self, dst: Uri) -> Result, ConnectError> { let config = &self.config; let (host, port) = get_host_port(config, &dst)?; let host = host.trim_start_matches('[').trim_end_matches(']'); // If the host is already an IP addr (v4 or v6), // skip resolving the dns and start connecting right away. 
let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { addrs } else { let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) .await .map_err(ConnectError::dns)?; let addrs = addrs .map(|mut addr| { set_port(&mut addr, port, dst.port().is_some()); addr }) .collect(); dns::SocketAddrs::new(addrs) }; let c = ConnectingTcp::new(addrs, config); let sock = c.connect().await?; if let Err(e) = sock.set_nodelay(config.nodelay) { warn!("tcp set_nodelay error: {}", e); } Ok(TokioIo::new(sock)) } } impl Connection for TcpStream { fn connected(&self) -> Connected { let connected = Connected::new(); if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { connected.extra(HttpInfo { remote_addr, local_addr, }) } else { connected } } } // Implement `Connection` for generic `TokioIo` so that external crates can // implement their own `HttpConnector` with `TokioIo`. impl Connection for TokioIo where T: Connection, { fn connected(&self) -> Connected { self.inner().connected() } } impl HttpInfo { /// Get the remote address of the transport used. pub fn remote_addr(&self) -> SocketAddr { self.remote_addr } /// Get the local address of the transport used. pub fn local_addr(&self) -> SocketAddr { self.local_addr } } pin_project! { // Not publicly exported (so missing_docs doesn't trigger). // // We return this `Future` instead of the `Pin>` directly // so that users don't rely on it fitting in a `Pin>` slot // (and thus we can change the type in the future). #[must_use = "futures do nothing unless polled"] #[allow(missing_debug_implementations)] pub struct HttpConnecting { #[pin] fut: BoxConnecting, _marker: PhantomData, } } type ConnectResult = Result, ConnectError>; type BoxConnecting = Pin + Send>>; impl Future for HttpConnecting { type Output = ConnectResult; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.project().fut.poll(cx) } } // Not publicly exported (so missing_docs doesn't trigger). 
pub struct ConnectError {
    msg: Box<str>,
    cause: Option<Box<dyn StdError + Send + Sync>>,
}

impl ConnectError {
    fn new<S, E>(msg: S, cause: E) -> ConnectError
    where
        S: Into<Box<str>>,
        E: Into<Box<dyn StdError + Send + Sync>>,
    {
        ConnectError {
            msg: msg.into(),
            cause: Some(cause.into()),
        }
    }

    fn dns<E>(cause: E) -> ConnectError
    where
        E: Into<Box<dyn StdError + Send + Sync>>,
    {
        ConnectError::new("dns error", cause)
    }

    // Returns a closure suitable for `map_err`, capturing `msg`.
    fn m<S, E>(msg: S) -> impl FnOnce(E) -> ConnectError
    where
        S: Into<Box<str>>,
        E: Into<Box<dyn StdError + Send + Sync>>,
    {
        move |cause| ConnectError::new(msg, cause)
    }
}

impl fmt::Debug for ConnectError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(ref cause) = self.cause {
            f.debug_tuple("ConnectError")
                .field(&self.msg)
                .field(cause)
                .finish()
        } else {
            self.msg.fmt(f)
        }
    }
}

impl fmt::Display for ConnectError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.msg)?;

        if let Some(ref cause) = self.cause {
            write!(f, ": {}", cause)?;
        }

        Ok(())
    }
}

impl StdError for ConnectError {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        self.cause.as_ref().map(|e| &**e as _)
    }
}

/// In-progress connection attempt: a preferred address list plus an
/// optional delayed fallback list (Happy Eyeballs).
struct ConnectingTcp<'a> {
    preferred: ConnectingTcpRemote,
    fallback: Option<ConnectingTcpFallback>,
    config: &'a Config,
}

impl<'a> ConnectingTcp<'a> {
    fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self {
        if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
            let (preferred_addrs, fallback_addrs) = remote_addrs
                .split_by_preference(config.local_address_ipv4, config.local_address_ipv6);
            if fallback_addrs.is_empty() {
                return ConnectingTcp {
                    preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
                    fallback: None,
                    config,
                };
            }

            ConnectingTcp {
                preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
                fallback: Some(ConnectingTcpFallback {
                    delay: tokio::time::sleep(fallback_timeout),
                    remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout),
                }),
                config,
            }
        } else {
            ConnectingTcp {
                preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout),
                fallback: None,
                config,
            }
        }
    }
}

struct ConnectingTcpFallback {
    delay: Sleep,
    remote: ConnectingTcpRemote,
}

struct ConnectingTcpRemote {
    addrs: dns::SocketAddrs,
    connect_timeout: Option<Duration>,
}

impl ConnectingTcpRemote {
    fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>) -> Self {
        // Divide the configured timeout evenly across all candidate addresses.
        let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32));

        Self {
            addrs,
            connect_timeout,
        }
    }
}

impl ConnectingTcpRemote {
    async fn connect(&mut self, config: &Config) -> Result<TcpStream, ConnectError> {
        let mut err = None;
        for addr in &mut self.addrs {
            debug!("connecting to {}", addr);
            match connect(&addr, config, self.connect_timeout)?.await {
                Ok(tcp) => {
                    debug!("connected to {}", addr);
                    return Ok(tcp);
                }
                Err(e) => {
                    trace!("connect error for {}: {:?}", addr, e);
                    err = Some(e);
                }
            }
        }

        match err {
            Some(e) => Err(e),
            None => Err(ConnectError::new(
                "tcp connect error",
                std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"),
            )),
        }
    }
}

/// Bind the socket to the configured local address for the destination's
/// family; on Windows, bind to the wildcard address when none is configured
/// (Windows requires a socket be bound before calling connect).
fn bind_local_address(
    socket: &socket2::Socket,
    dst_addr: &SocketAddr,
    local_addr_ipv4: &Option<Ipv4Addr>,
    local_addr_ipv6: &Option<Ipv6Addr>,
) -> io::Result<()> {
    match (*dst_addr, local_addr_ipv4, local_addr_ipv6) {
        (SocketAddr::V4(_), Some(addr), _) => {
            socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;
        }
        (SocketAddr::V6(_), _, Some(addr)) => {
            socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;
        }
        _ => {
            if cfg!(windows) {
                // Windows requires a socket be bound before calling connect
                let any: SocketAddr = match *dst_addr {
                    SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),
                    SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),
                };
                socket.bind(&any.into())?;
            }
        }
    }

    Ok(())
}

fn connect(
    addr: &SocketAddr,
    config: &Config,
    connect_timeout: Option<Duration>,
) -> Result<impl Future<Output = Result<TcpStream, ConnectError>>, ConnectError> {
    // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the
    // keepalive timeout, it would be nice to use that instead of socket2,
    // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance...
use socket2::{Domain, Protocol, Socket, Type}; let domain = Domain::for_address(*addr); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) .map_err(ConnectError::m("tcp open error"))?; // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is // responsible for ensuring O_NONBLOCK is set. socket .set_nonblocking(true) .map_err(ConnectError::m("tcp set_nonblocking error"))?; if let Some(tcp_keepalive) = &config.tcp_keepalive_config.into_tcpkeepalive() { if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) { warn!("tcp set_keepalive error: {}", e); } } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] // That this only works for some socket types, particularly AF_INET sockets. if let Some(interface) = &config.interface { socket .bind_device(Some(interface.as_bytes())) .map_err(ConnectError::m("tcp bind interface error"))?; } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] if let Some(tcp_user_timeout) = &config.tcp_user_timeout { if let Err(e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) { warn!("tcp set_tcp_user_timeout error: {}", e); } } bind_local_address( &socket, addr, &config.local_address_ipv4, &config.local_address_ipv6, ) .map_err(ConnectError::m("tcp bind local error"))?; #[cfg(unix)] let socket = unsafe { // Safety: `from_raw_fd` is only safe to call if ownership of the raw // file descriptor is transferred. Since we call `into_raw_fd` on the // socket2 socket, it gives up ownership of the fd and will not close // it, so this is safe. use std::os::unix::io::{FromRawFd, IntoRawFd}; TcpSocket::from_raw_fd(socket.into_raw_fd()) }; #[cfg(windows)] let socket = unsafe { // Safety: `from_raw_socket` is only safe to call if ownership of the raw // Windows SOCKET is transferred. Since we call `into_raw_socket` on the // socket2 socket, it gives up ownership of the SOCKET and will not close // it, so this is safe. 
use std::os::windows::io::{FromRawSocket, IntoRawSocket}; TcpSocket::from_raw_socket(socket.into_raw_socket()) }; if config.reuse_address { if let Err(e) = socket.set_reuseaddr(true) { warn!("tcp set_reuse_address error: {}", e); } } if let Some(size) = config.send_buffer_size { if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(u32::MAX)) { warn!("tcp set_buffer_size error: {}", e); } } if let Some(size) = config.recv_buffer_size { if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(u32::MAX)) { warn!("tcp set_recv_buffer_size error: {}", e); } } let connect = socket.connect(*addr); Ok(async move { match connect_timeout { Some(dur) => match tokio::time::timeout(dur, connect).await { Ok(Ok(s)) => Ok(s), Ok(Err(e)) => Err(e), Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)), }, None => connect.await, } .map_err(ConnectError::m("tcp connect error")) }) } impl ConnectingTcp<'_> { async fn connect(mut self) -> Result { match self.fallback { None => self.preferred.connect(self.config).await, Some(mut fallback) => { let preferred_fut = self.preferred.connect(self.config); futures_util::pin_mut!(preferred_fut); let fallback_fut = fallback.remote.connect(self.config); futures_util::pin_mut!(fallback_fut); let fallback_delay = fallback.delay; futures_util::pin_mut!(fallback_delay); let (result, future) = match futures_util::future::select(preferred_fut, fallback_delay).await { Either::Left((result, _fallback_delay)) => { (result, Either::Right(fallback_fut)) } Either::Right(((), preferred_fut)) => { // Delay is done, start polling both the preferred and the fallback futures_util::future::select(preferred_fut, fallback_fut) .await .factor_first() } }; if result.is_err() { // Fallback to the remaining future (could be preferred or fallback) // if we get an error future.await } else { result } } } } } /// Respect explicit ports in the URI, if none, either /// keep non `0` ports resolved from a custom dns resolver, /// or use the 
/// default port for the scheme.
fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) {
    // A port of `0` means the resolver did not supply one.
    if explicit || addr.port() == 0 {
        addr.set_port(host_port)
    };
}

#[cfg(test)]
mod tests {
    use std::io;
    use std::net::SocketAddr;

    use ::http::Uri;

    use crate::client::legacy::connect::http::TcpKeepaliveConfig;

    use super::super::sealed::{Connect, ConnectSvc};
    use super::{Config, ConnectError, HttpConnector};

    use super::set_port;

    async fn connect<C>(
        connector: C,
        dst: Uri,
    ) -> Result<<C::_Svc as ConnectSvc>::Connection, <C::_Svc as ConnectSvc>::Error>
    where
        C: Connect,
    {
        connector.connect(super::super::sealed::Internal, dst).await
    }

    #[tokio::test]
    #[cfg_attr(miri, ignore)]
    async fn test_errors_enforce_http() {
        let dst = "https://example.domain/foo/bar?baz".parse().unwrap();
        let connector = HttpConnector::new();

        let err = connect(connector, dst).await.unwrap_err();
        assert_eq!(&*err.msg, super::INVALID_NOT_HTTP);
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    fn get_local_ips() -> (Option<std::net::Ipv4Addr>, Option<std::net::Ipv6Addr>) {
        use std::net::{IpAddr, TcpListener};

        let mut ip_v4 = None;
        let mut ip_v6 = None;

        let ips = pnet_datalink::interfaces()
            .into_iter()
            .flat_map(|i| i.ips.into_iter().map(|n| n.ip()));

        for ip in ips {
            match ip {
                IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip),
                IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip),
                _ => (),
            }

            if ip_v4.is_some() && ip_v6.is_some() {
                break;
            }
        }

        (ip_v4, ip_v6)
    }

    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
    fn default_interface() -> Option<String> {
        pnet_datalink::interfaces()
            .iter()
            .find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty())
            .map(|e| e.name.clone())
    }

    #[tokio::test]
    #[cfg_attr(miri, ignore)]
    async fn test_errors_missing_scheme() {
        let dst = "example.domain".parse().unwrap();
        let mut connector = HttpConnector::new();
        connector.enforce_http(false);

        let err = connect(connector, dst).await.unwrap_err();
        assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME);
    }

    // NOTE: pnet crate that we use in this test
doesn't compile on Windows #[cfg(any(target_os = "linux", target_os = "macos"))] #[cfg_attr(miri, ignore)] #[tokio::test] async fn local_address() { use std::net::{IpAddr, TcpListener}; let (bind_ip_v4, bind_ip_v6) = get_local_ips(); let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let port = server4.local_addr().unwrap().port(); let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { let mut connector = HttpConnector::new(); match (bind_ip_v4, bind_ip_v6) { (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), (Some(v4), None) => connector.set_local_address(Some(v4.into())), (None, Some(v6)) => connector.set_local_address(Some(v6.into())), _ => unreachable!(), } connect(connector, dst.parse().unwrap()).await.unwrap(); let (_, client_addr) = server.accept().unwrap(); assert_eq!(client_addr.ip(), expected_ip); }; if let Some(ip) = bind_ip_v4 { assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into()).await; } if let Some(ip) = bind_ip_v6 { assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await; } } // NOTE: pnet crate that we use in this test doesn't compile on Windows #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[tokio::test] #[ignore = "setting `SO_BINDTODEVICE` requires the `CAP_NET_RAW` capability (works when running as root)"] async fn interface() { use socket2::{Domain, Protocol, Socket, Type}; use std::net::TcpListener; let interface: Option = default_interface(); let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let port = server4.local_addr().unwrap().port(); let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); let assert_interface_name = |dst: String, server: TcpListener, bind_iface: Option, expected_interface: Option| async move { let mut connector = HttpConnector::new(); if let Some(iface) = bind_iface { connector.set_interface(iface); } 
connect(connector, dst.parse().unwrap()).await.unwrap(); let domain = Domain::for_address(server.local_addr().unwrap()); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap(); assert_eq!( socket.device().unwrap().as_deref(), expected_interface.as_deref().map(|val| val.as_bytes()) ); }; assert_interface_name( format!("http://127.0.0.1:{}", port), server4, interface.clone(), interface.clone(), ) .await; assert_interface_name( format!("http://[::1]:{}", port), server6, interface.clone(), interface.clone(), ) .await; } #[test] #[ignore] // TODO #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] fn client_happy_eyeballs() { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; use std::time::{Duration, Instant}; use super::dns; use super::ConnectingTcp; let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server4.local_addr().unwrap(); let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap(); let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); let local_timeout = Duration::default(); let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1; let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) + Duration::from_millis(250); let scenarios = &[ // Fast primary, without fallback. (&[local_ipv4_addr()][..], 4, local_timeout, false), (&[local_ipv6_addr()][..], 6, local_timeout, false), // Fast primary, with (unused) fallback. ( &[local_ipv4_addr(), local_ipv6_addr()][..], 4, local_timeout, false, ), ( &[local_ipv6_addr(), local_ipv4_addr()][..], 6, local_timeout, false, ), // Unreachable + fast primary, without fallback. 
( &[unreachable_ipv4_addr(), local_ipv4_addr()][..], 4, unreachable_v4_timeout, false, ), ( &[unreachable_ipv6_addr(), local_ipv6_addr()][..], 6, unreachable_v6_timeout, false, ), // Unreachable + fast primary, with (unused) fallback. ( &[ unreachable_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr(), ][..], 4, unreachable_v4_timeout, false, ), ( &[ unreachable_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr(), ][..], 6, unreachable_v6_timeout, true, ), // Slow primary, with (used) fallback. ( &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], 6, fallback_timeout, false, ), ( &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], 4, fallback_timeout, true, ), // Slow primary, with (used) unreachable + fast fallback. ( &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], 6, fallback_timeout + unreachable_v6_timeout, false, ), ( &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], 4, fallback_timeout + unreachable_v4_timeout, true, ), ]; // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. // Otherwise, connection to "slow" IPv6 address will error-out immediately. 
let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; for &(hosts, family, timeout, needs_ipv6_access) in scenarios { if needs_ipv6_access && !ipv6_accessible { continue; } let (start, stream) = rt .block_on(async move { let addrs = hosts .iter() .map(|host| (host.clone(), addr.port()).into()) .collect(); let cfg = Config { local_address_ipv4: None, local_address_ipv6: None, connect_timeout: None, tcp_keepalive_config: TcpKeepaliveConfig::default(), happy_eyeballs_timeout: Some(fallback_timeout), nodelay: false, reuse_address: false, enforce_http: false, send_buffer_size: None, recv_buffer_size: None, #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "linux" ))] interface: None, #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "linux" ))] tcp_user_timeout: None, }; let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); let start = Instant::now(); Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) }) .unwrap(); let res = if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 }; let duration = start.elapsed(); // Allow actual duration to be +/- 150ms off. let min_duration = if timeout >= Duration::from_millis(150) { timeout - Duration::from_millis(150) } else { Duration::default() }; let max_duration = timeout + Duration::from_millis(150); assert_eq!(res, family); assert!(duration >= min_duration); assert!(duration <= max_duration); } fn local_ipv4_addr() -> IpAddr { Ipv4Addr::new(127, 0, 0, 1).into() } fn local_ipv6_addr() -> IpAddr { Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() } fn unreachable_ipv4_addr() -> IpAddr { Ipv4Addr::new(127, 0, 0, 2).into() } fn unreachable_ipv6_addr() -> IpAddr { Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() } fn slow_ipv4_addr() -> IpAddr { // RFC 6890 reserved IPv4 address. Ipv4Addr::new(198, 18, 0, 25).into() } fn slow_ipv6_addr() -> IpAddr { // RFC 6890 reserved IPv6 address. 
Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() } fn measure_connect(addr: IpAddr) -> (bool, Duration) { let start = Instant::now(); let result = std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1)); let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; let duration = start.elapsed(); (reachable, duration) } } use std::time::Duration; #[test] fn no_tcp_keepalive_config() { assert!(TcpKeepaliveConfig::default().into_tcpkeepalive().is_none()); } #[test] fn tcp_keepalive_time_config() { let mut kac = TcpKeepaliveConfig::default(); kac.time = Some(Duration::from_secs(60)); if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); } else { panic!("test failed"); } } #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))] #[test] fn tcp_keepalive_interval_config() { let mut kac = TcpKeepaliveConfig::default(); kac.interval = Some(Duration::from_secs(1)); if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); } else { panic!("test failed"); } } #[cfg(not(any( target_os = "openbsd", target_os = "redox", target_os = "solaris", target_os = "windows" )))] #[test] fn tcp_keepalive_retries_config() { let mut kac = TcpKeepaliveConfig::default(); kac.retries = Some(3); if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); } else { panic!("test failed"); } } #[test] fn test_set_port() { // Respect explicit ports no matter what the resolved port is. let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); set_port(&mut addr, 42, true); assert_eq!(addr.port(), 42); // Ignore default host port, and use the socket port instead. let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); set_port(&mut addr, 443, false); assert_eq!(addr.port(), 6881); // Use the default port if the resolved port is `0`. 
let mut addr = SocketAddr::from(([0, 0, 0, 0], 0)); set_port(&mut addr, 443, false); assert_eq!(addr.port(), 443); } } hyper-util-0.1.10/src/client/legacy/connect/mod.rs000064400000000000000000000301571046102023000201020ustar 00000000000000//! Connectors used by the `Client`. //! //! This module contains: //! //! - A default [`HttpConnector`][] that does DNS resolution and establishes //! connections over TCP. //! - Types to build custom connectors. //! //! # Connectors //! //! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and //! its `Response` is some type implementing [`Read`][], [`Write`][], //! and [`Connection`][]. //! //! ## Custom Connectors //! //! A simple connector that ignores the `Uri` destination and always returns //! a TCP connection to the same address could be written like this: //! //! ```rust,ignore //! let connector = tower::service_fn(|_dst| async { //! tokio::net::TcpStream::connect("127.0.0.1:1337") //! }) //! ``` //! //! Or, fully written out: //! //! ``` //! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; //! use http::Uri; //! use tokio::net::TcpStream; //! use tower_service::Service; //! //! #[derive(Clone)] //! struct LocalConnector; //! //! impl Service for LocalConnector { //! type Response = TcpStream; //! type Error = std::io::Error; //! // We can't "name" an `async` generated future. //! type Future = Pin> + Send //! >>; //! //! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { //! // This connector is always ready, but others might not be. //! Poll::Ready(Ok(())) //! } //! //! fn call(&mut self, _: Uri) -> Self::Future { //! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) //! } //! } //! ``` //! //! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a //! better starting place to extend from. //! //! [`HttpConnector`]: HttpConnector //! [`Service`]: tower_service::Service //! [`Uri`]: ::http::Uri //! [`Read`]: hyper::rt::Read //! 
[`Write`]: hyper::rt::Write //! [`Connection`]: Connection use std::{ fmt::{self, Formatter}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; use ::http::Extensions; #[cfg(feature = "tokio")] pub use self::http::{HttpConnector, HttpInfo}; #[cfg(feature = "tokio")] pub mod dns; #[cfg(feature = "tokio")] mod http; pub(crate) mod capture; pub use capture::{capture_connection, CaptureConnection}; pub use self::sealed::Connect; /// Describes a type returned by a connector. pub trait Connection { /// Return metadata describing the connection. fn connected(&self) -> Connected; } /// Extra information about the connected transport. /// /// This can be used to inform recipients about things like if ALPN /// was used, or if connected to an HTTP proxy. #[derive(Debug)] pub struct Connected { pub(super) alpn: Alpn, pub(super) is_proxied: bool, pub(super) extra: Option, pub(super) poisoned: PoisonPill, } #[derive(Clone)] pub(crate) struct PoisonPill { poisoned: Arc, } impl fmt::Debug for PoisonPill { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // print the address of the pill—this makes debugging issues much easier write!( f, "PoisonPill@{:p} {{ poisoned: {} }}", self.poisoned, self.poisoned.load(Ordering::Relaxed) ) } } impl PoisonPill { pub(crate) fn healthy() -> Self { Self { poisoned: Arc::new(AtomicBool::new(false)), } } pub(crate) fn poison(&self) { self.poisoned.store(true, Ordering::Relaxed) } pub(crate) fn poisoned(&self) -> bool { self.poisoned.load(Ordering::Relaxed) } } pub(super) struct Extra(Box); #[derive(Clone, Copy, Debug, PartialEq)] pub(super) enum Alpn { H2, None, } impl Connected { /// Create new `Connected` type with empty metadata. pub fn new() -> Connected { Connected { alpn: Alpn::None, is_proxied: false, extra: None, poisoned: PoisonPill::healthy(), } } /// Set whether the connected transport is to an HTTP proxy. 
/// /// This setting will affect if HTTP/1 requests written on the transport /// will have the request-target in absolute-form or origin-form: /// /// - When `proxy(false)`: /// /// ```http /// GET /guide HTTP/1.1 /// ``` /// /// - When `proxy(true)`: /// /// ```http /// GET http://hyper.rs/guide HTTP/1.1 /// ``` /// /// Default is `false`. pub fn proxy(mut self, is_proxied: bool) -> Connected { self.is_proxied = is_proxied; self } /// Determines if the connected transport is to an HTTP proxy. pub fn is_proxied(&self) -> bool { self.is_proxied } /// Set extra connection information to be set in the extensions of every `Response`. pub fn extra(mut self, extra: T) -> Connected { if let Some(prev) = self.extra { self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); } else { self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); } self } /// Copies the extra connection information into an `Extensions` map. pub fn get_extras(&self, extensions: &mut Extensions) { if let Some(extra) = &self.extra { extra.set(extensions); } } /// Set that the connected transport negotiated HTTP/2 as its next protocol. pub fn negotiated_h2(mut self) -> Connected { self.alpn = Alpn::H2; self } /// Determines if the connected transport negotiated HTTP/2 as its next protocol. pub fn is_negotiated_h2(&self) -> bool { self.alpn == Alpn::H2 } /// Poison this connection /// /// A poisoned connection will not be reused for subsequent requests by the pool pub fn poison(&self) { self.poisoned.poison(); tracing::debug!( poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests" ); } // Don't public expose that `Connected` is `Clone`, unsure if we want to // keep that contract... 
pub(super) fn clone(&self) -> Connected { Connected { alpn: self.alpn, is_proxied: self.is_proxied, extra: self.extra.clone(), poisoned: self.poisoned.clone(), } } } // ===== impl Extra ===== impl Extra { pub(super) fn set(&self, res: &mut Extensions) { self.0.set(res); } } impl Clone for Extra { fn clone(&self) -> Extra { Extra(self.0.clone_box()) } } impl fmt::Debug for Extra { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Extra").finish() } } trait ExtraInner: Send + Sync { fn clone_box(&self) -> Box; fn set(&self, res: &mut Extensions); } // This indirection allows the `Connected` to have a type-erased "extra" value, // while that type still knows its inner extra type. This allows the correct // TypeId to be used when inserting into `res.extensions_mut()`. #[derive(Clone)] struct ExtraEnvelope(T); impl ExtraInner for ExtraEnvelope where T: Clone + Send + Sync + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { res.insert(self.0.clone()); } } struct ExtraChain(Box, T); impl Clone for ExtraChain { fn clone(&self) -> Self { ExtraChain(self.0.clone_box(), self.1.clone()) } } impl ExtraInner for ExtraChain where T: Clone + Send + Sync + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { self.0.set(res); res.insert(self.1.clone()); } } pub(super) mod sealed { use std::error::Error as StdError; use std::future::Future; use ::http::Uri; use hyper::rt::{Read, Write}; use super::Connection; /// Connect to a destination, returning an IO transport. /// /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the /// ready connection. /// /// # Trait Alias /// /// This is really just an *alias* for the `tower::Service` trait, with /// additional bounds set for convenience *inside* hyper. You don't actually /// implement this trait, but `tower::Service` instead. 
// The `Sized` bound is to prevent creating `dyn Connect`, since they cannot // fit the `Connect` bounds because of the blanket impl for `Service`. pub trait Connect: Sealed + Sized { #[doc(hidden)] type _Svc: ConnectSvc; #[doc(hidden)] fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; } pub trait ConnectSvc { type Connection: Read + Write + Connection + Unpin + Send + 'static; type Error: Into>; type Future: Future> + Unpin + Send + 'static; fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; } impl Connect for S where S: tower_service::Service + Send + 'static, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { type _Svc = S; fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot { crate::service::Oneshot::new(self, dst) } } impl ConnectSvc for S where S: tower_service::Service + Send + 'static, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { type Connection = T; type Error = S::Error; type Future = crate::service::Oneshot; fn connect(self, _: Internal, dst: Uri) -> Self::Future { crate::service::Oneshot::new(self, dst) } } impl Sealed for S where S: tower_service::Service + Send, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { } pub trait Sealed {} #[allow(missing_debug_implementations)] pub struct Internal; } #[cfg(test)] mod tests { use super::Connected; #[derive(Clone, Debug, PartialEq)] struct Ex1(usize); #[derive(Clone, Debug, PartialEq)] struct Ex2(&'static str); #[derive(Clone, Debug, PartialEq)] struct Ex3(&'static str); #[test] fn test_connected_extra() { let c1 = Connected::new().extra(Ex1(41)); let mut ex = ::http::Extensions::new(); assert_eq!(ex.get::(), None); c1.extra.as_ref().expect("c1 extra").set(&mut ex); assert_eq!(ex.get::(), Some(&Ex1(41))); } #[test] fn test_connected_extra_chain() { // If a user composes connectors and at each stage, there's 
"extra" // info to attach, it shouldn't override the previous extras. let c1 = Connected::new() .extra(Ex1(45)) .extra(Ex2("zoom")) .extra(Ex3("pew pew")); let mut ex1 = ::http::Extensions::new(); assert_eq!(ex1.get::(), None); assert_eq!(ex1.get::(), None); assert_eq!(ex1.get::(), None); c1.extra.as_ref().expect("c1 extra").set(&mut ex1); assert_eq!(ex1.get::(), Some(&Ex1(45))); assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); // Just like extensions, inserting the same type overrides previous type. let c2 = Connected::new() .extra(Ex1(33)) .extra(Ex2("hiccup")) .extra(Ex1(99)); let mut ex2 = ::http::Extensions::new(); c2.extra.as_ref().expect("c2 extra").set(&mut ex2); assert_eq!(ex2.get::(), Some(&Ex1(99))); assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); } } hyper-util-0.1.10/src/client/legacy/mod.rs000064400000000000000000000004611046102023000164440ustar 00000000000000#[cfg(any(feature = "http1", feature = "http2"))] mod client; #[cfg(any(feature = "http1", feature = "http2"))] pub use client::{Builder, Client, Error, ResponseFuture}; pub mod connect; #[doc(hidden)] // Publicly available, but just for legacy purposes. A better pool will be // designed. 
pub mod pool; hyper-util-0.1.10/src/client/legacy/pool.rs000064400000000000000000001044261046102023000166440ustar 00000000000000#![allow(dead_code)] use std::collections::{HashMap, HashSet, VecDeque}; use std::convert::Infallible; use std::error::Error as StdError; use std::fmt::{self, Debug}; use std::future::Future; use std::hash::Hash; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::sync::{Arc, Mutex, Weak}; use std::task::{self, Poll}; use std::time::{Duration, Instant}; use futures_channel::oneshot; use futures_util::ready; use tracing::{debug, trace}; use hyper::rt::Sleep; use hyper::rt::Timer as _; use crate::common::{exec, exec::Exec, timer::Timer}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Pool { // If the pool is disabled, this is None. inner: Option>>>, } // Before using a pooled connection, make sure the sender is not dead. // // This is a trait to allow the `client::pool::tests` to work for `i32`. // // See https://github.com/hyperium/hyper/issues/1429 pub trait Poolable: Unpin + Send + Sized + 'static { fn is_open(&self) -> bool; /// Reserve this connection. /// /// Allows for HTTP/2 to return a shared reservation. fn reserve(self) -> Reservation; fn can_share(&self) -> bool; } pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} impl Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} /// A marker to identify what version a pooled connection is. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[allow(dead_code)] pub enum Ver { Auto, Http2, } /// When checking out a pooled connection, it might be that the connection /// only supports a single reservation, or it might be usable for many. /// /// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be /// used for multiple requests. 
// FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub enum Reservation { /// This connection could be used multiple times, the first one will be /// reinserted into the `idle` pool, and the second will be given to /// the `Checkout`. #[cfg(feature = "http2")] Shared(T, T), /// This connection requires unique access. It will be returned after /// use is complete. Unique(T), } /// Simple type alias in case the key type needs to be adjusted. // pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; struct PoolInner { // A flag that a connection is being established, and the connection // should be shared. This prevents making multiple HTTP/2 connections // to the same host. connecting: HashSet, // These are internal Conns sitting in the event loop in the KeepAlive // state, waiting to receive a new Request to send on the socket. idle: HashMap>>, max_idle_per_host: usize, // These are outstanding Checkouts that are waiting for a socket to be // able to send a Request one. This is used when "racing" for a new // connection. // // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait // for the Pool to receive an idle Conn. When a Conn becomes idle, // this list is checked for any parked Checkouts, and tries to notify // them that the Conn could be used instead of waiting for a brand new // connection. waiters: HashMap>>, // A oneshot channel is used to allow the interval to be notified when // the Pool completely drops. That way, the interval can cancel immediately. idle_interval_ref: Option>, exec: Exec, timer: Option, timeout: Option, } // This is because `Weak::new()` *allocates* space for `T`, even if it // doesn't need it! 
struct WeakOpt(Option>); #[derive(Clone, Copy, Debug)] pub struct Config { pub idle_timeout: Option, pub max_idle_per_host: usize, } impl Config { pub fn is_enabled(&self) -> bool { self.max_idle_per_host > 0 } } impl Pool { pub fn new(config: Config, executor: E, timer: Option) -> Pool where E: hyper::rt::Executor + Send + Sync + Clone + 'static, M: hyper::rt::Timer + Send + Sync + Clone + 'static, { let exec = Exec::new(executor); let timer = timer.map(|t| Timer::new(t)); let inner = if config.is_enabled() { Some(Arc::new(Mutex::new(PoolInner { connecting: HashSet::new(), idle: HashMap::new(), idle_interval_ref: None, max_idle_per_host: config.max_idle_per_host, waiters: HashMap::new(), exec, timer, timeout: config.idle_timeout, }))) } else { None }; Pool { inner } } pub(crate) fn is_enabled(&self) -> bool { self.inner.is_some() } #[cfg(test)] pub(super) fn no_timer(&self) { // Prevent an actual interval from being created for this pool... { let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); let (tx, _) = oneshot::channel(); inner.idle_interval_ref = Some(tx); } } } impl Pool { /// Returns a `Checkout` which is a future that resolves if an idle /// connection becomes available. pub fn checkout(&self, key: K) -> Checkout { Checkout { key, pool: self.clone(), waiter: None, } } /// Ensure that there is only ever 1 connecting task for HTTP/2 /// connections. This does nothing for HTTP/1. 
pub fn connecting(&self, key: &K, ver: Ver) -> Option> { if ver == Ver::Http2 { if let Some(ref enabled) = self.inner { let mut inner = enabled.lock().unwrap(); return if inner.connecting.insert(key.clone()) { let connecting = Connecting { key: key.clone(), pool: WeakOpt::downgrade(enabled), }; Some(connecting) } else { trace!("HTTP/2 connecting already in progress for {:?}", key); None }; } } // else Some(Connecting { key: key.clone(), // in HTTP/1's case, there is never a lock, so we don't // need to do anything in Drop. pool: WeakOpt::none(), }) } #[cfg(test)] fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { self.inner.as_ref().expect("enabled").lock().expect("lock") } /* Used in client/tests.rs... #[cfg(test)] pub(super) fn h1_key(&self, s: &str) -> Key { Arc::new(s.to_string()) } #[cfg(test)] pub(super) fn idle_count(&self, key: &Key) -> usize { self .locked() .idle .get(key) .map(|list| list.len()) .unwrap_or(0) } */ pub fn pooled( &self, #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, value: T, ) -> Pooled { let (value, pool_ref) = if let Some(ref enabled) = self.inner { match value.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_insert, to_return) => { let mut inner = enabled.lock().unwrap(); inner.put(connecting.key.clone(), to_insert, enabled); // Do this here instead of Drop for Connecting because we // already have a lock, no need to lock the mutex twice. inner.connected(&connecting.key); // prevent the Drop of Connecting from repeating inner.connected() connecting.pool = WeakOpt::none(); // Shared reservations don't need a reference to the pool, // since the pool always keeps a copy. (to_return, WeakOpt::none()) } Reservation::Unique(value) => { // Unique reservations must take a reference to the pool // since they hope to reinsert once the reservation is // completed (value, WeakOpt::downgrade(enabled)) } } } else { // If pool is not enabled, skip all the things... 
// The Connecting should have had no pool ref debug_assert!(connecting.pool.upgrade().is_none()); (value, WeakOpt::none()) }; Pooled { key: connecting.key.clone(), is_reused: false, pool: pool_ref, value: Some(value), } } fn reuse(&self, key: &K, value: T) -> Pooled { debug!("reuse idle connection for {:?}", key); // TODO: unhack this // In Pool::pooled(), which is used for inserting brand new connections, // there's some code that adjusts the pool reference taken depending // on if the Reservation can be shared or is unique. By the time // reuse() is called, the reservation has already been made, and // we just have the final value, without knowledge of if this is // unique or shared. So, the hack is to just assume Ver::Http2 means // shared... :( let mut pool_ref = WeakOpt::none(); if !value.can_share() { if let Some(ref enabled) = self.inner { pool_ref = WeakOpt::downgrade(enabled); } } Pooled { is_reused: true, key: key.clone(), pool: pool_ref, value: Some(value), } } } /// Pop off this list, looking for a usable connection that hasn't expired. struct IdlePopper<'a, T, K> { key: &'a K, list: &'a mut Vec>, } impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { fn pop(self, expiration: &Expiration) -> Option> { while let Some(entry) = self.list.pop() { // If the connection has been closed, or is older than our idle // timeout, simply drop it and keep looking... if !entry.value.is_open() { trace!("removing closed connection for {:?}", self.key); continue; } // TODO: Actually, since the `idle` list is pushed to the end always, // that would imply that if *this* entry is expired, then anything // "earlier" in the list would *have* to be expired also... Right? // // In that case, we could just break out of the loop and drop the // whole list... 
if expiration.expires(entry.idle_at) { trace!("removing expired connection for {:?}", self.key); continue; } let value = match entry.value.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_reinsert, to_checkout) => { self.list.push(Idle { idle_at: Instant::now(), value: to_reinsert, }); to_checkout } Reservation::Unique(unique) => unique, }; return Some(Idle { idle_at: entry.idle_at, value, }); } None } } impl PoolInner { fn put(&mut self, key: K, value: T, __pool_ref: &Arc>>) { if value.can_share() && self.idle.contains_key(&key) { trace!("put; existing idle HTTP/2 connection for {:?}", key); return; } trace!("put; add idle connection for {:?}", key); let mut remove_waiters = false; let mut value = Some(value); if let Some(waiters) = self.waiters.get_mut(&key) { while let Some(tx) = waiters.pop_front() { if !tx.is_canceled() { let reserved = value.take().expect("value already sent"); let reserved = match reserved.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_keep, to_send) => { value = Some(to_keep); to_send } Reservation::Unique(uniq) => uniq, }; match tx.send(reserved) { Ok(()) => { if value.is_none() { break; } else { continue; } } Err(e) => { value = Some(e); } } } trace!("put; removing canceled waiter for {:?}", key); } remove_waiters = waiters.is_empty(); } if remove_waiters { self.waiters.remove(&key); } match value { Some(value) => { // borrow-check scope... { let idle_list = self.idle.entry(key.clone()).or_default(); if self.max_idle_per_host <= idle_list.len() { trace!("max idle per host for {:?}, dropping connection", key); return; } debug!("pooling idle connection for {:?}", key); idle_list.push(Idle { value, idle_at: Instant::now(), }); } self.spawn_idle_interval(__pool_ref); } None => trace!("put; found waiter for {:?}", key), } } /// A `Connecting` task is complete. Not necessarily successfully, /// but the lock is going away, so clean up. 
fn connected(&mut self, key: &K) { let existed = self.connecting.remove(key); debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); // cancel any waiters. if there are any, it's because // this Connecting task didn't complete successfully. // those waiters would never receive a connection. self.waiters.remove(key); } fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { if self.idle_interval_ref.is_some() { return; } let dur = if let Some(dur) = self.timeout { dur } else { return; }; let timer = if let Some(timer) = self.timer.clone() { timer } else { return; }; let (tx, rx) = oneshot::channel(); self.idle_interval_ref = Some(tx); let interval = IdleTask { timer: timer.clone(), duration: dur, deadline: Instant::now(), fut: timer.sleep_until(Instant::now()), // ready at first tick pool: WeakOpt::downgrade(pool_ref), pool_drop_notifier: rx, }; self.exec.execute(interval); } } impl PoolInner { /// Any `FutureResponse`s that were created will have made a `Checkout`, /// and possibly inserted into the pool that it is waiting for an idle /// connection. If a user ever dropped that future, we need to clean out /// those parked senders. fn clean_waiters(&mut self, key: &K) { let mut remove_waiters = false; if let Some(waiters) = self.waiters.get_mut(key) { waiters.retain(|tx| !tx.is_canceled()); remove_waiters = waiters.is_empty(); } if remove_waiters { self.waiters.remove(key); } } } impl PoolInner { /// This should *only* be called by the IdleTask fn clear_expired(&mut self) { let dur = self.timeout.expect("interval assumes timeout"); let now = Instant::now(); //self.last_idle_check_at = now; self.idle.retain(|key, values| { values.retain(|entry| { if !entry.value.is_open() { trace!("idle interval evicting closed for {:?}", key); return false; } // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. 
if now.saturating_duration_since(entry.idle_at) > dur { trace!("idle interval evicting expired for {:?}", key); return false; } // Otherwise, keep this value... true }); // returning false evicts this key/val !values.is_empty() }); } } impl Clone for Pool { fn clone(&self) -> Pool { Pool { inner: self.inner.clone(), } } } /// A wrapped poolable value that tries to reinsert to the Pool on Drop. // Note: The bounds `T: Poolable` is needed for the Drop impl. pub struct Pooled { value: Option, is_reused: bool, key: K, pool: WeakOpt>>, } impl Pooled { pub fn is_reused(&self) -> bool { self.is_reused } pub fn is_pool_enabled(&self) -> bool { self.pool.0.is_some() } fn as_ref(&self) -> &T { self.value.as_ref().expect("not dropped") } fn as_mut(&mut self) -> &mut T { self.value.as_mut().expect("not dropped") } } impl Deref for Pooled { type Target = T; fn deref(&self) -> &T { self.as_ref() } } impl DerefMut for Pooled { fn deref_mut(&mut self) -> &mut T { self.as_mut() } } impl Drop for Pooled { fn drop(&mut self) { if let Some(value) = self.value.take() { if !value.is_open() { // If we *already* know the connection is done here, // it shouldn't be re-inserted back into the pool. return; } if let Some(pool) = self.pool.upgrade() { if let Ok(mut inner) = pool.lock() { inner.put(self.key.clone(), value, &pool); } } else if !value.can_share() { trace!("pool dropped, dropping pooled ({:?})", self.key); } // Ver::Http2 is already in the Pool (or dead), so we wouldn't // have an actual reference to the Pool. 
} } } impl fmt::Debug for Pooled { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Pooled").field("key", &self.key).finish() } } struct Idle { idle_at: Instant, value: T, } // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Checkout { key: K, pool: Pool, waiter: Option>, } #[derive(Debug)] #[non_exhaustive] pub enum Error { PoolDisabled, CheckoutNoLongerWanted, CheckedOutClosedValue, } impl Error { pub(super) fn is_canceled(&self) -> bool { matches!(self, Error::CheckedOutClosedValue) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { Error::PoolDisabled => "pool is disabled", Error::CheckedOutClosedValue => "checked out connection was closed", Error::CheckoutNoLongerWanted => "request was canceled", }) } } impl StdError for Error {} impl Checkout { fn poll_waiter( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Error>>> { if let Some(mut rx) = self.waiter.take() { match Pin::new(&mut rx).poll(cx) { Poll::Ready(Ok(value)) => { if value.is_open() { Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) } else { Poll::Ready(Some(Err(Error::CheckedOutClosedValue))) } } Poll::Pending => { self.waiter = Some(rx); Poll::Pending } Poll::Ready(Err(_canceled)) => { Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted))) } } } else { Poll::Ready(None) } } fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { let entry = { let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); let expiration = Expiration::new(inner.timeout); let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| { trace!("take? 
{:?}: expiration = {:?}", self.key, expiration.0); // A block to end the mutable borrow on list, // so the map below can check is_empty() { let popper = IdlePopper { key: &self.key, list, }; popper.pop(&expiration) } .map(|e| (e, list.is_empty())) }); let (entry, empty) = if let Some((e, empty)) = maybe_entry { (Some(e), empty) } else { // No entry found means nuke the list for sure. (None, true) }; if empty { //TODO: This could be done with the HashMap::entry API instead. inner.idle.remove(&self.key); } if entry.is_none() && self.waiter.is_none() { let (tx, mut rx) = oneshot::channel(); trace!("checkout waiting for idle connection: {:?}", self.key); inner .waiters .entry(self.key.clone()) .or_insert_with(VecDeque::new) .push_back(tx); // register the waker with this oneshot assert!(Pin::new(&mut rx).poll(cx).is_pending()); self.waiter = Some(rx); } entry }; entry.map(|e| self.pool.reuse(&self.key, e.value)) } } impl Future for Checkout { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ return Poll::Ready(Ok(pooled)); } if let Some(pooled) = self.checkout(cx) { Poll::Ready(Ok(pooled)) } else if !self.pool.is_enabled() { Poll::Ready(Err(Error::PoolDisabled)) } else { // There's a new waiter, already registered in self.checkout() debug_assert!(self.waiter.is_some()); Poll::Pending } } } impl Drop for Checkout { fn drop(&mut self) { if self.waiter.take().is_some() { trace!("checkout dropped for {:?}", self.key); if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { inner.clean_waiters(&self.key); } } } } // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Connecting { key: K, pool: WeakOpt>>, } impl Connecting { pub fn alpn_h2(self, pool: &Pool) -> Option { debug_assert!( self.pool.0.is_none(), "Connecting::alpn_h2 but already Http2" ); pool.connecting(&self.key, Ver::Http2) } } impl Drop for Connecting { fn drop(&mut self) { if let Some(pool) = self.pool.upgrade() { // No need to panic on drop, that could abort! if let Ok(mut inner) = pool.lock() { inner.connected(&self.key); } } } } struct Expiration(Option); impl Expiration { fn new(dur: Option) -> Expiration { Expiration(dur) } fn expires(&self, instant: Instant) -> bool { match self.0 { // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. Some(timeout) => Instant::now().saturating_duration_since(instant) > timeout, None => false, } } } pin_project_lite::pin_project! { struct IdleTask { timer: Timer, duration: Duration, deadline: Instant, fut: Pin>, pool: WeakOpt>>, // This allows the IdleTask to be notified as soon as the entire // Pool is fully dropped, and shutdown. This channel is never sent on, // but Err(Canceled) will be received when the Pool is dropped. 
#[pin] pool_drop_notifier: oneshot::Receiver, } } impl Future for IdleTask { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); loop { match this.pool_drop_notifier.as_mut().poll(cx) { Poll::Ready(Ok(n)) => match n {}, Poll::Pending => (), Poll::Ready(Err(_canceled)) => { trace!("pool closed, canceling idle interval"); return Poll::Ready(()); } } ready!(Pin::new(&mut this.fut).poll(cx)); // Set this task to run after the next deadline // If the poll missed the deadline by a lot, set the deadline // from the current time instead *this.deadline += *this.duration; if *this.deadline < Instant::now() - Duration::from_millis(5) { *this.deadline = Instant::now() + *this.duration; } *this.fut = this.timer.sleep_until(*this.deadline); if let Some(inner) = this.pool.upgrade() { if let Ok(mut inner) = inner.lock() { trace!("idle interval checking for expired"); inner.clear_expired(); continue; } } return Poll::Ready(()); } } } impl WeakOpt { fn none() -> Self { WeakOpt(None) } fn downgrade(arc: &Arc) -> Self { WeakOpt(Some(Arc::downgrade(arc))) } fn upgrade(&self) -> Option> { self.0.as_ref().and_then(Weak::upgrade) } } #[cfg(all(test, not(miri)))] mod tests { use std::fmt::Debug; use std::future::Future; use std::hash::Hash; use std::pin::Pin; use std::task::{self, Poll}; use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; use crate::rt::{TokioExecutor, TokioTimer}; use crate::common::timer; #[derive(Clone, Debug, PartialEq, Eq, Hash)] struct KeyImpl(http::uri::Scheme, http::uri::Authority); type KeyTuple = (http::uri::Scheme, http::uri::Authority); /// Test unique reservations. 
#[derive(Debug, PartialEq, Eq)] struct Uniq(T); impl Poolable for Uniq { fn is_open(&self) -> bool { true } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } fn c(key: K) -> Connecting { Connecting { key, pool: WeakOpt::none(), } } fn host_key(s: &str) -> KeyImpl { KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) } fn pool_no_timer() -> Pool { pool_max_idle_no_timer(::std::usize::MAX) } fn pool_max_idle_no_timer(max_idle: usize) -> Pool { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(100)), max_idle_per_host: max_idle, }, TokioExecutor::new(), Option::::None, ); pool.no_timer(); pool } #[tokio::test] async fn test_pool_checkout_smoke() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); match pool.checkout(key).await { Ok(pooled) => assert_eq!(*pooled, Uniq(41)), Err(_) => panic!("not ready"), }; } /// Helper to check if the future is ready after polling once. 
struct PollOnce<'a, F>(&'a mut F); impl Future for PollOnce<'_, F> where F: Future> + Unpin, { type Output = Option<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match Pin::new(&mut self.0).poll(cx) { Poll::Ready(Ok(_)) => Poll::Ready(Some(())), Poll::Ready(Err(_)) => Poll::Ready(Some(())), Poll::Pending => Poll::Ready(None), } } } #[tokio::test] async fn test_pool_checkout_returns_none_if_expired() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key); let poll_once = PollOnce(&mut checkout); let is_not_ready = poll_once.await.is_none(); assert!(is_not_ready); } #[tokio::test] async fn test_pool_checkout_removes_expired() { let pool = pool_no_timer(); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key.clone()); let poll_once = PollOnce(&mut checkout); // checkout.await should clean out the expired poll_once.await; assert!(pool.locked().idle.get(&key).is_none()); } #[test] fn test_pool_max_idle_per_host() { let pool = pool_max_idle_no_timer(2); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); // pooled and dropped 3, max_idle should only allow 2 assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(2) ); } #[tokio::test] async fn test_pool_timer_removes_expired() { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(10)), max_idle_per_host: std::usize::MAX, }, TokioExecutor::new(), Some(TokioTimer::new()), ); let key = host_key("foo"); pool.pooled(c(key.clone()), 
Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); // Let the timer tick passed the expiration... tokio::time::sleep(Duration::from_millis(30)).await; // Yield so the Interval can reap... tokio::task::yield_now().await; assert!(pool.locked().idle.get(&key).is_none()); } #[tokio::test] async fn test_pool_checkout_task_unparked() { use futures_util::future::join; use futures_util::FutureExt; let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); let checkout = join(pool.checkout(key), async { // the checkout future will park first, // and then this lazy future will be polled, which will insert // the pooled back into the pool // // this test makes sure that doing so will unpark the checkout drop(pooled); }) .map(|(entry, _)| entry); assert_eq!(*checkout.await.unwrap(), Uniq(41)); } #[tokio::test] async fn test_pool_checkout_drop_cleans_up_waiters() { let pool = pool_no_timer::, KeyImpl>(); let key = host_key("foo"); let mut checkout1 = pool.checkout(key.clone()); let mut checkout2 = pool.checkout(key.clone()); let poll_once1 = PollOnce(&mut checkout1); let poll_once2 = PollOnce(&mut checkout2); // first poll needed to get into Pool's parked poll_once1.await; assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); poll_once2.await; assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); // on drop, clean up Pool drop(checkout1); assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); drop(checkout2); assert!(pool.locked().waiters.get(&key).is_none()); } #[derive(Debug)] struct CanClose { #[allow(unused)] val: i32, closed: bool, } impl Poolable for CanClose { fn is_open(&self) -> bool { !self.closed } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } #[test] fn pooled_drop_if_closed_doesnt_reinsert() { let pool = pool_no_timer(); 
let key = host_key("foo"); pool.pooled( c(key.clone()), CanClose { val: 57, closed: true, }, ); assert!(!pool.locked().idle.contains_key(&key)); } } hyper-util-0.1.10/src/client/mod.rs000064400000000000000000000002111046102023000151710ustar 00000000000000//! HTTP client utilities /// Legacy implementations of `connect` module and `Client` #[cfg(feature = "client-legacy")] pub mod legacy; hyper-util-0.1.10/src/client/service.rs000064400000000000000000000001341046102023000160560ustar 00000000000000struct ConnectingPool { connector: C, pool: P, } struct PoolableSvc(S); hyper-util-0.1.10/src/common/exec.rs000064400000000000000000000022001046102023000153500ustar 00000000000000#![allow(dead_code)] use hyper::rt::Executor; use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::Arc; pub(crate) type BoxSendFuture = Pin + Send>>; // Either the user provides an executor for background tasks, or we use // `tokio::spawn`. #[derive(Clone)] pub(crate) enum Exec { Executor(Arc + Send + Sync>), } // ===== impl Exec ===== impl Exec { pub(crate) fn new(inner: E) -> Self where E: Executor + Send + Sync + 'static, { Exec::Executor(Arc::new(inner)) } pub(crate) fn execute(&self, fut: F) where F: Future + Send + 'static, { match *self { Exec::Executor(ref e) => { e.execute(Box::pin(fut)); } } } } impl fmt::Debug for Exec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Exec").finish() } } impl hyper::rt::Executor for Exec where F: Future + Send + 'static, { fn execute(&self, fut: F) { Exec::execute(self, fut); } } hyper-util-0.1.10/src/common/lazy.rs000064400000000000000000000033461046102023000154170ustar 00000000000000use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{self, Poll}; pub(crate) trait Started: Future { fn started(&self) -> bool; } pub(crate) fn lazy(func: F) -> Lazy where F: FnOnce() -> R, R: Future + Unpin, { Lazy { inner: Inner::Init { func }, } } // FIXME: allow() required due to 
`impl Trait` leaking types to this lint pin_project! { #[allow(missing_debug_implementations)] pub(crate) struct Lazy { #[pin] inner: Inner, } } pin_project! { #[project = InnerProj] #[project_replace = InnerProjReplace] enum Inner { Init { func: F }, Fut { #[pin] fut: R }, Empty, } } impl Started for Lazy where F: FnOnce() -> R, R: Future, { fn started(&self) -> bool { match self.inner { Inner::Init { .. } => false, Inner::Fut { .. } | Inner::Empty => true, } } } impl Future for Lazy where F: FnOnce() -> R, R: Future, { type Output = R::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); if let InnerProj::Fut { fut } = this.inner.as_mut().project() { return fut.poll(cx); } match this.inner.as_mut().project_replace(Inner::Empty) { InnerProjReplace::Init { func } => { this.inner.set(Inner::Fut { fut: func() }); if let InnerProj::Fut { fut } = this.inner.project() { return fut.poll(cx); } unreachable!() } _ => unreachable!("lazy state wrong"), } } } hyper-util-0.1.10/src/common/mod.rs000064400000000000000000000005431046102023000152130ustar 00000000000000#![allow(missing_docs)] pub(crate) mod exec; #[cfg(feature = "client")] mod lazy; pub(crate) mod rewind; #[cfg(feature = "client")] mod sync; pub(crate) mod timer; #[cfg(feature = "client")] pub(crate) use exec::Exec; #[cfg(feature = "client")] pub(crate) use lazy::{lazy, Started as Lazy}; #[cfg(feature = "client")] pub(crate) use sync::SyncWrapper; hyper-util-0.1.10/src/common/rewind.rs000064400000000000000000000106321046102023000157240ustar 00000000000000use std::{cmp, io}; use bytes::{Buf, Bytes}; use hyper::rt::{Read, ReadBufCursor, Write}; use std::{ pin::Pin, task::{self, Poll}, }; /// Combine a buffer with an IO, rewinding reads to use the buffer. 
#[derive(Debug)] pub(crate) struct Rewind { pub(crate) pre: Option, pub(crate) inner: T, } impl Rewind { #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { Rewind { pre: Some(buf), inner: io, } } } impl Read for Rewind where T: Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, mut buf: ReadBufCursor<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. if !prefix.is_empty() { let copy_len = cmp::min(prefix.len(), remaining(&mut buf)); // TODO: There should be a way to do following two lines cleaner... put_slice(&mut buf, &prefix[..copy_len]); prefix.advance(copy_len); // Put back what's left if !prefix.is_empty() { self.pre = Some(prefix); } return Poll::Ready(Ok(())); } } Pin::new(&mut self.inner).poll_read(cx, buf) } } fn remaining(cursor: &mut ReadBufCursor<'_>) -> usize { // SAFETY: // We do not uninitialize any set bytes. unsafe { cursor.as_mut().len() } } // Copied from `ReadBufCursor::put_slice`. // If that becomes public, we could ditch this. 
fn put_slice(cursor: &mut ReadBufCursor<'_>, slice: &[u8]) { assert!( remaining(cursor) >= slice.len(), "buf.len() must fit in remaining()" ); let amt = slice.len(); // SAFETY: // the length is asserted above unsafe { cursor.as_mut()[..amt] .as_mut_ptr() .cast::() .copy_from_nonoverlapping(slice.as_ptr(), amt); cursor.advance(amt); } } impl Write for Rewind where T: Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.inner).poll_write(cx, buf) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_shutdown(cx) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } } /* #[cfg(test)] mod tests { use super::Rewind; use bytes::Bytes; use tokio::io::AsyncReadExt; #[cfg(not(miri))] #[tokio::test] async fn partial_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new(mock); // Read off some bytes, ensure we filled o1 let mut buf = [0; 2]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. 
stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // At this point we should have read everything that was in the MockStream assert_eq!(&buf, &underlying); } #[cfg(not(miri))] #[tokio::test] async fn full_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new(mock); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); } } */ hyper-util-0.1.10/src/common/sync.rs000064400000000000000000000045061046102023000154130ustar 00000000000000pub(crate) struct SyncWrapper(T); impl SyncWrapper { /// Creates a new SyncWrapper containing the given value. /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let wrapped = SyncWrapper::new(42); /// ``` pub(crate) fn new(value: T) -> Self { Self(value) } /// Acquires a reference to the protected value. /// /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which /// returns an error if another thread panicked while holding the lock. It is not recommended /// to send an exclusive reference to a potentially damaged value to another thread for further /// processing. 
/// /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let mut wrapped = SyncWrapper::new(42); /// let value = wrapped.get_mut(); /// *value = 0; /// assert_eq!(*wrapped.get_mut(), 0); /// ``` pub(crate) fn get_mut(&mut self) -> &mut T { &mut self.0 } /// Consumes this wrapper, returning the underlying data. /// /// This is safe because it requires ownership of the wrapper, aherefore this method will neither /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which /// returns an error if another thread panicked while holding the lock. It is not recommended /// to send an exclusive reference to a potentially damaged value to another thread for further /// processing. /// /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let mut wrapped = SyncWrapper::new(42); /// assert_eq!(wrapped.into_inner(), 42); /// ``` #[allow(dead_code)] pub(crate) fn into_inner(self) -> T { self.0 } } // this is safe because the only operations permitted on this data structure require exclusive // access or ownership unsafe impl Sync for SyncWrapper {} hyper-util-0.1.10/src/common/timer.rs000064400000000000000000000014551046102023000155570ustar 00000000000000#![allow(dead_code)] use std::fmt; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use std::time::Instant; use hyper::rt::Sleep; #[derive(Clone)] pub(crate) struct Timer(Arc); // =====impl Timer===== impl Timer { pub(crate) fn new(inner: T) -> Self where T: hyper::rt::Timer + Send + Sync + 'static, { Self(Arc::new(inner)) } } impl fmt::Debug for Timer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Timer").finish() } } impl hyper::rt::Timer for Timer { fn sleep(&self, duration: Duration) 
-> Pin> { self.0.sleep(duration) } fn sleep_until(&self, deadline: Instant) -> Pin> { self.0.sleep_until(deadline) } } hyper-util-0.1.10/src/error.rs000064400000000000000000000004621046102023000142750ustar 00000000000000/* use std::error::Error; pub(crate) fn find<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> { let mut err = Some(top); while let Some(src) = err { if src.is::() { return src.downcast_ref(); } err = src.source(); } None } */ hyper-util-0.1.10/src/lib.rs000064400000000000000000000007221046102023000137110ustar 00000000000000#![deny(missing_docs)] #![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] //! Utilities for working with hyper. //! //! This crate is less-stable than [`hyper`](https://docs.rs/hyper). However, //! does respect Rust's semantic version regarding breaking changes. #[cfg(feature = "client")] pub mod client; mod common; pub mod rt; #[cfg(feature = "server")] pub mod server; #[cfg(any(feature = "service", feature = "client-legacy"))] pub mod service; mod error; hyper-util-0.1.10/src/rt/mod.rs000064400000000000000000000002261046102023000143460ustar 00000000000000//! Runtime utilities #[cfg(feature = "tokio")] pub mod tokio; #[cfg(feature = "tokio")] pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer}; hyper-util-0.1.10/src/rt/tokio.rs000064400000000000000000000157221046102023000147230ustar 00000000000000#![allow(dead_code)] //! Tokio IO integration for hyper use std::{ future::Future, pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; use hyper::rt::{Executor, Sleep, Timer}; use pin_project_lite::pin_project; /// Future executor that utilises `tokio` threads. #[non_exhaustive] #[derive(Default, Debug, Clone)] pub struct TokioExecutor {} pin_project! { /// A wrapper that implements Tokio's IO traits for an inner type that /// implements hyper's IO traits, or vice versa (implements hyper's IO /// traits for a type that implements Tokio's IO traits). 
#[derive(Debug)] pub struct TokioIo { #[pin] inner: T, } } /// A Timer that uses the tokio runtime. #[non_exhaustive] #[derive(Default, Clone, Debug)] pub struct TokioTimer; // Use TokioSleep to get tokio::time::Sleep to implement Unpin. // see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html pin_project! { #[derive(Debug)] struct TokioSleep { #[pin] inner: tokio::time::Sleep, } } // ===== impl TokioExecutor ===== impl Executor for TokioExecutor where Fut: Future + Send + 'static, Fut::Output: Send + 'static, { fn execute(&self, fut: Fut) { tokio::spawn(fut); } } impl TokioExecutor { /// Create new executor that relies on [`tokio::spawn`] to execute futures. pub fn new() -> Self { Self {} } } // ==== impl TokioIo ===== impl TokioIo { /// Wrap a type implementing Tokio's or hyper's IO traits. pub fn new(inner: T) -> Self { Self { inner } } /// Borrow the inner type. pub fn inner(&self) -> &T { &self.inner } /// Mut borrow the inner type. pub fn inner_mut(&mut self) -> &mut T { &mut self.inner } /// Consume this wrapper and get the inner type. 
pub fn into_inner(self) -> T { self.inner } } impl hyper::rt::Read for TokioIo where T: tokio::io::AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { let n = unsafe { let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) { Poll::Ready(Ok(())) => tbuf.filled().len(), other => return other, } }; unsafe { buf.advance(n); } Poll::Ready(Ok(())) } } impl hyper::rt::Write for TokioIo where T: tokio::io::AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { tokio::io::AsyncWrite::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs) } } impl tokio::io::AsyncRead for TokioIo where T: hyper::rt::Read, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, tbuf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { //let init = tbuf.initialized().len(); let filled = tbuf.filled().len(); let sub_filled = unsafe { let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut()); match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) { Poll::Ready(Ok(())) => buf.filled().len(), other => return other, } }; let n_filled = filled + sub_filled; // At least sub_filled bytes had to have been initialized. 
let n_init = sub_filled; unsafe { tbuf.assume_init(n_init); tbuf.set_filled(n_filled); } Poll::Ready(Ok(())) } } impl tokio::io::AsyncWrite for TokioIo where T: hyper::rt::Write, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { hyper::rt::Write::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { hyper::rt::Write::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { hyper::rt::Write::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs) } } // ==== impl TokioTimer ===== impl Timer for TokioTimer { fn sleep(&self, duration: Duration) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep(duration), }) } fn sleep_until(&self, deadline: Instant) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep_until(deadline.into()), }) } fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { sleep.reset(new_deadline) } } } impl TokioTimer { /// Create a new TokioTimer pub fn new() -> Self { Self {} } } impl Future for TokioSleep { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().inner.poll(cx) } } impl Sleep for TokioSleep {} impl TokioSleep { fn reset(self: Pin<&mut Self>, deadline: Instant) { self.project().inner.as_mut().reset(deadline.into()); } } #[cfg(test)] mod tests { use crate::rt::TokioExecutor; use hyper::rt::Executor; use tokio::sync::oneshot; #[cfg(not(miri))] #[tokio::test] async fn simple_execute() -> Result<(), Box> { let (tx, rx) = oneshot::channel(); let executor = TokioExecutor::new(); executor.execute(async move { tx.send(()).unwrap(); 
}); rx.await.map_err(Into::into) } } hyper-util-0.1.10/src/server/conn/auto/mod.rs000064400000000000000000001156011046102023000171400ustar 00000000000000//! Http1 or Http2 connection. pub mod upgrade; use futures_util::ready; use hyper::service::HttpService; use std::future::Future; use std::marker::PhantomPinned; use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; use std::{error::Error as StdError, io, time::Duration}; use bytes::Bytes; use http::{Request, Response}; use http_body::Body; use hyper::{ body::Incoming, rt::{Read, ReadBuf, Timer, Write}, service::Service, }; #[cfg(feature = "http1")] use hyper::server::conn::http1; #[cfg(feature = "http2")] use hyper::{rt::bounds::Http2ServerConnExec, server::conn::http2}; #[cfg(any(not(feature = "http2"), not(feature = "http1")))] use std::marker::PhantomData; use pin_project_lite::pin_project; use crate::common::rewind::Rewind; type Error = Box; type Result = std::result::Result; const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// Exactly equivalent to [`Http2ServerConnExec`]. #[cfg(feature = "http2")] pub trait HttpServerConnExec: Http2ServerConnExec {} #[cfg(feature = "http2")] impl> HttpServerConnExec for T {} /// Exactly equivalent to [`Http2ServerConnExec`]. #[cfg(not(feature = "http2"))] pub trait HttpServerConnExec {} #[cfg(not(feature = "http2"))] impl HttpServerConnExec for T {} /// Http1 or Http2 connection builder. #[derive(Clone, Debug)] pub struct Builder { #[cfg(feature = "http1")] http1: http1::Builder, #[cfg(feature = "http2")] http2: http2::Builder, #[cfg(any(feature = "http1", feature = "http2"))] version: Option, #[cfg(not(feature = "http2"))] _executor: E, } impl Builder { /// Create a new auto connection builder. /// /// `executor` parameter should be a type that implements /// [`Executor`](hyper::rt::Executor) trait. 
/// /// # Example /// /// ``` /// use hyper_util::{ /// rt::TokioExecutor, /// server::conn::auto, /// }; /// /// auto::Builder::new(TokioExecutor::new()); /// ``` pub fn new(executor: E) -> Self { Self { #[cfg(feature = "http1")] http1: http1::Builder::new(), #[cfg(feature = "http2")] http2: http2::Builder::new(executor), #[cfg(any(feature = "http1", feature = "http2"))] version: None, #[cfg(not(feature = "http2"))] _executor: executor, } } /// Http1 configuration. #[cfg(feature = "http1")] pub fn http1(&mut self) -> Http1Builder<'_, E> { Http1Builder { inner: self } } /// Http2 configuration. #[cfg(feature = "http2")] pub fn http2(&mut self) -> Http2Builder<'_, E> { Http2Builder { inner: self } } /// Only accepts HTTP/2 /// /// Does not do anything if used with [`serve_connection_with_upgrades`] #[cfg(feature = "http2")] pub fn http2_only(mut self) -> Self { assert!(self.version.is_none()); self.version = Some(Version::H2); self } /// Only accepts HTTP/1 /// /// Does not do anything if used with [`serve_connection_with_upgrades`] #[cfg(feature = "http1")] pub fn http1_only(mut self) -> Self { assert!(self.version.is_none()); self.version = Some(Version::H1); self } /// Bind a connection together with a [`Service`]. 
pub fn serve_connection(&self, io: I, service: S) -> Connection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { let state = match self.version { #[cfg(feature = "http1")] Some(Version::H1) => { let io = Rewind::new_buffered(io, Bytes::new()); let conn = self.http1.serve_connection(io, service); ConnState::H1 { conn } } #[cfg(feature = "http2")] Some(Version::H2) => { let io = Rewind::new_buffered(io, Bytes::new()); let conn = self.http2.serve_connection(io, service); ConnState::H2 { conn } } #[cfg(any(feature = "http1", feature = "http2"))] _ => ConnState::ReadVersion { read_version: read_version(io), builder: Cow::Borrowed(self), service: Some(service), }, }; Connection { state } } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. /// /// Note that if you ever want to use [`hyper::upgrade::Upgraded::downcast`] /// with this crate, you'll need to use [`hyper_util::server::conn::auto::upgrade::downcast`] /// instead. See the documentation of the latter to understand why. 
pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { UpgradeableConnection { state: UpgradeableConnState::ReadVersion { read_version: read_version(io), builder: Cow::Borrowed(self), service: Some(service), }, } } } #[derive(Copy, Clone, Debug)] enum Version { H1, H2, } impl Version { #[must_use] #[cfg(any(not(feature = "http2"), not(feature = "http1")))] pub fn unsupported(self) -> Error { match self { Version::H1 => Error::from("HTTP/1 is not supported"), Version::H2 => Error::from("HTTP/2 is not supported"), } } } fn read_version(io: I) -> ReadVersion where I: Read + Unpin, { ReadVersion { io: Some(io), buf: [MaybeUninit::uninit(); 24], filled: 0, version: Version::H2, cancelled: false, _pin: PhantomPinned, } } pin_project! { struct ReadVersion { io: Option, buf: [MaybeUninit; 24], // the amount of `buf` thats been filled filled: usize, version: Version, cancelled: bool, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } impl ReadVersion { pub fn cancel(self: Pin<&mut Self>) { *self.project().cancelled = true; } } impl Future for ReadVersion where I: Read + Unpin, { type Output = io::Result<(Version, Rewind)>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); if *this.cancelled { return Poll::Ready(Err(io::Error::new(io::ErrorKind::Interrupted, "Cancelled"))); } let mut buf = ReadBuf::uninit(&mut *this.buf); // SAFETY: `this.filled` tracks how many bytes have been read (and thus initialized) and // we're only advancing by that many. unsafe { buf.unfilled().advance(*this.filled); }; // We start as H2 and switch to H1 as soon as we don't have the preface. 
while buf.filled().len() < H2_PREFACE.len() { let len = buf.filled().len(); ready!(Pin::new(this.io.as_mut().unwrap()).poll_read(cx, buf.unfilled()))?; *this.filled = buf.filled().len(); // We starts as H2 and switch to H1 when we don't get the preface. if buf.filled().len() == len || buf.filled()[len..] != H2_PREFACE[len..buf.filled().len()] { *this.version = Version::H1; break; } } let io = this.io.take().unwrap(); let buf = buf.filled().to_vec(); Poll::Ready(Ok(( *this.version, Rewind::new_buffered(io, Bytes::from(buf)), ))) } } pin_project! { /// Connection future. pub struct Connection<'a, I, S, E> where S: HttpService, { #[pin] state: ConnState<'a, I, S, E>, } } // A custom COW, since the libstd is has ToOwned bounds that are too eager. enum Cow<'a, T> { Borrowed(&'a T), Owned(T), } impl<'a, T> std::ops::Deref for Cow<'a, T> { type Target = T; fn deref(&self) -> &T { match self { Cow::Borrowed(t) => &*t, Cow::Owned(ref t) => t, } } } #[cfg(feature = "http1")] type Http1Connection = hyper::server::conn::http1::Connection, S>; #[cfg(not(feature = "http1"))] type Http1Connection = (PhantomData, PhantomData); #[cfg(feature = "http2")] type Http2Connection = hyper::server::conn::http2::Connection, S, E>; #[cfg(not(feature = "http2"))] type Http2Connection = (PhantomData, PhantomData, PhantomData); pin_project! { #[project = ConnStateProj] enum ConnState<'a, I, S, E> where S: HttpService, { ReadVersion { #[pin] read_version: ReadVersion, builder: Cow<'a, Builder>, service: Option, }, H1 { #[pin] conn: Http1Connection, }, H2 { #[pin] conn: Http2Connection, }, } } impl Connection<'_, I, S, E> where S: HttpService, S::Error: Into>, I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, E: HttpServerConnExec, { /// Start a graceful shutdown process for this connection. /// /// This `Connection` should continue to be polled until shutdown can finish. /// /// # Note /// /// This should only be called while the `Connection` future is still pending. 
If called after /// `Connection::poll` has resolved, this does nothing. pub fn graceful_shutdown(self: Pin<&mut Self>) { match self.project().state.project() { ConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(), #[cfg(feature = "http1")] ConnStateProj::H1 { conn } => conn.graceful_shutdown(), #[cfg(feature = "http2")] ConnStateProj::H2 { conn } => conn.graceful_shutdown(), #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } /// Make this Connection static, instead of borrowing from Builder. pub fn into_owned(self) -> Connection<'static, I, S, E> where Builder: Clone, { Connection { state: match self.state { ConnState::ReadVersion { read_version, builder, service, } => ConnState::ReadVersion { read_version, service, builder: Cow::Owned(builder.clone()), }, #[cfg(feature = "http1")] ConnState::H1 { conn } => ConnState::H1 { conn }, #[cfg(feature = "http2")] ConnState::H2 { conn } => ConnState::H2 { conn }, #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), }, } } } impl Future for Connection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let mut this = self.as_mut().project(); match this.state.as_mut().project() { ConnStateProj::ReadVersion { read_version, builder, service, } => { let (version, io) = ready!(read_version.poll(cx))?; let service = service.take().unwrap(); match version { #[cfg(feature = "http1")] Version::H1 => { let conn = builder.http1.serve_connection(io, service); this.state.set(ConnState::H1 { conn }); } #[cfg(feature = "http2")] Version::H2 => { let conn = builder.http2.serve_connection(io, service); this.state.set(ConnState::H2 { conn }); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => return 
Poll::Ready(Err(version.unsupported())), } } #[cfg(feature = "http1")] ConnStateProj::H1 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(feature = "http2")] ConnStateProj::H2 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } } } pin_project! { /// Connection future. pub struct UpgradeableConnection<'a, I, S, E> where S: HttpService, { #[pin] state: UpgradeableConnState<'a, I, S, E>, } } #[cfg(feature = "http1")] type Http1UpgradeableConnection = hyper::server::conn::http1::UpgradeableConnection; #[cfg(not(feature = "http1"))] type Http1UpgradeableConnection = (PhantomData, PhantomData); pin_project! { #[project = UpgradeableConnStateProj] enum UpgradeableConnState<'a, I, S, E> where S: HttpService, { ReadVersion { #[pin] read_version: ReadVersion, builder: Cow<'a, Builder>, service: Option, }, H1 { #[pin] conn: Http1UpgradeableConnection, S>, }, H2 { #[pin] conn: Http2Connection, }, } } impl UpgradeableConnection<'_, I, S, E> where S: HttpService, S::Error: Into>, I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, E: HttpServerConnExec, { /// Start a graceful shutdown process for this connection. /// /// This `UpgradeableConnection` should continue to be polled until shutdown can finish. /// /// # Note /// /// This should only be called while the `Connection` future is still nothing. pending. If /// called after `UpgradeableConnection::poll` has resolved, this does nothing. pub fn graceful_shutdown(self: Pin<&mut Self>) { match self.project().state.project() { UpgradeableConnStateProj::ReadVersion { read_version, .. 
} => read_version.cancel(), #[cfg(feature = "http1")] UpgradeableConnStateProj::H1 { conn } => conn.graceful_shutdown(), #[cfg(feature = "http2")] UpgradeableConnStateProj::H2 { conn } => conn.graceful_shutdown(), #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } /// Make this Connection static, instead of borrowing from Builder. pub fn into_owned(self) -> UpgradeableConnection<'static, I, S, E> where Builder: Clone, { UpgradeableConnection { state: match self.state { UpgradeableConnState::ReadVersion { read_version, builder, service, } => UpgradeableConnState::ReadVersion { read_version, service, builder: Cow::Owned(builder.clone()), }, #[cfg(feature = "http1")] UpgradeableConnState::H1 { conn } => UpgradeableConnState::H1 { conn }, #[cfg(feature = "http2")] UpgradeableConnState::H2 { conn } => UpgradeableConnState::H2 { conn }, #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), }, } } } impl Future for UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let mut this = self.as_mut().project(); match this.state.as_mut().project() { UpgradeableConnStateProj::ReadVersion { read_version, builder, service, } => { let (version, io) = ready!(read_version.poll(cx))?; let service = service.take().unwrap(); match version { #[cfg(feature = "http1")] Version::H1 => { let conn = builder.http1.serve_connection(io, service).with_upgrades(); this.state.set(UpgradeableConnState::H1 { conn }); } #[cfg(feature = "http2")] Version::H2 => { let conn = builder.http2.serve_connection(io, service); this.state.set(UpgradeableConnState::H2 { conn }); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => return Poll::Ready(Err(version.unsupported())), } } 
#[cfg(feature = "http1")] UpgradeableConnStateProj::H1 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(feature = "http2")] UpgradeableConnStateProj::H2 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } } } /// Http1 part of builder. #[cfg(feature = "http1")] pub struct Http1Builder<'a, E> { inner: &'a mut Builder, } #[cfg(feature = "http1")] impl Http1Builder<'_, E> { /// Http2 configuration. #[cfg(feature = "http2")] pub fn http2(&mut self) -> Http2Builder<'_, E> { Http2Builder { inner: self.inner } } /// Set whether HTTP/1 connections should support half-closures. /// /// Clients can chose to shutdown their write-side while waiting /// for the server to respond. Setting this to `true` will /// prevent closing the connection immediately if `read` /// detects an EOF in the middle of a request. /// /// Default is `false`. pub fn half_close(&mut self, val: bool) -> &mut Self { self.inner.http1.half_close(val); self } /// Enables or disables HTTP/1 keep-alive. /// /// Default is true. pub fn keep_alive(&mut self, val: bool) -> &mut Self { self.inner.http1.keep_alive(val); self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { self.inner.http1.title_case_headers(enabled); self } /// Set whether to support preserving original header cases. /// /// Currently, this will record the original cases received, and store them /// in a private extension on the `Request`. It will also look for and use /// such an extension in any provided `Response`. /// /// Since the relevant extension is still private, there is no way to /// interact with the original cases. The only effect this can have now is /// to forward the cases in a proxy-like fashion. 
/// /// Note that this setting does not affect HTTP/2. /// /// Default is false. pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { self.inner.http1.preserve_header_case(enabled); self } /// Set the maximum number of headers. /// /// When a request is received, the parser will reserve a buffer to store headers for optimal /// performance. /// /// If server receives more headers than the buffer size, it responds to the client with /// "431 Request Header Fields Too Large". /// /// The headers is allocated on the stack by default, which has higher performance. After /// setting this value, headers will be allocated in heap memory, that is, heap memory /// allocation will occur for each request, and there will be a performance drop of about 5%. /// /// Note that this setting does not affect HTTP/2. /// /// Default is 100. pub fn max_headers(&mut self, val: usize) -> &mut Self { self.inner.http1.max_headers(val); self } /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Requires a [`Timer`] set by [`Http1Builder::timer`] to take effect. Panics if `header_read_timeout` is configured /// without a [`Timer`]. /// /// Pass `None` to disable. /// /// Default is currently 30 seconds, but do not depend on that. pub fn header_read_timeout(&mut self, read_timeout: impl Into>) -> &mut Self { self.inner.http1.header_read_timeout(read_timeout); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force hyper to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. 
In this mode hyper will try to guess which /// mode to use pub fn writev(&mut self, val: bool) -> &mut Self { self.inner.http1.writev(val); self } /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. /// /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. pub fn max_buf_size(&mut self, max: usize) -> &mut Self { self.inner.http1.max_buf_size(max); self } /// Aggregates flushes to better support pipelined responses. /// /// Experimental, may have bugs. /// /// Default is false. pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { self.inner.http1.pipeline_flush(enabled); self } /// Set the timer used in background tasks. pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { self.inner.http1.timer(timer); self } /// Bind a connection together with a [`Service`]. #[cfg(feature = "http2")] pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`]. #[cfg(not(feature = "http2"))] pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. 
#[cfg(feature = "http2")] pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { self.inner.serve_connection_with_upgrades(io, service) } } /// Http2 part of builder. #[cfg(feature = "http2")] pub struct Http2Builder<'a, E> { inner: &'a mut Builder, } #[cfg(feature = "http2")] impl Http2Builder<'_, E> { #[cfg(feature = "http1")] /// Http1 configuration. pub fn http1(&mut self) -> Http1Builder<'_, E> { Http1Builder { inner: self.inner } } /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. /// /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). /// As of v0.4.0, it is 20. /// /// See for more information. pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { self.inner.http2.max_pending_accept_reset_streams(max); self } /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.initial_stream_window_size(sz); self } /// Sets the max connection-level flow control for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.initial_connection_window_size(sz); self } /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. 
pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { self.inner.http2.adaptive_window(enabled); self } /// Sets the maximum frame size to use for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.max_frame_size(sz); self } /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. /// /// Default is 200. Passing `None` will remove any limit. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { self.inner.http2.max_concurrent_streams(max); self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. /// /// # Cargo Feature /// pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.inner.http2.keep_alive_interval(interval); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. /// /// Default is 20 seconds. /// /// # Cargo Feature /// pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.inner.http2.keep_alive_timeout(timeout); self } /// Set the maximum write buffer size for each HTTP/2 stream. /// /// Default is currently ~400KB, but may change. /// /// # Panics /// /// The value must be no larger than `u32::MAX`. pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { self.inner.http2.max_send_buf_size(max); self } /// Enables the [extended CONNECT protocol]. 
/// /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 pub fn enable_connect_protocol(&mut self) -> &mut Self { self.inner.http2.enable_connect_protocol(); self } /// Sets the max size of received header frames. /// /// Default is currently ~16MB, but may change. pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { self.inner.http2.max_header_list_size(max); self } /// Set the timer used in background tasks. pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { self.inner.http2.timer(timer); self } /// Bind a connection together with a [`Service`]. pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { self.inner.serve_connection_with_upgrades(io, service) } } #[cfg(test)] mod tests { use crate::{ rt::{TokioExecutor, TokioIo}, server::conn::auto, }; use http::{Request, Response}; use http_body::Body; use http_body_util::{BodyExt, Empty, Full}; use hyper::{body, body::Bytes, client, service::service_fn}; use std::{convert::Infallible, error::Error as StdError, net::SocketAddr, time::Duration}; use tokio::{ net::{TcpListener, TcpStream}, pin, }; const BODY: &[u8] = b"Hello, world!"; #[test] fn configuration() { // One liner. 
auto::Builder::new(TokioExecutor::new()) .http1() .keep_alive(true) .http2() .keep_alive_interval(None); // .serve_connection(io, service); // Using variable. let mut builder = auto::Builder::new(TokioExecutor::new()); builder.http1().keep_alive(true); builder.http2().keep_alive_interval(None); // builder.serve_connection(io, service); } #[cfg(not(miri))] #[tokio::test] async fn http1() { let addr = start_server(false, false).await; let mut sender = connect_h1(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2() { let addr = start_server(false, false).await; let mut sender = connect_h2(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2_only() { let addr = start_server(false, true).await; let mut sender = connect_h2(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2_only_fail_if_client_is_http1() { let addr = start_server(false, true).await; let mut sender = connect_h1(addr).await; let _ = sender .send_request(Request::new(Empty::::new())) .await .expect_err("should fail"); } #[cfg(not(miri))] #[tokio::test] async fn http1_only() { let addr = start_server(true, false).await; let mut sender = connect_h1(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http1_only_fail_if_client_is_http2() { let addr = start_server(true, 
false).await; let mut sender = connect_h2(addr).await; let _ = sender .send_request(Request::new(Empty::::new())) .await .expect_err("should fail"); } #[cfg(not(miri))] #[tokio::test] async fn graceful_shutdown() { let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) .await .unwrap(); let listener_addr = listener.local_addr().unwrap(); // Spawn the task in background so that we can connect there let listen_task = tokio::spawn(async move { listener.accept().await.unwrap() }); // Only connect a stream, do not send headers or anything let _stream = TcpStream::connect(listener_addr).await.unwrap(); let (stream, _) = listen_task.await.unwrap(); let stream = TokioIo::new(stream); let builder = auto::Builder::new(TokioExecutor::new()); let connection = builder.serve_connection(stream, service_fn(hello)); pin!(connection); connection.as_mut().graceful_shutdown(); let connection_error = tokio::time::timeout(Duration::from_millis(200), connection) .await .expect("Connection should have finished in a timely manner after graceful shutdown.") .expect_err("Connection should have been interrupted."); let connection_error = connection_error .downcast_ref::() .expect("The error should have been `std::io::Error`."); assert_eq!(connection_error.kind(), std::io::ErrorKind::Interrupted); } async fn connect_h1(addr: SocketAddr) -> client::conn::http1::SendRequest where B: Body + Send + 'static, B::Data: Send, B::Error: Into>, { let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap()); let (sender, connection) = client::conn::http1::handshake(stream).await.unwrap(); tokio::spawn(connection); sender } async fn connect_h2(addr: SocketAddr) -> client::conn::http2::SendRequest where B: Body + Unpin + Send + 'static, B::Data: Send, B::Error: Into>, { let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap()); let (sender, connection) = client::conn::http2::Builder::new(TokioExecutor::new()) .handshake(stream) .await .unwrap(); tokio::spawn(connection); 
sender } async fn start_server(h1_only: bool, h2_only: bool) -> SocketAddr { let addr: SocketAddr = ([127, 0, 0, 1], 0).into(); let listener = TcpListener::bind(addr).await.unwrap(); let local_addr = listener.local_addr().unwrap(); tokio::spawn(async move { loop { let (stream, _) = listener.accept().await.unwrap(); let stream = TokioIo::new(stream); tokio::task::spawn(async move { let mut builder = auto::Builder::new(TokioExecutor::new()); if h1_only { builder = builder.http1_only(); builder.serve_connection(stream, service_fn(hello)).await } else if h2_only { builder = builder.http2_only(); builder.serve_connection(stream, service_fn(hello)).await } else { builder .http2() .max_header_list_size(4096) .serve_connection_with_upgrades(stream, service_fn(hello)) .await } .unwrap(); }); } }); local_addr } async fn hello(_req: Request) -> Result>, Infallible> { Ok(Response::new(Full::new(Bytes::from(BODY)))) } } hyper-util-0.1.10/src/server/conn/auto/upgrade.rs000064400000000000000000000036261046102023000200130ustar 00000000000000//! Upgrade utilities. use bytes::{Bytes, BytesMut}; use hyper::{ rt::{Read, Write}, upgrade::Upgraded, }; use crate::common::rewind::Rewind; /// Tries to downcast the internal trait object to the type passed. /// /// On success, returns the downcasted parts. On error, returns the Upgraded back. /// This is a kludge to work around the fact that the machinery provided by /// [`hyper_util::server::con::auto`] wraps the inner `T` with a private type /// that is not reachable from outside the crate. /// /// This kludge will be removed when this machinery is added back to the main /// `hyper` code. pub fn downcast(upgraded: Upgraded) -> Result, Upgraded> where T: Read + Write + Unpin + 'static, { let hyper::upgrade::Parts { io: rewind, mut read_buf, .. 
} = upgraded.downcast::>()?; if let Some(pre) = rewind.pre { read_buf = if read_buf.is_empty() { pre } else { let mut buf = BytesMut::from(read_buf); buf.extend_from_slice(&pre); buf.freeze() }; } Ok(Parts { io: rewind.inner, read_buf, }) } /// The deconstructed parts of an [`Upgraded`] type. /// /// Includes the original IO type, and a read buffer of bytes that the /// HTTP state machine may have already read before completing an upgrade. #[derive(Debug)] #[non_exhaustive] pub struct Parts { /// The original IO object used before the upgrade. pub io: T, /// A buffer of bytes that have been read but not processed as HTTP. /// /// For instance, if the `Connection` is used for an HTTP upgrade request, /// it is possible the server sent back the first bytes of the new protocol /// along with the response upgrade. /// /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, } hyper-util-0.1.10/src/server/conn/mod.rs000064400000000000000000000001331046102023000161610ustar 00000000000000//! Connection utilities. #[cfg(any(feature = "http1", feature = "http2"))] pub mod auto; hyper-util-0.1.10/src/server/graceful.rs000064400000000000000000000343741046102023000162530ustar 00000000000000//! Utility to gracefully shutdown a server. //! //! This module provides a [`GracefulShutdown`] type, //! which can be used to gracefully shutdown a server. //! //! See //! for an example of how to use this. use std::{ fmt::{self, Debug}, future::Future, pin::Pin, task::{self, Poll}, }; use pin_project_lite::pin_project; use tokio::sync::watch; /// A graceful shutdown utility pub struct GracefulShutdown { tx: watch::Sender<()>, } impl GracefulShutdown { /// Create a new graceful shutdown helper. pub fn new() -> Self { let (tx, _) = watch::channel(()); Self { tx } } /// Wrap a future for graceful shutdown watching. 
pub fn watch(&self, conn: C) -> impl Future { let mut rx = self.tx.subscribe(); GracefulConnectionFuture::new(conn, async move { let _ = rx.changed().await; // hold onto the rx until the watched future is completed rx }) } /// Signal shutdown for all watched connections. /// /// This returns a `Future` which will complete once all watched /// connections have shutdown. pub async fn shutdown(self) { let Self { tx } = self; // signal all the watched futures about the change let _ = tx.send(()); // and then wait for all of them to complete tx.closed().await; } } impl Debug for GracefulShutdown { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GracefulShutdown").finish() } } impl Default for GracefulShutdown { fn default() -> Self { Self::new() } } pin_project! { struct GracefulConnectionFuture { #[pin] conn: C, #[pin] cancel: F, #[pin] // If cancelled, this is held until the inner conn is done. cancelled_guard: Option, } } impl GracefulConnectionFuture { fn new(conn: C, cancel: F) -> Self { Self { conn, cancel, cancelled_guard: None, } } } impl Debug for GracefulConnectionFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GracefulConnectionFuture").finish() } } impl Future for GracefulConnectionFuture where C: GracefulConnection, F: Future, { type Output = C::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); if this.cancelled_guard.is_none() { if let Poll::Ready(guard) = this.cancel.poll(cx) { this.cancelled_guard.set(Some(guard)); this.conn.as_mut().graceful_shutdown(); } } this.conn.poll(cx) } } /// An internal utility trait as an umbrella target for all (hyper) connection /// types that the [`GracefulShutdown`] can watch. pub trait GracefulConnection: Future> + private::Sealed { /// The error type returned by the connection when used as a future. type Error; /// Start a graceful shutdown process for this connection. 
fn graceful_shutdown(self: Pin<&mut Self>); } #[cfg(feature = "http1")] impl GracefulConnection for hyper::server::conn::http1::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { type Error = hyper::Error; fn graceful_shutdown(self: Pin<&mut Self>) { hyper::server::conn::http1::Connection::graceful_shutdown(self); } } #[cfg(feature = "http2")] impl GracefulConnection for hyper::server::conn::http2::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = hyper::Error; fn graceful_shutdown(self: Pin<&mut Self>) { hyper::server::conn::http2::Connection::graceful_shutdown(self); } } #[cfg(feature = "server-auto")] impl<'a, I, B, S, E> GracefulConnection for crate::server::conn::auto::Connection<'a, I, S, E> where S: hyper::service::Service, Response = http::Response>, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = Box; fn graceful_shutdown(self: Pin<&mut Self>) { crate::server::conn::auto::Connection::graceful_shutdown(self); } } #[cfg(feature = "server-auto")] impl<'a, I, B, S, E> GracefulConnection for crate::server::conn::auto::UpgradeableConnection<'a, I, S, E> where S: hyper::service::Service, Response = http::Response>, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = Box; fn graceful_shutdown(self: Pin<&mut Self>) { crate::server::conn::auto::UpgradeableConnection::graceful_shutdown(self); } } mod private { pub trait Sealed {} #[cfg(feature = "http1")] impl Sealed 
for hyper::server::conn::http1::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { } #[cfg(feature = "http1")] impl Sealed for hyper::server::conn::http1::UpgradeableConnection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { } #[cfg(feature = "http2")] impl Sealed for hyper::server::conn::http2::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } #[cfg(feature = "server-auto")] impl<'a, I, B, S, E> Sealed for crate::server::conn::auto::Connection<'a, I, S, E> where S: hyper::service::Service< http::Request, Response = http::Response, >, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } #[cfg(feature = "server-auto")] impl<'a, I, B, S, E> Sealed for crate::server::conn::auto::UpgradeableConnection<'a, I, S, E> where S: hyper::service::Service< http::Request, Response = http::Response, >, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } } #[cfg(test)] mod test { use super::*; use pin_project_lite::pin_project; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; pin_project! 
{ #[derive(Debug)] struct DummyConnection { #[pin] future: F, shutdown_counter: Arc, } } impl private::Sealed for DummyConnection {} impl GracefulConnection for DummyConnection { type Error = (); fn graceful_shutdown(self: Pin<&mut Self>) { self.shutdown_counter.fetch_add(1, Ordering::SeqCst); } } impl Future for DummyConnection { type Output = Result<(), ()>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project().future.poll(cx) { Poll::Ready(_) => Poll::Ready(Ok(())), Poll::Pending => Poll::Pending, } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); let (dummy_tx, _) = tokio::sync::broadcast::channel(1); for i in 1..=3 { let mut dummy_rx = dummy_tx.subscribe(); let shutdown_counter = shutdown_counter.clone(); let future = async move { tokio::time::sleep(std::time::Duration::from_millis(i * 10)).await; let _ = dummy_rx.recv().await; }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); let _ = dummy_tx.send(()); tokio::select! 
{ _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_delayed_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); //tokio::time::sleep(std::time::Duration::from_millis(i * 5)).await; let future = async move { tokio::time::sleep(std::time::Duration::from_millis(i * 50)).await; }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! { _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_multi_per_watcher_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); let mut futures = Vec::new(); for u in 1..=i { let future = tokio::time::sleep(std::time::Duration::from_millis(u * 50)); let dummy_conn = DummyConnection { future, shutdown_counter: shutdown_counter.clone(), }; let conn = graceful.watch(dummy_conn); futures.push(conn); } tokio::spawn(async move { futures_util::future::join_all(futures).await; }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! 
{ _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 6); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_timeout() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); let future = async move { if i == 1 { std::future::pending::<()>().await } else { std::future::ready(()).await } }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! { _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); }, _ = graceful.shutdown() => { panic!("shutdown should not be completed: as not all our conns finish") } } } } hyper-util-0.1.10/src/server/mod.rs000064400000000000000000000001341046102023000152250ustar 00000000000000//! Server utilities. pub mod conn; #[cfg(feature = "server-graceful")] pub mod graceful; hyper-util-0.1.10/src/service/glue.rs000064400000000000000000000025431046102023000155420ustar 00000000000000use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use super::Oneshot; /// A tower service converted into a hyper service. #[derive(Debug, Copy, Clone)] pub struct TowerToHyperService { service: S, } impl TowerToHyperService { /// Create a new `TowerToHyperService` from a tower service. 
pub fn new(tower_service: S) -> Self { Self { service: tower_service, } } } impl hyper::service::Service for TowerToHyperService where S: tower_service::Service + Clone, { type Response = S::Response; type Error = S::Error; type Future = TowerToHyperServiceFuture; fn call(&self, req: R) -> Self::Future { TowerToHyperServiceFuture { future: Oneshot::new(self.service.clone(), req), } } } pin_project! { /// Response future for [`TowerToHyperService`]. pub struct TowerToHyperServiceFuture where S: tower_service::Service, { #[pin] future: Oneshot, } } impl Future for TowerToHyperServiceFuture where S: tower_service::Service, { type Output = Result; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().future.poll(cx) } } hyper-util-0.1.10/src/service/mod.rs000064400000000000000000000005151046102023000153620ustar 00000000000000//! Service utilities. #[cfg(feature = "service")] mod glue; #[cfg(any(feature = "client-legacy", feature = "service"))] mod oneshot; #[cfg(feature = "service")] pub use self::glue::{TowerToHyperService, TowerToHyperServiceFuture}; #[cfg(any(feature = "client-legacy", feature = "service"))] pub(crate) use self::oneshot::Oneshot; hyper-util-0.1.10/src/service/oneshot.rs000064400000000000000000000031731046102023000162650ustar 00000000000000use futures_util::ready; use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use tower_service::Service; // Vendored from tower::util to reduce dependencies, the code is small enough. // Not really pub, but used in a trait for bounds pin_project! 
{ #[project = OneshotProj] #[derive(Debug)] pub enum Oneshot, Req> { NotReady { svc: S, req: Option, }, Called { #[pin] fut: S::Future, }, Done, } } impl Oneshot where S: Service, { pub(crate) const fn new(svc: S, req: Req) -> Self { Oneshot::NotReady { svc, req: Some(req), } } } impl Future for Oneshot where S: Service, { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let this = self.as_mut().project(); match this { OneshotProj::NotReady { svc, req } => { let _ = ready!(svc.poll_ready(cx))?; let fut = svc.call(req.take().expect("already called")); self.set(Oneshot::Called { fut }); } OneshotProj::Called { fut } => { let res = ready!(fut.poll(cx))?; self.set(Oneshot::Done); return Poll::Ready(Ok(res)); } OneshotProj::Done => panic!("polled after complete"), } } } } hyper-util-0.1.10/tests/legacy_client.rs000064400000000000000000001001161046102023000163160ustar 00000000000000mod test_utils; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Poll; use std::thread; use std::time::Duration; use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, FutureExt, TryFutureExt}; use futures_util::stream::StreamExt; use futures_util::{self, Stream}; use http_body_util::BodyExt; use http_body_util::{Empty, Full, StreamBody}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use hyper::body::Bytes; use hyper::body::Frame; use hyper::Request; use hyper_util::client::legacy::connect::{capture_connection, HttpConnector}; use hyper_util::client::legacy::Client; use hyper_util::rt::{TokioExecutor, TokioIo}; use test_utils::{DebugConnector, DebugStream}; pub fn runtime() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("new rt") } fn s(buf: &[u8]) -> &str { std::str::from_utf8(buf).expect("from_utf8") } #[cfg(not(miri))] #[test] fn 
drop_body_before_eof_closes_connection() { // https://github.com/hyperium/hyper/issues/1353 let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let (closes_tx, closes) = mpsc::channel::<()>(10); let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); let body = vec![b'x'; 1024 * 128]; write!( sock, "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", body.len() ) .expect("write head"); let _ = sock.write_all(&body); let _ = tx1.send(()); }); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; rt.block_on(async move { let (res, _) = future::join(res, rx).await; res.unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; }); rt.block_on(closes.into_future()).0.expect("closes"); } #[cfg(not(miri))] #[tokio::test] async fn drop_client_closes_idle_connections() { let _ = pretty_env_logger::try_init(); let server = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, mut closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let t1 = tokio::spawn(async move { let mut sock = server.accept().await.unwrap().0; let mut buf = [0; 4096]; sock.read(&mut buf).await.expect("read 1"); let body = [b'x'; 64]; let headers = format!("HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", body.len()); sock.write_all(headers.as_bytes()) .await .expect("write head"); 
sock.write_all(&body).await.expect("write body"); let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped match sock.read(&mut buf).await { Ok(n) => assert_eq!(n, 0), Err(_) => (), } }); let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); // not closed yet, just idle future::poll_fn(|ctx| { assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); Poll::Ready(()) }) .await; // drop to start the connections closing drop(client); // and wait a few ticks for the connections to close let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; t1.await.unwrap(); } #[cfg(not(miri))] #[tokio::test] async fn drop_response_future_closes_in_progress_connection() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); // we never write a response head // simulates a slow server operation let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle 
until Client is dropped let _ = client_drop_rx.recv(); }); let res = { let client = Client::builder(TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); client.request(req).map(|_| unreachable!()) }; future::select(res, rx1).await; // res now dropped let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn drop_response_body_closes_in_progress_connection() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); write!( sock, "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n" ) .expect("write head"); let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = client_drop_rx.recv(); }); let rx = rx1; let res = { let client = Client::builder(TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); // notably, haven't read body yet client.request(req) }; let (res, _) = future::join(res, rx).await; // drop the body res.unwrap(); // and wait a few ticks to see the connection drop let t = 
tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn no_keep_alive_closes_connection() { // https://github.com/hyperium/hyper/issues/1383 let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_tx2, rx2) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .unwrap(); let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = rx2.recv(); }); let client = Client::builder(TokioExecutor::new()) .pool_max_idle_per_host(0) .build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(close, t).await; } #[cfg(not(miri))] #[tokio::test] async fn socket_disconnect_closes_idle_conn() { // notably when keep-alive is enabled let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = 
server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .unwrap(); let _ = tx1.send(()); }); let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[test] fn connect_call_is_lazy() { // We especially don't want connects() triggered if there's // idle connections that the Checkout would have found let _ = pretty_env_logger::try_init(); let _rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); assert_eq!(connects.load(Ordering::Relaxed), 0); let req = Request::builder() .uri("http://hyper.local/a") .body(Empty::::new()) .unwrap(); let _fut = client.request(req); // internal Connect::connect should have been lazy, and not // triggered an actual connect yet. 
assert_eq!(connects.load(Ordering::Relaxed), 0); } #[cfg(not(miri))] #[test] fn client_keep_alive_0() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); let n2 = sock.read(&mut buf).expect("read 2"); assert_ne!(n2, 0); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::SeqCst), 1); // sleep real quick to let the threadpool put connection in ready // state and back into client pool thread::sleep(Duration::from_millis(50)); let rx = rx2; let req = Request::builder() .uri(&*format!("http://{}/b", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 1, "second request should still only have 1 connect" ); drop(client); } #[cfg(not(miri))] #[test] fn client_keep_alive_extra_body() { let _ = pretty_env_logger::try_init(); let 
server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello") .expect("write 1"); // the body "hello", while ignored because its a HEAD request, should mean the connection // cannot be put back in the pool let _ = tx1.send(()); let mut sock2 = server.accept().unwrap().0; let n2 = sock2.read(&mut buf).expect("read 2"); assert_ne!(n2, 0); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock2 .write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::Relaxed), 0); let rx = rx1; let req = Request::builder() .method("HEAD") .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::Relaxed), 1); let rx = rx2; let req = Request::builder() .uri(&*format!("http://{}/b", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::Relaxed), 2); } #[cfg(not(miri))] #[tokio::test] async fn client_keep_alive_when_response_before_request_body_ends() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, mut closes) = 
mpsc::channel::<()>(10); let connector = DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector.clone()); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); let (_tx3, rx3) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); // after writing the response, THEN stream the body let _ = tx1.send(()); sock.read(&mut buf).expect("read 2"); let _ = tx2.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = rx3.recv(); }); assert_eq!(connects.load(Ordering::Relaxed), 0); let delayed_body = rx1 .then(|_| Box::pin(tokio::time::sleep(Duration::from_millis(200)))) .map(|_| Ok::<_, ()>(Frame::data(&b"hello a"[..]))) .map_err(|_| -> hyper::Error { panic!("rx1") }) .into_stream(); let req = Request::builder() .method("POST") .uri(&*format!("http://{}/a", addr)) .body(StreamBody::new(delayed_body)) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); future::join(res, rx2).await.0.unwrap(); future::poll_fn(|ctx| { assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); Poll::Ready(()) }) .await; assert_eq!(connects.load(Ordering::Relaxed), 1); drop(client); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn client_keep_alive_eager_when_chunked() { // If a response 
body has been read to completion, with completion // determined by some other factor, like decompression, and thus // it is in't polled a final time to clear the final 0-len chunk, // try to eagerly clear it so the connection can still be used. let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all( b"\ HTTP/1.1 200 OK\r\n\ transfer-encoding: chunked\r\n\ \r\n\ 5\r\n\ hello\r\n\ 0\r\n\r\n\ ", ) .expect("write 1"); let _ = tx1.send(()); let n2 = sock.read(&mut buf).expect("read 2"); assert_ne!(n2, 0, "bytes of second request"); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let fut = client.request(req); let resp = future::join(fut, rx).map(|r| r.0).await.unwrap(); assert_eq!(connects.load(Ordering::SeqCst), 1); assert_eq!(resp.status(), 200); assert_eq!(resp.headers()["transfer-encoding"], "chunked"); // Read the "hello" chunk... 
let chunk = resp.collect().await.unwrap().to_bytes(); assert_eq!(chunk, "hello"); // sleep real quick to let the threadpool put connection in ready // state and back into client pool tokio::time::sleep(Duration::from_millis(50)).await; let rx = rx2; let req = Request::builder() .uri(&*format!("http://{}/b", addr)) .body(Empty::::new()) .unwrap(); let fut = client.request(req); future::join(fut, rx).map(|r| r.0).await.unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 1, "second request should still only have 1 connect" ); drop(client); } #[cfg(not(miri))] #[test] fn connect_proxy_sends_absolute_uri() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new().proxy(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; let n = sock.read(&mut buf).expect("read 1"); let expected = format!( "GET http://{addr}/foo/bar HTTP/1.1\r\nhost: {addr}\r\n\r\n", addr = addr ); assert_eq!(s(&buf[..n]), expected); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); }); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{}/foo/bar", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); } #[cfg(not(miri))] #[test] fn connect_proxy_http_connect_sends_authority_form() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new().proxy(); let client = 
Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; let n = sock.read(&mut buf).expect("read 1"); let expected = format!( "CONNECT {addr} HTTP/1.1\r\nhost: {addr}\r\n\r\n", addr = addr ); assert_eq!(s(&buf[..n]), expected); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); }); let rx = rx1; let req = Request::builder() .method("CONNECT") .uri(&*format!("http://{}/useless/path", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); } #[cfg(not(miri))] #[test] fn client_upgrade() { use tokio::io::{AsyncReadExt, AsyncWriteExt}; let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all( b"\ HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: foobar\r\n\ \r\n\ foobar=ready\ ", ) .unwrap(); let _ = tx1.send(()); let n = sock.read(&mut buf).expect("read 2"); assert_eq!(&buf[..n], b"foo=bar"); sock.write_all(b"bar=foo").expect("write 2"); }); let rx = rx1; let req = Request::builder() .method("GET") .uri(&*format!("http://{}/up", addr)) .body(Empty::::new()) .unwrap(); let res = client.request(req); let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); 
assert_eq!(res.status(), 101); let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade"); let parts = upgraded.downcast::().unwrap(); assert_eq!(s(&parts.read_buf), "foobar=ready"); let mut io = parts.io; rt.block_on(io.write_all(b"foo=bar")).unwrap(); let mut vec = vec![]; rt.block_on(io.read_to_end(&mut vec)).unwrap(); assert_eq!(vec, b"bar=foo"); } #[cfg(not(miri))] #[test] fn alpn_h2() { use http::Response; use hyper::service::service_fn; use tokio::net::TcpListener; let _ = pretty_env_logger::try_init(); let rt = runtime(); let listener = rt .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))) .unwrap(); let addr = listener.local_addr().unwrap(); let mut connector = DebugConnector::new(); connector.alpn_h2 = true; let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); rt.spawn(async move { let (stream, _) = listener.accept().await.expect("accept"); let stream = TokioIo::new(stream); let _ = hyper::server::conn::http2::Builder::new(TokioExecutor::new()) .serve_connection( stream, service_fn(|req| async move { assert_eq!(req.headers().get("host"), None); Ok::<_, hyper::Error>(Response::new(Full::::from("Hello, world"))) }), ) .await .expect("server"); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let url = format!("http://{}/a", addr) .parse::<::hyper::Uri>() .unwrap(); let res1 = client.get(url.clone()); let res2 = client.get(url.clone()); let res3 = client.get(url.clone()); rt.block_on(future::try_join3(res1, res2, res3)).unwrap(); // Since the client doesn't know it can ALPN at first, it will have // started 3 connections. But, the server above will only handle 1, // so the unwrapped responses futures show it still worked. 
assert_eq!(connects.load(Ordering::SeqCst), 3); let res4 = client.get(url.clone()); rt.block_on(res4).unwrap(); // HTTP/2 request allowed let res5 = client.request( Request::builder() .uri(url) .version(hyper::Version::HTTP_2) .body(Empty::::new()) .unwrap(), ); rt.block_on(res5).unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 3, "after ALPN, no more connects" ); drop(client); } #[cfg(not(miri))] #[test] fn capture_connection_on_client() { let _ = pretty_env_logger::try_init(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); }); let mut req = Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap(); let captured_conn = capture_connection(&mut req); rt.block_on(client.request(req)).expect("200 OK"); assert!(captured_conn.connection_metadata().is_some()); } #[cfg(not(miri))] #[test] fn connection_poisoning() { use std::sync::atomic::AtomicUsize; let _ = pretty_env_logger::try_init(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let num_conns: Arc = Default::default(); let num_requests: Arc = Default::default(); let num_requests_tracker = num_requests.clone(); let num_conns_tracker = num_conns.clone(); thread::spawn(move || loop { let mut sock = server.accept().unwrap().0; num_conns_tracker.fetch_add(1, Ordering::Relaxed); let num_requests_tracker 
= num_requests_tracker.clone(); thread::spawn(move || { sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; loop { if sock.read(&mut buf).expect("read 1") > 0 { num_requests_tracker.fetch_add(1, Ordering::Relaxed); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); } } }); }); let make_request = || { Request::builder() .uri(&*format!("http://{}/a", addr)) .body(Empty::::new()) .unwrap() }; let mut req = make_request(); let captured_conn = capture_connection(&mut req); rt.block_on(client.request(req)).expect("200 OK"); assert_eq!(num_conns.load(Ordering::SeqCst), 1); assert_eq!(num_requests.load(Ordering::SeqCst), 1); rt.block_on(client.request(make_request())).expect("200 OK"); rt.block_on(client.request(make_request())).expect("200 OK"); // Before poisoning the connection is reused assert_eq!(num_conns.load(Ordering::SeqCst), 1); assert_eq!(num_requests.load(Ordering::SeqCst), 3); captured_conn .connection_metadata() .as_ref() .unwrap() .poison(); rt.block_on(client.request(make_request())).expect("200 OK"); // After poisoning, a new connection is established assert_eq!(num_conns.load(Ordering::SeqCst), 2); assert_eq!(num_requests.load(Ordering::SeqCst), 4); rt.block_on(client.request(make_request())).expect("200 OK"); // another request can still reuse: assert_eq!(num_conns.load(Ordering::SeqCst), 2); assert_eq!(num_requests.load(Ordering::SeqCst), 5); } hyper-util-0.1.10/tests/test_utils/mod.rs000064400000000000000000000113161046102023000164750ustar 00000000000000use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use futures_channel::mpsc; use futures_util::task::{Context, Poll}; use futures_util::Future; use futures_util::TryFutureExt; use hyper::Uri; use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; use hyper::rt::ReadBufCursor; use 
hyper_util::client::legacy::connect::HttpConnector; use hyper_util::client::legacy::connect::{Connected, Connection}; use hyper_util::rt::TokioIo; #[derive(Clone)] pub struct DebugConnector { pub http: HttpConnector, pub closes: mpsc::Sender<()>, pub connects: Arc, pub is_proxy: bool, pub alpn_h2: bool, } impl DebugConnector { pub fn new() -> DebugConnector { let http = HttpConnector::new(); let (tx, _) = mpsc::channel(10); DebugConnector::with_http_and_closes(http, tx) } pub fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector { DebugConnector { http, closes, connects: Arc::new(AtomicUsize::new(0)), is_proxy: false, alpn_h2: false, } } pub fn proxy(mut self) -> Self { self.is_proxy = true; self } } impl tower_service::Service for DebugConnector { type Response = DebugStream; type Error = >::Error; type Future = Pin> + Send>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { // don't forget to check inner service is ready :) tower_service::Service::::poll_ready(&mut self.http, cx) } fn call(&mut self, dst: Uri) -> Self::Future { self.connects.fetch_add(1, Ordering::SeqCst); let closes = self.closes.clone(); let is_proxy = self.is_proxy; let is_alpn_h2 = self.alpn_h2; Box::pin(self.http.call(dst).map_ok(move |tcp| DebugStream { tcp, on_drop: closes, is_alpn_h2, is_proxy, })) } } pub struct DebugStream { tcp: TokioIo, on_drop: mpsc::Sender<()>, is_alpn_h2: bool, is_proxy: bool, } impl Drop for DebugStream { fn drop(&mut self) { let _ = self.on_drop.try_send(()); } } impl Connection for DebugStream { fn connected(&self) -> Connected { let connected = self.tcp.connected().proxy(self.is_proxy); if self.is_alpn_h2 { connected.negotiated_h2() } else { connected } } } impl hyper::rt::Read for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: ReadBufCursor<'_>, ) -> Poll> { hyper::rt::Read::poll_read(Pin::new(&mut self.tcp), cx, buf) } } impl hyper::rt::Write for DebugStream { fn poll_write( mut 
self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { hyper::rt::Write::poll_write(Pin::new(&mut self.tcp), cx, buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_flush(Pin::new(&mut self.tcp), cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_shutdown(Pin::new(&mut self.tcp), cx) } fn is_write_vectored(&self) -> bool { hyper::rt::Write::is_write_vectored(&self.tcp) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { hyper::rt::Write::poll_write_vectored(Pin::new(&mut self.tcp), cx, bufs) } } impl AsyncWrite for DebugStream { fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_shutdown(cx) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_flush(cx) } fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_write(cx, buf) } } impl AsyncRead for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_read(cx, buf) } }