quinn-0.11.6/.cargo_vcs_info.json:

{
  "git": {
    "sha1": "d23e4e494f7446e21184bf58acd17a861ae73bba"
  },
  "path_in_vcs": "quinn"
}

quinn-0.11.6/Cargo.lock:

# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]] name = "addr2line" version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] name = "adler2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "anstream" version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", "windows-sys 0.59.0", ] [[package]] name = "anyhow" version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "async-channel" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", "event-listener 2.5.3", "futures-core", ] [[package]] name = "async-channel" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", "event-listener-strategy", "futures-core", "pin-project-lite", ] [[package]] name = "async-executor" version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", "slab", ] [[package]] name = "async-fs" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ "async-lock", "blocking", "futures-lite", ] [[package]] name = "async-global-executor" version = 
"2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", "async-io", "async-lock", "blocking", "futures-lite", "once_cell", ] [[package]] name = "async-io" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", "cfg-if", "concurrent-queue", "futures-io", "futures-lite", "parking", "polling", "rustix", "slab", "tracing", "windows-sys 0.59.0", ] [[package]] name = "async-lock" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ "event-listener 5.3.1", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-net" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" dependencies = [ "async-io", "blocking", "futures-lite", ] [[package]] name = "async-process" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", "async-io", "async-lock", "async-signal", "async-task", "blocking", "cfg-if", "event-listener 5.3.1", "futures-lite", "rustix", "tracing", ] [[package]] name = "async-signal" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ "async-io", "async-lock", "atomic-waker", "cfg-if", "futures-core", "futures-io", "rustix", "signal-hook-registry", "slab", "windows-sys 0.59.0", ] [[package]] name = "async-std" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-channel 1.9.0", "async-global-executor", "async-io", "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", "futures-lite", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", ] [[package]] name = "async-task" version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-fips-sys" version = "0.12.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf12b67bc9c5168f68655aadb2a12081689a58f1d9b1484705e4d1810ed6e4ac" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", "libc", "paste", ] [[package]] name = "aws-lc-rs" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" dependencies = [ "aws-lc-fips-sys", "aws-lc-sys", "mirai-annotations", "paste", "zeroize", ] 
[[package]] name = "aws-lc-sys" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", "libc", "paste", ] [[package]] name = "backtrace" version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "windows-targets", ] [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bencher" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" [[package]] name = "bindgen" version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags", "cexpr", "clang-sys", "itertools", "lazy_static", "lazycell", "log", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", "syn", "which", ] [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "blocking" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", "futures-lite", "piper", ] [[package]] name = "bumpalo" version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" [[package]] name = "cc" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" dependencies = [ "jobserver", "libc", "shlex", ] [[package]] name = "cesu8" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" [[package]] name = "cexpr" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "clang-sys" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", "libloading", ] [[package]] name = "clap" version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", ] [[package]] name = "clap_builder" version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", "syn", ] [[package]] name = "clap_lex" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] [[package]] name = "colorchoice" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "memchr", ] [[package]] name = "concurrent-queue" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "core-foundation" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "crc" version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crossbeam-utils" version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "deranged" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] [[package]] name = "directories-next" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ "cfg-if", "dirs-sys-next", ] [[package]] name = "dirs-sys-next" version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users", "winapi", ] [[package]] name = "displaydoc" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "dunce" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "errno" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "event-listener" version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", "pin-project-lite", ] [[package]] name = "event-listener-strategy" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.3.1", "pin-project-lite", ] [[package]] name = "fastrand" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "form_urlencoded" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "fs_extra" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "futures-channel" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-io" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ "fastrand", "futures-core", "futures-io", "parking", "pin-project-lite", ] [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", "libc", "wasi", 
"wasm-bindgen", ] [[package]] name = "gimli" version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", "js-sys", "wasm-bindgen", ] [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "home" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "icu_collections" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" dependencies = [ "displaydoc", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_locid" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" dependencies = [ "displaydoc", "litemap", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_locid_transform" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" dependencies = [ "displaydoc", "icu_locid", "icu_locid_transform_data", "icu_provider", "tinystr", "zerovec", ] [[package]] name = "icu_locid_transform_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" [[package]] name = "icu_normalizer" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" dependencies = [ "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", "utf16_iter", "utf8_iter", "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" [[package]] name = "icu_properties" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" dependencies = [ "displaydoc", "icu_collections", "icu_locid_transform", "icu_properties_data", "icu_provider", "tinystr", "zerovec", ] [[package]] name = "icu_properties_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" [[package]] name = "icu_provider" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" dependencies = [ "displaydoc", "icu_locid", "icu_provider_macros", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_provider_macros" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "idna" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ "idna_adapter", "smallvec", "utf8_iter", ] [[package]] name = "idna_adapter" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ "icu_normalizer", "icu_properties", ] [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jni" version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" dependencies = [ "cesu8", "combine", "jni-sys", "log", "thiserror 1.0.69", "walkdir", ] [[package]] name = "jni-sys" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] [[package]] name = "kv-log-macro" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ "log", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" version = "0.8.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets", ] [[package]] name = "libredox" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags", "libc", ] [[package]] name = "linux-raw-sys" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" [[package]] name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "value-bag", ] [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata 0.1.10", ] [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ "adler2", ] [[package]] name = "mio" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", "wasi", "windows-sys 0.52.0", ] [[package]] name = "mirai-annotations" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" [[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ "overload", "winapi", ] [[package]] name = "num-bigint" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", ] [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ "num-traits", ] [[package]] name = "num-traits" version = "0.2.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "num_threads" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] [[package]] name = "object" version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "paste" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ "base64", "serde", ] [[package]] name = "percent-encoding" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "pin-project-lite" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", "fastrand", "futures-io", ] [[package]] name = "polling" version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", "rustix", "tracing", "windows-sys 0.59.0", ] [[package]] name = "powerfmt" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ "zerocopy", ] [[package]] name = "prettyplease" version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", "syn", ] [[package]] name = "proc-macro2" version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] [[package]] name = "quinn" version = "0.11.6" dependencies = [ "anyhow", "async-io", "async-std", "bencher", "bytes", "clap", "crc", "directories-next", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rand", "rcgen", "rustc-hash 2.0.0", "rustls", "rustls-pemfile", "smol", "socket2", "thiserror 2.0.3", "tokio", "tracing", "tracing-futures", "tracing-subscriber", "url", ] [[package]] name = "quinn-proto" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "aws-lc-rs", "bytes", "getrandom", "rand", "ring", "rustc-hash 2.0.0", "rustls", "rustls-pki-types", "rustls-platform-verifier", "slab", "thiserror 2.0.3", "tinyvec", "tracing", "web-time", ] [[package]] name = "quinn-udp" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ "cfg_aliases", "libc", "once_cell", "socket2", "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rcgen" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", "ring", "rustls-pki-types", "time", "yasna", ] [[package]] name = "redox_users" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", "thiserror 1.0.69", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata 
0.4.9", "regex-syntax 0.8.5", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] [[package]] name = "regex-automata" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", "regex-syntax 0.8.5", ] [[package]] name = "regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ring" version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", "getrandom", "libc", "spin", "untrusted", "windows-sys 0.52.0", ] [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustix" version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rustls" version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ "rustls-pki-types", ] [[package]] name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" dependencies = [ "web-time", ] [[package]] name = "rustls-platform-verifier" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c7dc240fec5517e6c4eab3310438636cfe6391dfc345ba013109909a90d136" dependencies = [ "core-foundation", "core-foundation-sys", "jni", "log", "once_cell", "rustls", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", 
"security-framework", "security-framework-sys", "webpki-root-certs", "windows-sys 0.52.0", ] [[package]] name = "rustls-platform-verifier-android" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", ] [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "schannel" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "security-framework" version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags", "core-foundation", "core-foundation-sys", "libc", "num-bigint", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "serde" version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sharded-slab" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ "async-channel 2.3.1", "async-executor", "async-fs", "async-io", "async-lock", "async-net", "async-process", "blocking", "futures-lite", ] [[package]] name = "socket2" version = "0.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "synstructure" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thiserror" version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ "thiserror-impl 2.0.3", ] [[package]] name = "thiserror-impl" version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thiserror-impl" version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thread_local" version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "time" version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", "libc", "num-conv", "num_threads", "powerfmt", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", ] [[package]] name = "tinystr" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" dependencies = [ "displaydoc", "zerovec", ] [[package]] name = "tinyvec" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "libc", "mio", "pin-project-lite", "socket2", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] [[package]] name = "tracing-futures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ "pin-project", "tracing", ] [[package]] name = "tracing-subscriber" version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", "thread_local", "time", "tracing", "tracing-core", ] [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "untrusted" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "utf16_iter" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "value-bag" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "web-time" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki-root-certs" version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c6dfa3ac045bc517de14c7b1384298de1dbd229d38e08e169d9ae8c170937c" dependencies = [ "rustls-pki-types", ] [[package]] name = "which" version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", "home", "once_cell", "rustix", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "write16" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "yasna" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ "time", ] [[package]] name = "yoke" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" dependencies = [ "serde", "stable_deref_trait", "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerofrom" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zerovec" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", "zerovec-derive", ] [[package]] name = "zerovec-derive" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", "syn", ] quinn-0.11.6/Cargo.toml0000644000000102450000000000100102530ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.70.0" name = "quinn" version = "0.11.6" build = false autobins = false autoexamples = false autotests = false autobenches = false description = "Versatile QUIC transport protocol implementation" readme = "README.md" keywords = ["quic"] categories = [ "network-programming", "asynchronous", ] license = "MIT OR Apache-2.0" repository = "https://github.com/quinn-rs/quinn" [package.metadata.docs.rs] all-features = true [lib] name = "quinn" path = "src/lib.rs" [[example]] name = "client" path = "examples/client.rs" required-features = ["rustls-ring"] [[example]] name = "connection" path = "examples/connection.rs" required-features = ["rustls-ring"] [[example]] name = "insecure_connection" path = "examples/insecure_connection.rs" required-features = ["rustls-ring"] [[example]] name = "server" path = "examples/server.rs" required-features = ["rustls-ring"] [[example]] name = "single_socket" path = "examples/single_socket.rs" required-features = ["rustls-ring"] [[test]] name = "many_connections" path = "tests/many_connections.rs" [[bench]] name = "bench" path = "benches/bench.rs" harness = false required-features = ["rustls-ring"] [dependencies.async-io] version = "2" optional = true [dependencies.async-std] version = "1.11" optional = true [dependencies.bytes] version = "1" [dependencies.futures-io] version = "0.3.19" optional = true [dependencies.pin-project-lite] version = "0.2" [dependencies.proto] version = "0.11.7" default-features = false package = "quinn-proto" [dependencies.rustc-hash] version = "2" [dependencies.rustls] version = "0.23.5" features = ["std"] optional = true default-features = false [dependencies.smol] version = "2" optional = true [dependencies.socket2] version = "0.5" [dependencies.thiserror] version = "2.0.3" [dependencies.tokio] version = "1.28.1" features = ["sync"] [dependencies.tracing] version = "0.1.10" features = ["std"] default-features = false [dependencies.udp] version = "0.5" features = ["tracing"] default-features = false package = "quinn-udp" [dev-dependencies.anyhow] version = "1.0.22" [dev-dependencies.bencher] version = "0.1.5" [dev-dependencies.clap] version = "4" features = ["derive"] [dev-dependencies.crc] version = "3" [dev-dependencies.directories-next] version = "2" [dev-dependencies.rand] version = "0.8" [dev-dependencies.rcgen] version = "0.13" [dev-dependencies.rustls-pemfile] version = "2" [dev-dependencies.tokio] version = "1.28.1" features = [ "sync", "rt", "rt-multi-thread", "time", "macros", ] [dev-dependencies.tracing-futures] version = "0.2.0" features = ["std-future"] default-features = false [dev-dependencies.tracing-subscriber] version = "0.3.0" features = [ "env-filter", "fmt", "ansi", "time", "local-time", ] default-features = false [dev-dependencies.url] version = "2" [features] aws-lc-rs = ["proto/aws-lc-rs"] aws-lc-rs-fips = ["proto/aws-lc-rs-fips"] default = [ "log", "platform-verifier", "runtime-tokio", "rustls-ring", ] lock_tracking = [] log = [ "tracing/log", "proto/log", "udp/log", ] platform-verifier = ["proto/platform-verifier"] ring = ["proto/ring"] runtime-async-std = [ "async-io", "async-std", ] runtime-smol = [ "async-io", "smol", ] runtime-tokio = [ "tokio/time", "tokio/rt", "tokio/net", ] rustls = ["rustls-ring"] rustls-aws-lc-rs = [ "dep:rustls", "aws-lc-rs", "proto/rustls-aws-lc-rs", "proto/aws-lc-rs", ] rustls-aws-lc-rs-fips = [ "dep:rustls", "aws-lc-rs-fips", "proto/rustls-aws-lc-rs-fips", "proto/aws-lc-rs-fips", ] rustls-log = ["rustls?/logging"] rustls-ring = [ 
"dep:rustls", "ring", "proto/rustls-ring", "proto/ring", ] quinn-0.11.6/Cargo.toml.orig000064400000000000000000000064141046102023000137370ustar 00000000000000[package] name = "quinn" version = "0.11.6" license.workspace = true repository.workspace = true description = "Versatile QUIC transport protocol implementation" readme = "../README.md" keywords.workspace = true categories.workspace = true workspace = ".." edition.workspace = true rust-version.workspace = true [package.metadata.docs.rs] all-features = true [features] default = ["log", "platform-verifier", "runtime-tokio", "rustls-ring"] # Enables `Endpoint::client` and `Endpoint::server` conveniences aws-lc-rs = ["proto/aws-lc-rs"] aws-lc-rs-fips = ["proto/aws-lc-rs-fips"] # Records how long locks are held, and warns if they are held >= 1ms lock_tracking = [] # Provides `ClientConfig::with_platform_verifier()` convenience method platform-verifier = ["proto/platform-verifier"] # For backwards compatibility, `rustls` forwards to `rustls-ring` rustls = ["rustls-ring"] # Enable rustls with the `aws-lc-rs` crypto provider rustls-aws-lc-rs = ["dep:rustls", "aws-lc-rs", "proto/rustls-aws-lc-rs", "proto/aws-lc-rs"] rustls-aws-lc-rs-fips = ["dep:rustls", "aws-lc-rs-fips", "proto/rustls-aws-lc-rs-fips", "proto/aws-lc-rs-fips"] # Enable rustls with the `ring` crypto provider rustls-ring = ["dep:rustls", "ring", "proto/rustls-ring", "proto/ring"] # Enables `Endpoint::client` and `Endpoint::server` conveniences ring = ["proto/ring"] runtime-tokio = ["tokio/time", "tokio/rt", "tokio/net"] runtime-async-std = ["async-io", "async-std"] runtime-smol = ["async-io", "smol"] # Configure `tracing` to log events via `log` if no `tracing` subscriber exists. log = ["tracing/log", "proto/log", "udp/log"] # Enable rustls logging rustls-log = ["rustls?/logging"] [dependencies] async-io = { workspace = true, optional = true } async-std = { workspace = true, optional = true } bytes = { workspace = true } # Enables futures::io::{AsyncRead, AsyncWrite} support for streams futures-io = { workspace = true, optional = true } rustc-hash = { workspace = true } pin-project-lite = { workspace = true } proto = { package = "quinn-proto", path = "../quinn-proto", version = "0.11.7", default-features = false } rustls = { workspace = true, optional = true } smol = { workspace = true, optional = true } socket2 = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } udp = { package = "quinn-udp", path = "../quinn-udp", version = "0.5", default-features = false, features = ["tracing"] } [dev-dependencies] anyhow = { workspace = true } crc = { workspace = true } bencher = { workspace = true } directories-next = { workspace = true } rand = { workspace = true } rcgen = { workspace = true } rustls-pemfile = { workspace = true } clap = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] } tracing-subscriber = { workspace = true } tracing-futures = { workspace = true } url = { workspace = true } [[example]] name = "server" required-features = ["rustls-ring"] [[example]] name = "client" required-features = ["rustls-ring"] [[example]] name = "insecure_connection" required-features = ["rustls-ring"] [[example]] name = "single_socket" required-features = ["rustls-ring"] [[example]] name = "connection" required-features = ["rustls-ring"] [[bench]] name = "bench" harness = false required-features = ["rustls-ring"] 
quinn-0.11.6/LICENSE-APACHE000064400000000000000000000261351046102023000127760ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. quinn-0.11.6/LICENSE-MIT000064400000000000000000000020501046102023000124740ustar 00000000000000Copyright (c) 2018 The quinn Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. quinn-0.11.6/README.md000064400000000000000000000141171046102023000123260ustar 00000000000000

[![Documentation](https://docs.rs/quinn/badge.svg)](https://docs.rs/quinn/) [![Crates.io](https://img.shields.io/crates/v/quinn.svg)](https://crates.io/crates/quinn) [![Build status](https://github.com/quinn-rs/quinn/workflows/CI/badge.svg)](https://github.com/djc/quinn/actions?query=workflow%3ACI) [![codecov](https://codecov.io/gh/quinn-rs/quinn/branch/main/graph/badge.svg)](https://codecov.io/gh/quinn-rs/quinn) [![Chat](https://img.shields.io/badge/chat-%23quinn:matrix.org-%2346BC99?logo=matrix)](https://matrix.to/#/#quinn:matrix.org) [![Chat](https://img.shields.io/discord/976380008299917365?logo=discord)](https://discord.gg/SGPEcDfVzh) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT) [![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE-APACHE) Quinn is a pure-Rust, async-compatible implementation of the IETF [QUIC][quic] transport protocol. The project was founded by [Dirkjan Ochtman](https://github.com/djc) and [Benjamin Saunders](https://github.com/Ralith) as a side project in 2018, and has seen more than 30 releases since then. If you're using Quinn in a commercial setting, please consider [sponsoring](https://opencollective.com/quinn-rs) the project. ## Features - Simultaneous client/server operation - Ordered and unordered stream reads for improved performance - Works on stable Rust, tested on Linux, macOS and Windows - Pluggable cryptography, with a standard implementation backed by [rustls][rustls] and [*ring*][ring] - Application-layer datagrams for small, unreliable messages - Future-based async API - Minimum supported Rust version of 1.66 ## Overview - **quinn:** High-level async API based on tokio, see [examples][examples] for usage. This will be used by most developers. (Basic benchmarks are included.) - **quinn-proto:** Deterministic state machine of the protocol which performs [**no** I/O][sans-io] internally and is suitable for use with custom event loops (and potentially a C or C++ API). - **quinn-udp:** UDP sockets with ECN information tuned for the protocol. - **bench:** Benchmarks without any framework. - **fuzz:** Fuzz tests. # Getting Started **Examples** ```sh $ cargo run --example server ./ $ cargo run --example client https://localhost:4433/Cargo.toml ``` This launches an HTTP 0.9 server on the loopback address serving the current working directory, with the client fetching `./Cargo.toml`. By default, the server generates a self-signed certificate and stores it to disk, where the client will automatically find and trust it. **Links** - Talk at [RustFest Paris (May 2018) presentation][talk]; [slides][slides]; [YouTube][youtube] - Usage [examples][examples] - Guide [book][documentation] ## Usage Notes
Click to show the notes ### Buffers A Quinn endpoint corresponds to a single UDP socket, no matter how many connections are in use. Handling high aggregate data rates on a single endpoint can require a larger UDP buffer than is configured by default in most environments. If you observe erratic latency and/or throughput over a stable network link, consider increasing the buffer sizes used. For example, you could adjust the `SO_SNDBUF` and `SO_RCVBUF` options of the UDP socket to be used before passing it in to Quinn. Note that some platforms (e.g. Linux) require elevated privileges or modified system configuration for a process to increase its UDP buffer sizes. ### Certificates By default, Quinn clients validate the cryptographic identity of servers they connect to. This prevents an active, on-path attacker from intercepting messages, but requires trusting some certificate authority. For many purposes, this can be accomplished by using certificates from [Let's Encrypt][letsencrypt] for servers, and relying on the default configuration for clients. For some cases, including peer-to-peer, trust-on-first-use, deliberately insecure applications, or any case where servers are not identified by domain name, this isn't practical. Arbitrary certificate validation logic can be implemented by enabling the `dangerous_configuration` feature of `rustls` and constructing a Quinn `ClientConfig` with an overridden certificate verifier by hand. When operating your own certificate authority doesn't make sense, [rcgen][rcgen] can be used to generate self-signed certificates on demand. To support trust-on-first-use, servers that automatically generate self-signed certificates should write their generated certificate to persistent storage and reuse it on future runs.
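For the self-signed, trust-on-first-use setup described above, a minimal sketch using [rcgen][rcgen] might look like the following. It mirrors what the bundled examples do, assumes `rcgen` and `rustls` are available as dependencies, and omits persisting the certificate to disk:

```rust
use quinn::ServerConfig;
use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer};

/// Build a server config with a freshly generated self-signed certificate.
/// For trust-on-first-use, persist the returned certificate (and its key)
/// instead of regenerating them on every run.
fn self_signed_server_config(
) -> Result<(ServerConfig, CertificateDer<'static>), Box<dyn std::error::Error>> {
    let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()])?;
    let cert_der = CertificateDer::from(cert.cert);
    let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der());
    let server_config = ServerConfig::with_single_cert(vec![cert_der.clone()], key.into())?;
    Ok((server_config, cert_der))
}
```

A client can then trust that certificate by adding it to a `rustls::RootCertStore` and building a `ClientConfig` from it, as the `examples/common` module in this crate does.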

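Returning to the buffer sizes discussed in the Buffers note above: one way to enlarge them is to create the UDP socket yourself, for example with the `socket2` crate, and hand it to Quinn. The sketch below is illustrative only; it assumes a Tokio runtime is already running, the 8 MiB figure is arbitrary, and the operating system may clamp or ignore the requested sizes (on Linux, see `net.core.rmem_max` and `net.core.wmem_max`):

```rust
use std::{net::UdpSocket, sync::Arc};

use quinn::{Endpoint, TokioRuntime};
use socket2::{Domain, Protocol, Socket, Type};

/// Create a client-only endpoint on a socket with enlarged kernel buffers.
/// Must be called from within a Tokio runtime context.
fn endpoint_with_large_buffers() -> Result<Endpoint, Box<dyn std::error::Error>> {
    let socket = Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))?;
    // Request larger buffers before Quinn starts using the socket.
    socket.set_send_buffer_size(8 * 1024 * 1024)?;
    socket.set_recv_buffer_size(8 * 1024 * 1024)?;
    socket.bind(&"[::]:0".parse::<std::net::SocketAddr>()?.into())?;

    // `None` for the server config makes this a client-only endpoint.
    Ok(Endpoint::new(
        Default::default(),
        None,
        UdpSocket::from(socket),
        Arc::new(TokioRuntime),
    )?)
}
```

Whether larger buffers actually help depends on the platform and the link, so it is worth measuring throughput and latency before and after the change.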
## Contribution All feedback welcome. Feel free to file bugs, requests for documentation and any other feedback to the [issue tracker][issues]. The quinn-proto test suite uses simulated IO for reproducibility and to avoid long sleeps in certain timing-sensitive tests. If the `SSLKEYLOGFILE` environment variable is set, the tests will emit UDP packets for inspection using external protocol analyzers like Wireshark, and NSS-compatible key logs for the client side of each connection will be written to the path specified in the variable. The minimum supported Rust version for published releases of our crates will always be at least 6 months old at the time of release. [quic]: https://quicwg.github.io/ [issues]: https://github.com/djc/quinn/issues [rustls]: https://github.com/ctz/rustls [ring]: https://github.com/briansmith/ring [talk]: https://paris.rustfest.eu/sessions/a-quic-future-in-rust [slides]: https://github.com/djc/talks/blob/ff760845b51ba4836cce82e7f2c640ecb5fd59fa/2018-05-26%20A%20QUIC%20future%20in%20Rust/Quinn-Speaker.pdf [animation]: https://dirkjan.ochtman.nl/files/head-of-line-blocking.html [youtube]: https://www.youtube.com/watch?v=EHgyY5DNdvI [letsencrypt]: https://letsencrypt.org/ [rcgen]: https://crates.io/crates/rcgen [examples]: https://github.com/djc/quinn/tree/main/quinn/examples [documentation]: https://quinn-rs.github.io/quinn/networking-introduction.html [sans-io]: https://sans-io.readthedocs.io/how-to-sans-io.html quinn-0.11.6/benches/bench.rs000064400000000000000000000122331046102023000141000ustar 00000000000000use std::{ net::{IpAddr, Ipv6Addr, SocketAddr, UdpSocket}, sync::Arc, thread, }; use bencher::{benchmark_group, benchmark_main, Bencher}; use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; use tokio::runtime::{Builder, Runtime}; use tracing::error_span; use tracing_futures::Instrument as _; use quinn::{Endpoint, TokioRuntime}; benchmark_group!( benches, large_data_1_stream, large_data_10_streams, small_data_1_stream, small_data_100_streams ); benchmark_main!(benches); fn large_data_1_stream(bench: &mut Bencher) { send_data(bench, LARGE_DATA, 1); } fn large_data_10_streams(bench: &mut Bencher) { send_data(bench, LARGE_DATA, 10); } fn small_data_1_stream(bench: &mut Bencher) { send_data(bench, SMALL_DATA, 1); } fn small_data_100_streams(bench: &mut Bencher) { send_data(bench, SMALL_DATA, 100); } fn send_data(bench: &mut Bencher, data: &'static [u8], concurrent_streams: usize) { let _ = tracing_subscriber::fmt::try_init(); let ctx = Context::new(); let (addr, thread) = ctx.spawn_server(); let (endpoint, client, runtime) = ctx.make_client(addr); let client = Arc::new(client); bench.bytes = (data.len() as u64) * (concurrent_streams as u64); bench.iter(|| { let mut handles = Vec::new(); for _ in 0..concurrent_streams { let client = client.clone(); handles.push(runtime.spawn(async move { let mut stream = client.open_uni().await.unwrap(); stream.write_all(data).await.unwrap(); stream.finish().unwrap(); // Wait for stream to close _ = stream.stopped().await; })); } runtime.block_on(async { for handle in handles { handle.await.unwrap(); } }); }); drop(client); runtime.block_on(endpoint.wait_idle()); thread.join().unwrap() } struct Context { server_config: quinn::ServerConfig, client_config: quinn::ClientConfig, } impl Context { fn new() -> Self { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); let cert = CertificateDer::from(cert.cert); let mut server_config 
= quinn::ServerConfig::with_single_cert(vec![cert.clone()], key.into()).unwrap(); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(1024_u16.into()); let mut roots = rustls::RootCertStore::empty(); roots.add(cert).unwrap(); Self { server_config, client_config: quinn::ClientConfig::with_root_certificates(Arc::new(roots)).unwrap(), } } pub fn spawn_server(&self) -> (SocketAddr, thread::JoinHandle<()>) { let sock = UdpSocket::bind(SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0)).unwrap(); let addr = sock.local_addr().unwrap(); let config = self.server_config.clone(); let handle = thread::spawn(move || { let runtime = rt(); let endpoint = { let _guard = runtime.enter(); Endpoint::new( Default::default(), Some(config), sock, Arc::new(TokioRuntime), ) .unwrap() }; let handle = runtime.spawn( async move { let connection = endpoint .accept() .await .expect("accept") .await .expect("connect"); while let Ok(mut stream) = connection.accept_uni().await { tokio::spawn(async move { while stream .read_chunk(usize::MAX, false) .await .unwrap() .is_some() {} }); } } .instrument(error_span!("server")), ); runtime.block_on(handle).unwrap(); }); (addr, handle) } pub fn make_client( &self, server_addr: SocketAddr, ) -> (quinn::Endpoint, quinn::Connection, Runtime) { let runtime = rt(); let endpoint = { let _guard = runtime.enter(); Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0)).unwrap() }; let connection = runtime .block_on(async { endpoint .connect_with(self.client_config.clone(), server_addr, "localhost") .unwrap() .instrument(error_span!("client")) .await }) .unwrap(); (endpoint, connection, runtime) } } fn rt() -> Runtime { Builder::new_current_thread().enable_all().build().unwrap() } const LARGE_DATA: &[u8] = &[0xAB; 1024 * 1024]; const SMALL_DATA: &[u8] = &[0xAB; 1]; quinn-0.11.6/examples/README.md000064400000000000000000000056221046102023000141450ustar 00000000000000## HTTP/0.9 File Serving Example The `server` and `client` examples demonstrate fetching files using a HTTP-like toy protocol. 1. Server (`server.rs`) The server listens for any client requesting a file. If the file path is valid and allowed, it returns the contents. Open up a terminal and execute: ```text $ cargo run --example server ./ ``` 2. Client (`client.rs`) The client requests a file and prints it to the console. If the file is on the server, it will receive the response. In a new terminal execute: ```test $ cargo run --example client https://localhost:4433/Cargo.toml ``` where `Cargo.toml` is any file in the directory passed to the server. **Result:** The output will be the contents of this README. **Troubleshooting:** If the client times out with no activity on the server, try forcing the server to run on IPv4 by running it with `cargo run --example server -- ./ --listen 127.0.0.1:4433`. The server listens on IPv6 by default, `localhost` tends to resolve to IPv4, and support for accepting IPv4 packets on IPv6 sockets varies between platforms. If the client prints `failed to process request: failed reading file`, the request was processed successfully but the path segment of the URL did not correspond to a file in the directory being served. ## Minimal Example The `connection.rs` example intends to use the smallest amount of code to make a simple QUIC connection. The server issues it's own certificate and passes it to the client to trust. 
```text $ cargo run --example connection ``` This example will make a QUIC connection on localhost, and you should see output like: ```text [client] connected: addr=127.0.0.1:5000 [server] connection accepted: addr=127.0.0.1:53712 ``` ## Insecure Connection Example The `insecure_connection.rs` example demonstrates how to make a QUIC connection that ignores the server certificate. ```text $ cargo run --example insecure_connection --features="rustls/dangerous_configuration" ``` ## Single Socket Example You can have multiple QUIC connections over a single UDP socket. This is especially useful, if you are building a peer-to-peer system where you potentially need to communicate with thousands of peers or if you have a [hole punched](https://en.wikipedia.org/wiki/UDP_hole_punching) UDP socket. Additionally, QUIC servers and clients can both operate on the same UDP socket. This example demonstrates how to make multiple outgoing connections on a single UDP socket. ```text $ cargo run --example single_socket ``` The expected output should be something like: ```text [client] connected: addr=127.0.0.1:5000 [server] incoming connection: addr=127.0.0.1:48930 [client] connected: addr=127.0.0.1:5001 [client] connected: addr=127.0.0.1:5002 [server] incoming connection: addr=127.0.0.1:48930 [server] incoming connection: addr=127.0.0.1:48930 ``` Notice how the server sees multiple incoming connections with different IDs coming from the same endpoint. quinn-0.11.6/examples/client.rs000064400000000000000000000121121046102023000145020ustar 00000000000000//! This example demonstrates an HTTP client that requests files from a server. //! //! Checkout the `README.md` for guidance. use std::{ fs, io::{self, Write}, net::{SocketAddr, ToSocketAddrs}, path::PathBuf, sync::Arc, time::{Duration, Instant}, }; use anyhow::{anyhow, Result}; use clap::Parser; use proto::crypto::rustls::QuicClientConfig; use rustls::pki_types::CertificateDer; use tracing::{error, info}; use url::Url; mod common; /// HTTP/0.9 over QUIC client #[derive(Parser, Debug)] #[clap(name = "client")] struct Opt { /// Perform NSS-compatible TLS key logging to the file specified in `SSLKEYLOGFILE`. #[clap(long = "keylog")] keylog: bool, url: Url, /// Override hostname used for certificate verification #[clap(long = "host")] host: Option, /// Custom certificate authority to trust, in DER format #[clap(long = "ca")] ca: Option, /// Simulate NAT rebinding after connecting #[clap(long = "rebind")] rebind: bool, /// Address to bind on #[clap(long = "bind", default_value = "[::]:0")] bind: SocketAddr, } fn main() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let opt = Opt::parse(); let code = { if let Err(e) = run(opt) { eprintln!("ERROR: {e}"); 1 } else { 0 } }; ::std::process::exit(code); } #[tokio::main] async fn run(options: Opt) -> Result<()> { let url = options.url; let url_host = strip_ipv6_brackets(url.host_str().unwrap()); let remote = (url_host, url.port().unwrap_or(4433)) .to_socket_addrs()? 
.next() .ok_or_else(|| anyhow!("couldn't resolve to an address"))?; let mut roots = rustls::RootCertStore::empty(); if let Some(ca_path) = options.ca { roots.add(CertificateDer::from(fs::read(ca_path)?))?; } else { let dirs = directories_next::ProjectDirs::from("org", "quinn", "quinn-examples").unwrap(); match fs::read(dirs.data_local_dir().join("cert.der")) { Ok(cert) => { roots.add(CertificateDer::from(cert))?; } Err(ref e) if e.kind() == io::ErrorKind::NotFound => { info!("local server certificate not found"); } Err(e) => { error!("failed to open local server certificate: {}", e); } } } let mut client_crypto = rustls::ClientConfig::builder() .with_root_certificates(roots) .with_no_client_auth(); client_crypto.alpn_protocols = common::ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); if options.keylog { client_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } let client_config = quinn::ClientConfig::new(Arc::new(QuicClientConfig::try_from(client_crypto)?)); let mut endpoint = quinn::Endpoint::client(options.bind)?; endpoint.set_default_client_config(client_config); let request = format!("GET {}\r\n", url.path()); let start = Instant::now(); let rebind = options.rebind; let host = options.host.as_deref().unwrap_or(url_host); eprintln!("connecting to {host} at {remote}"); let conn = endpoint .connect(remote, host)? .await .map_err(|e| anyhow!("failed to connect: {}", e))?; eprintln!("connected at {:?}", start.elapsed()); let (mut send, mut recv) = conn .open_bi() .await .map_err(|e| anyhow!("failed to open stream: {}", e))?; if rebind { let socket = std::net::UdpSocket::bind("[::]:0").unwrap(); let addr = socket.local_addr().unwrap(); eprintln!("rebinding to {addr}"); endpoint.rebind(socket).expect("rebind failed"); } send.write_all(request.as_bytes()) .await .map_err(|e| anyhow!("failed to send request: {}", e))?; send.finish().unwrap(); let response_start = Instant::now(); eprintln!("request sent at {:?}", response_start - start); let resp = recv .read_to_end(usize::MAX) .await .map_err(|e| anyhow!("failed to read response: {}", e))?; let duration = response_start.elapsed(); eprintln!( "response received in {:?} - {} KiB/s", duration, resp.len() as f32 / (duration_secs(&duration) * 1024.0) ); io::stdout().write_all(&resp).unwrap(); io::stdout().flush().unwrap(); conn.close(0u32.into(), b"done"); // Give the server a fair chance to receive the close packet endpoint.wait_idle().await; Ok(()) } fn strip_ipv6_brackets(host: &str) -> &str { // An ipv6 url looks like eg https://[::1]:4433/Cargo.toml, wherein the host [::1] is the // ipv6 address ::1 wrapped in brackets, per RFC 2732. This strips those. if host.starts_with('[') && host.ends_with(']') { &host[1..host.len() - 1] } else { host } } fn duration_secs(x: &Duration) -> f32 { x.as_secs() as f32 + x.subsec_nanos() as f32 * 1e-9 } quinn-0.11.6/examples/common/mod.rs000064400000000000000000000047611046102023000153060ustar 00000000000000#![cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] //! Commonly used code in most examples. use quinn::{ClientConfig, Endpoint, ServerConfig}; use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; use std::{error::Error, net::SocketAddr, sync::Arc}; /// Constructs a QUIC endpoint configured for use a client only. /// /// ## Args /// /// - server_certs: list of trusted certificates. 
#[allow(unused)] pub fn make_client_endpoint( bind_addr: SocketAddr, server_certs: &[&[u8]], ) -> Result> { let client_cfg = configure_client(server_certs)?; let mut endpoint = Endpoint::client(bind_addr)?; endpoint.set_default_client_config(client_cfg); Ok(endpoint) } /// Constructs a QUIC endpoint configured to listen for incoming connections on a certain address /// and port. /// /// ## Returns /// /// - a stream of incoming QUIC connections /// - server certificate serialized into DER format #[allow(unused)] pub fn make_server_endpoint( bind_addr: SocketAddr, ) -> Result<(Endpoint, CertificateDer<'static>), Box> { let (server_config, server_cert) = configure_server()?; let endpoint = Endpoint::server(server_config, bind_addr)?; Ok((endpoint, server_cert)) } /// Builds default quinn client config and trusts given certificates. /// /// ## Args /// /// - server_certs: a list of trusted certificates in DER format. fn configure_client( server_certs: &[&[u8]], ) -> Result> { let mut certs = rustls::RootCertStore::empty(); for cert in server_certs { certs.add(CertificateDer::from(*cert))?; } Ok(ClientConfig::with_root_certificates(Arc::new(certs))?) } /// Returns default server configuration along with its certificate. fn configure_server( ) -> Result<(ServerConfig, CertificateDer<'static>), Box> { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let cert_der = CertificateDer::from(cert.cert); let priv_key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); let mut server_config = ServerConfig::with_single_cert(vec![cert_der.clone()], priv_key.into())?; let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(0_u8.into()); Ok((server_config, cert_der)) } #[allow(unused)] pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; quinn-0.11.6/examples/connection.rs000064400000000000000000000027261046102023000153750ustar 00000000000000//! This example intends to use the smallest amount of code to make a simple QUIC connection. //! //! Checkout the `README.md` for guidance. use std::{ error::Error, net::{IpAddr, Ipv4Addr, SocketAddr}, }; mod common; use common::{make_client_endpoint, make_server_endpoint}; #[tokio::main] async fn main() -> Result<(), Box> { let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5000); let (endpoint, server_cert) = make_server_endpoint(server_addr)?; // accept a single connection let endpoint2 = endpoint.clone(); tokio::spawn(async move { let incoming_conn = endpoint2.accept().await.unwrap(); let conn = incoming_conn.await.unwrap(); println!( "[server] connection accepted: addr={}", conn.remote_address() ); // Dropping all handles associated with a connection implicitly closes it }); let endpoint = make_client_endpoint("0.0.0.0:0".parse().unwrap(), &[&server_cert])?; // connect to server let connection = endpoint .connect(server_addr, "localhost") .unwrap() .await .unwrap(); println!("[client] connected: addr={}", connection.remote_address()); // Waiting for a stream will complete with an error when the server closes the connection let _ = connection.accept_uni().await; // Make sure the server has a chance to clean up endpoint.wait_idle().await; Ok(()) } quinn-0.11.6/examples/insecure_connection.rs000064400000000000000000000074001046102023000172640ustar 00000000000000//! This example demonstrates how to make a QUIC connection that ignores the server certificate. //! //! Checkout the `README.md` for guidance. 
use std::{ error::Error, net::{IpAddr, Ipv4Addr, SocketAddr}, sync::Arc, }; use proto::crypto::rustls::QuicClientConfig; use quinn::{ClientConfig, Endpoint}; use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; mod common; use common::make_server_endpoint; #[tokio::main] async fn main() -> Result<(), Box> { // server and client are running on the same thread asynchronously let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); tokio::spawn(run_server(addr)); run_client(addr).await?; Ok(()) } /// Runs a QUIC server bound to given address. async fn run_server(addr: SocketAddr) { let (endpoint, _server_cert) = make_server_endpoint(addr).unwrap(); // accept a single connection let incoming_conn = endpoint.accept().await.unwrap(); let conn = incoming_conn.await.unwrap(); println!( "[server] connection accepted: addr={}", conn.remote_address() ); } async fn run_client(server_addr: SocketAddr) -> Result<(), Box> { let mut endpoint = Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0))?; endpoint.set_default_client_config(ClientConfig::new(Arc::new(QuicClientConfig::try_from( rustls::ClientConfig::builder() .dangerous() .with_custom_certificate_verifier(SkipServerVerification::new()) .with_no_client_auth(), )?))); // connect to server let connection = endpoint .connect(server_addr, "localhost") .unwrap() .await .unwrap(); println!("[client] connected: addr={}", connection.remote_address()); // Dropping handles allows the corresponding objects to automatically shut down drop(connection); // Make sure the server has a chance to clean up endpoint.wait_idle().await; Ok(()) } /// Dummy certificate verifier that treats any certificate as valid. /// NOTE, such verification is vulnerable to MITM attacks, but convenient for testing. #[derive(Debug)] struct SkipServerVerification(Arc); impl SkipServerVerification { fn new() -> Arc { Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) } } impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { fn verify_server_cert( &self, _end_entity: &CertificateDer<'_>, _intermediates: &[CertificateDer<'_>], _server_name: &ServerName<'_>, _ocsp: &[u8], _now: UnixTime, ) -> Result { Ok(rustls::client::danger::ServerCertVerified::assertion()) } fn verify_tls12_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { rustls::crypto::verify_tls12_signature( message, cert, dss, &self.0.signature_verification_algorithms, ) } fn verify_tls13_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { rustls::crypto::verify_tls13_signature( message, cert, dss, &self.0.signature_verification_algorithms, ) } fn supported_verify_schemes(&self) -> Vec { self.0.signature_verification_algorithms.supported_schemes() } } quinn-0.11.6/examples/server.rs000064400000000000000000000222751046102023000145450ustar 00000000000000//! This example demonstrates an HTTP server that serves files from a directory. //! //! Checkout the `README.md` for guidance. 
use std::{ ascii, fs, io, net::SocketAddr, path::{self, Path, PathBuf}, str, sync::Arc, }; use anyhow::{anyhow, bail, Context, Result}; use clap::Parser; use proto::crypto::rustls::QuicServerConfig; use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}; use tracing::{error, info, info_span}; use tracing_futures::Instrument as _; mod common; #[derive(Parser, Debug)] #[clap(name = "server")] struct Opt { /// file to log TLS keys to for debugging #[clap(long = "keylog")] keylog: bool, /// directory to serve files from root: PathBuf, /// TLS private key in PEM format #[clap(short = 'k', long = "key", requires = "cert")] key: Option, /// TLS certificate in PEM format #[clap(short = 'c', long = "cert", requires = "key")] cert: Option, /// Enable stateless retries #[clap(long = "stateless-retry")] stateless_retry: bool, /// Address to listen on #[clap(long = "listen", default_value = "[::1]:4433")] listen: SocketAddr, /// Client address to block #[clap(long = "block")] block: Option, /// Maximum number of concurrent connections to allow #[clap(long = "connection-limit")] connection_limit: Option, } fn main() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let opt = Opt::parse(); let code = { if let Err(e) = run(opt) { eprintln!("ERROR: {e}"); 1 } else { 0 } }; ::std::process::exit(code); } #[tokio::main] async fn run(options: Opt) -> Result<()> { let (certs, key) = if let (Some(key_path), Some(cert_path)) = (&options.key, &options.cert) { let key = fs::read(key_path).context("failed to read private key")?; let key = if key_path.extension().map_or(false, |x| x == "der") { PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key)) } else { rustls_pemfile::private_key(&mut &*key) .context("malformed PKCS #1 private key")? .ok_or_else(|| anyhow::Error::msg("no private keys found"))? }; let cert_chain = fs::read(cert_path).context("failed to read certificate chain")?; let cert_chain = if cert_path.extension().map_or(false, |x| x == "der") { vec![CertificateDer::from(cert_chain)] } else { rustls_pemfile::certs(&mut &*cert_chain) .collect::>() .context("invalid PEM-encoded certificate")? 
}; (cert_chain, key) } else { let dirs = directories_next::ProjectDirs::from("org", "quinn", "quinn-examples").unwrap(); let path = dirs.data_local_dir(); let cert_path = path.join("cert.der"); let key_path = path.join("key.der"); let (cert, key) = match fs::read(&cert_path).and_then(|x| Ok((x, fs::read(&key_path)?))) { Ok((cert, key)) => ( CertificateDer::from(cert), PrivateKeyDer::try_from(key).map_err(anyhow::Error::msg)?, ), Err(ref e) if e.kind() == io::ErrorKind::NotFound => { info!("generating self-signed certificate"); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); let cert = cert.cert.into(); fs::create_dir_all(path).context("failed to create certificate directory")?; fs::write(&cert_path, &cert).context("failed to write certificate")?; fs::write(&key_path, key.secret_pkcs8_der()) .context("failed to write private key")?; (cert, key.into()) } Err(e) => { bail!("failed to read certificate: {}", e); } }; (vec![cert], key) }; let mut server_crypto = rustls::ServerConfig::builder() .with_no_client_auth() .with_single_cert(certs, key)?; server_crypto.alpn_protocols = common::ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); if options.keylog { server_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(QuicServerConfig::try_from(server_crypto)?)); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(0_u8.into()); let root = Arc::::from(options.root.clone()); if !root.exists() { bail!("root path does not exist"); } let endpoint = quinn::Endpoint::server(server_config, options.listen)?; eprintln!("listening on {}", endpoint.local_addr()?); while let Some(conn) = endpoint.accept().await { if options .connection_limit .map_or(false, |n| endpoint.open_connections() >= n) { info!("refusing due to open connection limit"); conn.refuse(); } else if Some(conn.remote_address()) == options.block { info!("refusing blocked client IP address"); conn.refuse(); } else if options.stateless_retry && !conn.remote_address_validated() { info!("requiring connection to validate its address"); conn.retry().unwrap(); } else { info!("accepting connection"); let fut = handle_connection(root.clone(), conn); tokio::spawn(async move { if let Err(e) = fut.await { error!("connection failed: {reason}", reason = e.to_string()) } }); } } Ok(()) } async fn handle_connection(root: Arc, conn: quinn::Incoming) -> Result<()> { let connection = conn.await?; let span = info_span!( "connection", remote = %connection.remote_address(), protocol = %connection .handshake_data() .unwrap() .downcast::().unwrap() .protocol .map_or_else(|| "".into(), |x| String::from_utf8_lossy(&x).into_owned()) ); async { info!("established"); // Each stream initiated by the client constitutes a new request. loop { let stream = connection.accept_bi().await; let stream = match stream { Err(quinn::ConnectionError::ApplicationClosed { .. 
}) => { info!("connection closed"); return Ok(()); } Err(e) => { return Err(e); } Ok(s) => s, }; let fut = handle_request(root.clone(), stream); tokio::spawn( async move { if let Err(e) = fut.await { error!("failed: {reason}", reason = e.to_string()); } } .instrument(info_span!("request")), ); } } .instrument(span) .await?; Ok(()) } async fn handle_request( root: Arc, (mut send, mut recv): (quinn::SendStream, quinn::RecvStream), ) -> Result<()> { let req = recv .read_to_end(64 * 1024) .await .map_err(|e| anyhow!("failed reading request: {}", e))?; let mut escaped = String::new(); for &x in &req[..] { let part = ascii::escape_default(x).collect::>(); escaped.push_str(str::from_utf8(&part).unwrap()); } info!(content = %escaped); // Execute the request let resp = process_get(&root, &req).unwrap_or_else(|e| { error!("failed: {}", e); format!("failed to process request: {e}\n").into_bytes() }); // Write the response send.write_all(&resp) .await .map_err(|e| anyhow!("failed to send response: {}", e))?; // Gracefully terminate the stream send.finish().unwrap(); info!("complete"); Ok(()) } fn process_get(root: &Path, x: &[u8]) -> Result> { if x.len() < 4 || &x[0..4] != b"GET " { bail!("missing GET"); } if x[4..].len() < 2 || &x[x.len() - 2..] != b"\r\n" { bail!("missing \\r\\n"); } let x = &x[4..x.len() - 2]; let end = x.iter().position(|&c| c == b' ').unwrap_or(x.len()); let path = str::from_utf8(&x[..end]).context("path is malformed UTF-8")?; let path = Path::new(&path); let mut real_path = PathBuf::from(root); let mut components = path.components(); match components.next() { Some(path::Component::RootDir) => {} _ => { bail!("path must be absolute"); } } for c in components { match c { path::Component::Normal(x) => { real_path.push(x); } x => { bail!("illegal component in path: {:?}", x); } } } let data = fs::read(&real_path).context("failed reading file")?; Ok(data) } quinn-0.11.6/examples/single_socket.rs000064400000000000000000000041041046102023000160570ustar 00000000000000//! This example demonstrates how to make multiple outgoing connections on a single UDP socket. //! //! Checkout the `README.md` for guidance. use std::{ error::Error, net::{IpAddr, Ipv4Addr, SocketAddr}, }; use quinn::Endpoint; mod common; use common::{make_client_endpoint, make_server_endpoint}; use rustls::pki_types::CertificateDer; #[tokio::main] async fn main() -> Result<(), Box> { let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5000); let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001); let addr3 = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5002); let server1_cert = run_server(addr1)?; let server2_cert = run_server(addr2)?; let server3_cert = run_server(addr3)?; let client = make_client_endpoint( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), &[&server1_cert, &server2_cert, &server3_cert], )?; // connect to multiple endpoints using the same socket/endpoint tokio::join!( run_client(&client, addr1), run_client(&client, addr2), run_client(&client, addr3), ); // Make sure the server has a chance to clean up client.wait_idle().await; Ok(()) } /// Runs a QUIC server bound to given address and returns server certificate. 
fn run_server( addr: SocketAddr, ) -> Result, Box> { let (endpoint, server_cert) = make_server_endpoint(addr)?; // accept a single connection tokio::spawn(async move { let connection = endpoint.accept().await.unwrap().await.unwrap(); println!( "[server] incoming connection: addr={}", connection.remote_address() ); }); Ok(server_cert) } /// Attempt QUIC connection with the given server address. async fn run_client(endpoint: &Endpoint, server_addr: SocketAddr) { let connect = endpoint.connect(server_addr, "localhost").unwrap(); let connection = connect.await.unwrap(); println!("[client] connected: addr={}", connection.remote_address()); } quinn-0.11.6/src/connection.rs000064400000000000000000001414231046102023000143440ustar 00000000000000use std::{ any::Any, fmt, future::Future, io, net::{IpAddr, SocketAddr}, pin::Pin, sync::Arc, task::{Context, Poll, Waker}, time::{Duration, Instant}, }; use bytes::Bytes; use pin_project_lite::pin_project; use rustc_hash::FxHashMap; use thiserror::Error; use tokio::sync::{futures::Notified, mpsc, oneshot, Notify}; use tracing::{debug_span, Instrument, Span}; use crate::{ mutex::Mutex, recv_stream::RecvStream, runtime::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPoller}, send_stream::SendStream, udp_transmit, ConnectionEvent, VarInt, }; use proto::{ congestion::Controller, ConnectionError, ConnectionHandle, ConnectionStats, Dir, EndpointEvent, StreamEvent, StreamId, }; /// In-progress connection attempt future #[derive(Debug)] #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] pub struct Connecting { conn: Option, connected: oneshot::Receiver, handshake_data_ready: Option>, } impl Connecting { pub(crate) fn new( handle: ConnectionHandle, conn: proto::Connection, endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, conn_events: mpsc::UnboundedReceiver, socket: Arc, runtime: Arc, ) -> Self { let (on_handshake_data_send, on_handshake_data_recv) = oneshot::channel(); let (on_connected_send, on_connected_recv) = oneshot::channel(); let conn = ConnectionRef::new( handle, conn, endpoint_events, conn_events, on_handshake_data_send, on_connected_send, socket, runtime.clone(), ); let driver = ConnectionDriver(conn.clone()); runtime.spawn(Box::pin( async { if let Err(e) = driver.await { tracing::error!("I/O error: {e}"); } } .instrument(Span::current()), )); Self { conn: Some(conn), connected: on_connected_recv, handshake_data_ready: Some(on_handshake_data_recv), } } /// Convert into a 0-RTT or 0.5-RTT connection at the cost of weakened security /// /// Returns `Ok` immediately if the local endpoint is able to attempt sending 0/0.5-RTT data. /// If so, the returned [`Connection`] can be used to send application data without waiting for /// the rest of the handshake to complete, at the cost of weakened cryptographic security /// guarantees. The returned [`ZeroRttAccepted`] future resolves when the handshake does /// complete, at which point subsequently opened streams and written data will have full /// cryptographic protection. /// /// ## Outgoing /// /// For outgoing connections, the initial attempt to convert to a [`Connection`] which sends /// 0-RTT data will proceed if the [`crypto::ClientConfig`][crate::crypto::ClientConfig] /// attempts to resume a previous TLS session. However, **the remote endpoint may not actually /// _accept_ the 0-RTT data**--yet still accept the connection attempt in general. 
This /// possibility is conveyed through the [`ZeroRttAccepted`] future--when the handshake /// completes, it resolves to true if the 0-RTT data was accepted and false if it was rejected. /// If it was rejected, the existence of streams opened and other application data sent prior /// to the handshake completing will not be conveyed to the remote application, and local /// operations on them will return `ZeroRttRejected` errors. /// /// A server may reject 0-RTT data at its discretion, but accepting 0-RTT data requires the /// relevant resumption state to be stored in the server, which servers may limit or lose for /// various reasons including not persisting resumption state across server restarts. /// /// If manually providing a [`crypto::ClientConfig`][crate::crypto::ClientConfig], check your /// implementation's docs for 0-RTT pitfalls. /// /// ## Incoming /// /// For incoming connections, conversion to 0.5-RTT will always fully succeed. `into_0rtt` will /// always return `Ok` and the [`ZeroRttAccepted`] will always resolve to true. /// /// If manually providing a [`crypto::ServerConfig`][crate::crypto::ServerConfig], check your /// implementation's docs for 0-RTT pitfalls. /// /// ## Security /// /// On outgoing connections, this enables transmission of 0-RTT data, which is vulnerable to /// replay attacks, and should therefore never invoke non-idempotent operations. /// /// On incoming connections, this enables transmission of 0.5-RTT data, which may be sent /// before TLS client authentication has occurred, and should therefore not be used to send /// data for which client authentication is being used. pub fn into_0rtt(mut self) -> Result<(Connection, ZeroRttAccepted), Self> { // This lock borrows `self` and would normally be dropped at the end of this scope, so we'll // have to release it explicitly before returning `self` by value. let conn = (self.conn.as_mut().unwrap()).state.lock("into_0rtt"); let is_ok = conn.inner.has_0rtt() || conn.inner.side().is_server(); drop(conn); if is_ok { let conn = self.conn.take().unwrap(); Ok((Connection(conn), ZeroRttAccepted(self.connected))) } else { Err(self) } } /// Parameters negotiated during the handshake /// /// The dynamic type returned is determined by the configured /// [`Session`](proto::crypto::Session). For the default `rustls` session, the return value can /// be [`downcast`](Box::downcast) to a /// [`crypto::rustls::HandshakeData`](crate::crypto::rustls::HandshakeData). pub async fn handshake_data(&mut self) -> Result, ConnectionError> { // Taking &mut self allows us to use a single oneshot channel rather than dealing with // potentially many tasks waiting on the same event. It's a bit of a hack, but keeps things // simple. if let Some(x) = self.handshake_data_ready.take() { let _ = x.await; } let conn = self.conn.as_ref().unwrap(); let inner = conn.state.lock("handshake"); inner .inner .crypto_session() .handshake_data() .ok_or_else(|| { inner .error .clone() .expect("spurious handshake data ready notification") }) } /// The local IP address which was used when the peer established /// the connection /// /// This can be different from the address the endpoint is bound to, in case /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`. /// /// This will return `None` for clients, or when the platform does not expose this /// information. See [`quinn_udp::RecvMeta::dst_ip`](udp::RecvMeta::dst_ip) for a list of /// supported platforms when using [`quinn_udp`](udp) for I/O, which is the default. 
pub fn local_ip(&self) -> Option { let conn = self.conn.as_ref().unwrap(); let inner = conn.state.lock("local_ip"); inner.inner.local_ip() } /// The peer's UDP address. /// /// Will panic if called after `poll` has returned `Ready`. pub fn remote_address(&self) -> SocketAddr { let conn_ref: &ConnectionRef = self.conn.as_ref().expect("used after yielding Ready"); conn_ref.state.lock("remote_address").inner.remote_address() } } impl Future for Connecting { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { Pin::new(&mut self.connected).poll(cx).map(|_| { let conn = self.conn.take().unwrap(); let inner = conn.state.lock("connecting"); if inner.connected { drop(inner); Ok(Connection(conn)) } else { Err(inner .error .clone() .expect("connected signaled without connection success or error")) } }) } } /// Future that completes when a connection is fully established /// /// For clients, the resulting value indicates if 0-RTT was accepted. For servers, the resulting /// value is meaningless. #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] pub struct ZeroRttAccepted(oneshot::Receiver); impl Future for ZeroRttAccepted { type Output = bool; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { Pin::new(&mut self.0).poll(cx).map(|x| x.unwrap_or(false)) } } /// A future that drives protocol logic for a connection /// /// This future handles the protocol logic for a single connection, routing events from the /// `Connection` API object to the `Endpoint` task and the related stream-related interfaces. /// It also keeps track of outstanding timeouts for the `Connection`. /// /// If the connection encounters an error condition, this future will yield an error. It will /// terminate (yielding `Ok(())`) if the connection was closed without error. Unlike other /// connection-related futures, this waits for the draining period to complete to ensure that /// packets still in flight from the peer are handled gracefully. #[must_use = "connection drivers must be spawned for their connections to function"] #[derive(Debug)] struct ConnectionDriver(ConnectionRef); impl Future for ConnectionDriver { type Output = Result<(), io::Error>; #[allow(unused_mut)] // MSRV fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let conn = &mut *self.0.state.lock("poll"); let span = debug_span!("drive", id = conn.handle.0); let _guard = span.enter(); if let Err(e) = conn.process_conn_events(&self.0.shared, cx) { conn.terminate(e, &self.0.shared); return Poll::Ready(Ok(())); } let mut keep_going = conn.drive_transmit(cx)?; // If a timer expires, there might be more to transmit. When we transmit something, we // might need to reset a timer. Hence, we must loop until neither happens. keep_going |= conn.drive_timer(cx); conn.forward_endpoint_events(); conn.forward_app_events(&self.0.shared); if !conn.inner.is_drained() { if keep_going { // If the connection hasn't processed all tasks, schedule it again cx.waker().wake_by_ref(); } else { conn.driver = Some(cx.waker().clone()); } return Poll::Pending; } if conn.error.is_none() { unreachable!("drained connections always have an error"); } Poll::Ready(Ok(())) } } /// A QUIC connection. /// /// If all references to a connection (including every clone of the `Connection` handle, streams of /// incoming streams, and the various stream types) have been dropped, then the connection will be /// automatically closed with an `error_code` of 0 and an empty `reason`. 
You can also close the /// connection explicitly by calling [`Connection::close()`]. /// /// Closing the connection immediately abandons efforts to deliver data to the peer. Upon /// receiving CONNECTION_CLOSE the peer *may* drop any stream data not yet delivered to the /// application. [`Connection::close()`] describes in more detail how to gracefully close a /// connection without losing application data. /// /// May be cloned to obtain another handle to the same connection. /// /// [`Connection::close()`]: Connection::close #[derive(Debug, Clone)] pub struct Connection(ConnectionRef); impl Connection { /// Initiate a new outgoing unidirectional stream. /// /// Streams are cheap and instantaneous to open unless blocked by flow control. As a /// consequence, the peer won't be notified that a stream has been opened until the stream is /// actually used. pub fn open_uni(&self) -> OpenUni<'_> { OpenUni { conn: &self.0, notify: self.0.shared.stream_budget_available[Dir::Uni as usize].notified(), } } /// Initiate a new outgoing bidirectional stream. /// /// Streams are cheap and instantaneous to open unless blocked by flow control. As a /// consequence, the peer won't be notified that a stream has been opened until the stream is /// actually used. Calling [`open_bi()`] then waiting on the [`RecvStream`] without writing /// anything to [`SendStream`] will never succeed. /// /// [`open_bi()`]: crate::Connection::open_bi /// [`SendStream`]: crate::SendStream /// [`RecvStream`]: crate::RecvStream pub fn open_bi(&self) -> OpenBi<'_> { OpenBi { conn: &self.0, notify: self.0.shared.stream_budget_available[Dir::Bi as usize].notified(), } } /// Accept the next incoming uni-directional stream pub fn accept_uni(&self) -> AcceptUni<'_> { AcceptUni { conn: &self.0, notify: self.0.shared.stream_incoming[Dir::Uni as usize].notified(), } } /// Accept the next incoming bidirectional stream /// /// **Important Note**: The `Connection` that calls [`open_bi()`] must write to its [`SendStream`] /// before the other `Connection` is able to `accept_bi()`. Calling [`open_bi()`] then /// waiting on the [`RecvStream`] without writing anything to [`SendStream`] will never succeed. /// /// [`accept_bi()`]: crate::Connection::accept_bi /// [`open_bi()`]: crate::Connection::open_bi /// [`SendStream`]: crate::SendStream /// [`RecvStream`]: crate::RecvStream pub fn accept_bi(&self) -> AcceptBi<'_> { AcceptBi { conn: &self.0, notify: self.0.shared.stream_incoming[Dir::Bi as usize].notified(), } } /// Receive an application datagram pub fn read_datagram(&self) -> ReadDatagram<'_> { ReadDatagram { conn: &self.0, notify: self.0.shared.datagram_received.notified(), } } /// Wait for the connection to be closed for any reason /// /// Despite the return type's name, closed connections are often not an error condition at the /// application layer. Cases that might be routine include [`ConnectionError::LocallyClosed`] /// and [`ConnectionError::ApplicationClosed`]. pub async fn closed(&self) -> ConnectionError { { let conn = self.0.state.lock("closed"); if let Some(error) = conn.error.as_ref() { return error.clone(); } // Construct the future while the lock is held to ensure we can't miss a wakeup if // the `Notify` is signaled immediately after we release the lock. `await` it after // the lock guard is out of scope. self.0.shared.closed.notified() } .await; self.0 .state .lock("closed") .error .as_ref() .expect("closed without an error") .clone() } /// If the connection is closed, the reason why. 
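    ///
    /// An illustrative sketch (the `connection` handle is assumed to already exist):
    ///
    /// ```no_run
    /// # fn example(connection: &quinn::Connection) {
    /// match connection.close_reason() {
    ///     Some(err) => println!("connection closed: {err}"),
    ///     None => println!("connection still open"),
    /// }
    /// # }
    /// ```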
/// /// Returns `None` if the connection is still open. pub fn close_reason(&self) -> Option { self.0.state.lock("close_reason").error.clone() } /// Close the connection immediately. /// /// Pending operations will fail immediately with [`ConnectionError::LocallyClosed`]. No /// more data is sent to the peer and the peer may drop buffered data upon receiving /// the CONNECTION_CLOSE frame. /// /// `error_code` and `reason` are not interpreted, and are provided directly to the peer. /// /// `reason` will be truncated to fit in a single packet with overhead; to improve odds that it /// is preserved in full, it should be kept under 1KiB. /// /// # Gracefully closing a connection /// /// Only the peer last receiving application data can be certain that all data is /// delivered. The only reliable action it can then take is to close the connection, /// potentially with a custom error code. The delivery of the final CONNECTION_CLOSE /// frame is very likely if both endpoints stay online long enough, and /// [`Endpoint::wait_idle()`] can be used to provide sufficient time. Otherwise, the /// remote peer will time out the connection, provided that the idle timeout is not /// disabled. /// /// The sending side can not guarantee all stream data is delivered to the remote /// application. It only knows the data is delivered to the QUIC stack of the remote /// endpoint. Once the local side sends a CONNECTION_CLOSE frame in response to calling /// [`close()`] the remote endpoint may drop any data it received but is as yet /// undelivered to the application, including data that was acknowledged as received to /// the local endpoint. /// /// [`ConnectionError::LocallyClosed`]: crate::ConnectionError::LocallyClosed /// [`Endpoint::wait_idle()`]: crate::Endpoint::wait_idle /// [`close()`]: Connection::close pub fn close(&self, error_code: VarInt, reason: &[u8]) { let conn = &mut *self.0.state.lock("close"); conn.close(error_code, Bytes::copy_from_slice(reason), &self.0.shared); } /// Transmit `data` as an unreliable, unordered application datagram /// /// Application datagrams are a low-level primitive. They may be lost or delivered out of order, /// and `data` must both fit inside a single QUIC packet and be smaller than the maximum /// dictated by the peer. pub fn send_datagram(&self, data: Bytes) -> Result<(), SendDatagramError> { let conn = &mut *self.0.state.lock("send_datagram"); if let Some(ref x) = conn.error { return Err(SendDatagramError::ConnectionLost(x.clone())); } use proto::SendDatagramError::*; match conn.inner.datagrams().send(data, true) { Ok(()) => { conn.wake(); Ok(()) } Err(e) => Err(match e { Blocked(..) => unreachable!(), UnsupportedByPeer => SendDatagramError::UnsupportedByPeer, Disabled => SendDatagramError::Disabled, TooLarge => SendDatagramError::TooLarge, }), } } /// Transmit `data` as an unreliable, unordered application datagram /// /// Unlike [`send_datagram()`], this method will wait for buffer space during congestion /// conditions, which effectively prioritizes old datagrams over new datagrams. /// /// See [`send_datagram()`] for details. /// /// [`send_datagram()`]: Connection::send_datagram pub fn send_datagram_wait(&self, data: Bytes) -> SendDatagram<'_> { SendDatagram { conn: &self.0, data: Some(data), notify: self.0.shared.datagrams_unblocked.notified(), } } /// Compute the maximum size of datagrams that may be passed to [`send_datagram()`]. /// /// Returns `None` if datagrams are unsupported by the peer or disabled locally. 
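    ///
    /// An illustrative sketch of checking the limit before sending (payload contents are a
    /// placeholder):
    ///
    /// ```no_run
    /// # fn example(connection: &quinn::Connection) {
    /// let payload = bytes::Bytes::from_static(b"ping");
    /// match connection.max_datagram_size() {
    ///     // Datagrams are supported and the payload fits within the current limit
    ///     Some(max) if payload.len() <= max => {
    ///         connection.send_datagram(payload).expect("datagram within advertised limit");
    ///     }
    ///     // Datagrams are unsupported/disabled, or the payload is too large right now
    ///     _ => println!("cannot send a datagram of this size"),
    /// }
    /// # }
    /// ```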
/// /// This may change over the lifetime of a connection according to variation in the path MTU /// estimate. The peer can also enforce an arbitrarily small fixed limit, but if the peer's /// limit is large this is guaranteed to be a little over a kilobyte at minimum. /// /// Not necessarily the maximum size of received datagrams. /// /// [`send_datagram()`]: Connection::send_datagram pub fn max_datagram_size(&self) -> Option { self.0 .state .lock("max_datagram_size") .inner .datagrams() .max_size() } /// Bytes available in the outgoing datagram buffer /// /// When greater than zero, calling [`send_datagram()`](Self::send_datagram) with a datagram of /// at most this size is guaranteed not to cause older datagrams to be dropped. pub fn datagram_send_buffer_space(&self) -> usize { self.0 .state .lock("datagram_send_buffer_space") .inner .datagrams() .send_buffer_space() } /// The peer's UDP address /// /// If `ServerConfig::migration` is `true`, clients may change addresses at will, e.g. when /// switching to a cellular internet connection. pub fn remote_address(&self) -> SocketAddr { self.0.state.lock("remote_address").inner.remote_address() } /// The local IP address which was used when the peer established /// the connection /// /// This can be different from the address the endpoint is bound to, in case /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`. /// /// This will return `None` for clients, or when the platform does not expose this /// information. See [`quinn_udp::RecvMeta::dst_ip`](udp::RecvMeta::dst_ip) for a list of /// supported platforms when using [`quinn_udp`](udp) for I/O, which is the default. pub fn local_ip(&self) -> Option { self.0.state.lock("local_ip").inner.local_ip() } /// Current best estimate of this connection's latency (round-trip-time) pub fn rtt(&self) -> Duration { self.0.state.lock("rtt").inner.rtt() } /// Returns connection statistics pub fn stats(&self) -> ConnectionStats { self.0.state.lock("stats").inner.stats() } /// Current state of the congestion control algorithm, for debugging purposes pub fn congestion_state(&self) -> Box { self.0 .state .lock("congestion_state") .inner .congestion_state() .clone_box() } /// Parameters negotiated during the handshake /// /// Guaranteed to return `Some` on fully established connections or after /// [`Connecting::handshake_data()`] succeeds. See that method's documentations for details on /// the returned value. /// /// [`Connection::handshake_data()`]: crate::Connecting::handshake_data pub fn handshake_data(&self) -> Option> { self.0 .state .lock("handshake_data") .inner .crypto_session() .handshake_data() } /// Cryptographic identity of the peer /// /// The dynamic type returned is determined by the configured /// [`Session`](proto::crypto::Session). For the default `rustls` session, the return value can /// be [`downcast`](Box::downcast) to a Vec<[rustls::pki_types::CertificateDer]> pub fn peer_identity(&self) -> Option> { self.0 .state .lock("peer_identity") .inner .crypto_session() .peer_identity() } /// A stable identifier for this connection /// /// Peer addresses and connection IDs can change, but this value will remain /// fixed for the lifetime of the connection. pub fn stable_id(&self) -> usize { self.0.stable_id() } // Update traffic keys spontaneously for testing purposes. #[doc(hidden)] pub fn force_key_update(&self) { self.0 .state .lock("force_key_update") .inner .initiate_key_update() } /// Derive keying material from this connection's TLS session secrets. 
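    ///
    /// An illustrative sketch (label, context, and output length are placeholders chosen for
    /// the example):
    ///
    /// ```no_run
    /// # fn example(connection: &quinn::Connection) {
    /// let mut output = [0u8; 32];
    /// connection
    ///     .export_keying_material(&mut output, b"example-label", b"example-context")
    ///     .expect("exporter available for an established TLS session");
    /// // `output` now holds 32 bytes of keying material that the peer can derive identically
    /// # }
    /// ```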
/// /// When both peers call this method with the same `label` and `context` /// arguments and `output` buffers of equal length, they will get the /// same sequence of bytes in `output`. These bytes are cryptographically /// strong and pseudorandom, and are suitable for use as keying material. /// /// See [RFC5705](https://tools.ietf.org/html/rfc5705) for more information. pub fn export_keying_material( &self, output: &mut [u8], label: &[u8], context: &[u8], ) -> Result<(), proto::crypto::ExportKeyingMaterialError> { self.0 .state .lock("export_keying_material") .inner .crypto_session() .export_keying_material(output, label, context) } /// Modify the number of remotely initiated unidirectional streams that may be concurrently open /// /// No streams may be opened by the peer unless fewer than `count` are already open. Large /// `count`s increase both minimum and worst-case memory consumption. pub fn set_max_concurrent_uni_streams(&self, count: VarInt) { let mut conn = self.0.state.lock("set_max_concurrent_uni_streams"); conn.inner.set_max_concurrent_streams(Dir::Uni, count); // May need to send MAX_STREAMS to make progress conn.wake(); } /// See [`proto::TransportConfig::receive_window()`] pub fn set_receive_window(&self, receive_window: VarInt) { let mut conn = self.0.state.lock("set_receive_window"); conn.inner.set_receive_window(receive_window); conn.wake(); } /// Modify the number of remotely initiated bidirectional streams that may be concurrently open /// /// No streams may be opened by the peer unless fewer than `count` are already open. Large /// `count`s increase both minimum and worst-case memory consumption. pub fn set_max_concurrent_bi_streams(&self, count: VarInt) { let mut conn = self.0.state.lock("set_max_concurrent_bi_streams"); conn.inner.set_max_concurrent_streams(Dir::Bi, count); // May need to send MAX_STREAMS to make progress conn.wake(); } } pin_project! { /// Future produced by [`Connection::open_uni`] pub struct OpenUni<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for OpenUni<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let this = self.project(); let (conn, id, is_0rtt) = ready!(poll_open(ctx, this.conn, this.notify, Dir::Uni))?; Poll::Ready(Ok(SendStream::new(conn, id, is_0rtt))) } } pin_project! 
{ /// Future produced by [`Connection::open_bi`] pub struct OpenBi<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for OpenBi<'_> { type Output = Result<(SendStream, RecvStream), ConnectionError>; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let this = self.project(); let (conn, id, is_0rtt) = ready!(poll_open(ctx, this.conn, this.notify, Dir::Bi))?; Poll::Ready(Ok(( SendStream::new(conn.clone(), id, is_0rtt), RecvStream::new(conn, id, is_0rtt), ))) } } fn poll_open<'a>( ctx: &mut Context<'_>, conn: &'a ConnectionRef, mut notify: Pin<&mut Notified<'a>>, dir: Dir, ) -> Poll> { let mut state = conn.state.lock("poll_open"); if let Some(ref e) = state.error { return Poll::Ready(Err(e.clone())); } else if let Some(id) = state.inner.streams().open(dir) { let is_0rtt = state.inner.side().is_client() && state.inner.is_handshaking(); drop(state); // Release the lock so clone can take it return Poll::Ready(Ok((conn.clone(), id, is_0rtt))); } loop { match notify.as_mut().poll(ctx) { // `state` lock ensures we didn't race with readiness Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => { notify.set(conn.shared.stream_budget_available[dir as usize].notified()) } } } } pin_project! { /// Future produced by [`Connection::accept_uni`] pub struct AcceptUni<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for AcceptUni<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let this = self.project(); let (conn, id, is_0rtt) = ready!(poll_accept(ctx, this.conn, this.notify, Dir::Uni))?; Poll::Ready(Ok(RecvStream::new(conn, id, is_0rtt))) } } pin_project! { /// Future produced by [`Connection::accept_bi`] pub struct AcceptBi<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for AcceptBi<'_> { type Output = Result<(SendStream, RecvStream), ConnectionError>; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let this = self.project(); let (conn, id, is_0rtt) = ready!(poll_accept(ctx, this.conn, this.notify, Dir::Bi))?; Poll::Ready(Ok(( SendStream::new(conn.clone(), id, is_0rtt), RecvStream::new(conn, id, is_0rtt), ))) } } fn poll_accept<'a>( ctx: &mut Context<'_>, conn: &'a ConnectionRef, mut notify: Pin<&mut Notified<'a>>, dir: Dir, ) -> Poll> { let mut state = conn.state.lock("poll_accept"); // Check for incoming streams before checking `state.error` so that already-received streams, // which are necessarily finite, can be drained from a closed connection. if let Some(id) = state.inner.streams().accept(dir) { let is_0rtt = state.inner.is_handshaking(); state.wake(); // To send additional stream ID credit drop(state); // Release the lock so clone can take it return Poll::Ready(Ok((conn.clone(), id, is_0rtt))); } else if let Some(ref e) = state.error { return Poll::Ready(Err(e.clone())); } loop { match notify.as_mut().poll(ctx) { // `state` lock ensures we didn't race with readiness Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => notify.set(conn.shared.stream_incoming[dir as usize].notified()), } } } pin_project! 
{ /// Future produced by [`Connection::read_datagram`] pub struct ReadDatagram<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for ReadDatagram<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let mut this = self.project(); let mut state = this.conn.state.lock("ReadDatagram::poll"); // Check for buffered datagrams before checking `state.error` so that already-received // datagrams, which are necessarily finite, can be drained from a closed connection. if let Some(x) = state.inner.datagrams().recv() { return Poll::Ready(Ok(x)); } else if let Some(ref e) = state.error { return Poll::Ready(Err(e.clone())); } loop { match this.notify.as_mut().poll(ctx) { // `state` lock ensures we didn't race with readiness Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => this .notify .set(this.conn.shared.datagram_received.notified()), } } } } pin_project! { /// Future produced by [`Connection::send_datagram_wait`] pub struct SendDatagram<'a> { conn: &'a ConnectionRef, data: Option, #[pin] notify: Notified<'a>, } } impl Future for SendDatagram<'_> { type Output = Result<(), SendDatagramError>; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let mut this = self.project(); let mut state = this.conn.state.lock("SendDatagram::poll"); if let Some(ref e) = state.error { return Poll::Ready(Err(SendDatagramError::ConnectionLost(e.clone()))); } use proto::SendDatagramError::*; match state .inner .datagrams() .send(this.data.take().unwrap(), false) { Ok(()) => { state.wake(); Poll::Ready(Ok(())) } Err(e) => Poll::Ready(Err(match e { Blocked(data) => { this.data.replace(data); loop { match this.notify.as_mut().poll(ctx) { Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => this .notify .set(this.conn.shared.datagrams_unblocked.notified()), } } } UnsupportedByPeer => SendDatagramError::UnsupportedByPeer, Disabled => SendDatagramError::Disabled, TooLarge => SendDatagramError::TooLarge, })), } } } #[derive(Debug)] pub(crate) struct ConnectionRef(Arc); impl ConnectionRef { #[allow(clippy::too_many_arguments)] fn new( handle: ConnectionHandle, conn: proto::Connection, endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, conn_events: mpsc::UnboundedReceiver, on_handshake_data: oneshot::Sender<()>, on_connected: oneshot::Sender, socket: Arc, runtime: Arc, ) -> Self { Self(Arc::new(ConnectionInner { state: Mutex::new(State { inner: conn, driver: None, handle, on_handshake_data: Some(on_handshake_data), on_connected: Some(on_connected), connected: false, timer: None, timer_deadline: None, conn_events, endpoint_events, blocked_writers: FxHashMap::default(), blocked_readers: FxHashMap::default(), stopped: FxHashMap::default(), error: None, ref_count: 0, io_poller: socket.clone().create_io_poller(), socket, runtime, send_buffer: Vec::new(), buffered_transmit: None, }), shared: Shared::default(), })) } fn stable_id(&self) -> usize { &*self.0 as *const _ as usize } } impl Clone for ConnectionRef { fn clone(&self) -> Self { self.state.lock("clone").ref_count += 1; Self(self.0.clone()) } } impl Drop for ConnectionRef { fn drop(&mut self) { let conn = &mut *self.state.lock("drop"); if let Some(x) = conn.ref_count.checked_sub(1) { conn.ref_count = x; if x == 0 && !conn.inner.is_closed() { // If the driver is alive, it's just it and us, so we'd better shut it down. If it's // not, we can't do any harm. 
If there were any streams being opened, then either // the connection will be closed for an unrelated reason or a fresh reference will // be constructed for the newly opened stream. conn.implicit_close(&self.shared); } } } } impl std::ops::Deref for ConnectionRef { type Target = ConnectionInner; fn deref(&self) -> &Self::Target { &self.0 } } #[derive(Debug)] pub(crate) struct ConnectionInner { pub(crate) state: Mutex, pub(crate) shared: Shared, } #[derive(Debug, Default)] pub(crate) struct Shared { /// Notified when new streams may be locally initiated due to an increase in stream ID flow /// control budget stream_budget_available: [Notify; 2], /// Notified when the peer has initiated a new stream stream_incoming: [Notify; 2], datagram_received: Notify, datagrams_unblocked: Notify, closed: Notify, } pub(crate) struct State { pub(crate) inner: proto::Connection, driver: Option, handle: ConnectionHandle, on_handshake_data: Option>, on_connected: Option>, connected: bool, timer: Option>>, timer_deadline: Option, conn_events: mpsc::UnboundedReceiver, endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, pub(crate) blocked_writers: FxHashMap, pub(crate) blocked_readers: FxHashMap, pub(crate) stopped: FxHashMap, /// Always set to Some before the connection becomes drained pub(crate) error: Option, /// Number of live handles that can be used to initiate or handle I/O; excludes the driver ref_count: usize, socket: Arc, io_poller: Pin>, runtime: Arc, send_buffer: Vec, /// We buffer a transmit when the underlying I/O would block buffered_transmit: Option, } impl State { fn drive_transmit(&mut self, cx: &mut Context) -> io::Result { let now = self.runtime.now(); let mut transmits = 0; let max_datagrams = self.socket.max_transmit_segments(); loop { // Retry the last transmit, or get a new one. let t = match self.buffered_transmit.take() { Some(t) => t, None => { self.send_buffer.clear(); self.send_buffer.reserve(self.inner.current_mtu() as usize); match self .inner .poll_transmit(now, max_datagrams, &mut self.send_buffer) { Some(t) => { transmits += match t.segment_size { None => 1, Some(s) => (t.size + s - 1) / s, // round up }; t } None => break, } } }; if self.io_poller.as_mut().poll_writable(cx)?.is_pending() { // Retry after a future wakeup self.buffered_transmit = Some(t); return Ok(false); } let len = t.size; let retry = match self .socket .try_send(&udp_transmit(&t, &self.send_buffer[..len])) { Ok(()) => false, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => true, Err(e) => return Err(e), }; if retry { // We thought the socket was writable, but it wasn't. Retry so that either another // `poll_writable` call determines that the socket is indeed not writable and // registers us for a wakeup, or the send succeeds if this really was just a // transient failure. self.buffered_transmit = Some(t); continue; } if transmits >= MAX_TRANSMIT_DATAGRAMS { // TODO: What isn't ideal here yet is that if we don't poll all // datagrams that could be sent we don't go into the `app_limited` // state and CWND continues to grow until we get here the next time. // See https://github.com/quinn-rs/quinn/issues/1126 return Ok(true); } } Ok(false) } fn forward_endpoint_events(&mut self) { while let Some(event) = self.inner.poll_endpoint_events() { // If the endpoint driver is gone, noop. let _ = self.endpoint_events.send((self.handle, event)); } } /// If this returns `Err`, the endpoint is dead, so the driver should exit immediately. 
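    ///
    /// Drains events received from the endpoint driver: socket rebinds, protocol events
    /// addressed to this connection, and requests to close.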
fn process_conn_events( &mut self, shared: &Shared, cx: &mut Context, ) -> Result<(), ConnectionError> { loop { match self.conn_events.poll_recv(cx) { Poll::Ready(Some(ConnectionEvent::Rebind(socket))) => { self.socket = socket; self.io_poller = self.socket.clone().create_io_poller(); self.inner.local_address_changed(); } Poll::Ready(Some(ConnectionEvent::Proto(event))) => { self.inner.handle_event(event); } Poll::Ready(Some(ConnectionEvent::Close { reason, error_code })) => { self.close(error_code, reason, shared); } Poll::Ready(None) => { return Err(ConnectionError::TransportError(proto::TransportError { code: proto::TransportErrorCode::INTERNAL_ERROR, frame: None, reason: "endpoint driver future was dropped".to_string(), })); } Poll::Pending => { return Ok(()); } } } } fn forward_app_events(&mut self, shared: &Shared) { while let Some(event) = self.inner.poll() { use proto::Event::*; match event { HandshakeDataReady => { if let Some(x) = self.on_handshake_data.take() { let _ = x.send(()); } } Connected => { self.connected = true; if let Some(x) = self.on_connected.take() { // We don't care if the on-connected future was dropped let _ = x.send(self.inner.accepted_0rtt()); } if self.inner.side().is_client() && !self.inner.accepted_0rtt() { // Wake up rejected 0-RTT streams so they can fail immediately with // `ZeroRttRejected` errors. wake_all(&mut self.blocked_writers); wake_all(&mut self.blocked_readers); wake_all(&mut self.stopped); } } ConnectionLost { reason } => { self.terminate(reason, shared); } Stream(StreamEvent::Writable { id }) => wake_stream(id, &mut self.blocked_writers), Stream(StreamEvent::Opened { dir: Dir::Uni }) => { shared.stream_incoming[Dir::Uni as usize].notify_waiters(); } Stream(StreamEvent::Opened { dir: Dir::Bi }) => { shared.stream_incoming[Dir::Bi as usize].notify_waiters(); } DatagramReceived => { shared.datagram_received.notify_waiters(); } DatagramsUnblocked => { shared.datagrams_unblocked.notify_waiters(); } Stream(StreamEvent::Readable { id }) => wake_stream(id, &mut self.blocked_readers), Stream(StreamEvent::Available { dir }) => { // Might mean any number of streams are ready, so we wake up everyone shared.stream_budget_available[dir as usize].notify_waiters(); } Stream(StreamEvent::Finished { id }) => wake_stream(id, &mut self.stopped), Stream(StreamEvent::Stopped { id, .. }) => { wake_stream(id, &mut self.stopped); wake_stream(id, &mut self.blocked_writers); } } } } fn drive_timer(&mut self, cx: &mut Context) -> bool { // Check whether we need to (re)set the timer. If so, we must poll again to ensure the // timer is registered with the runtime (and check whether it's already // expired). 
match self.inner.poll_timeout() { Some(deadline) => { if let Some(delay) = &mut self.timer { // There is no need to reset the tokio timer if the deadline // did not change if self .timer_deadline .map(|current_deadline| current_deadline != deadline) .unwrap_or(true) { delay.as_mut().reset(deadline); } } else { self.timer = Some(self.runtime.new_timer(deadline)); } // Store the actual expiration time of the timer self.timer_deadline = Some(deadline); } None => { self.timer_deadline = None; return false; } } if self.timer_deadline.is_none() { return false; } let delay = self .timer .as_mut() .expect("timer must exist in this state") .as_mut(); if delay.poll(cx).is_pending() { // Since there wasn't a timeout event, there is nothing new // for the connection to do return false; } // A timer expired, so the caller needs to check for // new transmits, which might cause new timers to be set. self.inner.handle_timeout(self.runtime.now()); self.timer_deadline = None; true } /// Wake up a blocked `Driver` task to process I/O pub(crate) fn wake(&mut self) { if let Some(x) = self.driver.take() { x.wake(); } } /// Used to wake up all blocked futures when the connection becomes closed for any reason fn terminate(&mut self, reason: ConnectionError, shared: &Shared) { self.error = Some(reason.clone()); if let Some(x) = self.on_handshake_data.take() { let _ = x.send(()); } wake_all(&mut self.blocked_writers); wake_all(&mut self.blocked_readers); shared.stream_budget_available[Dir::Uni as usize].notify_waiters(); shared.stream_budget_available[Dir::Bi as usize].notify_waiters(); shared.stream_incoming[Dir::Uni as usize].notify_waiters(); shared.stream_incoming[Dir::Bi as usize].notify_waiters(); shared.datagram_received.notify_waiters(); shared.datagrams_unblocked.notify_waiters(); if let Some(x) = self.on_connected.take() { let _ = x.send(false); } wake_all(&mut self.stopped); shared.closed.notify_waiters(); } fn close(&mut self, error_code: VarInt, reason: Bytes, shared: &Shared) { self.inner.close(self.runtime.now(), error_code, reason); self.terminate(ConnectionError::LocallyClosed, shared); self.wake(); } /// Close for a reason other than the application's explicit request pub(crate) fn implicit_close(&mut self, shared: &Shared) { self.close(0u32.into(), Bytes::new(), shared); } pub(crate) fn check_0rtt(&self) -> Result<(), ()> { if self.inner.is_handshaking() || self.inner.accepted_0rtt() || self.inner.side().is_server() { Ok(()) } else { Err(()) } } } impl Drop for State { fn drop(&mut self) { if !self.inner.is_drained() { // Ensure the endpoint can tidy up let _ = self .endpoint_events .send((self.handle, proto::EndpointEvent::drained())); } } } impl fmt::Debug for State { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("State").field("inner", &self.inner).finish() } } fn wake_stream(stream_id: StreamId, wakers: &mut FxHashMap) { if let Some(waker) = wakers.remove(&stream_id) { waker.wake(); } } fn wake_all(wakers: &mut FxHashMap) { wakers.drain().for_each(|(_, waker)| waker.wake()) } /// Errors that can arise when sending a datagram #[derive(Debug, Error, Clone, Eq, PartialEq)] pub enum SendDatagramError { /// The peer does not support receiving datagram frames #[error("datagrams not supported by peer")] UnsupportedByPeer, /// Datagram support is disabled locally #[error("datagram support disabled")] Disabled, /// The datagram is larger than the connection can currently accommodate /// /// Indicates that the path MTU minus overhead or the limit advertised by the peer has been 
/// exceeded. #[error("datagram too large")] TooLarge, /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), } /// The maximum amount of datagrams which will be produced in a single `drive_transmit` call /// /// This limits the amount of CPU resources consumed by datagram generation, /// and allows other tasks (like receiving ACKs) to run in between. const MAX_TRANSMIT_DATAGRAMS: usize = 20; quinn-0.11.6/src/endpoint.rs000064400000000000000000000756121046102023000140330ustar 00000000000000use std::{ collections::VecDeque, future::Future, io, io::IoSliceMut, mem, net::{SocketAddr, SocketAddrV6}, pin::Pin, str, sync::{Arc, Mutex}, task::{Context, Poll, Waker}, time::Instant, }; #[cfg(any(feature = "aws-lc-rs", feature = "ring"))] use crate::runtime::default_runtime; use crate::{ runtime::{AsyncUdpSocket, Runtime}, udp_transmit, }; use bytes::{Bytes, BytesMut}; use pin_project_lite::pin_project; use proto::{ self as proto, ClientConfig, ConnectError, ConnectionError, ConnectionHandle, DatagramEvent, EndpointEvent, ServerConfig, }; use rustc_hash::FxHashMap; #[cfg(any(feature = "aws-lc-rs", feature = "ring"))] use socket2::{Domain, Protocol, Socket, Type}; use tokio::sync::{futures::Notified, mpsc, Notify}; use tracing::{Instrument, Span}; use udp::{RecvMeta, BATCH_SIZE}; use crate::{ connection::Connecting, incoming::Incoming, work_limiter::WorkLimiter, ConnectionEvent, EndpointConfig, VarInt, IO_LOOP_BOUND, RECV_TIME_BOUND, }; /// A QUIC endpoint. /// /// An endpoint corresponds to a single UDP socket, may host many connections, and may act as both /// client and server for different connections. /// /// May be cloned to obtain another handle to the same endpoint. #[derive(Debug, Clone)] pub struct Endpoint { pub(crate) inner: EndpointRef, pub(crate) default_client_config: Option, runtime: Arc, } impl Endpoint { /// Helper to construct an endpoint for use with outgoing connections only /// /// Note that `addr` is the *local* address to bind to, which should usually be a wildcard /// address like `0.0.0.0:0` or `[::]:0`, which allow communication with any reachable IPv4 or /// IPv6 address respectively from an OS-assigned port. /// /// If an IPv6 address is provided, attempts to make the socket dual-stack so as to allow /// communication with both IPv4 and IPv6 addresses. As such, calling `Endpoint::client` with /// the address `[::]:0` is a reasonable default to maximize the ability to connect to other /// address. For example: /// /// ``` /// quinn::Endpoint::client((std::net::Ipv6Addr::UNSPECIFIED, 0).into()); /// ``` /// /// Some environments may not allow creation of dual-stack sockets, in which case an IPv6 /// client will only be able to connect to IPv6 servers. An IPv4 client is never dual-stack. 
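    ///
    /// Note that the returned endpoint has no default `ClientConfig`. An illustrative sketch of
    /// finishing the setup (the config value itself is assumed to come from elsewhere):
    ///
    /// ```no_run
    /// # fn example(config: quinn::ClientConfig) -> std::io::Result<()> {
    /// let mut endpoint = quinn::Endpoint::client((std::net::Ipv6Addr::UNSPECIFIED, 0).into())?;
    /// // Without a default config, `Endpoint::connect()` fails with
    /// // `ConnectError::NoDefaultClientConfig`; alternatively, pass a config per connection
    /// // via `Endpoint::connect_with()`.
    /// endpoint.set_default_client_config(config);
    /// # Ok(())
    /// # }
    /// ```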
#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] // `EndpointConfig::default()` is only available with these pub fn client(addr: SocketAddr) -> io::Result { let socket = Socket::new(Domain::for_address(addr), Type::DGRAM, Some(Protocol::UDP))?; if addr.is_ipv6() { if let Err(e) = socket.set_only_v6(false) { tracing::debug!(%e, "unable to make socket dual-stack"); } } socket.bind(&addr.into())?; let runtime = default_runtime() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "no async runtime found"))?; Self::new_with_abstract_socket( EndpointConfig::default(), None, runtime.wrap_udp_socket(socket.into())?, runtime, ) } /// Returns relevant stats from this Endpoint pub fn stats(&self) -> EndpointStats { self.inner.state.lock().unwrap().stats } /// Helper to construct an endpoint for use with both incoming and outgoing connections /// /// Platform defaults for dual-stack sockets vary. For example, any socket bound to a wildcard /// IPv6 address on Windows will not by default be able to communicate with IPv4 /// addresses. Portable applications should bind an address that matches the family they wish to /// communicate within. #[cfg(any(feature = "aws-lc-rs", feature = "ring"))] // `EndpointConfig::default()` is only available with these pub fn server(config: ServerConfig, addr: SocketAddr) -> io::Result { let socket = std::net::UdpSocket::bind(addr)?; let runtime = default_runtime() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "no async runtime found"))?; Self::new_with_abstract_socket( EndpointConfig::default(), Some(config), runtime.wrap_udp_socket(socket)?, runtime, ) } /// Construct an endpoint with arbitrary configuration and socket pub fn new( config: EndpointConfig, server_config: Option, socket: std::net::UdpSocket, runtime: Arc, ) -> io::Result { let socket = runtime.wrap_udp_socket(socket)?; Self::new_with_abstract_socket(config, server_config, socket, runtime) } /// Construct an endpoint with arbitrary configuration and pre-constructed abstract socket /// /// Useful when `socket` has additional state (e.g. sidechannels) attached for which shared /// ownership is needed. pub fn new_with_abstract_socket( config: EndpointConfig, server_config: Option, socket: Arc, runtime: Arc, ) -> io::Result { let addr = socket.local_addr()?; let allow_mtud = !socket.may_fragment(); let rc = EndpointRef::new( socket, proto::Endpoint::new( Arc::new(config), server_config.map(Arc::new), allow_mtud, None, ), addr.is_ipv6(), runtime.clone(), ); let driver = EndpointDriver(rc.clone()); runtime.spawn(Box::pin( async { if let Err(e) = driver.await { tracing::error!("I/O error: {}", e); } } .instrument(Span::current()), )); Ok(Self { inner: rc, default_client_config: None, runtime, }) } /// Get the next incoming connection attempt from a client /// /// Yields [`Incoming`]s, or `None` if the endpoint is [`close`](Self::close)d. [`Incoming`] /// can be `await`ed to obtain the final [`Connection`](crate::Connection), or used to e.g. /// filter connection attempts or force address validation, or converted into an intermediate /// `Connecting` future which can be used to e.g. send 0.5-RTT data. pub fn accept(&self) -> Accept<'_> { Accept { endpoint: self, notify: self.inner.shared.incoming.notified(), } } /// Set the client configuration used by `connect` pub fn set_default_client_config(&mut self, config: ClientConfig) { self.default_client_config = Some(config); } /// Connect to a remote endpoint /// /// `server_name` must be covered by the certificate presented by the server. 
This prevents a /// connection from being intercepted by an attacker with a valid certificate for some other /// server. /// /// May fail immediately due to configuration errors, or in the future if the connection could /// not be established. pub fn connect(&self, addr: SocketAddr, server_name: &str) -> Result { let config = match &self.default_client_config { Some(config) => config.clone(), None => return Err(ConnectError::NoDefaultClientConfig), }; self.connect_with(config, addr, server_name) } /// Connect to a remote endpoint using a custom configuration. /// /// See [`connect()`] for details. /// /// [`connect()`]: Endpoint::connect pub fn connect_with( &self, config: ClientConfig, addr: SocketAddr, server_name: &str, ) -> Result { let mut endpoint = self.inner.state.lock().unwrap(); if endpoint.driver_lost || endpoint.recv_state.connections.close.is_some() { return Err(ConnectError::EndpointStopping); } if addr.is_ipv6() && !endpoint.ipv6 { return Err(ConnectError::InvalidRemoteAddress(addr)); } let addr = if endpoint.ipv6 { SocketAddr::V6(ensure_ipv6(addr)) } else { addr }; let (ch, conn) = endpoint .inner .connect(self.runtime.now(), config, addr, server_name)?; let socket = endpoint.socket.clone(); endpoint.stats.outgoing_handshakes += 1; Ok(endpoint .recv_state .connections .insert(ch, conn, socket, self.runtime.clone())) } /// Switch to a new UDP socket /// /// See [`Endpoint::rebind_abstract()`] for details. pub fn rebind(&self, socket: std::net::UdpSocket) -> io::Result<()> { self.rebind_abstract(self.runtime.wrap_udp_socket(socket)?) } /// Switch to a new UDP socket /// /// Allows the endpoint's address to be updated live, affecting all active connections. Incoming /// connections and connections to servers unreachable from the new address will be lost. /// /// On error, the old UDP socket is retained. pub fn rebind_abstract(&self, socket: Arc) -> io::Result<()> { let addr = socket.local_addr()?; let mut inner = self.inner.state.lock().unwrap(); inner.prev_socket = Some(mem::replace(&mut inner.socket, socket)); inner.ipv6 = addr.is_ipv6(); // Update connection socket references for sender in inner.recv_state.connections.senders.values() { // Ignoring errors from dropped connections let _ = sender.send(ConnectionEvent::Rebind(inner.socket.clone())); } Ok(()) } /// Replace the server configuration, affecting new incoming connections only /// /// Useful for e.g. refreshing TLS certificates without disrupting existing connections. pub fn set_server_config(&self, server_config: Option) { self.inner .state .lock() .unwrap() .inner .set_server_config(server_config.map(Arc::new)) } /// Get the local `SocketAddr` the underlying socket is bound to pub fn local_addr(&self) -> io::Result { self.inner.state.lock().unwrap().socket.local_addr() } /// Get the number of connections that are currently open pub fn open_connections(&self) -> usize { self.inner.state.lock().unwrap().inner.open_connections() } /// Close all of this endpoint's connections immediately and cease accepting new connections. /// /// See [`Connection::close()`] for details. 
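    ///
    /// An illustrative shutdown sketch (error code and reason are placeholders):
    ///
    /// ```no_run
    /// # async fn example(endpoint: quinn::Endpoint) {
    /// endpoint.close(0u32.into(), b"server shutting down");
    /// // Give already-queued CONNECTION_CLOSE frames a chance to reach peers before exiting
    /// endpoint.wait_idle().await;
    /// # }
    /// ```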
/// /// [`Connection::close()`]: crate::Connection::close pub fn close(&self, error_code: VarInt, reason: &[u8]) { let reason = Bytes::copy_from_slice(reason); let mut endpoint = self.inner.state.lock().unwrap(); endpoint.recv_state.connections.close = Some((error_code, reason.clone())); for sender in endpoint.recv_state.connections.senders.values() { // Ignoring errors from dropped connections let _ = sender.send(ConnectionEvent::Close { error_code, reason: reason.clone(), }); } self.inner.shared.incoming.notify_waiters(); } /// Wait for all connections on the endpoint to be cleanly shut down /// /// Waiting for this condition before exiting ensures that a good-faith effort is made to notify /// peers of recent connection closes, whereas exiting immediately could force them to wait out /// the idle timeout period. /// /// Does not proactively close existing connections or cause incoming connections to be /// rejected. Consider calling [`close()`] if that is desired. /// /// [`close()`]: Endpoint::close pub async fn wait_idle(&self) { loop { { let endpoint = &mut *self.inner.state.lock().unwrap(); if endpoint.recv_state.connections.is_empty() { break; } // Construct future while lock is held to avoid race self.inner.shared.idle.notified() } .await; } } } /// Statistics on [Endpoint] activity #[non_exhaustive] #[derive(Debug, Default, Copy, Clone)] pub struct EndpointStats { /// Cummulative number of Quic handshakes accepted by this [Endpoint] pub accepted_handshakes: u64, /// Cummulative number of Quic handshakees sent from this [Endpoint] pub outgoing_handshakes: u64, /// Cummulative number of Quic handshakes refused on this [Endpoint] pub refused_handshakes: u64, /// Cummulative number of Quic handshakes ignored on this [Endpoint] pub ignored_handshakes: u64, } /// A future that drives IO on an endpoint /// /// This task functions as the switch point between the UDP socket object and the /// `Endpoint` responsible for routing datagrams to their owning `Connection`. /// In order to do so, it also facilitates the exchange of different types of events /// flowing between the `Endpoint` and the tasks managing `Connection`s. As such, /// running this task is necessary to keep the endpoint's connections running. /// /// `EndpointDriver` futures terminate when all clones of the `Endpoint` have been dropped, or when /// an I/O error occurs. #[must_use = "endpoint drivers must be spawned for I/O to occur"] #[derive(Debug)] pub(crate) struct EndpointDriver(pub(crate) EndpointRef); impl Future for EndpointDriver { type Output = Result<(), io::Error>; #[allow(unused_mut)] // MSRV fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let mut endpoint = self.0.state.lock().unwrap(); if endpoint.driver.is_none() { endpoint.driver = Some(cx.waker().clone()); } let now = endpoint.runtime.now(); let mut keep_going = false; keep_going |= endpoint.drive_recv(cx, now)?; keep_going |= endpoint.handle_events(cx, &self.0.shared); if !endpoint.recv_state.incoming.is_empty() { self.0.shared.incoming.notify_waiters(); } if endpoint.ref_count == 0 && endpoint.recv_state.connections.is_empty() { Poll::Ready(Ok(())) } else { drop(endpoint); // If there is more work to do schedule the endpoint task again. // `wake_by_ref()` is called outside the lock to minimize // lock contention on a multithreaded runtime. 
if keep_going { cx.waker().wake_by_ref(); } Poll::Pending } } } impl Drop for EndpointDriver { fn drop(&mut self) { let mut endpoint = self.0.state.lock().unwrap(); endpoint.driver_lost = true; self.0.shared.incoming.notify_waiters(); // Drop all outgoing channels, signaling the termination of the endpoint to the associated // connections. endpoint.recv_state.connections.senders.clear(); } } #[derive(Debug)] pub(crate) struct EndpointInner { pub(crate) state: Mutex, pub(crate) shared: Shared, } impl EndpointInner { pub(crate) fn accept( &self, incoming: proto::Incoming, server_config: Option>, ) -> Result { let mut state = self.state.lock().unwrap(); let mut response_buffer = Vec::new(); let now = state.runtime.now(); match state .inner .accept(incoming, now, &mut response_buffer, server_config) { Ok((handle, conn)) => { state.stats.accepted_handshakes += 1; let socket = state.socket.clone(); let runtime = state.runtime.clone(); Ok(state .recv_state .connections .insert(handle, conn, socket, runtime)) } Err(error) => { if let Some(transmit) = error.response { respond(transmit, &response_buffer, &*state.socket); } Err(error.cause) } } } pub(crate) fn refuse(&self, incoming: proto::Incoming) { let mut state = self.state.lock().unwrap(); state.stats.refused_handshakes += 1; let mut response_buffer = Vec::new(); let transmit = state.inner.refuse(incoming, &mut response_buffer); respond(transmit, &response_buffer, &*state.socket); } pub(crate) fn retry(&self, incoming: proto::Incoming) -> Result<(), proto::RetryError> { let mut state = self.state.lock().unwrap(); let mut response_buffer = Vec::new(); let transmit = state.inner.retry(incoming, &mut response_buffer)?; respond(transmit, &response_buffer, &*state.socket); Ok(()) } pub(crate) fn ignore(&self, incoming: proto::Incoming) { let mut state = self.state.lock().unwrap(); state.stats.ignored_handshakes += 1; state.inner.ignore(incoming); } } #[derive(Debug)] pub(crate) struct State { socket: Arc, /// During an active migration, abandoned_socket receives traffic /// until the first packet arrives on the new socket. prev_socket: Option>, inner: proto::Endpoint, recv_state: RecvState, driver: Option, ipv6: bool, events: mpsc::UnboundedReceiver<(ConnectionHandle, EndpointEvent)>, /// Number of live handles that can be used to initiate or handle I/O; excludes the driver ref_count: usize, driver_lost: bool, runtime: Arc, stats: EndpointStats, } #[derive(Debug)] pub(crate) struct Shared { incoming: Notify, idle: Notify, } impl State { fn drive_recv(&mut self, cx: &mut Context, now: Instant) -> Result { let get_time = || self.runtime.now(); self.recv_state.recv_limiter.start_cycle(get_time); if let Some(socket) = &self.prev_socket { // We don't care about the `PollProgress` from old sockets. let poll_res = self.recv_state .poll_socket(cx, &mut self.inner, &**socket, &*self.runtime, now); if poll_res.is_err() { self.prev_socket = None; } }; let poll_res = self.recv_state .poll_socket(cx, &mut self.inner, &*self.socket, &*self.runtime, now); self.recv_state.recv_limiter.finish_cycle(get_time); let poll_res = poll_res?; if poll_res.received_connection_packet { // Traffic has arrived on self.socket, therefore there is no need for the abandoned // one anymore. TODO: Account for multiple outgoing connections. 
self.prev_socket = None; } Ok(poll_res.keep_going) } fn handle_events(&mut self, cx: &mut Context, shared: &Shared) -> bool { for _ in 0..IO_LOOP_BOUND { let (ch, event) = match self.events.poll_recv(cx) { Poll::Ready(Some(x)) => x, Poll::Ready(None) => unreachable!("EndpointInner owns one sender"), Poll::Pending => { return false; } }; if event.is_drained() { self.recv_state.connections.senders.remove(&ch); if self.recv_state.connections.is_empty() { shared.idle.notify_waiters(); } } let Some(event) = self.inner.handle_event(ch, event) else { continue; }; // Ignoring errors from dropped connections that haven't yet been cleaned up let _ = self .recv_state .connections .senders .get_mut(&ch) .unwrap() .send(ConnectionEvent::Proto(event)); } true } } impl Drop for State { fn drop(&mut self) { for incoming in self.recv_state.incoming.drain(..) { self.inner.ignore(incoming); } } } fn respond(transmit: proto::Transmit, response_buffer: &[u8], socket: &dyn AsyncUdpSocket) { // Send if there's kernel buffer space; otherwise, drop it // // As an endpoint-generated packet, we know this is an // immediate, stateless response to an unconnected peer, // one of: // // - A version negotiation response due to an unknown version // - A `CLOSE` due to a malformed or unwanted connection attempt // - A stateless reset due to an unrecognized connection // - A `Retry` packet due to a connection attempt when // `use_retry` is set // // In each case, a well-behaved peer can be trusted to retry a // few times, which is guaranteed to produce the same response // from us. Repeated failures might at worst cause a peer's new // connection attempt to time out, which is acceptable if we're // under such heavy load that there's never room for this code // to transmit. This is morally equivalent to the packet getting // lost due to congestion further along the link, which // similarly relies on peer retries for recovery. _ = socket.try_send(&udp_transmit(&transmit, &response_buffer[..transmit.size])); } #[inline] fn proto_ecn(ecn: udp::EcnCodepoint) -> proto::EcnCodepoint { match ecn { udp::EcnCodepoint::Ect0 => proto::EcnCodepoint::Ect0, udp::EcnCodepoint::Ect1 => proto::EcnCodepoint::Ect1, udp::EcnCodepoint::Ce => proto::EcnCodepoint::Ce, } } #[derive(Debug)] struct ConnectionSet { /// Senders for communicating with the endpoint's connections senders: FxHashMap>, /// Stored to give out clones to new ConnectionInners sender: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, /// Set if the endpoint has been manually closed close: Option<(VarInt, Bytes)>, } impl ConnectionSet { fn insert( &mut self, handle: ConnectionHandle, conn: proto::Connection, socket: Arc, runtime: Arc, ) -> Connecting { let (send, recv) = mpsc::unbounded_channel(); if let Some((error_code, ref reason)) = self.close { send.send(ConnectionEvent::Close { error_code, reason: reason.clone(), }) .unwrap(); } self.senders.insert(handle, send); Connecting::new(handle, conn, self.sender.clone(), recv, socket, runtime) } fn is_empty(&self) -> bool { self.senders.is_empty() } } fn ensure_ipv6(x: SocketAddr) -> SocketAddrV6 { match x { SocketAddr::V6(x) => x, SocketAddr::V4(x) => SocketAddrV6::new(x.ip().to_ipv6_mapped(), x.port(), 0, 0), } } pin_project! 
{ /// Future produced by [`Endpoint::accept`] pub struct Accept<'a> { endpoint: &'a Endpoint, #[pin] notify: Notified<'a>, } } impl Future for Accept<'_> { type Output = Option; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let mut this = self.project(); let mut endpoint = this.endpoint.inner.state.lock().unwrap(); if endpoint.driver_lost { return Poll::Ready(None); } if let Some(incoming) = endpoint.recv_state.incoming.pop_front() { // Release the mutex lock on endpoint so cloning it doesn't deadlock drop(endpoint); let incoming = Incoming::new(incoming, this.endpoint.inner.clone()); return Poll::Ready(Some(incoming)); } if endpoint.recv_state.connections.close.is_some() { return Poll::Ready(None); } loop { match this.notify.as_mut().poll(ctx) { // `state` lock ensures we didn't race with readiness Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => this .notify .set(this.endpoint.inner.shared.incoming.notified()), } } } } #[derive(Debug)] pub(crate) struct EndpointRef(Arc); impl EndpointRef { pub(crate) fn new( socket: Arc, inner: proto::Endpoint, ipv6: bool, runtime: Arc, ) -> Self { let (sender, events) = mpsc::unbounded_channel(); let recv_state = RecvState::new(sender, socket.max_receive_segments(), &inner); Self(Arc::new(EndpointInner { shared: Shared { incoming: Notify::new(), idle: Notify::new(), }, state: Mutex::new(State { socket, prev_socket: None, inner, ipv6, events, driver: None, ref_count: 0, driver_lost: false, recv_state, runtime, stats: EndpointStats::default(), }), })) } } impl Clone for EndpointRef { fn clone(&self) -> Self { self.0.state.lock().unwrap().ref_count += 1; Self(self.0.clone()) } } impl Drop for EndpointRef { fn drop(&mut self) { let endpoint = &mut *self.0.state.lock().unwrap(); if let Some(x) = endpoint.ref_count.checked_sub(1) { endpoint.ref_count = x; if x == 0 { // If the driver is about to be on its own, ensure it can shut down if the last // connection is gone. if let Some(task) = endpoint.driver.take() { task.wake(); } } } } } impl std::ops::Deref for EndpointRef { type Target = EndpointInner; fn deref(&self) -> &Self::Target { &self.0 } } /// State directly involved in handling incoming packets #[derive(Debug)] struct RecvState { incoming: VecDeque, connections: ConnectionSet, recv_buf: Box<[u8]>, recv_limiter: WorkLimiter, } impl RecvState { fn new( sender: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, max_receive_segments: usize, endpoint: &proto::Endpoint, ) -> Self { let recv_buf = vec![ 0; endpoint.config().get_max_udp_payload_size().min(64 * 1024) as usize * max_receive_segments * BATCH_SIZE ]; Self { connections: ConnectionSet { senders: FxHashMap::default(), sender, close: None, }, incoming: VecDeque::new(), recv_buf: recv_buf.into(), recv_limiter: WorkLimiter::new(RECV_TIME_BOUND), } } fn poll_socket( &mut self, cx: &mut Context, endpoint: &mut proto::Endpoint, socket: &dyn AsyncUdpSocket, runtime: &dyn Runtime, now: Instant, ) -> Result { let mut received_connection_packet = false; let mut metas = [RecvMeta::default(); BATCH_SIZE]; let mut iovs: [IoSliceMut; BATCH_SIZE] = { let mut bufs = self .recv_buf .chunks_mut(self.recv_buf.len() / BATCH_SIZE) .map(IoSliceMut::new); // expect() safe as self.recv_buf is chunked into BATCH_SIZE items // and iovs will be of size BATCH_SIZE, thus from_fn is called // exactly BATCH_SIZE times. 
std::array::from_fn(|_| bufs.next().expect("BATCH_SIZE elements")) }; loop { match socket.poll_recv(cx, &mut iovs, &mut metas) { Poll::Ready(Ok(msgs)) => { self.recv_limiter.record_work(msgs); for (meta, buf) in metas.iter().zip(iovs.iter()).take(msgs) { let mut data: BytesMut = buf[0..meta.len].into(); while !data.is_empty() { let buf = data.split_to(meta.stride.min(data.len())); let mut response_buffer = Vec::new(); match endpoint.handle( now, meta.addr, meta.dst_ip, meta.ecn.map(proto_ecn), buf, &mut response_buffer, ) { Some(DatagramEvent::NewConnection(incoming)) => { if self.connections.close.is_none() { self.incoming.push_back(incoming); } else { let transmit = endpoint.refuse(incoming, &mut response_buffer); respond(transmit, &response_buffer, socket); } } Some(DatagramEvent::ConnectionEvent(handle, event)) => { // Ignoring errors from dropped connections that haven't yet been cleaned up received_connection_packet = true; let _ = self .connections .senders .get_mut(&handle) .unwrap() .send(ConnectionEvent::Proto(event)); } Some(DatagramEvent::Response(transmit)) => { respond(transmit, &response_buffer, socket); } None => {} } } } } Poll::Pending => { return Ok(PollProgress { received_connection_packet, keep_going: false, }); } // Ignore ECONNRESET as it's undefined in QUIC and may be injected by an // attacker Poll::Ready(Err(ref e)) if e.kind() == io::ErrorKind::ConnectionReset => { continue; } Poll::Ready(Err(e)) => { return Err(e); } } if !self.recv_limiter.allow_work(|| runtime.now()) { return Ok(PollProgress { received_connection_packet, keep_going: true, }); } } } } #[derive(Default)] struct PollProgress { /// Whether a datagram was routed to an existing connection received_connection_packet: bool, /// Whether datagram handling was interrupted early by the work limiter for fairness keep_going: bool, } quinn-0.11.6/src/incoming.rs000064400000000000000000000104141046102023000140030ustar 00000000000000use std::{ future::{Future, IntoFuture}, net::{IpAddr, SocketAddr}, pin::Pin, sync::Arc, task::{Context, Poll}, }; use proto::{ConnectionError, ConnectionId, ServerConfig}; use thiserror::Error; use crate::{ connection::{Connecting, Connection}, endpoint::EndpointRef, }; /// An incoming connection for which the server has not yet begun its part of the handshake #[derive(Debug)] pub struct Incoming(Option); impl Incoming { pub(crate) fn new(inner: proto::Incoming, endpoint: EndpointRef) -> Self { Self(Some(State { inner, endpoint })) } /// Attempt to accept this incoming connection (an error may still occur) pub fn accept(mut self) -> Result { let state = self.0.take().unwrap(); state.endpoint.accept(state.inner, None) } /// Accept this incoming connection using a custom configuration. /// /// See [`accept()`] for more details. /// /// [`accept()`]: Incoming::accept pub fn accept_with( mut self, server_config: Arc, ) -> Result { let state = self.0.take().unwrap(); state.endpoint.accept(state.inner, Some(server_config)) } /// Reject this incoming connection attempt pub fn refuse(mut self) { let state = self.0.take().unwrap(); state.endpoint.refuse(state.inner); } /// Respond with a retry packet, requiring the client to retry with address validation /// /// Errors if `remote_address_validated()` is true. 
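    ///
    /// An illustrative sketch of forcing address validation for unvalidated peers (the
    /// accept-loop details are placeholders):
    ///
    /// ```no_run
    /// # async fn example(endpoint: quinn::Endpoint) {
    /// while let Some(incoming) = endpoint.accept().await {
    ///     if incoming.remote_address_validated() {
    ///         // Address already validated; proceed with the handshake
    ///         let _connecting = incoming.accept();
    ///     } else {
    ///         // Ask the client to prove ownership of its address first; cannot fail here
    ///         // because the address has not yet been validated
    ///         let _ = incoming.retry();
    ///     }
    /// }
    /// # }
    /// ```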
pub fn retry(mut self) -> Result<(), RetryError> { let state = self.0.take().unwrap(); state.endpoint.retry(state.inner).map_err(|e| { RetryError(Self(Some(State { inner: e.into_incoming(), endpoint: state.endpoint, }))) }) } /// Ignore this incoming connection attempt, not sending any packet in response pub fn ignore(mut self) { let state = self.0.take().unwrap(); state.endpoint.ignore(state.inner); } /// The local IP address which was used when the peer established /// the connection pub fn local_ip(&self) -> Option { self.0.as_ref().unwrap().inner.local_ip() } /// The peer's UDP address pub fn remote_address(&self) -> SocketAddr { self.0.as_ref().unwrap().inner.remote_address() } /// Whether the socket address that is initiating this connection has been validated /// /// This means that the sender of the initial packet has proved that they can receive traffic /// sent to `self.remote_address()`. pub fn remote_address_validated(&self) -> bool { self.0.as_ref().unwrap().inner.remote_address_validated() } /// The original destination CID when initiating the connection pub fn orig_dst_cid(&self) -> ConnectionId { *self.0.as_ref().unwrap().inner.orig_dst_cid() } } impl Drop for Incoming { fn drop(&mut self) { // Implicit reject, similar to Connection's implicit close if let Some(state) = self.0.take() { state.endpoint.refuse(state.inner); } } } #[derive(Debug)] struct State { inner: proto::Incoming, endpoint: EndpointRef, } /// Error for attempting to retry an [`Incoming`] which already bears an address /// validation token from a previous retry #[derive(Debug, Error)] #[error("retry() with validated Incoming")] pub struct RetryError(Incoming); impl RetryError { /// Get the [`Incoming`] pub fn into_incoming(self) -> Incoming { self.0 } } /// Basic adapter to let [`Incoming`] be `await`-ed like a [`Connecting`] #[derive(Debug)] pub struct IncomingFuture(Result); impl Future for IncomingFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match &mut self.0 { Ok(ref mut connecting) => Pin::new(connecting).poll(cx), Err(e) => Poll::Ready(Err(e.clone())), } } } impl IntoFuture for Incoming { type Output = Result; type IntoFuture = IncomingFuture; fn into_future(self) -> Self::IntoFuture { IncomingFuture(self.accept()) } } quinn-0.11.6/src/lib.rs000064400000000000000000000131661046102023000127550ustar 00000000000000//! QUIC transport protocol implementation //! //! [QUIC](https://en.wikipedia.org/wiki/QUIC) is a modern transport protocol addressing //! shortcomings of TCP, such as head-of-line blocking, poor security, slow handshakes, and //! inefficient congestion control. This crate provides a portable userspace implementation. It //! builds on top of quinn-proto, which implements protocol logic independent of any particular //! runtime. //! //! The entry point of this crate is the [`Endpoint`]. //! //! # About QUIC //! //! A QUIC connection is an association between two endpoints. The endpoint which initiates the //! connection is termed the client, and the endpoint which accepts it is termed the server. A //! single endpoint may function as both client and server for different connections, for example //! in a peer-to-peer application. To communicate application data, each endpoint may open streams //! up to a limit dictated by its peer. Typically, that limit is increased as old streams are //! finished. //! //! Streams may be unidirectional or bidirectional, and are cheap to create and disposable. For //! 
example, a traditionally datagram-oriented application could use a new stream for every //! message it wants to send, no longer needing to worry about MTUs. Bidirectional streams behave //! much like a traditional TCP connection, and are useful for sending messages that have an //! immediate response, such as an HTTP request. Stream data is delivered reliably, and there is no //! ordering enforced between data on different streams. //! //! By avoiding head-of-line blocking and providing unified congestion control across all streams //! of a connection, QUIC is able to provide higher throughput and lower latency than one or //! multiple TCP connections between the same two hosts, while providing more useful behavior than //! raw UDP sockets. //! //! Quinn also exposes unreliable datagrams, which are a low-level primitive preferred when //! automatic fragmentation and retransmission of certain data is not desired. //! //! QUIC uses encryption and identity verification built directly on TLS 1.3. Just as with a TLS //! server, it is useful for a QUIC server to be identified by a certificate signed by a trusted //! authority. If this is infeasible--for example, if servers are short-lived or not associated //! with a domain name--then as with TLS, self-signed certificates can be used to provide //! encryption alone. #![warn(missing_docs)] #![warn(unreachable_pub)] #![warn(clippy::use_self)] use std::{sync::Arc, time::Duration}; macro_rules! ready { ($e:expr $(,)?) => { match $e { std::task::Poll::Ready(t) => t, std::task::Poll::Pending => return std::task::Poll::Pending, } }; } mod connection; mod endpoint; mod incoming; mod mutex; mod recv_stream; mod runtime; mod send_stream; mod work_limiter; pub use proto::{ congestion, crypto, AckFrequencyConfig, ApplicationClose, Chunk, ClientConfig, ClosedStream, ConfigError, ConnectError, ConnectionClose, ConnectionError, ConnectionId, ConnectionIdGenerator, ConnectionStats, Dir, EcnCodepoint, EndpointConfig, FrameStats, FrameType, IdleTimeout, MtuDiscoveryConfig, PathStats, ServerConfig, Side, StreamId, Transmit, TransportConfig, TransportErrorCode, UdpStats, VarInt, VarIntBoundsExceeded, Written, }; #[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] pub use rustls; pub use udp; pub use crate::connection::{ AcceptBi, AcceptUni, Connecting, Connection, OpenBi, OpenUni, ReadDatagram, SendDatagram, SendDatagramError, ZeroRttAccepted, }; pub use crate::endpoint::{Accept, Endpoint, EndpointStats}; pub use crate::incoming::{Incoming, IncomingFuture, RetryError}; pub use crate::recv_stream::{ReadError, ReadExactError, ReadToEndError, RecvStream, ResetError}; #[cfg(feature = "runtime-async-std")] pub use crate::runtime::AsyncStdRuntime; #[cfg(feature = "runtime-smol")] pub use crate::runtime::SmolRuntime; #[cfg(feature = "runtime-tokio")] pub use crate::runtime::TokioRuntime; pub use crate::runtime::{default_runtime, AsyncTimer, AsyncUdpSocket, Runtime, UdpPoller}; pub use crate::send_stream::{SendStream, StoppedError, WriteError}; #[cfg(test)] mod tests; #[derive(Debug)] enum ConnectionEvent { Close { error_code: VarInt, reason: bytes::Bytes, }, Proto(proto::ConnectionEvent), Rebind(Arc), } fn udp_transmit<'a>(t: &proto::Transmit, buffer: &'a [u8]) -> udp::Transmit<'a> { udp::Transmit { destination: t.destination, ecn: t.ecn.map(udp_ecn), contents: buffer, segment_size: t.segment_size, src_ip: t.src_ip, } } fn udp_ecn(ecn: proto::EcnCodepoint) -> udp::EcnCodepoint { match ecn { proto::EcnCodepoint::Ect0 => udp::EcnCodepoint::Ect0, 
proto::EcnCodepoint::Ect1 => udp::EcnCodepoint::Ect1, proto::EcnCodepoint::Ce => udp::EcnCodepoint::Ce, } } /// Maximum number of datagrams processed in send/recv calls to make before moving on to other processing /// /// This helps ensure we don't starve anything when the CPU is slower than the link. /// Value is selected by picking a low number which didn't degrade throughput in benchmarks. const IO_LOOP_BOUND: usize = 160; /// The maximum amount of time that should be spent in `recvmsg()` calls per endpoint iteration /// /// 50us are chosen so that an endpoint iteration with a 50us sendmsg limit blocks /// the runtime for a maximum of about 100us. /// Going much lower does not yield any noticeable difference, since a single `recvmmsg` /// batch of size 32 was observed to take 30us on some systems. const RECV_TIME_BOUND: Duration = Duration::from_micros(50); quinn-0.11.6/src/mutex.rs000064400000000000000000000105331046102023000133440ustar 00000000000000use std::{ fmt::Debug, ops::{Deref, DerefMut}, }; #[cfg(feature = "lock_tracking")] mod tracking { use super::*; use std::{ collections::VecDeque, time::{Duration, Instant}, }; use tracing::warn; #[derive(Debug)] struct Inner { last_lock_owner: VecDeque<(&'static str, Duration)>, value: T, } /// A Mutex which optionally allows to track the time a lock was held and /// emit warnings in case of excessive lock times pub(crate) struct Mutex { inner: std::sync::Mutex>, } impl std::fmt::Debug for Mutex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } } impl Mutex { pub(crate) fn new(value: T) -> Self { Self { inner: std::sync::Mutex::new(Inner { last_lock_owner: VecDeque::new(), value, }), } } /// Acquires the lock for a certain purpose /// /// The purpose will be recorded in the list of last lock owners pub(crate) fn lock(&self, purpose: &'static str) -> MutexGuard { // We don't bother dispatching through Runtime::now because they're pure performance // diagnostics. let now = Instant::now(); let guard = self.inner.lock().unwrap(); let lock_time = Instant::now(); let elapsed = lock_time.duration_since(now); if elapsed > Duration::from_millis(1) { warn!( "Locking the connection for {} took {:?}. 
Last owners: {:?}", purpose, elapsed, guard.last_lock_owner ); } MutexGuard { guard, start_time: lock_time, purpose, } } } pub(crate) struct MutexGuard<'a, T> { guard: std::sync::MutexGuard<'a, Inner>, start_time: Instant, purpose: &'static str, } impl<'a, T> Drop for MutexGuard<'a, T> { fn drop(&mut self) { if self.guard.last_lock_owner.len() == MAX_LOCK_OWNERS { self.guard.last_lock_owner.pop_back(); } let duration = self.start_time.elapsed(); if duration > Duration::from_millis(1) { warn!( "Utilizing the connection for {} took {:?}", self.purpose, duration ); } self.guard .last_lock_owner .push_front((self.purpose, duration)); } } impl<'a, T> Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { &self.guard.value } } impl<'a, T> DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.guard.value } } const MAX_LOCK_OWNERS: usize = 20; } #[cfg(feature = "lock_tracking")] pub(crate) use tracking::Mutex; #[cfg(not(feature = "lock_tracking"))] mod non_tracking { use super::*; /// A Mutex which optionally allows to track the time a lock was held and /// emit warnings in case of excessive lock times #[derive(Debug)] pub(crate) struct Mutex { inner: std::sync::Mutex, } impl Mutex { pub(crate) fn new(value: T) -> Self { Self { inner: std::sync::Mutex::new(value), } } /// Acquires the lock for a certain purpose /// /// The purpose will be recorded in the list of last lock owners pub(crate) fn lock(&self, _purpose: &'static str) -> MutexGuard { MutexGuard { guard: self.inner.lock().unwrap(), } } } pub(crate) struct MutexGuard<'a, T> { guard: std::sync::MutexGuard<'a, T>, } impl Deref for MutexGuard<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { self.guard.deref() } } impl DerefMut for MutexGuard<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.guard.deref_mut() } } } #[cfg(not(feature = "lock_tracking"))] pub(crate) use non_tracking::Mutex; quinn-0.11.6/src/recv_stream.rs000064400000000000000000000601651046102023000145220ustar 00000000000000use std::{ future::{poll_fn, Future}, io, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; use proto::{Chunk, Chunks, ClosedStream, ConnectionError, ReadableError, StreamId}; use thiserror::Error; use tokio::io::ReadBuf; use crate::{connection::ConnectionRef, VarInt}; /// A stream that can only be used to receive data /// /// `stop(0)` is implicitly called on drop unless: /// - A variant of [`ReadError`] has been yielded by a read call /// - [`stop()`] was called explicitly /// /// # Cancellation /// /// A `read` method is said to be *cancel-safe* when dropping its future before the future becomes /// ready cannot lead to loss of stream data. This is true of methods which succeed immediately when /// any progress is made, and is not true of methods which might need to perform multiple reads /// internally before succeeding. Each `read` method documents whether it is cancel-safe. /// /// # Common issues /// /// ## Data never received on a locally-opened stream /// /// Peers are not notified of streams until they or a later-numbered stream are used to send /// data. If a bidirectional stream is locally opened but never used to send, then the peer may /// never see it. Application protocols should always arrange for the endpoint which will first /// transmit on a stream to be the endpoint responsible for opening it. /// /// ## Data never received on a remotely-opened stream /// /// Verify that the stream you are receiving is the same one that the server is sending on, e.g. 
by /// logging the [`id`] of each. Streams are always accepted in the same order as they are created, /// i.e. ascending order by [`StreamId`]. For example, even if a sender first transmits on /// bidirectional stream 1, the first stream yielded by [`Connection::accept_bi`] on the receiver /// will be bidirectional stream 0. /// /// [`ReadError`]: crate::ReadError /// [`stop()`]: RecvStream::stop /// [`SendStream::finish`]: crate::SendStream::finish /// [`WriteError::Stopped`]: crate::WriteError::Stopped /// [`id`]: RecvStream::id /// [`Connection::accept_bi`]: crate::Connection::accept_bi #[derive(Debug)] pub struct RecvStream { conn: ConnectionRef, stream: StreamId, is_0rtt: bool, all_data_read: bool, reset: Option, } impl RecvStream { pub(crate) fn new(conn: ConnectionRef, stream: StreamId, is_0rtt: bool) -> Self { Self { conn, stream, is_0rtt, all_data_read: false, reset: None, } } /// Read data contiguously from the stream. /// /// Yields the number of bytes read into `buf` on success, or `None` if the stream was finished. /// /// This operation is cancel-safe. pub async fn read(&mut self, buf: &mut [u8]) -> Result, ReadError> { Read { stream: self, buf: ReadBuf::new(buf), } .await } /// Read an exact number of bytes contiguously from the stream. /// /// See [`read()`] for details. This operation is *not* cancel-safe. /// /// [`read()`]: RecvStream::read pub async fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), ReadExactError> { ReadExact { stream: self, buf: ReadBuf::new(buf), } .await } /// Attempts to read from the stream into buf. /// /// On success, returns Poll::Ready(Ok(num_bytes_read)) and places data in /// the buf. If no data was read, it implies that EOF has been reached. /// /// If no data is available for reading, the method returns Poll::Pending /// and arranges for the current task (via cx.waker()) to receive a notification /// when the stream becomes readable or is closed. pub fn poll_read( &mut self, cx: &mut Context, buf: &mut [u8], ) -> Poll> { let mut buf = ReadBuf::new(buf); ready!(self.poll_read_buf(cx, &mut buf))?; Poll::Ready(Ok(buf.filled().len())) } fn poll_read_buf( &mut self, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } self.poll_read_generic(cx, true, |chunks| { let mut read = false; loop { if buf.remaining() == 0 { // We know `read` is `true` because `buf.remaining()` was not 0 before return ReadStatus::Readable(()); } match chunks.next(buf.remaining()) { Ok(Some(chunk)) => { buf.put_slice(&chunk.bytes); read = true; } res => return (if read { Some(()) } else { None }, res.err()).into(), } } }) .map(|res| res.map(|_| ())) } /// Read the next segment of data /// /// Yields `None` if the stream was finished. Otherwise, yields a segment of data and its /// offset in the stream. If `ordered` is `true`, the chunk's offset will be immediately after /// the last data yielded by `read()` or `read_chunk()`. If `ordered` is `false`, segments may /// be received in any order, and the `Chunk`'s `offset` field can be used to determine /// ordering in the caller. Unordered reads are less prone to head-of-line blocking within a /// stream, but require the application to manage reassembling the original data. /// /// Slightly more efficient than `read` due to not copying. Chunk boundaries do not correspond /// to peer writes, and hence cannot be used as framing. /// /// This operation is cancel-safe. 
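    ///
    /// # Example
    ///
    /// A minimal sketch (assuming `recv` is a `RecvStream` for an incoming stream) that drains
    /// the stream in ordered chunks without copying the payload out of the receive buffer:
    ///
    /// ```no_run
    /// # async fn example(mut recv: quinn::RecvStream) -> Result<(), quinn::ReadError> {
    /// while let Some(chunk) = recv.read_chunk(64 * 1024, true).await? {
    ///     // `chunk.bytes` is a `Bytes` view of the data, `chunk.offset` its position in the stream.
    ///     println!("received {} bytes at offset {}", chunk.bytes.len(), chunk.offset);
    /// }
    /// // `None` was yielded: the peer finished the stream.
    /// # Ok(())
    /// # }
    /// ```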
pub async fn read_chunk( &mut self, max_length: usize, ordered: bool, ) -> Result, ReadError> { ReadChunk { stream: self, max_length, ordered, } .await } /// Attempts to read a chunk from the stream. /// /// On success, returns `Poll::Ready(Ok(Some(chunk)))`. If `Poll::Ready(Ok(None))` /// is returned, it implies that EOF has been reached. /// /// If no data is available for reading, the method returns `Poll::Pending` /// and arranges for the current task (via cx.waker()) to receive a notification /// when the stream becomes readable or is closed. fn poll_read_chunk( &mut self, cx: &mut Context, max_length: usize, ordered: bool, ) -> Poll, ReadError>> { self.poll_read_generic(cx, ordered, |chunks| match chunks.next(max_length) { Ok(Some(chunk)) => ReadStatus::Readable(chunk), res => (None, res.err()).into(), }) } /// Read the next segments of data /// /// Fills `bufs` with the segments of data beginning immediately after the /// last data yielded by `read` or `read_chunk`, or `None` if the stream was /// finished. /// /// Slightly more efficient than `read` due to not copying. Chunk boundaries /// do not correspond to peer writes, and hence cannot be used as framing. /// /// This operation is cancel-safe. pub async fn read_chunks(&mut self, bufs: &mut [Bytes]) -> Result, ReadError> { ReadChunks { stream: self, bufs }.await } /// Foundation of [`Self::read_chunks`] fn poll_read_chunks( &mut self, cx: &mut Context, bufs: &mut [Bytes], ) -> Poll, ReadError>> { if bufs.is_empty() { return Poll::Ready(Ok(Some(0))); } self.poll_read_generic(cx, true, |chunks| { let mut read = 0; loop { if read >= bufs.len() { // We know `read > 0` because `bufs` cannot be empty here return ReadStatus::Readable(read); } match chunks.next(usize::MAX) { Ok(Some(chunk)) => { bufs[read] = chunk.bytes; read += 1; } res => return (if read == 0 { None } else { Some(read) }, res.err()).into(), } } }) } /// Convenience method to read all remaining data into a buffer /// /// Fails with [`ReadToEndError::TooLong`] on reading more than `size_limit` bytes, discarding /// all data read. Uses unordered reads to be more efficient than using `AsyncRead` would /// allow. `size_limit` should be set to limit worst-case memory use. /// /// If unordered reads have already been made, the resulting buffer may have gaps containing /// arbitrary data. /// /// This operation is *not* cancel-safe. /// /// [`ReadToEndError::TooLong`]: crate::ReadToEndError::TooLong pub async fn read_to_end(&mut self, size_limit: usize) -> Result, ReadToEndError> { ReadToEnd { stream: self, size_limit, read: Vec::new(), start: u64::MAX, end: 0, } .await } /// Stop accepting data /// /// Discards unread data and notifies the peer to stop transmitting. Once stopped, further /// attempts to operate on a stream will yield `ClosedStream` errors. pub fn stop(&mut self, error_code: VarInt) -> Result<(), ClosedStream> { let mut conn = self.conn.state.lock("RecvStream::stop"); if self.is_0rtt && conn.check_0rtt().is_err() { return Ok(()); } conn.inner.recv_stream(self.stream).stop(error_code)?; conn.wake(); self.all_data_read = true; Ok(()) } /// Check if this stream has been opened during 0-RTT. /// /// In which case any non-idempotent request should be considered dangerous at the application /// level. Because read data is subject to replay attacks. 
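    ///
    /// # Example
    ///
    /// A hedged sketch of acting on that advice; `recv` is assumed to carry one application
    /// request per stream:
    ///
    /// ```no_run
    /// # async fn example(mut recv: quinn::RecvStream) -> Result<(), quinn::ReadToEndError> {
    /// let _request = recv.read_to_end(64 * 1024).await?;
    /// if recv.is_0rtt() {
    ///     // The data may be an attacker-replayed copy: only serve idempotent requests here.
    /// }
    /// # Ok(())
    /// # }
    /// ```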
pub fn is_0rtt(&self) -> bool { self.is_0rtt } /// Get the identity of this stream pub fn id(&self) -> StreamId { self.stream } /// Completes when the stream has been reset by the peer or otherwise closed /// /// Yields `Some` with the reset error code when the stream is reset by the peer. Yields `None` /// when the stream was previously [`stop()`](Self::stop)ed, or when the stream was /// [`finish()`](crate::SendStream::finish)ed by the peer and all data has been received, after /// which it is no longer meaningful for the stream to be reset. /// /// This operation is cancel-safe. pub async fn received_reset(&mut self) -> Result, ResetError> { poll_fn(|cx| { let mut conn = self.conn.state.lock("RecvStream::reset"); if self.is_0rtt && conn.check_0rtt().is_err() { return Poll::Ready(Err(ResetError::ZeroRttRejected)); } if let Some(code) = self.reset { return Poll::Ready(Ok(Some(code))); } match conn.inner.recv_stream(self.stream).received_reset() { Err(_) => Poll::Ready(Ok(None)), Ok(Some(error_code)) => { // Stream state has just now been freed, so the connection may need to issue new // stream ID flow control credit conn.wake(); Poll::Ready(Ok(Some(error_code))) } Ok(None) => { if let Some(e) = &conn.error { return Poll::Ready(Err(e.clone().into())); } // Resets always notify readers, since a reset is an immediate read error. We // could introduce a dedicated channel to reduce the risk of spurious wakeups, // but that increased complexity is probably not justified, as an application // that is expecting a reset is not likely to receive large amounts of data. conn.blocked_readers.insert(self.stream, cx.waker().clone()); Poll::Pending } } }) .await } /// Handle common logic related to reading out of a receive stream /// /// This takes an `FnMut` closure that takes care of the actual reading process, matching /// the detailed read semantics for the calling function with a particular return type. /// The closure can read from the passed `&mut Chunks` and has to return the status after /// reading: the amount of data read, and the status after the final read call. fn poll_read_generic( &mut self, cx: &mut Context, ordered: bool, mut read_fn: T, ) -> Poll, ReadError>> where T: FnMut(&mut Chunks) -> ReadStatus, { use proto::ReadError::*; if self.all_data_read { return Poll::Ready(Ok(None)); } let mut conn = self.conn.state.lock("RecvStream::poll_read"); if self.is_0rtt { conn.check_0rtt().map_err(|()| ReadError::ZeroRttRejected)?; } // If we stored an error during a previous call, return it now. This can happen if a // `read_fn` both wants to return data and also returns an error in its final stream status. 
let status = match self.reset { Some(code) => ReadStatus::Failed(None, Reset(code)), None => { let mut recv = conn.inner.recv_stream(self.stream); let mut chunks = recv.read(ordered)?; let status = read_fn(&mut chunks); if chunks.finalize().should_transmit() { conn.wake(); } status } }; match status { ReadStatus::Readable(read) => Poll::Ready(Ok(Some(read))), ReadStatus::Finished(read) => { self.all_data_read = true; Poll::Ready(Ok(read)) } ReadStatus::Failed(read, Blocked) => match read { Some(val) => Poll::Ready(Ok(Some(val))), None => { if let Some(ref x) = conn.error { return Poll::Ready(Err(ReadError::ConnectionLost(x.clone()))); } conn.blocked_readers.insert(self.stream, cx.waker().clone()); Poll::Pending } }, ReadStatus::Failed(read, Reset(error_code)) => match read { None => { self.all_data_read = true; self.reset = Some(error_code); Poll::Ready(Err(ReadError::Reset(error_code))) } done => { self.reset = Some(error_code); Poll::Ready(Ok(done)) } }, } } } enum ReadStatus { Readable(T), Finished(Option), Failed(Option, proto::ReadError), } impl From<(Option, Option)> for ReadStatus { fn from(status: (Option, Option)) -> Self { match status { (read, None) => Self::Finished(read), (read, Some(e)) => Self::Failed(read, e), } } } /// Future produced by [`RecvStream::read_to_end()`]. /// /// [`RecvStream::read_to_end()`]: crate::RecvStream::read_to_end #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadToEnd<'a> { stream: &'a mut RecvStream, read: Vec<(Bytes, u64)>, start: u64, end: u64, size_limit: usize, } impl Future for ReadToEnd<'_> { type Output = Result, ReadToEndError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { loop { match ready!(self.stream.poll_read_chunk(cx, usize::MAX, false))? { Some(chunk) => { self.start = self.start.min(chunk.offset); let end = chunk.bytes.len() as u64 + chunk.offset; if (end - self.start) > self.size_limit as u64 { return Poll::Ready(Err(ReadToEndError::TooLong)); } self.end = self.end.max(end); self.read.push((chunk.bytes, chunk.offset)); } None => { if self.end == 0 { // Never received anything return Poll::Ready(Ok(Vec::new())); } let start = self.start; let mut buffer = vec![0; (self.end - start) as usize]; for (data, offset) in self.read.drain(..) 
{ let offset = (offset - start) as usize; buffer[offset..offset + data.len()].copy_from_slice(&data); } return Poll::Ready(Ok(buffer)); } } } } } /// Errors from [`RecvStream::read_to_end`] #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadToEndError { /// An error occurred during reading #[error("read error: {0}")] Read(#[from] ReadError), /// The stream is larger than the user-supplied limit #[error("stream too long")] TooLong, } #[cfg(feature = "futures-io")] impl futures_io::AsyncRead for RecvStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8], ) -> Poll> { let mut buf = ReadBuf::new(buf); ready!(Self::poll_read_buf(self.get_mut(), cx, &mut buf))?; Poll::Ready(Ok(buf.filled().len())) } } impl tokio::io::AsyncRead for RecvStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { ready!(Self::poll_read_buf(self.get_mut(), cx, buf))?; Poll::Ready(Ok(())) } } impl Drop for RecvStream { fn drop(&mut self) { let mut conn = self.conn.state.lock("RecvStream::drop"); // clean up any previously registered wakers conn.blocked_readers.remove(&self.stream); if conn.error.is_some() || (self.is_0rtt && conn.check_0rtt().is_err()) { return; } if !self.all_data_read { // Ignore ClosedStream errors let _ = conn.inner.recv_stream(self.stream).stop(0u32.into()); conn.wake(); } } } /// Errors that arise from reading from a stream. #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadError { /// The peer abandoned transmitting data on this stream /// /// Carries an application-defined error code. #[error("stream reset by peer: error {0}")] Reset(VarInt), /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// The stream has already been stopped, finished, or reset #[error("closed stream")] ClosedStream, /// Attempted an ordered read following an unordered read /// /// Performing an unordered read allows discontinuities to arise in the receive buffer of a /// stream which cannot be recovered, making further ordered reads impossible. #[error("ordered read after unordered read")] IllegalOrderedRead, /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. /// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for ReadError { fn from(e: ReadableError) -> Self { match e { ReadableError::ClosedStream => Self::ClosedStream, ReadableError::IllegalOrderedRead => Self::IllegalOrderedRead, } } } impl From for ReadError { fn from(e: ResetError) -> Self { match e { ResetError::ConnectionLost(e) => Self::ConnectionLost(e), ResetError::ZeroRttRejected => Self::ZeroRttRejected, } } } impl From for io::Error { fn from(x: ReadError) -> Self { use self::ReadError::*; let kind = match x { Reset { .. } | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | ClosedStream => io::ErrorKind::NotConnected, IllegalOrderedRead => io::ErrorKind::InvalidInput, }; Self::new(kind, x) } } /// Errors that arise while waiting for a stream to be reset #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ResetError { /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. 
/// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for io::Error { fn from(x: ResetError) -> Self { use ResetError::*; let kind = match x { ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) => io::ErrorKind::NotConnected, }; Self::new(kind, x) } } /// Future produced by [`RecvStream::read()`]. /// /// [`RecvStream::read()`]: crate::RecvStream::read #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Read<'a> { stream: &'a mut RecvStream, buf: ReadBuf<'a>, } impl Future for Read<'_> { type Output = Result, ReadError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); ready!(this.stream.poll_read_buf(cx, &mut this.buf))?; match this.buf.filled().len() { 0 if this.buf.capacity() != 0 => Poll::Ready(Ok(None)), n => Poll::Ready(Ok(Some(n))), } } } /// Future produced by [`RecvStream::read_exact()`]. /// /// [`RecvStream::read_exact()`]: crate::RecvStream::read_exact #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadExact<'a> { stream: &'a mut RecvStream, buf: ReadBuf<'a>, } impl Future for ReadExact<'_> { type Output = Result<(), ReadExactError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let mut remaining = this.buf.remaining(); while remaining > 0 { ready!(this.stream.poll_read_buf(cx, &mut this.buf))?; let new = this.buf.remaining(); if new == remaining { return Poll::Ready(Err(ReadExactError::FinishedEarly(this.buf.filled().len()))); } remaining = new; } Poll::Ready(Ok(())) } } /// Errors that arise from reading from a stream. #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadExactError { /// The stream finished before all bytes were read #[error("stream finished early ({0} bytes read)")] FinishedEarly(usize), /// A read error occurred #[error(transparent)] ReadError(#[from] ReadError), } /// Future produced by [`RecvStream::read_chunk()`]. /// /// [`RecvStream::read_chunk()`]: crate::RecvStream::read_chunk #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadChunk<'a> { stream: &'a mut RecvStream, max_length: usize, ordered: bool, } impl Future for ReadChunk<'_> { type Output = Result, ReadError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let (max_length, ordered) = (self.max_length, self.ordered); self.stream.poll_read_chunk(cx, max_length, ordered) } } /// Future produced by [`RecvStream::read_chunks()`]. 
/// /// [`RecvStream::read_chunks()`]: crate::RecvStream::read_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadChunks<'a> { stream: &'a mut RecvStream, bufs: &'a mut [Bytes], } impl Future for ReadChunks<'_> { type Output = Result, ReadError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); this.stream.poll_read_chunks(cx, this.bufs) } } quinn-0.11.6/src/runtime/async_io.rs000064400000000000000000000063721046102023000154770ustar 00000000000000use std::{ future::Future, io, pin::Pin, sync::Arc, task::{Context, Poll}, time::Instant, }; use async_io::{Async, Timer}; use super::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPollHelper}; #[cfg(feature = "smol")] pub use self::smol::SmolRuntime; #[cfg(feature = "smol")] mod smol { use super::*; /// A Quinn runtime for smol #[derive(Debug)] pub struct SmolRuntime; impl Runtime for SmolRuntime { fn new_timer(&self, t: Instant) -> Pin> { Box::pin(Timer::at(t)) } fn spawn(&self, future: Pin + Send>>) { ::smol::spawn(future).detach(); } fn wrap_udp_socket( &self, sock: std::net::UdpSocket, ) -> io::Result> { Ok(Arc::new(UdpSocket::new(sock)?)) } } } #[cfg(feature = "async-std")] pub use self::async_std::AsyncStdRuntime; #[cfg(feature = "async-std")] mod async_std { use super::*; /// A Quinn runtime for async-std #[derive(Debug)] pub struct AsyncStdRuntime; impl Runtime for AsyncStdRuntime { fn new_timer(&self, t: Instant) -> Pin> { Box::pin(Timer::at(t)) } fn spawn(&self, future: Pin + Send>>) { ::async_std::task::spawn(future); } fn wrap_udp_socket( &self, sock: std::net::UdpSocket, ) -> io::Result> { Ok(Arc::new(UdpSocket::new(sock)?)) } } } impl AsyncTimer for Timer { fn reset(mut self: Pin<&mut Self>, t: Instant) { self.set_at(t) } fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { Future::poll(self, cx).map(|_| ()) } } #[derive(Debug)] struct UdpSocket { io: Async, inner: udp::UdpSocketState, } impl UdpSocket { fn new(sock: std::net::UdpSocket) -> io::Result { Ok(Self { inner: udp::UdpSocketState::new((&sock).into())?, io: Async::new(sock)?, }) } } impl AsyncUdpSocket for UdpSocket { fn create_io_poller(self: Arc) -> Pin> { Box::pin(UdpPollHelper::new(move || { let socket = self.clone(); async move { socket.io.writable().await } })) } fn try_send(&self, transmit: &udp::Transmit) -> io::Result<()> { self.inner.send((&self.io).into(), transmit) } fn poll_recv( &self, cx: &mut Context, bufs: &mut [io::IoSliceMut<'_>], meta: &mut [udp::RecvMeta], ) -> Poll> { loop { ready!(self.io.poll_readable(cx))?; if let Ok(res) = self.inner.recv((&self.io).into(), bufs, meta) { return Poll::Ready(Ok(res)); } } } fn local_addr(&self) -> io::Result { self.io.as_ref().local_addr() } fn may_fragment(&self) -> bool { self.inner.may_fragment() } fn max_transmit_segments(&self) -> usize { self.inner.max_gso_segments() } fn max_receive_segments(&self) -> usize { self.inner.gro_segments() } } quinn-0.11.6/src/runtime/tokio.rs000064400000000000000000000047611046102023000150200ustar 00000000000000use std::{ future::Future, io, pin::Pin, sync::Arc, task::{Context, Poll}, time::Instant, }; use tokio::{ io::Interest, time::{sleep_until, Sleep}, }; use super::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPollHelper}; /// A Quinn runtime for Tokio #[derive(Debug)] pub struct TokioRuntime; impl Runtime for TokioRuntime { fn new_timer(&self, t: Instant) -> Pin> { Box::pin(sleep_until(t.into())) } fn spawn(&self, future: Pin + Send>>) { tokio::spawn(future); } fn wrap_udp_socket(&self, sock: 
std::net::UdpSocket) -> io::Result> { Ok(Arc::new(UdpSocket { inner: udp::UdpSocketState::new((&sock).into())?, io: tokio::net::UdpSocket::from_std(sock)?, })) } fn now(&self) -> Instant { tokio::time::Instant::now().into_std() } } impl AsyncTimer for Sleep { fn reset(self: Pin<&mut Self>, t: Instant) { Self::reset(self, t.into()) } fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { Future::poll(self, cx) } } #[derive(Debug)] struct UdpSocket { io: tokio::net::UdpSocket, inner: udp::UdpSocketState, } impl AsyncUdpSocket for UdpSocket { fn create_io_poller(self: Arc) -> Pin> { Box::pin(UdpPollHelper::new(move || { let socket = self.clone(); async move { socket.io.writable().await } })) } fn try_send(&self, transmit: &udp::Transmit) -> io::Result<()> { self.io.try_io(Interest::WRITABLE, || { self.inner.send((&self.io).into(), transmit) }) } fn poll_recv( &self, cx: &mut Context, bufs: &mut [std::io::IoSliceMut<'_>], meta: &mut [udp::RecvMeta], ) -> Poll> { loop { ready!(self.io.poll_recv_ready(cx))?; if let Ok(res) = self.io.try_io(Interest::READABLE, || { self.inner.recv((&self.io).into(), bufs, meta) }) { return Poll::Ready(Ok(res)); } } } fn local_addr(&self) -> io::Result { self.io.local_addr() } fn may_fragment(&self) -> bool { self.inner.may_fragment() } fn max_transmit_segments(&self) -> usize { self.inner.max_gso_segments() } fn max_receive_segments(&self) -> usize { self.inner.gro_segments() } } quinn-0.11.6/src/runtime.rs000064400000000000000000000164221046102023000136700ustar 00000000000000use std::{ fmt::Debug, future::Future, io::{self, IoSliceMut}, net::SocketAddr, pin::Pin, sync::Arc, task::{Context, Poll}, time::Instant, }; use udp::{RecvMeta, Transmit}; /// Abstracts I/O and timer operations for runtime independence pub trait Runtime: Send + Sync + Debug + 'static { /// Construct a timer that will expire at `i` fn new_timer(&self, i: Instant) -> Pin>; /// Drive `future` to completion in the background fn spawn(&self, future: Pin + Send>>); /// Convert `t` into the socket type used by this runtime fn wrap_udp_socket(&self, t: std::net::UdpSocket) -> io::Result>; /// Look up the current time /// /// Allows simulating the flow of time for testing. fn now(&self) -> Instant { Instant::now() } } /// Abstract implementation of an async timer for runtime independence pub trait AsyncTimer: Send + Debug + 'static { /// Update the timer to expire at `i` fn reset(self: Pin<&mut Self>, i: Instant); /// Check whether the timer has expired, and register to be woken if not fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()>; } /// Abstract implementation of a UDP socket for runtime independence pub trait AsyncUdpSocket: Send + Sync + Debug + 'static { /// Create a [`UdpPoller`] that can register a single task for write-readiness notifications /// /// A `poll_send` method on a single object can usually store only one [`Waker`] at a time, /// i.e. allow at most one caller to wait for an event. This method allows any number of /// interested tasks to construct their own [`UdpPoller`] object. They can all then wait for the /// same event and be notified concurrently, because each [`UdpPoller`] can store a separate /// [`Waker`]. 
/// /// [`Waker`]: std::task::Waker fn create_io_poller(self: Arc) -> Pin>; /// Send UDP datagrams from `transmits`, or return `WouldBlock` and clear the underlying /// socket's readiness, or return an I/O error /// /// If this returns [`io::ErrorKind::WouldBlock`], [`UdpPoller::poll_writable`] must be called /// to register the calling task to be woken when a send should be attempted again. fn try_send(&self, transmit: &Transmit) -> io::Result<()>; /// Receive UDP datagrams, or register to be woken if receiving may succeed in the future fn poll_recv( &self, cx: &mut Context, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta], ) -> Poll>; /// Look up the local IP address and port used by this socket fn local_addr(&self) -> io::Result; /// Maximum number of datagrams that a [`Transmit`] may encode fn max_transmit_segments(&self) -> usize { 1 } /// Maximum number of datagrams that might be described by a single [`RecvMeta`] fn max_receive_segments(&self) -> usize { 1 } /// Whether datagrams might get fragmented into multiple parts /// /// Sockets should prevent this for best performance. See e.g. the `IPV6_DONTFRAG` socket /// option. fn may_fragment(&self) -> bool { true } } /// An object polled to detect when an associated [`AsyncUdpSocket`] is writable /// /// Any number of `UdpPoller`s may exist for a single [`AsyncUdpSocket`]. Each `UdpPoller` is /// responsible for notifying at most one task when that socket becomes writable. pub trait UdpPoller: Send + Sync + Debug + 'static { /// Check whether the associated socket is likely to be writable /// /// Must be called after [`AsyncUdpSocket::try_send`] returns [`io::ErrorKind::WouldBlock`] to /// register the task associated with `cx` to be woken when a send should be attempted /// again. Unlike in [`Future::poll`], a [`UdpPoller`] may be reused indefinitely no matter how /// many times `poll_writable` returns [`Poll::Ready`]. fn poll_writable(self: Pin<&mut Self>, cx: &mut Context) -> Poll>; } pin_project_lite::pin_project! { /// Helper adapting a function `MakeFut` that constructs a single-use future `Fut` into a /// [`UdpPoller`] that may be reused indefinitely struct UdpPollHelper { make_fut: MakeFut, #[pin] fut: Option, } } impl UdpPollHelper { /// Construct a [`UdpPoller`] that calls `make_fut` to get the future to poll, storing it until /// it yields [`Poll::Ready`], then creating a new one on the next /// [`poll_writable`](UdpPoller::poll_writable) #[cfg(any( feature = "runtime-async-std", feature = "runtime-smol", feature = "runtime-tokio" ))] fn new(make_fut: MakeFut) -> Self { Self { make_fut, fut: None, } } } impl UdpPoller for UdpPollHelper where MakeFut: Fn() -> Fut + Send + Sync + 'static, Fut: Future> + Send + Sync + 'static, { fn poll_writable(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = self.project(); if this.fut.is_none() { this.fut.set(Some((this.make_fut)())); } // We're forced to `unwrap` here because `Fut` may be `!Unpin`, which means we can't safely // obtain an `&mut Fut` after storing it in `self.fut` when `self` is already behind `Pin`, // and if we didn't store it then we wouldn't be able to keep it alive between // `poll_writable` calls. let result = this.fut.as_mut().as_pin_mut().unwrap().poll(cx); if result.is_ready() { // Polling an arbitrary `Future` after it becomes ready is a logic error, so arrange for // a new `Future` to be created on the next call. 
this.fut.set(None); } result } } impl Debug for UdpPollHelper { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("UdpPollHelper").finish_non_exhaustive() } } /// Automatically select an appropriate runtime from those enabled at compile time /// /// If `runtime-tokio` is enabled and this function is called from within a Tokio runtime context, /// then `TokioRuntime` is returned. Otherwise, if `runtime-async-std` is enabled, `AsyncStdRuntime` /// is returned. Otherwise, if `runtime-smol` is enabled, `SmolRuntime` is returned. /// Otherwise, `None` is returned. #[allow(clippy::needless_return)] // Be sure we return the right thing pub fn default_runtime() -> Option> { #[cfg(feature = "runtime-tokio")] { if ::tokio::runtime::Handle::try_current().is_ok() { return Some(Arc::new(TokioRuntime)); } } #[cfg(feature = "runtime-async-std")] { return Some(Arc::new(AsyncStdRuntime)); } #[cfg(all(feature = "runtime-smol", not(feature = "runtime-async-std")))] { return Some(Arc::new(SmolRuntime)); } #[cfg(not(any(feature = "runtime-async-std", feature = "runtime-smol")))] None } #[cfg(feature = "runtime-tokio")] mod tokio; #[cfg(feature = "runtime-tokio")] pub use self::tokio::TokioRuntime; #[cfg(feature = "async-io")] mod async_io; #[cfg(feature = "async-io")] pub use self::async_io::*; quinn-0.11.6/src/send_stream.rs000064400000000000000000000427461046102023000145210ustar 00000000000000use std::{ future::Future, io, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; use proto::{ClosedStream, ConnectionError, FinishError, StreamId, Written}; use thiserror::Error; use crate::{connection::ConnectionRef, VarInt}; /// A stream that can only be used to send data /// /// If dropped, streams that haven't been explicitly [`reset()`] will be implicitly [`finish()`]ed, /// continuing to (re)transmit previously written data until it has been fully acknowledged or the /// connection is closed. /// /// # Cancellation /// /// A `write` method is said to be *cancel-safe* when dropping its future before the future becomes /// ready will always result in no data being written to the stream. This is true of methods which /// succeed immediately when any progress is made, and is not true of methods which might need to /// perform multiple writes internally before succeeding. Each `write` method documents whether it is /// cancel-safe. /// /// [`reset()`]: SendStream::reset /// [`finish()`]: SendStream::finish #[derive(Debug)] pub struct SendStream { conn: ConnectionRef, stream: StreamId, is_0rtt: bool, } impl SendStream { pub(crate) fn new(conn: ConnectionRef, stream: StreamId, is_0rtt: bool) -> Self { Self { conn, stream, is_0rtt, } } /// Write bytes to the stream /// /// Yields the number of bytes written on success. Congestion and flow control may cause this to /// be shorter than `buf.len()`, indicating that only a prefix of `buf` was written. /// /// This operation is cancel-safe. pub async fn write(&mut self, buf: &[u8]) -> Result { Write { stream: self, buf }.await } /// Convenience method to write an entire buffer to the stream /// /// This operation is *not* cancel-safe. pub async fn write_all(&mut self, buf: &[u8]) -> Result<(), WriteError> { WriteAll { stream: self, buf }.await } /// Write chunks to the stream /// /// Yields the number of bytes and chunks written on success. /// Congestion and flow control may cause this to be shorter than `buf.len()`, /// indicating that only a prefix of `bufs` was written /// /// This operation is cancel-safe. 
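    ///
    /// # Example
    ///
    /// A minimal sketch (assuming `send` is a `SendStream`) making a single `write_chunks` call
    /// and inspecting the returned [`Written`](crate::Written) to see how much was consumed:
    ///
    /// ```no_run
    /// # async fn example(mut send: quinn::SendStream) -> Result<(), quinn::WriteError> {
    /// use bytes::Bytes;
    ///
    /// let mut bufs = [Bytes::from_static(b"hello "), Bytes::from_static(b"world")];
    /// let written = send.write_chunks(&mut bufs).await?;
    /// // Only a prefix may have been accepted; `written.bytes`/`written.chunks` report how much.
    /// // Loop, or use `write_all_chunks`, to transmit the remainder.
    /// println!("queued {} bytes across {} chunks", written.bytes, written.chunks);
    /// # Ok(())
    /// # }
    /// ```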
pub async fn write_chunks(&mut self, bufs: &mut [Bytes]) -> Result { WriteChunks { stream: self, bufs }.await } /// Convenience method to write a single chunk in its entirety to the stream /// /// This operation is *not* cancel-safe. pub async fn write_chunk(&mut self, buf: Bytes) -> Result<(), WriteError> { WriteChunk { stream: self, buf: [buf], } .await } /// Convenience method to write an entire list of chunks to the stream /// /// This operation is *not* cancel-safe. pub async fn write_all_chunks(&mut self, bufs: &mut [Bytes]) -> Result<(), WriteError> { WriteAllChunks { stream: self, bufs, offset: 0, } .await } fn execute_poll(&mut self, cx: &mut Context, write_fn: F) -> Poll> where F: FnOnce(&mut proto::SendStream) -> Result, { use proto::WriteError::*; let mut conn = self.conn.state.lock("SendStream::poll_write"); if self.is_0rtt { conn.check_0rtt() .map_err(|()| WriteError::ZeroRttRejected)?; } if let Some(ref x) = conn.error { return Poll::Ready(Err(WriteError::ConnectionLost(x.clone()))); } let result = match write_fn(&mut conn.inner.send_stream(self.stream)) { Ok(result) => result, Err(Blocked) => { conn.blocked_writers.insert(self.stream, cx.waker().clone()); return Poll::Pending; } Err(Stopped(error_code)) => { return Poll::Ready(Err(WriteError::Stopped(error_code))); } Err(ClosedStream) => { return Poll::Ready(Err(WriteError::ClosedStream)); } }; conn.wake(); Poll::Ready(Ok(result)) } /// Notify the peer that no more data will ever be written to this stream /// /// It is an error to write to a [`SendStream`] after `finish()`ing it. [`reset()`](Self::reset) /// may still be called after `finish` to abandon transmission of any stream data that might /// still be buffered. /// /// To wait for the peer to receive all buffered stream data, see [`stopped()`](Self::stopped). /// /// May fail if [`finish()`](Self::finish) or [`reset()`](Self::reset) was previously /// called. This error is harmless and serves only to indicate that the caller may have /// incorrect assumptions about the stream's state. pub fn finish(&mut self) -> Result<(), ClosedStream> { let mut conn = self.conn.state.lock("finish"); match conn.inner.send_stream(self.stream).finish() { Ok(()) => { conn.wake(); Ok(()) } Err(FinishError::ClosedStream) => Err(ClosedStream::new()), // Harmless. If the application needs to know about stopped streams at this point, it // should call `stopped`. Err(FinishError::Stopped(_)) => Ok(()), } } /// Close the send stream immediately. /// /// No new data can be written after calling this method. Locally buffered data is dropped, and /// previously transmitted data will no longer be retransmitted if lost. If an attempt has /// already been made to finish the stream, the peer may still receive all written data. /// /// May fail if [`finish()`](Self::finish) or [`reset()`](Self::reset) was previously /// called. This error is harmless and serves only to indicate that the caller may have /// incorrect assumptions about the stream's state. pub fn reset(&mut self, error_code: VarInt) -> Result<(), ClosedStream> { let mut conn = self.conn.state.lock("SendStream::reset"); if self.is_0rtt && conn.check_0rtt().is_err() { return Ok(()); } conn.inner.send_stream(self.stream).reset(error_code)?; conn.wake(); Ok(()) } /// Set the priority of the send stream /// /// Every send stream has an initial priority of 0. Locally buffered data from streams with /// higher priority will be transmitted before data from streams with lower priority. 
Changing /// the priority of a stream with pending data may only take effect after that data has been /// transmitted. Using many different priority levels per connection may have a negative /// impact on performance. pub fn set_priority(&self, priority: i32) -> Result<(), ClosedStream> { let mut conn = self.conn.state.lock("SendStream::set_priority"); conn.inner.send_stream(self.stream).set_priority(priority)?; Ok(()) } /// Get the priority of the send stream pub fn priority(&self) -> Result { let mut conn = self.conn.state.lock("SendStream::priority"); conn.inner.send_stream(self.stream).priority() } /// Completes when the peer stops the stream or reads the stream to completion /// /// Yields `Some` with the stop error code if the peer stops the stream. Yields `None` if the /// local side [`finish()`](Self::finish)es the stream and then the peer acknowledges receipt /// of all stream data (although not necessarily the processing of it), after which the peer /// closing the stream is no longer meaningful. /// /// For a variety of reasons, the peer may not send acknowledgements immediately upon receiving /// data. As such, relying on `stopped` to know when the peer has read a stream to completion /// may introduce more latency than using an application-level response of some sort. pub async fn stopped(&mut self) -> Result, StoppedError> { Stopped { stream: self }.await } #[doc(hidden)] pub fn poll_stopped(&mut self, cx: &mut Context) -> Poll, StoppedError>> { let mut conn = self.conn.state.lock("SendStream::poll_stopped"); if self.is_0rtt { conn.check_0rtt() .map_err(|()| StoppedError::ZeroRttRejected)?; } match conn.inner.send_stream(self.stream).stopped() { Err(_) => Poll::Ready(Ok(None)), Ok(Some(error_code)) => Poll::Ready(Ok(Some(error_code))), Ok(None) => { if let Some(e) = &conn.error { return Poll::Ready(Err(e.clone().into())); } conn.stopped.insert(self.stream, cx.waker().clone()); Poll::Pending } } } /// Get the identity of this stream pub fn id(&self) -> StreamId { self.stream } /// Attempt to write bytes from buf into the stream. /// /// On success, returns Poll::Ready(Ok(num_bytes_written)). /// /// If the stream is not ready for writing, the method returns Poll::Pending and arranges /// for the current task (via cx.waker().wake_by_ref()) to receive a notification when the /// stream becomes writable or is closed. 
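    ///
    /// # Example
    ///
    /// A minimal sketch driving `poll_write` by hand with `std::future::poll_fn` instead of the
    /// `async` convenience methods (`send` is assumed to be a `SendStream`):
    ///
    /// ```no_run
    /// # async fn example(mut send: quinn::SendStream) -> std::io::Result<()> {
    /// use std::{future::poll_fn, pin::Pin};
    ///
    /// let written = poll_fn(|cx| Pin::new(&mut send).poll_write(cx, b"hello")).await?;
    /// assert!(written <= 5);
    /// # Ok(())
    /// # }
    /// ```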
pub fn poll_write( self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll> { self.get_mut().execute_poll(cx, |stream| stream.write(buf)) } } #[cfg(feature = "futures-io")] impl futures_io::AsyncWrite for SendStream { fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { Self::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(self.get_mut().finish().map_err(Into::into)) } } #[cfg(feature = "runtime-tokio")] impl tokio::io::AsyncWrite for SendStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Self::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(self.get_mut().finish().map_err(Into::into)) } } impl Drop for SendStream { fn drop(&mut self) { let mut conn = self.conn.state.lock("SendStream::drop"); // clean up any previously registered wakers conn.stopped.remove(&self.stream); conn.blocked_writers.remove(&self.stream); if conn.error.is_some() || (self.is_0rtt && conn.check_0rtt().is_err()) { return; } match conn.inner.send_stream(self.stream).finish() { Ok(()) => conn.wake(), Err(FinishError::Stopped(reason)) => { if conn.inner.send_stream(self.stream).reset(reason).is_ok() { conn.wake(); } } // Already finished or reset, which is fine. Err(FinishError::ClosedStream) => {} } } } /// Future produced by `SendStream::stopped` #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Stopped<'a> { stream: &'a mut SendStream, } impl Future for Stopped<'_> { type Output = Result, StoppedError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { self.get_mut().stream.poll_stopped(cx) } } /// Future produced by [`SendStream::write()`]. /// /// [`SendStream::write()`]: crate::SendStream::write #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Write<'a> { stream: &'a mut SendStream, buf: &'a [u8], } impl Future for Write<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let buf = this.buf; this.stream.execute_poll(cx, |s| s.write(buf)) } } /// Future produced by [`SendStream::write_all()`]. /// /// [`SendStream::write_all()`]: crate::SendStream::write_all #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteAll<'a> { stream: &'a mut SendStream, buf: &'a [u8], } impl Future for WriteAll<'_> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.buf.is_empty() { return Poll::Ready(Ok(())); } let buf = this.buf; let n = ready!(this.stream.execute_poll(cx, |s| s.write(buf)))?; this.buf = &this.buf[n..]; } } } /// Future produced by [`SendStream::write_chunks()`]. 
/// /// [`SendStream::write_chunks()`]: crate::SendStream::write_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteChunks<'a> { stream: &'a mut SendStream, bufs: &'a mut [Bytes], } impl Future for WriteChunks<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let bufs = &mut *this.bufs; this.stream.execute_poll(cx, |s| s.write_chunks(bufs)) } } /// Future produced by [`SendStream::write_chunk()`]. /// /// [`SendStream::write_chunk()`]: crate::SendStream::write_chunk #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteChunk<'a> { stream: &'a mut SendStream, buf: [Bytes; 1], } impl Future for WriteChunk<'_> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.buf[0].is_empty() { return Poll::Ready(Ok(())); } let bufs = &mut this.buf[..]; ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; } } } /// Future produced by [`SendStream::write_all_chunks()`]. /// /// [`SendStream::write_all_chunks()`]: crate::SendStream::write_all_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteAllChunks<'a> { stream: &'a mut SendStream, bufs: &'a mut [Bytes], offset: usize, } impl Future for WriteAllChunks<'_> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.offset == this.bufs.len() { return Poll::Ready(Ok(())); } let bufs = &mut this.bufs[this.offset..]; let written = ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; this.offset += written.chunks; } } } /// Errors that arise from writing to a stream #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum WriteError { /// The peer is no longer accepting data on this stream /// /// Carries an application-defined error code. #[error("sending stopped by peer: error {0}")] Stopped(VarInt), /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// The stream has already been finished or reset #[error("closed stream")] ClosedStream, /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. /// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for WriteError { #[inline] fn from(_: ClosedStream) -> Self { Self::ClosedStream } } impl From for WriteError { fn from(x: StoppedError) -> Self { match x { StoppedError::ConnectionLost(e) => Self::ConnectionLost(e), StoppedError::ZeroRttRejected => Self::ZeroRttRejected, } } } impl From for io::Error { fn from(x: WriteError) -> Self { use self::WriteError::*; let kind = match x { Stopped(_) | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | ClosedStream => io::ErrorKind::NotConnected, }; Self::new(kind, x) } } /// Errors that arise while monitoring for a send stream stop from the peer #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum StoppedError { /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. 
/// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for io::Error { fn from(x: StoppedError) -> Self { use StoppedError::*; let kind = match x { ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) => io::ErrorKind::NotConnected, }; Self::new(kind, x) } } quinn-0.11.6/src/tests.rs000075500000000000000000000674011046102023000133550ustar 00000000000000#![cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] #[cfg(all(feature = "rustls-aws-lc-rs", not(feature = "rustls-ring")))] use rustls::crypto::aws_lc_rs::default_provider; #[cfg(feature = "rustls-ring")] use rustls::crypto::ring::default_provider; use std::{ convert::TryInto, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, str, sync::Arc, }; use crate::runtime::TokioRuntime; use bytes::Bytes; use proto::{crypto::rustls::QuicClientConfig, RandomConnectionIdGenerator}; use rand::{rngs::StdRng, RngCore, SeedableRng}; use rustls::{ pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}, RootCertStore, }; use tokio::{ runtime::{Builder, Runtime}, time::{Duration, Instant}, }; use tracing::{error_span, info}; use tracing_futures::Instrument as _; use tracing_subscriber::EnvFilter; use super::{ClientConfig, Endpoint, EndpointConfig, RecvStream, SendStream, TransportConfig}; #[test] fn handshake_timeout() { let _guard = subscribe(); let runtime = rt_threaded(); let client = { let _guard = runtime.enter(); Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap() }; // Avoid NoRootAnchors error let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let mut roots = RootCertStore::empty(); roots.add(cert.cert.into()).unwrap(); let mut client_config = crate::ClientConfig::with_root_certificates(Arc::new(roots)).unwrap(); const IDLE_TIMEOUT: Duration = Duration::from_millis(500); let mut transport_config = crate::TransportConfig::default(); transport_config .max_idle_timeout(Some(IDLE_TIMEOUT.try_into().unwrap())) .initial_rtt(Duration::from_millis(10)); client_config.transport_config(Arc::new(transport_config)); let start = Instant::now(); runtime.block_on(async move { match client .connect_with( client_config, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1), "localhost", ) .unwrap() .await { Err(crate::ConnectionError::TimedOut) => {} Err(e) => panic!("unexpected error: {e:?}"), Ok(_) => panic!("unexpected success"), } }); let dt = start.elapsed(); assert!(dt > IDLE_TIMEOUT && dt < 2 * IDLE_TIMEOUT); } #[tokio::test] async fn close_endpoint() { let _guard = subscribe(); // Avoid NoRootAnchors error let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let mut roots = RootCertStore::empty(); roots.add(cert.cert.into()).unwrap(); let mut endpoint = Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); endpoint .set_default_client_config(ClientConfig::with_root_certificates(Arc::new(roots)).unwrap()); let conn = endpoint .connect( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap(); tokio::spawn(async move { let _ = conn.await; }); let conn = endpoint .connect( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap(); endpoint.close(0u32.into(), &[]); match conn.await { Err(crate::ConnectionError::LocallyClosed) => (), Err(e) => panic!("unexpected error: {e}"), Ok(_) => { panic!("unexpected success"); } } } #[test] fn local_addr() { let socket = 
UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)).unwrap(); let addr = socket.local_addr().unwrap(); let runtime = rt_basic(); let ep = { let _guard = runtime.enter(); Endpoint::new(Default::default(), None, socket, Arc::new(TokioRuntime)).unwrap() }; assert_eq!( addr, ep.local_addr() .expect("Could not obtain our local endpoint") ); } #[test] fn read_after_close() { let _guard = subscribe(); let runtime = rt_basic(); let endpoint = { let _guard = runtime.enter(); endpoint() }; const MSG: &[u8] = b"goodbye!"; let endpoint2 = endpoint.clone(); runtime.spawn(async move { let new_conn = endpoint2 .accept() .await .expect("endpoint") .await .expect("connection"); let mut s = new_conn.open_uni().await.unwrap(); s.write_all(MSG).await.unwrap(); s.finish().unwrap(); // Wait for the stream to be closed, one way or another. _ = s.stopped().await; }); runtime.block_on(async move { let new_conn = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .await .expect("connect"); tokio::time::sleep_until(Instant::now() + Duration::from_millis(100)).await; let mut stream = new_conn.accept_uni().await.expect("incoming streams"); let msg = stream.read_to_end(usize::MAX).await.expect("read_to_end"); assert_eq!(msg, MSG); }); } #[test] fn export_keying_material() { let _guard = subscribe(); let runtime = rt_basic(); let endpoint = { let _guard = runtime.enter(); endpoint() }; runtime.block_on(async move { let outgoing_conn_fut = tokio::spawn({ let endpoint = endpoint.clone(); async move { endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .await .expect("connect") } }); let incoming_conn_fut = tokio::spawn({ let endpoint = endpoint.clone(); async move { endpoint .accept() .await .expect("endpoint") .await .expect("connection") } }); let outgoing_conn = outgoing_conn_fut.await.unwrap(); let incoming_conn = incoming_conn_fut.await.unwrap(); let mut i_buf = [0u8; 64]; incoming_conn .export_keying_material(&mut i_buf, b"asdf", b"qwer") .unwrap(); let mut o_buf = [0u8; 64]; outgoing_conn .export_keying_material(&mut o_buf, b"asdf", b"qwer") .unwrap(); assert_eq!(&i_buf[..], &o_buf[..]); }); } #[tokio::test] async fn ip_blocking() { let _guard = subscribe(); let endpoint_factory = EndpointFactory::new(); let client_1 = endpoint_factory.endpoint(); let client_1_addr = client_1.local_addr().unwrap(); let client_2 = endpoint_factory.endpoint(); let server = endpoint_factory.endpoint(); let server_addr = server.local_addr().unwrap(); let server_task = tokio::spawn(async move { loop { let accepting = server.accept().await.unwrap(); if accepting.remote_address() == client_1_addr { accepting.refuse(); } else if accepting.remote_address_validated() { accepting.await.expect("connection"); } else { accepting.retry().unwrap(); } } }); tokio::join!( async move { let e = client_1 .connect(server_addr, "localhost") .unwrap() .await .expect_err("server should have blocked this"); assert!( matches!(e, crate::ConnectionError::ConnectionClosed(_)), "wrong error" ); }, async move { client_2 .connect(server_addr, "localhost") .unwrap() .await .expect("connect"); } ); server_task.abort(); } /// Construct an endpoint suitable for connecting to itself fn endpoint() -> Endpoint { EndpointFactory::new().endpoint() } fn endpoint_with_config(transport_config: TransportConfig) -> Endpoint { EndpointFactory::new().endpoint_with_config(transport_config) } /// Constructs endpoints suitable for connecting to themselves and each other struct EndpointFactory { cert: rcgen::CertifiedKey, endpoint_config: 
EndpointConfig, } impl EndpointFactory { fn new() -> Self { Self { cert: rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(), endpoint_config: EndpointConfig::default(), } } fn endpoint(&self) -> Endpoint { self.endpoint_with_config(TransportConfig::default()) } fn endpoint_with_config(&self, transport_config: TransportConfig) -> Endpoint { let key = PrivateKeyDer::Pkcs8(self.cert.key_pair.serialize_der().into()); let transport_config = Arc::new(transport_config); let mut server_config = crate::ServerConfig::with_single_cert(vec![self.cert.cert.der().clone()], key).unwrap(); server_config.transport_config(transport_config.clone()); let mut roots = rustls::RootCertStore::empty(); roots.add(self.cert.cert.der().clone()).unwrap(); let mut endpoint = Endpoint::new( self.endpoint_config.clone(), Some(server_config), UdpSocket::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(), Arc::new(TokioRuntime), ) .unwrap(); let mut client_config = ClientConfig::with_root_certificates(Arc::new(roots)).unwrap(); client_config.transport_config(transport_config); endpoint.set_default_client_config(client_config); endpoint } } #[tokio::test] async fn zero_rtt() { let _guard = subscribe(); let endpoint = endpoint(); const MSG0: &[u8] = b"zero"; const MSG1: &[u8] = b"one"; let endpoint2 = endpoint.clone(); tokio::spawn(async move { for _ in 0..2 { let incoming = endpoint2.accept().await.unwrap().accept().unwrap(); let (connection, established) = incoming.into_0rtt().unwrap_or_else(|_| unreachable!()); let c = connection.clone(); tokio::spawn(async move { while let Ok(mut x) = c.accept_uni().await { let msg = x.read_to_end(usize::MAX).await.unwrap(); assert_eq!(msg, MSG0); } }); info!("sending 0.5-RTT"); let mut s = connection.open_uni().await.expect("open_uni"); s.write_all(MSG0).await.expect("write"); s.finish().unwrap(); established.await; info!("sending 1-RTT"); let mut s = connection.open_uni().await.expect("open_uni"); s.write_all(MSG1).await.expect("write"); // The peer might close the connection before ACKing let _ = s.finish(); } }); let connection = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .err() .expect("0-RTT succeeded without keys") .await .expect("connect"); { let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream.read_to_end(usize::MAX).await.expect("read_to_end"); assert_eq!(msg, MSG0); // Read a 1-RTT message to ensure the handshake completes fully, allowing the server's // NewSessionTicket frame to be received. 
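        // Without the server's NewSessionTicket, the client would cache no 0-RTT keys and
        // the second `connect(...).into_0rtt()` below would fail with "missing 0-RTT keys".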
let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream.read_to_end(usize::MAX).await.expect("read_to_end"); assert_eq!(msg, MSG1); drop(connection); } info!("initial connection complete"); let (connection, zero_rtt) = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .unwrap_or_else(|_| panic!("missing 0-RTT keys")); // Send something ASAP to use 0-RTT let c = connection.clone(); tokio::spawn(async move { let mut s = c.open_uni().await.expect("0-RTT open uni"); info!("sending 0-RTT"); s.write_all(MSG0).await.expect("0-RTT write"); s.finish().unwrap(); }); let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream.read_to_end(usize::MAX).await.expect("read_to_end"); assert_eq!(msg, MSG0); assert!(zero_rtt.await); drop((stream, connection)); endpoint.wait_idle().await; } #[test] fn echo_v6() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] fn echo_v4() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] fn echo_dualstack() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] fn stress_receive_window() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 50, stream_size: 25 * 1024 + 11, receive_window: Some(37), stream_receive_window: Some(100 * 1024 * 1024), }); } #[test] fn stress_stream_receive_window() { // Note that there is no point in running this with too many streams, // since the window is only active within a stream. run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 2, stream_size: 250 * 1024 + 11, receive_window: Some(100 * 1024 * 1024), stream_receive_window: Some(37), }); } #[test] fn stress_both_windows() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 50, stream_size: 25 * 1024 + 11, receive_window: Some(37), stream_receive_window: Some(37), }); } fn run_echo(args: EchoArgs) { let _guard = subscribe(); let runtime = rt_basic(); let handle = { // Use small receive windows let mut transport_config = TransportConfig::default(); if let Some(receive_window) = args.receive_window { transport_config.receive_window(receive_window.try_into().unwrap()); } if let Some(stream_receive_window) = args.stream_receive_window { transport_config.stream_receive_window(stream_receive_window.try_into().unwrap()); } transport_config.max_concurrent_bidi_streams(1_u8.into()); transport_config.max_concurrent_uni_streams(1_u8.into()); let transport_config = Arc::new(transport_config); // We don't use the `endpoint` helper here because we want two different endpoints with // different addresses. 
let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); let cert = CertificateDer::from(cert.cert); let mut server_config = crate::ServerConfig::with_single_cert(vec![cert.clone()], key.into()).unwrap(); server_config.transport = transport_config.clone(); let server_sock = UdpSocket::bind(args.server_addr).unwrap(); let server_addr = server_sock.local_addr().unwrap(); let server = { let _guard = runtime.enter(); let _guard = error_span!("server").entered(); Endpoint::new( Default::default(), Some(server_config), server_sock, Arc::new(TokioRuntime), ) .unwrap() }; let mut roots = rustls::RootCertStore::empty(); roots.add(cert).unwrap(); let mut client_crypto = rustls::ClientConfig::builder_with_provider(default_provider().into()) .with_safe_default_protocol_versions() .unwrap() .with_root_certificates(roots) .with_no_client_auth(); client_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); let mut client = { let _guard = runtime.enter(); let _guard = error_span!("client").entered(); Endpoint::client(args.client_addr).unwrap() }; let mut client_config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(client_crypto).unwrap())); client_config.transport_config(transport_config); client.set_default_client_config(client_config); let handle = runtime.spawn(async move { let incoming = server.accept().await.unwrap(); // Note for anyone modifying the platform support in this test: // If `local_ip` gets available on additional platforms - which // requires modifying this test - please update the list of supported // platforms in the doc comment of `quinn_udp::RecvMeta::dst_ip`. if cfg!(target_os = "linux") || cfg!(target_os = "android") || cfg!(target_os = "freebsd") || cfg!(target_os = "openbsd") || cfg!(target_os = "netbsd") || cfg!(target_os = "macos") || cfg!(target_os = "windows") { let local_ip = incoming.local_ip().expect("Local IP must be available"); assert!(local_ip.is_loopback()); } else { assert_eq!(None, incoming.local_ip()); } let new_conn = incoming.await.unwrap(); tokio::spawn(async move { while let Ok(stream) = new_conn.accept_bi().await { tokio::spawn(echo(stream)); } }); server.wait_idle().await; }); info!("connecting from {} to {}", args.client_addr, server_addr); runtime.block_on( async move { let new_conn = client .connect(server_addr, "localhost") .unwrap() .await .expect("connect"); /// This is just an arbitrary number to generate deterministic test data const SEED: u64 = 0x12345678; for i in 0..args.nr_streams { println!("Opening stream {i}"); let (mut send, mut recv) = new_conn.open_bi().await.expect("stream open"); let msg = gen_data(args.stream_size, SEED); let send_task = async { send.write_all(&msg).await.expect("write"); send.finish().unwrap(); }; let recv_task = async { recv.read_to_end(usize::MAX).await.expect("read") }; let (_, data) = tokio::join!(send_task, recv_task); assert_eq!(data[..], msg[..], "Data mismatch"); } new_conn.close(0u32.into(), b"done"); client.wait_idle().await; } .instrument(error_span!("client")), ); handle }; runtime.block_on(handle).unwrap(); } struct EchoArgs { client_addr: SocketAddr, server_addr: SocketAddr, nr_streams: usize, stream_size: usize, receive_window: Option, stream_receive_window: Option, } async fn echo((mut send, mut recv): (SendStream, RecvStream)) { loop { // These are 32 buffers, for reading approximately 32kB at once #[rustfmt::skip] let mut bufs = [ Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), 
Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), ]; match recv.read_chunks(&mut bufs).await.expect("read chunks") { Some(n) => { send.write_all_chunks(&mut bufs[..n]) .await .expect("write chunks"); } None => break, } } let _ = send.finish(); } fn gen_data(size: usize, seed: u64) -> Vec { let mut rng: StdRng = SeedableRng::seed_from_u64(seed); let mut buf = vec![0; size]; rng.fill_bytes(&mut buf); buf } fn subscribe() -> tracing::subscriber::DefaultGuard { let sub = tracing_subscriber::FmtSubscriber::builder() .with_env_filter(EnvFilter::from_default_env()) .with_writer(|| TestWriter) .finish(); tracing::subscriber::set_default(sub) } struct TestWriter; impl std::io::Write for TestWriter { fn write(&mut self, buf: &[u8]) -> io::Result { print!( "{}", str::from_utf8(buf).expect("tried to log invalid UTF-8") ); Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { io::stdout().flush() } } fn rt_basic() -> Runtime { Builder::new_current_thread().enable_all().build().unwrap() } fn rt_threaded() -> Runtime { Builder::new_multi_thread().enable_all().build().unwrap() } #[tokio::test] async fn rebind_recv() { let _guard = subscribe(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); let cert = CertificateDer::from(cert.cert); let mut roots = rustls::RootCertStore::empty(); roots.add(cert.clone()).unwrap(); let mut client = Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); let mut client_config = ClientConfig::with_root_certificates(Arc::new(roots)).unwrap(); client_config.transport_config(Arc::new({ let mut cfg = TransportConfig::default(); cfg.max_concurrent_uni_streams(1u32.into()); cfg })); client.set_default_client_config(client_config); let server_config = crate::ServerConfig::with_single_cert(vec![cert.clone()], key.into()).unwrap(); let server = { let _guard = tracing::error_span!("server").entered(); Endpoint::server( server_config, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), ) .unwrap() }; let server_addr = server.local_addr().unwrap(); const MSG: &[u8; 5] = b"hello"; let write_send = Arc::new(tokio::sync::Notify::new()); let write_recv = write_send.clone(); let connected_send = Arc::new(tokio::sync::Notify::new()); let connected_recv = connected_send.clone(); let server = tokio::spawn(async move { let connection = server.accept().await.unwrap().await.unwrap(); info!("got conn"); connected_send.notify_one(); write_recv.notified().await; let mut stream = connection.open_uni().await.unwrap(); stream.write_all(MSG).await.unwrap(); stream.finish().unwrap(); // Wait for the stream to be closed, one way or another. 
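        // Keeping this task alive until then ensures the server-side connection isn't
        // dropped while MSG is still in flight to the rebound client socket.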
_ = stream.stopped().await; }); let connection = { let _guard = tracing::error_span!("client").entered(); client .connect(server_addr, "localhost") .unwrap() .await .unwrap() }; info!("connected"); connected_recv.notified().await; client .rebind(UdpSocket::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap()) .unwrap(); info!("rebound"); write_send.notify_one(); let mut stream = connection.accept_uni().await.unwrap(); assert_eq!(stream.read_to_end(MSG.len()).await.unwrap(), MSG); server.await.unwrap(); } #[tokio::test] async fn stream_id_flow_control() { let _guard = subscribe(); let mut cfg = TransportConfig::default(); cfg.max_concurrent_uni_streams(1u32.into()); let endpoint = endpoint_with_config(cfg); let (client, server) = tokio::join!( endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap(), async { endpoint.accept().await.unwrap().await } ); let client = client.unwrap(); let server = server.unwrap(); // If `open_uni` doesn't get unblocked when the previous stream is dropped, this will time out. tokio::join!( async { client.open_uni().await.unwrap(); }, async { client.open_uni().await.unwrap(); }, async { client.open_uni().await.unwrap(); }, async { server.accept_uni().await.unwrap(); server.accept_uni().await.unwrap(); } ); } #[tokio::test] async fn two_datagram_readers() { let _guard = subscribe(); let endpoint = endpoint(); let (client, server) = tokio::join!( endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap(), async { endpoint.accept().await.unwrap().await } ); let client = client.unwrap(); let server = server.unwrap(); let done = tokio::sync::Notify::new(); let (a, b, ()) = tokio::join!( async { let x = client.read_datagram().await.unwrap(); done.notify_waiters(); x }, async { let x = client.read_datagram().await.unwrap(); done.notify_waiters(); x }, async { server.send_datagram(b"one"[..].into()).unwrap(); done.notified().await; server.send_datagram_wait(b"two"[..].into()).await.unwrap(); } ); assert!(*a == *b"one" || *b == *b"one"); assert!(*a == *b"two" || *b == *b"two"); } #[tokio::test] async fn multiple_conns_with_zero_length_cids() { let _guard = subscribe(); let mut factory = EndpointFactory::new(); factory .endpoint_config .cid_generator(|| Box::new(RandomConnectionIdGenerator::new(0))); let server = { let _guard = error_span!("server").entered(); factory.endpoint() }; let server_addr = server.local_addr().unwrap(); let client1 = { let _guard = error_span!("client1").entered(); factory.endpoint() }; let client2 = { let _guard = error_span!("client2").entered(); factory.endpoint() }; let client1 = async move { let conn = client1 .connect(server_addr, "localhost") .unwrap() .await .unwrap(); conn.closed().await; } .instrument(error_span!("client1")); let client2 = async move { let conn = client2 .connect(server_addr, "localhost") .unwrap() .await .unwrap(); conn.closed().await; } .instrument(error_span!("client2")); let server = async move { let client1 = server.accept().await.unwrap().await.unwrap(); let client2 = server.accept().await.unwrap().await.unwrap(); // Both connections are now concurrently live. 
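        // With zero-length local connection IDs the endpoint can only tell these two
        // connections apart by remote address, which is the path this test exercises.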
client1.close(42u32.into(), &[]); client2.close(42u32.into(), &[]); } .instrument(error_span!("server")); tokio::join!(client1, client2, server); } quinn-0.11.6/src/work_limiter.rs000064400000000000000000000176661046102023000147270ustar 00000000000000use std::time::{Duration, Instant}; /// Limits the amount of time spent on a certain type of work in a cycle /// /// The limiter works dynamically: For a sampled subset of cycles it measures /// the time that is approximately required for fulfilling 1 work item, and /// calculates the amount of allowed work items per cycle. /// The estimates are smoothed over all cycles where the exact duration is measured. /// /// In cycles where no measurement is performed the previously determined work limit /// is used. /// /// For the limiter the exact definition of a work item does not matter. /// It could for example track the amount of transmitted bytes per cycle, /// or the amount of transmitted datagrams per cycle. /// It will however work best if the required time to complete a work item is /// constant. #[derive(Debug)] pub(crate) struct WorkLimiter { /// Whether to measure the required work time, or to use the previous estimates mode: Mode, /// The current cycle number cycle: u16, /// The time the cycle started - only used in measurement mode start_time: Option, /// How many work items have been completed in the cycle completed: usize, /// The amount of work items which are allowed for a cycle allowed: usize, /// The desired cycle time desired_cycle_time: Duration, /// The estimated and smoothed time per work item in nanoseconds smoothed_time_per_work_item_nanos: f64, } impl WorkLimiter { pub(crate) fn new(desired_cycle_time: Duration) -> Self { Self { mode: Mode::Measure, cycle: 0, start_time: None, completed: 0, allowed: 0, desired_cycle_time, smoothed_time_per_work_item_nanos: 0.0, } } /// Starts one work cycle pub(crate) fn start_cycle(&mut self, now: impl Fn() -> Instant) { self.completed = 0; if let Mode::Measure = self.mode { self.start_time = Some(now()); } } /// Returns whether more work can be performed inside the `desired_cycle_time` /// /// Requires that previous work was tracked using `record_work`. pub(crate) fn allow_work(&mut self, now: impl Fn() -> Instant) -> bool { match self.mode { Mode::Measure => (now() - self.start_time.unwrap()) < self.desired_cycle_time, Mode::HistoricData => self.completed < self.allowed, } } /// Records that `work` additional work items have been completed inside the cycle /// /// Must be called between `start_cycle` and `finish_cycle`. pub(crate) fn record_work(&mut self, work: usize) { self.completed += work; } /// Finishes one work cycle /// /// For cycles where the exact duration is measured this will update the estimates /// for the time per work item and the limit of allowed work items per cycle. /// The estimate is updated using the same exponential averaging (smoothing) /// mechanism which is used for determining QUIC path rtts: The last value is /// weighted by 1/8, and the previous average by 7/8. pub(crate) fn finish_cycle(&mut self, now: impl Fn() -> Instant) { // If no work was done in the cycle drop the measurement, it won't be useful if self.completed == 0 { return; } if let Mode::Measure = self.mode { let elapsed = now() - self.start_time.unwrap(); let time_per_work_item_nanos = (elapsed.as_nanos()) as f64 / self.completed as f64; // Calculate the time per work item. We set this to at least 1ns to avoid // dividing by 0 when calculating the allowed amount of work items. 
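            // The update below is the exponential smoothing described in the doc comment:
            // new_estimate = (7 * previous + sample) / 8, the same 1/8 gain used for QUIC
            // RTT estimation.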
self.smoothed_time_per_work_item_nanos = if self.allowed == 0 { // Initial estimate time_per_work_item_nanos } else { // Smoothed estimate (7.0 * self.smoothed_time_per_work_item_nanos + time_per_work_item_nanos) / 8.0 } .max(1.0); // Allow at least 1 work item in order to make progress self.allowed = (((self.desired_cycle_time.as_nanos()) as f64 / self.smoothed_time_per_work_item_nanos) as usize) .max(1); self.start_time = None; } self.cycle = self.cycle.wrapping_add(1); self.mode = match self.cycle % SAMPLING_INTERVAL { 0 => Mode::Measure, _ => Mode::HistoricData, }; } } /// We take a measurement sample once every `SAMPLING_INTERVAL` cycles const SAMPLING_INTERVAL: u16 = 256; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Mode { Measure, HistoricData, } #[cfg(test)] mod tests { use super::*; use std::cell::RefCell; #[test] fn limit_work() { const CYCLE_TIME: Duration = Duration::from_millis(500); const BATCH_WORK_ITEMS: usize = 12; const BATCH_TIME: Duration = Duration::from_millis(100); const EXPECTED_INITIAL_BATCHES: usize = (CYCLE_TIME.as_nanos() / BATCH_TIME.as_nanos()) as usize; const EXPECTED_ALLOWED_WORK_ITEMS: usize = EXPECTED_INITIAL_BATCHES * BATCH_WORK_ITEMS; let mut limiter = WorkLimiter::new(CYCLE_TIME); reset_time(); // The initial cycle is measuring limiter.start_cycle(get_time); let mut initial_batches = 0; while limiter.allow_work(get_time) { limiter.record_work(BATCH_WORK_ITEMS); advance_time(BATCH_TIME); initial_batches += 1; } limiter.finish_cycle(get_time); assert_eq!(initial_batches, EXPECTED_INITIAL_BATCHES); assert_eq!(limiter.allowed, EXPECTED_ALLOWED_WORK_ITEMS); let initial_time_per_work_item = limiter.smoothed_time_per_work_item_nanos; // The next cycles are using historic data const BATCH_SIZES: [usize; 4] = [1, 2, 3, 5]; for &batch_size in &BATCH_SIZES { limiter.start_cycle(get_time); let mut allowed_work = 0; while limiter.allow_work(get_time) { limiter.record_work(batch_size); allowed_work += batch_size; } limiter.finish_cycle(get_time); assert_eq!(allowed_work, EXPECTED_ALLOWED_WORK_ITEMS); } // After `SAMPLING_INTERVAL`, we get into measurement mode again for _ in 0..(SAMPLING_INTERVAL as usize - BATCH_SIZES.len() - 1) { limiter.start_cycle(get_time); limiter.record_work(1); limiter.finish_cycle(get_time); } // We now do more work per cycle, and expect the estimate of allowed // work items to go up const BATCH_WORK_ITEMS_2: usize = 96; const TIME_PER_WORK_ITEMS_2_NANOS: f64 = CYCLE_TIME.as_nanos() as f64 / (EXPECTED_INITIAL_BATCHES * BATCH_WORK_ITEMS_2) as f64; let expected_updated_time_per_work_item = (initial_time_per_work_item * 7.0 + TIME_PER_WORK_ITEMS_2_NANOS) / 8.0; let expected_updated_allowed_work_items = (CYCLE_TIME.as_nanos() as f64 / expected_updated_time_per_work_item) as usize; limiter.start_cycle(get_time); let mut initial_batches = 0; while limiter.allow_work(get_time) { limiter.record_work(BATCH_WORK_ITEMS_2); advance_time(BATCH_TIME); initial_batches += 1; } limiter.finish_cycle(get_time); assert_eq!(initial_batches, EXPECTED_INITIAL_BATCHES); assert_eq!(limiter.allowed, expected_updated_allowed_work_items); } thread_local! 
{ /// Mocked time pub static TIME: RefCell = RefCell::new(Instant::now()); } fn reset_time() { TIME.with(|t| { *t.borrow_mut() = Instant::now(); }) } fn get_time() -> Instant { TIME.with(|t| *t.borrow()) } fn advance_time(duration: Duration) { TIME.with(|t| { *t.borrow_mut() += duration; }) } } quinn-0.11.6/tests/many_connections.rs000064400000000000000000000150601046102023000161230ustar 00000000000000#![cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] use std::{ convert::TryInto, net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{Arc, Mutex}, time::Duration, }; use crc::Crc; use quinn::{ConnectionError, ReadError, StoppedError, TransportConfig, WriteError}; use rand::{self, RngCore}; use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; use tokio::runtime::Builder; struct Shared { errors: Vec, } #[test] #[ignore] fn connect_n_nodes_to_1_and_send_1mb_data() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let runtime = Builder::new_current_thread().enable_all().build().unwrap(); let _guard = runtime.enter(); let shared = Arc::new(Mutex::new(Shared { errors: vec![] })); let (cfg, listener_cert) = configure_listener(); let endpoint = quinn::Endpoint::server(cfg, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); let listener_addr = endpoint.local_addr().unwrap(); let expected_messages = 50; let crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); let shared2 = shared.clone(); let endpoint2 = endpoint.clone(); let read_incoming_data = async move { for _ in 0..expected_messages { let conn = endpoint2.accept().await.unwrap().await.unwrap(); let shared = shared2.clone(); let task = async move { while let Ok(stream) = conn.accept_uni().await { read_from_peer(stream).await?; conn.close(0u32.into(), &[]); } Ok(()) }; tokio::spawn(async move { if let Err(e) = task.await { shared.lock().unwrap().errors.push(e); } }); } }; runtime.spawn(read_incoming_data); let client_cfg = configure_connector(listener_cert); for _ in 0..expected_messages { let data = random_data_with_hash(1024 * 1024, &crc); let shared = shared.clone(); let connecting = endpoint .connect_with(client_cfg.clone(), listener_addr, "localhost") .unwrap(); let task = async move { let conn = connecting.await.map_err(WriteError::ConnectionLost)?; write_to_peer(conn, data).await?; Ok(()) }; runtime.spawn(async move { if let Err(e) = task.await { use quinn::ConnectionError::*; match e { WriteError::ConnectionLost(ApplicationClosed { .. 
}) | WriteError::ConnectionLost(Reset) => {} WriteError::ConnectionLost(e) => shared.lock().unwrap().errors.push(e), _ => panic!("unexpected write error"), } } }); } runtime.block_on(endpoint.wait_idle()); let shared = shared.lock().unwrap(); if !shared.errors.is_empty() { panic!("some connections failed: {:?}", shared.errors); } } async fn read_from_peer(mut stream: quinn::RecvStream) -> Result<(), quinn::ConnectionError> { let crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); match stream.read_to_end(1024 * 1024 * 5).await { Ok(data) => { assert!(hash_correct(&data, &crc)); Ok(()) } Err(e) => { use quinn::ReadToEndError::*; use ReadError::*; match e { TooLong | Read(ClosedStream) | Read(ZeroRttRejected) | Read(IllegalOrderedRead) => { unreachable!() } Read(Reset(error_code)) => panic!("unexpected stream reset: {error_code}"), Read(ConnectionLost(e)) => Err(e), } } } } async fn write_to_peer(conn: quinn::Connection, data: Vec) -> Result<(), WriteError> { let mut s = conn.open_uni().await.map_err(WriteError::ConnectionLost)?; s.write_all(&data).await?; s.finish().unwrap(); // Wait for the stream to be fully received match s.stopped().await { Ok(_) => Ok(()), Err(StoppedError::ConnectionLost(ConnectionError::ApplicationClosed { .. })) => Ok(()), Err(e) => Err(e.into()), } } /// Builds client configuration. Trusts given node certificate. fn configure_connector(node_cert: CertificateDer<'static>) -> quinn::ClientConfig { let mut roots = rustls::RootCertStore::empty(); roots.add(node_cert).unwrap(); let mut transport_config = TransportConfig::default(); transport_config.max_idle_timeout(Some(Duration::from_secs(20).try_into().unwrap())); let mut peer_cfg = quinn::ClientConfig::with_root_certificates(Arc::new(roots)).unwrap(); peer_cfg.transport_config(Arc::new(transport_config)); peer_cfg } /// Builds listener configuration along with its certificate. fn configure_listener() -> (quinn::ServerConfig, CertificateDer<'static>) { let (our_cert, our_priv_key) = gen_cert(); let mut our_cfg = quinn::ServerConfig::with_single_cert(vec![our_cert.clone()], our_priv_key.into()).unwrap(); let transport_config = Arc::get_mut(&mut our_cfg.transport).unwrap(); transport_config.max_idle_timeout(Some(Duration::from_secs(20).try_into().unwrap())); (our_cfg, our_cert) } fn gen_cert() -> (CertificateDer<'static>, PrivatePkcs8KeyDer<'static>) { let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]).unwrap(); ( cert.cert.into(), PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()), ) } /// Constructs a buffer with random bytes of given size prefixed with a hash of this data. fn random_data_with_hash(size: usize, crc: &Crc) -> Vec { let mut data = random_vec(size + 4); let hash = crc.checksum(&data[4..]); // write hash in big endian data[0] = (hash >> 24) as u8; data[1] = ((hash >> 16) & 0xff) as u8; data[2] = ((hash >> 8) & 0xff) as u8; data[3] = (hash & 0xff) as u8; data } /// Checks if given data buffer hash is correct. Hash itself is a 4 byte prefix in the data. fn hash_correct(data: &[u8], crc: &Crc) -> bool { let encoded_hash = ((data[0] as u32) << 24) | ((data[1] as u32) << 16) | ((data[2] as u32) << 8) | data[3] as u32; let actual_hash = crc.checksum(&data[4..]); encoded_hash == actual_hash } #[allow(unsafe_code)] fn random_vec(size: usize) -> Vec { let mut ret = vec![0; size]; rand::thread_rng().fill_bytes(&mut ret[..]); ret }
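
// Illustrative sketch, not part of the upstream test suite: a quick self-check of the
// 4-byte big-endian CRC32 framing implemented by `random_data_with_hash` and
// `hash_correct` above, using only helpers already defined in this file.
#[test]
fn crc_framing_round_trip() {
    let crc = Crc::<u32>::new(&crc::CRC_32_ISO_HDLC);
    let data = random_data_with_hash(1024, &crc);
    // The first four bytes hold the checksum of the remaining payload.
    assert!(hash_correct(&data, &crc));
    // Any corruption of the payload must be detected.
    let mut corrupted = data;
    corrupted[4] ^= 0xff;
    assert!(!hash_correct(&corrupted, &crc));
}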