quinn-0.10.2/.cargo_vcs_info.json0000644000000001430000000000100122440ustar { "git": { "sha1": "023f10376dbfdda4dfab2d775c5af61a4a29a803" }, "path_in_vcs": "quinn" }quinn-0.10.2/Cargo.lock0000644000001177340000000000100102360ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "anstream" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is-terminal", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" [[package]] name = "anstyle-parse" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" dependencies = [ "anstyle", "windows-sys 0.48.0", ] [[package]] name = "anyhow" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "async-channel" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", 
"futures-core", ] [[package]] name = "async-executor" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", "slab", ] [[package]] name = "async-global-executor" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", "async-io", "async-lock", "blocking", "futures-lite", "once_cell", ] [[package]] name = "async-io" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg", "cfg-if", "concurrent-queue", "futures-lite", "log", "parking", "polling", "rustix", "slab", "socket2 0.4.9", "waker-fn", ] [[package]] name = "async-lock" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", ] [[package]] name = "async-std" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-channel", "async-global-executor", "async-io", "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", "futures-lite", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", ] [[package]] name = "async-task" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "atomic-waker" version = "1.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "bencher" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blocking" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", "log", ] [[package]] name = "bumpalo" version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "bytes" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" version = 
"1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "4.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" dependencies = [ "clap_builder", "clap_derive", "once_cell", ] [[package]] name = "clap_builder" version = "4.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" dependencies = [ "anstream", "anstyle", "bitflags", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" dependencies = [ "heck", "proc-macro2", "quote", "syn 2.0.15", ] [[package]] name = "clap_lex" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" [[package]] name = "colorchoice" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "concurrent-queue" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "crc" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crossbeam-utils" version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] [[package]] name = "ctor" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn 1.0.109", ] [[package]] name = "directories-next" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ "cfg-if", "dirs-sys-next", ] [[package]] name = "dirs-sys-next" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users", "winapi", ] [[package]] name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.48.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "event-listener" version = "2.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "form_urlencoded" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ "percent-encoding", ] [[package]] name = "futures-channel" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-io" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", "parking", "pin-project-lite", "waker-fn", ] [[package]] name = "getrandom" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gloo-timers" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", 
"futures-core", "js-sys", "wasm-bindgen", ] [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] [[package]] name = "hermit-abi" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] name = "idna" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ "hermit-abi 0.3.1", "libc", "windows-sys 0.48.0", ] [[package]] name = "is-terminal" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", "windows-sys 0.48.0", ] [[package]] name = "itoa" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" dependencies = [ "wasm-bindgen", ] [[package]] name = "kv-log-macro" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ "log", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "linux-raw-sys" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", "value-bag", ] [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "mio" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi", "windows-sys 0.45.0", ] [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" 
dependencies = [ "overload", "winapi", ] [[package]] name = "num_cpus" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ "hermit-abi 0.2.6", "libc", ] [[package]] name = "num_threads" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "once_cell" version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "pem" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ "base64 0.13.1", ] [[package]] name = "percent-encoding" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pin-project" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.0.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "polling" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", "bitflags", "cfg-if", "concurrent-queue", "libc", "log", "pin-project-lite", "windows-sys 0.48.0", ] [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "quinn" version = "0.10.2" dependencies = [ "anyhow", "async-io", "async-std", "bencher", "bytes", "clap", "crc", "directories-next", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rand", "rcgen", "rustc-hash", "rustls", "rustls-pemfile", "thiserror", "tokio", "tracing", "tracing-futures", "tracing-subscriber", "url", ] [[package]] name = "quinn-proto" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8c8bb234e70c863204303507d841e7fa2295e95c822b2bb4ca8ebf57f17b1cb" dependencies = [ "bytes", "rand", "ring", "rustc-hash", "rustls", "rustls-native-certs", "slab", "thiserror", "tinyvec", 
"tracing", ] [[package]] name = "quinn-udp" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", "socket2 0.5.2", "tracing", "windows-sys 0.48.0", ] [[package]] name = "quote" version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rcgen" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", "time", "yasna", ] [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", "redox_syscall", "thiserror", ] [[package]] name = "regex" version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "regex-syntax 0.7.1", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "ring" version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", "once_cell", "spin", "untrusted", "web-sys", "winapi", ] [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", "windows-sys 0.48.0", ] [[package]] name = "rustls" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" dependencies = [ "ring", "rustls-webpki", "sct", ] [[package]] name = "rustls-native-certs" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", "rustls-pemfile", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ "base64 0.21.0", ] [[package]] name = "rustls-webpki" version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" dependencies = [ "ring", "untrusted", ] [[package]] name = "schannel" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ "windows-sys 0.42.0", ] [[package]] name = "sct" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", ] [[package]] name = "security-framework" version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "serde" version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" [[package]] name = "sharded-slab" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "slab" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] [[package]] name = "socket2" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] [[package]] name = "socket2" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b" dependencies = [ "libc", "windows-sys 0.48.0", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "thiserror" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.40" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", "syn 2.0.15", ] [[package]] name = "thread_local" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "time" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "libc", "num_threads", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "libc", "mio", "num_cpus", "pin-project-lite", "socket2 0.4.9", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", "syn 2.0.15", ] [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", "syn 2.0.15", ] [[package]] name = "tracing-core" version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", ] [[package]] name = "tracing-futures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ "pin-project", "tracing", ] [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", "thread_local", "time", "tracing", "tracing-core", ] [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "value-bag" version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" dependencies = [ "ctor", "version_check", ] [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "waker-fn" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 2.0.15", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" dependencies = [ "cfg-if", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" [[package]] name = "web-sys" version = "0.3.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = 
"0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.0", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" 
version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "yasna" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ "time", ] quinn-0.10.2/Cargo.toml0000644000000064610000000000100102530ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.63" name = "quinn" version = "0.10.2" description = "Versatile QUIC transport protocol implementation" readme = "README.md" keywords = ["quic"] categories = [ "network-programming", "asynchronous", ] license = "MIT OR Apache-2.0" repository = "https://github.com/quinn-rs/quinn" resolver = "1" [package.metadata.docs.rs] all-features = true [[example]] name = "server" required-features = ["tls-rustls"] [[example]] name = "client" required-features = ["tls-rustls"] [[example]] name = "insecure_connection" required-features = ["rustls/dangerous_configuration"] [[example]] name = "single_socket" required-features = ["tls-rustls"] [[example]] name = "connection" required-features = ["tls-rustls"] [[bench]] name = "bench" harness = false required-features = ["tls-rustls"] [dependencies.async-io] version = "1.6" optional = true [dependencies.async-std] version = "1.11" optional = true [dependencies.bytes] version = "1" [dependencies.futures-io] version = "0.3.19" optional = true [dependencies.pin-project-lite] version = "0.2" [dependencies.proto] version = "0.10.2" default-features = false package = "quinn-proto" [dependencies.rustc-hash] version = "1.1" [dependencies.rustls] version = "0.21.0" features = ["quic"] optional = true default-features = false [dependencies.thiserror] version = "1.0.21" [dependencies.tokio] version = "1.28.1" features = ["sync"] [dependencies.tracing] version = "0.1.10" [dependencies.udp] version = "0.4" default-features = false package = "quinn-udp" [dev-dependencies.anyhow] version = "1.0.22" [dev-dependencies.bencher] version = "0.1.5" [dev-dependencies.clap] version = "4" features = ["derive"] [dev-dependencies.crc] version = "3" [dev-dependencies.directories-next] version = "2" [dev-dependencies.rand] version = "0.8" [dev-dependencies.rcgen] version = "0.10.0" [dev-dependencies.rustls-pemfile] version = "1.0.0" [dev-dependencies.tokio] version = "1.28.1" features = [ "rt", "rt-multi-thread", 
"time", "macros", "sync", ] [dev-dependencies.tracing-futures] version = "0.2.0" features = ["std-future"] default-features = false [dev-dependencies.tracing-subscriber] version = "0.3.0" features = [ "env-filter", "fmt", "ansi", "time", "local-time", ] default-features = false [dev-dependencies.url] version = "2" [features] default = [ "native-certs", "tls-rustls", "runtime-tokio", "log", ] lock_tracking = [] log = [ "tracing/log", "proto/log", "udp/log", ] native-certs = ["proto/native-certs"] ring = ["proto/ring"] runtime-async-std = [ "async-io", "async-std", ] runtime-tokio = [ "tokio/time", "tokio/rt", "tokio/net", ] tls-rustls = [ "rustls", "proto/tls-rustls", "ring", ] [badges.codecov] repository = "djc/quinn" [badges.maintenance] status = "experimental" quinn-0.10.2/Cargo.toml.orig000064400000000000000000000053021046102023000137250ustar 00000000000000[package] name = "quinn" version = "0.10.2" license = "MIT OR Apache-2.0" repository = "https://github.com/quinn-rs/quinn" description = "Versatile QUIC transport protocol implementation" readme = "../README.md" keywords = ["quic"] categories = [ "network-programming", "asynchronous" ] workspace = ".." 
edition = "2021" rust-version = "1.63" [package.metadata.docs.rs] all-features = true [features] default = ["native-certs", "tls-rustls", "runtime-tokio", "log"] # Records how long locks are held, and warns if they are held >= 1ms lock_tracking = [] # Provides `ClientConfig::with_native_roots()` convenience method native-certs = ["proto/native-certs"] tls-rustls = ["rustls", "proto/tls-rustls", "ring"] # Enables `Endpoint::client` and `Endpoint::server` conveniences ring = ["proto/ring"] runtime-tokio = ["tokio/time", "tokio/rt", "tokio/net"] runtime-async-std = ["async-io", "async-std"] # Write logs via the `log` crate when no `tracing` subscriber exists log = ["tracing/log", "proto/log", "udp/log"] [badges] codecov = { repository = "djc/quinn" } maintenance = { status = "experimental" } [dependencies] async-io = { version = "1.6", optional = true } async-std = { version = "1.11", optional = true } bytes = "1" # Enables futures::io::{AsyncRead, AsyncWrite} support for streams futures-io = { version = "0.3.19", optional = true } rustc-hash = "1.1" pin-project-lite = "0.2" proto = { package = "quinn-proto", path = "../quinn-proto", version = "0.10.2", default-features = false } rustls = { version = "0.21.0", default-features = false, features = ["quic"], optional = true } thiserror = "1.0.21" tracing = "0.1.10" tokio = { version = "1.28.1", features = ["sync"] } udp = { package = "quinn-udp", path = "../quinn-udp", version = "0.4", default-features = false } [dev-dependencies] anyhow = "1.0.22" crc = "3" bencher = "0.1.5" directories-next = "2" rand = "0.8" rcgen = "0.10.0" rustls-pemfile = "1.0.0" clap = { version = "4", features = ["derive"] } tokio = { version = "1.28.1", features = ["rt", "rt-multi-thread", "time", "macros", "sync"] } tracing-subscriber = { version = "0.3.0", default-features = false, features = ["env-filter", "fmt", "ansi", "time", "local-time"] } tracing-futures = { version = "0.2.0", default-features = false, features = ["std-future"] } url = 
"2" [[example]] name = "server" required-features = ["tls-rustls"] [[example]] name = "client" required-features = ["tls-rustls"] [[example]] name = "insecure_connection" required-features = ["rustls/dangerous_configuration"] [[example]] name = "single_socket" required-features = ["tls-rustls"] [[example]] name = "connection" required-features = ["tls-rustls"] [[bench]] name = "bench" harness = false required-features = ["tls-rustls"] quinn-0.10.2/README.md000064400000000000000000000136651046102023000123300ustar 00000000000000

[![Documentation](https://docs.rs/quinn/badge.svg)](https://docs.rs/quinn/) [![Crates.io](https://img.shields.io/crates/v/quinn.svg)](https://crates.io/crates/quinn) [![Build status](https://github.com/quinn-rs/quinn/workflows/CI/badge.svg)](https://github.com/djc/quinn/actions?query=workflow%3ACI) [![codecov](https://codecov.io/gh/quinn-rs/quinn/branch/main/graph/badge.svg)](https://codecov.io/gh/quinn-rs/quinn) [![Chat](https://img.shields.io/badge/chat-%23quinn:matrix.org-%2346BC99?logo=matrix)](https://matrix.to/#/#quinn:matrix.org) [![Chat](https://badges.gitter.im/gitterHQ/gitter.svg)](https://gitter.im/djc/quinn) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT) [![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE-APACHE) Quinn is a pure-rust, async-compatible implementation of the IETF [QUIC][quic] transport protocol. ## Features - Simultaneous client/server operation - Ordered and unordered stream reads for improved performance - Works on stable Rust, tested on Linux, macOS and Windows - Pluggable cryptography, with a standard implementation backed by [rustls][rustls] and [*ring*][ring] - Application-layer datagrams for small, unreliable messages - Future-based async API - Minimum supported Rust version of 1.63.0 ## Overview - **quinn:** High-level async API based on tokio, see for usage. This will be used by most developers. (Basic benchmarks are included.) - **quinn-proto:** Deterministic state machine of the protocol which performs [**no** I/O][sans-io] internally and is suitable for use with custom event loops (and potentially a C or C++ API). - **quinn-udp:** UDP sockets with ECN information tuned for the protocol. - **bench:** Benchmarks without any framework. - **fuzz:** Fuzz tests. 
# Getting Started **Examples** ```sh $ cargo run --example server ./ $ cargo run --example client https://localhost:4433/Cargo.toml ``` This launches an HTTP 0.9 server on the loopback address serving the current working directory, with the client fetching `./Cargo.toml`. By default, the server generates a self-signed certificate and stores it to disk, where the client will automatically find and trust it. **Links** - Talk at [RustFest Paris (May 2018) presentation][talk]; [slides][slides]; [YouTube][youtube] - Usage [examples][examples] - Guide [book][documentation] ## Usage Notes
Click to show the notes ### Buffers A Quinn endpoint corresponds to a single UDP socket, no matter how many connections are in use. Handling high aggregate data rates on a single endpoint can require a larger UDP buffer than is configured by default in most environments. If you observe erratic latency and/or throughput over a stable network link, consider increasing the buffer sizes used. For example, you could adjust the `SO_SNDBUF` and `SO_RCVBUF` options of the UDP socket to be used before passing it in to Quinn. Note that some platforms (e.g. Linux) require elevated privileges or modified system configuration for a process to increase its UDP buffer sizes. ### Certificates By default, Quinn clients validate the cryptographic identity of servers they connect to. This prevents an active, on-path attacker from intercepting messages, but requires trusting some certificate authority. For many purposes, this can be accomplished by using certificates from [Let's Encrypt][letsencrypt] for servers, and relying on the default configuration for clients. For some cases, including peer-to-peer, trust-on-first-use, deliberately insecure applications, or any case where servers are not identified by domain name, this isn't practical. Arbitrary certificate validation logic can be implemented by enabling the `dangerous_configuration` feature of `rustls` and constructing a Quinn `ClientConfig` with an overridden certificate verifier by hand. When operating your own certificate authority doesn't make sense, [rcgen][rcgen] can be used to generate self-signed certificates on demand. To support trust-on-first-use, servers that automatically generate self-signed certificates should write their generated certificate to persistent storage and reuse it on future runs.

## Contribution All feedback welcome. Feel free to file bugs, requests for documentation and any other feedback to the [issue tracker][issues]. The quinn-proto test suite uses simulated IO for reproducibility and to avoid long sleeps in certain timing-sensitive tests. If the `SSLKEYLOGFILE` environment variable is set, the tests will emit UDP packets for inspection using external protocol analyzers like Wireshark, and NSS-compatible key logs for the client side of each connection will be written to the path specified in the variable. The minimum supported Rust version for published releases of our crates will always be at least 6 months old at the time of release. ## Authors * **Dirkjan Ochtman** - *Project owner & founder* * **Benjamin Saunders** - *Project owner & founder* * **Jean-Christophe Begue** - *Project collaborator, author of the HTTP/3 Implementation* [quic]: https://quicwg.github.io/ [issues]: https://github.com/djc/quinn/issues [rustls]: https://github.com/ctz/rustls [ring]: https://github.com/briansmith/ring [talk]: https://paris.rustfest.eu/sessions/a-quic-future-in-rust [slides]: https://github.com/djc/talks/blob/ff760845b51ba4836cce82e7f2c640ecb5fd59fa/2018-05-26%20A%20QUIC%20future%20in%20Rust/Quinn-Speaker.pdf [animation]: https://dirkjan.ochtman.nl/files/head-of-line-blocking.html [youtube]: https://www.youtube.com/watch?v=EHgyY5DNdvI [letsencrypt]: https://letsencrypt.org/ [rcgen]: https://crates.io/crates/rcgen [examples]: https://github.com/djc/quinn/tree/main/quinn/examples [documentation]: https://quinn-rs.github.io/quinn/networking-introduction.html [sans-io]: https://sans-io.readthedocs.io/how-to-sans-io.html quinn-0.10.2/benches/bench.rs000064400000000000000000000120371046102023000140750ustar 00000000000000use std::{ net::{IpAddr, Ipv6Addr, SocketAddr, UdpSocket}, sync::Arc, thread, }; use bencher::{benchmark_group, benchmark_main, Bencher}; use tokio::runtime::{Builder, Runtime}; use tracing::error_span; use tracing_futures::Instrument 
as _; use quinn::{Endpoint, TokioRuntime}; benchmark_group!( benches, large_data_1_stream, large_data_10_streams, small_data_1_stream, small_data_100_streams ); benchmark_main!(benches); fn large_data_1_stream(bench: &mut Bencher) { send_data(bench, LARGE_DATA, 1); } fn large_data_10_streams(bench: &mut Bencher) { send_data(bench, LARGE_DATA, 10); } fn small_data_1_stream(bench: &mut Bencher) { send_data(bench, SMALL_DATA, 1); } fn small_data_100_streams(bench: &mut Bencher) { send_data(bench, SMALL_DATA, 100); } fn send_data(bench: &mut Bencher, data: &'static [u8], concurrent_streams: usize) { let _ = tracing_subscriber::fmt::try_init(); let ctx = Context::new(); let (addr, thread) = ctx.spawn_server(); let (endpoint, client, runtime) = ctx.make_client(addr); let client = Arc::new(client); bench.bytes = (data.len() as u64) * (concurrent_streams as u64); bench.iter(|| { let mut handles = Vec::new(); for _ in 0..concurrent_streams { let client = client.clone(); handles.push(runtime.spawn(async move { let mut stream = client.open_uni().await.unwrap(); stream.write_all(data).await.unwrap(); stream.finish().await.unwrap(); })); } runtime.block_on(async { for handle in handles { handle.await.unwrap(); } }); }); drop(client); runtime.block_on(endpoint.wait_idle()); thread.join().unwrap() } struct Context { server_config: quinn::ServerConfig, client_config: quinn::ClientConfig, } impl Context { fn new() -> Self { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = rustls::PrivateKey(cert.serialize_private_key_der()); let cert = rustls::Certificate(cert.serialize_der().unwrap()); let mut server_config = quinn::ServerConfig::with_single_cert(vec![cert.clone()], key).unwrap(); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(1024_u16.into()); let mut roots = rustls::RootCertStore::empty(); roots.add(&cert).unwrap(); let client_config = 
quinn::ClientConfig::with_root_certificates(roots); Self { server_config, client_config, } } pub fn spawn_server(&self) -> (SocketAddr, thread::JoinHandle<()>) { let sock = UdpSocket::bind(SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0)).unwrap(); let addr = sock.local_addr().unwrap(); let config = self.server_config.clone(); let handle = thread::spawn(move || { let runtime = rt(); let endpoint = { let _guard = runtime.enter(); Endpoint::new( Default::default(), Some(config), sock, Arc::new(TokioRuntime), ) .unwrap() }; let handle = runtime.spawn( async move { let connection = endpoint .accept() .await .expect("accept") .await .expect("connect"); while let Ok(mut stream) = connection.accept_uni().await { tokio::spawn(async move { while stream .read_chunk(usize::MAX, false) .await .unwrap() .is_some() {} }); } } .instrument(error_span!("server")), ); runtime.block_on(handle).unwrap(); }); (addr, handle) } pub fn make_client( &self, server_addr: SocketAddr, ) -> (quinn::Endpoint, quinn::Connection, Runtime) { let runtime = rt(); let endpoint = { let _guard = runtime.enter(); Endpoint::client(SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0)).unwrap() }; let connection = runtime .block_on(async { endpoint .connect_with(self.client_config.clone(), server_addr, "localhost") .unwrap() .instrument(error_span!("client")) .await }) .unwrap(); (endpoint, connection, runtime) } } fn rt() -> Runtime { Builder::new_current_thread().enable_all().build().unwrap() } const LARGE_DATA: &[u8] = &[0xAB; 1024 * 1024]; const SMALL_DATA: &[u8] = &[0xAB; 1]; quinn-0.10.2/examples/README.md000064400000000000000000000056221046102023000141400ustar 00000000000000## HTTP/0.9 File Serving Example The `server` and `client` examples demonstrate fetching files using a HTTP-like toy protocol. 1. Server (`server.rs`) The server listens for any client requesting a file. If the file path is valid and allowed, it returns the contents. 
Open up a terminal and execute: ```text $ cargo run --example server ./ ``` 2. Client (`client.rs`) The client requests a file and prints it to the console. If the file is on the server, it will receive the response. In a new terminal execute: ```test $ cargo run --example client https://localhost:4433/Cargo.toml ``` where `Cargo.toml` is any file in the directory passed to the server. **Result:** The output will be the contents of this README. **Troubleshooting:** If the client times out with no activity on the server, try forcing the server to run on IPv4 by running it with `cargo run --example server -- ./ --listen 127.0.0.1:4433`. The server listens on IPv6 by default, `localhost` tends to resolve to IPv4, and support for accepting IPv4 packets on IPv6 sockets varies between platforms. If the client prints `failed to process request: failed reading file`, the request was processed successfully but the path segment of the URL did not correspond to a file in the directory being served. ## Minimal Example The `connection.rs` example intends to use the smallest amount of code to make a simple QUIC connection. The server issues it's own certificate and passes it to the client to trust. ```text $ cargo run --example connection ``` This example will make a QUIC connection on localhost, and you should see output like: ```text [client] connected: addr=127.0.0.1:5000 [server] connection accepted: addr=127.0.0.1:53712 ``` ## Insecure Connection Example The `insecure_connection.rs` example demonstrates how to make a QUIC connection that ignores the server certificate. ```text $ cargo run --example insecure_connection --features="rustls/dangerous_configuration" ``` ## Single Socket Example You can have multiple QUIC connections over a single UDP socket. 
This is especially useful, if you are building a peer-to-peer system where you potentially need to communicate with thousands of peers or if you have a [hole punched](https://en.wikipedia.org/wiki/UDP_hole_punching) UDP socket. Additionally, QUIC servers and clients can both operate on the same UDP socket. This example demonstrates how to make multiple outgoing connections on a single UDP socket. ```text $ cargo run --example single_socket ``` The expected output should be something like: ```text [client] connected: addr=127.0.0.1:5000 [server] incoming connection: addr=127.0.0.1:48930 [client] connected: addr=127.0.0.1:5001 [client] connected: addr=127.0.0.1:5002 [server] incoming connection: addr=127.0.0.1:48930 [server] incoming connection: addr=127.0.0.1:48930 ``` Notice how the server sees multiple incoming connections with different IDs coming from the same endpoint. quinn-0.10.2/examples/client.rs000064400000000000000000000112761046102023000145070ustar 00000000000000//! This example demonstrates an HTTP client that requests files from a server. //! //! Checkout the `README.md` for guidance. use std::{ fs, io::{self, Write}, net::ToSocketAddrs, path::PathBuf, sync::Arc, time::{Duration, Instant}, }; use anyhow::{anyhow, Result}; use clap::Parser; use tracing::{error, info}; use url::Url; mod common; /// HTTP/0.9 over QUIC client #[derive(Parser, Debug)] #[clap(name = "client")] struct Opt { /// Perform NSS-compatible TLS key logging to the file specified in `SSLKEYLOGFILE`. 
#[clap(long = "keylog")] keylog: bool, url: Url, /// Override hostname used for certificate verification #[clap(long = "host")] host: Option, /// Custom certificate authority to trust, in DER format #[clap(long = "ca")] ca: Option, /// Simulate NAT rebinding after connecting #[clap(long = "rebind")] rebind: bool, } fn main() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let opt = Opt::parse(); let code = { if let Err(e) = run(opt) { eprintln!("ERROR: {e}"); 1 } else { 0 } }; ::std::process::exit(code); } #[tokio::main] async fn run(options: Opt) -> Result<()> { let url = options.url; let remote = (url.host_str().unwrap(), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .ok_or_else(|| anyhow!("couldn't resolve to an address"))?; let mut roots = rustls::RootCertStore::empty(); if let Some(ca_path) = options.ca { roots.add(&rustls::Certificate(fs::read(ca_path)?))?; } else { let dirs = directories_next::ProjectDirs::from("org", "quinn", "quinn-examples").unwrap(); match fs::read(dirs.data_local_dir().join("cert.der")) { Ok(cert) => { roots.add(&rustls::Certificate(cert))?; } Err(ref e) if e.kind() == io::ErrorKind::NotFound => { info!("local server certificate not found"); } Err(e) => { error!("failed to open local server certificate: {}", e); } } } let mut client_crypto = rustls::ClientConfig::builder() .with_safe_defaults() .with_root_certificates(roots) .with_no_client_auth(); client_crypto.alpn_protocols = common::ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); if options.keylog { client_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } let client_config = quinn::ClientConfig::new(Arc::new(client_crypto)); let mut endpoint = quinn::Endpoint::client("[::]:0".parse().unwrap())?; endpoint.set_default_client_config(client_config); let request = format!("GET {}\r\n", url.path()); let start = Instant::now(); let rebind = 
options.rebind; let host = options .host .as_ref() .map_or_else(|| url.host_str(), |x| Some(x)) .ok_or_else(|| anyhow!("no hostname specified"))?; eprintln!("connecting to {host} at {remote}"); let conn = endpoint .connect(remote, host)? .await .map_err(|e| anyhow!("failed to connect: {}", e))?; eprintln!("connected at {:?}", start.elapsed()); let (mut send, mut recv) = conn .open_bi() .await .map_err(|e| anyhow!("failed to open stream: {}", e))?; if rebind { let socket = std::net::UdpSocket::bind("[::]:0").unwrap(); let addr = socket.local_addr().unwrap(); eprintln!("rebinding to {addr}"); endpoint.rebind(socket).expect("rebind failed"); } send.write_all(request.as_bytes()) .await .map_err(|e| anyhow!("failed to send request: {}", e))?; send.finish() .await .map_err(|e| anyhow!("failed to shutdown stream: {}", e))?; let response_start = Instant::now(); eprintln!("request sent at {:?}", response_start - start); let resp = recv .read_to_end(usize::max_value()) .await .map_err(|e| anyhow!("failed to read response: {}", e))?; let duration = response_start.elapsed(); eprintln!( "response received in {:?} - {} KiB/s", duration, resp.len() as f32 / (duration_secs(&duration) * 1024.0) ); io::stdout().write_all(&resp).unwrap(); io::stdout().flush().unwrap(); conn.close(0u32.into(), b"done"); // Give the server a fair chance to receive the close packet endpoint.wait_idle().await; Ok(()) } fn duration_secs(x: &Duration) -> f32 { x.as_secs() as f32 + x.subsec_nanos() as f32 * 1e-9 } quinn-0.10.2/examples/common/mod.rs000064400000000000000000000045371046102023000153020ustar 00000000000000#![cfg(feature = "rustls")] //! Commonly used code in most examples. use quinn::{ClientConfig, Endpoint, ServerConfig}; use std::{error::Error, net::SocketAddr, sync::Arc}; /// Constructs a QUIC endpoint configured for use a client only. /// /// ## Args /// /// - server_certs: list of trusted certificates. 
#[allow(unused)] pub fn make_client_endpoint( bind_addr: SocketAddr, server_certs: &[&[u8]], ) -> Result> { let client_cfg = configure_client(server_certs)?; let mut endpoint = Endpoint::client(bind_addr)?; endpoint.set_default_client_config(client_cfg); Ok(endpoint) } /// Constructs a QUIC endpoint configured to listen for incoming connections on a certain address /// and port. /// /// ## Returns /// /// - a stream of incoming QUIC connections /// - server certificate serialized into DER format #[allow(unused)] pub fn make_server_endpoint(bind_addr: SocketAddr) -> Result<(Endpoint, Vec), Box> { let (server_config, server_cert) = configure_server()?; let endpoint = Endpoint::server(server_config, bind_addr)?; Ok((endpoint, server_cert)) } /// Builds default quinn client config and trusts given certificates. /// /// ## Args /// /// - server_certs: a list of trusted certificates in DER format. fn configure_client(server_certs: &[&[u8]]) -> Result> { let mut certs = rustls::RootCertStore::empty(); for cert in server_certs { certs.add(&rustls::Certificate(cert.to_vec()))?; } let client_config = ClientConfig::with_root_certificates(certs); Ok(client_config) } /// Returns default server configuration along with its certificate. 
fn configure_server() -> Result<(ServerConfig, Vec<u8>), Box<dyn Error>> {
    // Self-signed certificate for "localhost"; DER-encoded key and cert.
    let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap();
    let cert_der = cert.serialize_der().unwrap();
    let priv_key = cert.serialize_private_key_der();
    let priv_key = rustls::PrivateKey(priv_key);
    let cert_chain = vec![rustls::Certificate(cert_der.clone())];

    let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key)?;
    let transport_config = Arc::get_mut(&mut server_config.transport).unwrap();
    // The examples only use client-initiated streams, so disable server->client uni streams.
    transport_config.max_concurrent_uni_streams(0_u8.into());

    Ok((server_config, cert_der))
}

#[allow(unused)]
pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"];
quinn-0.10.2/examples/connection.rs000064400000000000000000000025531046102023000153660ustar 00000000000000//! This example intends to use the smallest amount of code to make a simple QUIC connection.
//!
//! Checkout the `README.md` for guidance.

mod common;
use common::{make_client_endpoint, make_server_endpoint};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let server_addr = "127.0.0.1:5000".parse().unwrap();
    let (endpoint, server_cert) = make_server_endpoint(server_addr)?;
    // accept a single connection
    let endpoint2 = endpoint.clone();
    tokio::spawn(async move {
        let incoming_conn = endpoint2.accept().await.unwrap();
        let conn = incoming_conn.await.unwrap();
        println!(
            "[server] connection accepted: addr={}",
            conn.remote_address()
        );
        // Dropping all handles associated with a connection implicitly closes it
    });

    let endpoint = make_client_endpoint("0.0.0.0:0".parse().unwrap(), &[&server_cert])?;
    // connect to server
    let connection = endpoint
        .connect(server_addr, "localhost")
        .unwrap()
        .await
        .unwrap();
    println!("[client] connected: addr={}", connection.remote_address());

    // Waiting for a stream will complete with an error when the server closes the connection
    let _ = connection.accept_uni().await;

    // Make sure the server has a chance to clean up
    endpoint.wait_idle().await;
    Ok(())
}
quinn-0.10.2/examples/insecure_connection.rs000064400000000000000000000051101046102023000172560ustar 00000000000000//! This example demonstrates how to make a QUIC connection that ignores the server certificate.
//!
//! Checkout the `README.md` for guidance.

use std::{error::Error, net::SocketAddr, sync::Arc};

use quinn::{ClientConfig, Endpoint};

mod common;
use common::make_server_endpoint;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // server and client are running on the same thread asynchronously
    let addr = "127.0.0.1:5000".parse().unwrap();
    tokio::spawn(run_server(addr));
    run_client(addr).await?;
    Ok(())
}

/// Runs a QUIC server bound to given address.
async fn run_server(addr: SocketAddr) {
    let (endpoint, _server_cert) = make_server_endpoint(addr).unwrap();
    // accept a single connection
    let incoming_conn = endpoint.accept().await.unwrap();
    let conn = incoming_conn.await.unwrap();
    println!(
        "[server] connection accepted: addr={}",
        conn.remote_address()
    );
}

async fn run_client(server_addr: SocketAddr) -> Result<(), Box<dyn Error>> {
    let mut endpoint = Endpoint::client("127.0.0.1:0".parse().unwrap())?;
    endpoint.set_default_client_config(configure_client());

    // connect to server
    let connection = endpoint
        .connect(server_addr, "localhost")
        .unwrap()
        .await
        .unwrap();
    println!("[client] connected: addr={}", connection.remote_address());
    // Dropping handles allows the corresponding objects to automatically shut down
    drop(connection);
    // Make sure the server has a chance to clean up
    endpoint.wait_idle().await;
    Ok(())
}

/// Dummy certificate verifier that treats any certificate as valid.
/// NOTE, such verification is vulnerable to MITM attacks, but convenient for testing.
struct SkipServerVerification; impl SkipServerVerification { fn new() -> Arc { Arc::new(Self) } } impl rustls::client::ServerCertVerifier for SkipServerVerification { fn verify_server_cert( &self, _end_entity: &rustls::Certificate, _intermediates: &[rustls::Certificate], _server_name: &rustls::ServerName, _scts: &mut dyn Iterator, _ocsp_response: &[u8], _now: std::time::SystemTime, ) -> Result { Ok(rustls::client::ServerCertVerified::assertion()) } } fn configure_client() -> ClientConfig { let crypto = rustls::ClientConfig::builder() .with_safe_defaults() .with_custom_certificate_verifier(SkipServerVerification::new()) .with_no_client_auth(); ClientConfig::new(Arc::new(crypto)) } quinn-0.10.2/examples/server.rs000064400000000000000000000214751046102023000145410ustar 00000000000000//! This example demonstrates an HTTP server that serves files from a directory. //! //! Checkout the `README.md` for guidance. use std::{ ascii, fs, io, net::SocketAddr, path::{self, Path, PathBuf}, str, sync::Arc, }; use anyhow::{anyhow, bail, Context, Result}; use clap::Parser; use tracing::{error, info, info_span}; use tracing_futures::Instrument as _; mod common; #[derive(Parser, Debug)] #[clap(name = "server")] struct Opt { /// file to log TLS keys to for debugging #[clap(long = "keylog")] keylog: bool, /// directory to serve files from root: PathBuf, /// TLS private key in PEM format #[clap(short = 'k', long = "key", requires = "cert")] key: Option, /// TLS certificate in PEM format #[clap(short = 'c', long = "cert", requires = "key")] cert: Option, /// Enable stateless retries #[clap(long = "stateless-retry")] stateless_retry: bool, /// Address to listen on #[clap(long = "listen", default_value = "[::1]:4433")] listen: SocketAddr, } fn main() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let opt = Opt::parse(); let code = { if let Err(e) = run(opt) { 
eprintln!("ERROR: {e}"); 1 } else { 0 } }; ::std::process::exit(code); } #[tokio::main] async fn run(options: Opt) -> Result<()> { let (certs, key) = if let (Some(key_path), Some(cert_path)) = (&options.key, &options.cert) { let key = fs::read(key_path).context("failed to read private key")?; let key = if key_path.extension().map_or(false, |x| x == "der") { rustls::PrivateKey(key) } else { let pkcs8 = rustls_pemfile::pkcs8_private_keys(&mut &*key) .context("malformed PKCS #8 private key")?; match pkcs8.into_iter().next() { Some(x) => rustls::PrivateKey(x), None => { let rsa = rustls_pemfile::rsa_private_keys(&mut &*key) .context("malformed PKCS #1 private key")?; match rsa.into_iter().next() { Some(x) => rustls::PrivateKey(x), None => { anyhow::bail!("no private keys found"); } } } } }; let cert_chain = fs::read(cert_path).context("failed to read certificate chain")?; let cert_chain = if cert_path.extension().map_or(false, |x| x == "der") { vec![rustls::Certificate(cert_chain)] } else { rustls_pemfile::certs(&mut &*cert_chain) .context("invalid PEM-encoded certificate")? 
.into_iter() .map(rustls::Certificate) .collect() }; (cert_chain, key) } else { let dirs = directories_next::ProjectDirs::from("org", "quinn", "quinn-examples").unwrap(); let path = dirs.data_local_dir(); let cert_path = path.join("cert.der"); let key_path = path.join("key.der"); let (cert, key) = match fs::read(&cert_path).and_then(|x| Ok((x, fs::read(&key_path)?))) { Ok(x) => x, Err(ref e) if e.kind() == io::ErrorKind::NotFound => { info!("generating self-signed certificate"); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = cert.serialize_private_key_der(); let cert = cert.serialize_der().unwrap(); fs::create_dir_all(path).context("failed to create certificate directory")?; fs::write(&cert_path, &cert).context("failed to write certificate")?; fs::write(&key_path, &key).context("failed to write private key")?; (cert, key) } Err(e) => { bail!("failed to read certificate: {}", e); } }; let key = rustls::PrivateKey(key); let cert = rustls::Certificate(cert); (vec![cert], key) }; let mut server_crypto = rustls::ServerConfig::builder() .with_safe_defaults() .with_no_client_auth() .with_single_cert(certs, key)?; server_crypto.alpn_protocols = common::ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); if options.keylog { server_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); } let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_crypto)); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.max_concurrent_uni_streams(0_u8.into()); if options.stateless_retry { server_config.use_retry(true); } let root = Arc::::from(options.root.clone()); if !root.exists() { bail!("root path does not exist"); } let endpoint = quinn::Endpoint::server(server_config, options.listen)?; eprintln!("listening on {}", endpoint.local_addr()?); while let Some(conn) = endpoint.accept().await { info!("connection incoming"); let fut = handle_connection(root.clone(), conn); tokio::spawn(async 
move { if let Err(e) = fut.await { error!("connection failed: {reason}", reason = e.to_string()) } }); } Ok(()) } async fn handle_connection(root: Arc, conn: quinn::Connecting) -> Result<()> { let connection = conn.await?; let span = info_span!( "connection", remote = %connection.remote_address(), protocol = %connection .handshake_data() .unwrap() .downcast::().unwrap() .protocol .map_or_else(|| "".into(), |x| String::from_utf8_lossy(&x).into_owned()) ); async { info!("established"); // Each stream initiated by the client constitutes a new request. loop { let stream = connection.accept_bi().await; let stream = match stream { Err(quinn::ConnectionError::ApplicationClosed { .. }) => { info!("connection closed"); return Ok(()); } Err(e) => { return Err(e); } Ok(s) => s, }; let fut = handle_request(root.clone(), stream); tokio::spawn( async move { if let Err(e) = fut.await { error!("failed: {reason}", reason = e.to_string()); } } .instrument(info_span!("request")), ); } } .instrument(span) .await?; Ok(()) } async fn handle_request( root: Arc, (mut send, mut recv): (quinn::SendStream, quinn::RecvStream), ) -> Result<()> { let req = recv .read_to_end(64 * 1024) .await .map_err(|e| anyhow!("failed reading request: {}", e))?; let mut escaped = String::new(); for &x in &req[..] { let part = ascii::escape_default(x).collect::>(); escaped.push_str(str::from_utf8(&part).unwrap()); } info!(content = %escaped); // Execute the request let resp = process_get(&root, &req).unwrap_or_else(|e| { error!("failed: {}", e); format!("failed to process request: {e}\n").into_bytes() }); // Write the response send.write_all(&resp) .await .map_err(|e| anyhow!("failed to send response: {}", e))?; // Gracefully terminate the stream send.finish() .await .map_err(|e| anyhow!("failed to shutdown stream: {}", e))?; info!("complete"); Ok(()) } fn process_get(root: &Path, x: &[u8]) -> Result> { if x.len() < 4 || &x[0..4] != b"GET " { bail!("missing GET"); } if x[4..].len() < 2 || &x[x.len() - 2..] 
!= b"\r\n" { bail!("missing \\r\\n"); } let x = &x[4..x.len() - 2]; let end = x.iter().position(|&c| c == b' ').unwrap_or(x.len()); let path = str::from_utf8(&x[..end]).context("path is malformed UTF-8")?; let path = Path::new(&path); let mut real_path = PathBuf::from(root); let mut components = path.components(); match components.next() { Some(path::Component::RootDir) => {} _ => { bail!("path must be absolute"); } } for c in components { match c { path::Component::Normal(x) => { real_path.push(x); } x => { bail!("illegal component in path: {:?}", x); } } } let data = fs::read(&real_path).context("failed reading file")?; Ok(data) } quinn-0.10.2/examples/single_socket.rs000064400000000000000000000035431046102023000160600ustar 00000000000000//! This example demonstrates how to make multiple outgoing connections on a single UDP socket. //! //! Checkout the `README.md` for guidance. use std::{error::Error, net::SocketAddr}; use quinn::Endpoint; mod common; use common::{make_client_endpoint, make_server_endpoint}; #[tokio::main] async fn main() -> Result<(), Box> { let addr1 = "127.0.0.1:5000".parse().unwrap(); let addr2 = "127.0.0.1:5001".parse().unwrap(); let addr3 = "127.0.0.1:5002".parse().unwrap(); let server1_cert = run_server(addr1)?; let server2_cert = run_server(addr2)?; let server3_cert = run_server(addr3)?; let client = make_client_endpoint( "127.0.0.1:0".parse().unwrap(), &[&server1_cert, &server2_cert, &server3_cert], )?; // connect to multiple endpoints using the same socket/endpoint tokio::join!( run_client(&client, addr1), run_client(&client, addr2), run_client(&client, addr3), ); // Make sure the server has a chance to clean up client.wait_idle().await; Ok(()) } /// Runs a QUIC server bound to given address and returns server certificate. 
fn run_server(addr: SocketAddr) -> Result, Box> { let (endpoint, server_cert) = make_server_endpoint(addr)?; // accept a single connection tokio::spawn(async move { let connection = endpoint.accept().await.unwrap().await.unwrap(); println!( "[server] incoming connection: addr={}", connection.remote_address() ); }); Ok(server_cert) } /// Attempt QUIC connection with the given server address. async fn run_client(endpoint: &Endpoint, server_addr: SocketAddr) { let connect = endpoint.connect(server_addr, "localhost").unwrap(); let connection = connect.await.unwrap(); println!("[client] connected: addr={}", connection.remote_address()); } quinn-0.10.2/src/connection.rs000064400000000000000000001233751046102023000143450ustar 00000000000000use std::{ any::Any, fmt, future::Future, net::{IpAddr, SocketAddr}, pin::Pin, sync::Arc, task::{Context, Poll, Waker}, time::{Duration, Instant}, }; use crate::runtime::{AsyncTimer, Runtime}; use bytes::Bytes; use pin_project_lite::pin_project; use proto::{ConnectionError, ConnectionHandle, ConnectionStats, Dir, StreamEvent, StreamId}; use rustc_hash::FxHashMap; use thiserror::Error; use tokio::sync::{futures::Notified, mpsc, oneshot, Notify}; use tracing::debug_span; use udp::UdpState; use crate::{ mutex::Mutex, recv_stream::RecvStream, send_stream::{SendStream, WriteError}, ConnectionEvent, EndpointEvent, VarInt, }; use proto::congestion::Controller; /// In-progress connection attempt future #[derive(Debug)] #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] pub struct Connecting { conn: Option, connected: oneshot::Receiver, handshake_data_ready: Option>, } impl Connecting { pub(crate) fn new( handle: ConnectionHandle, conn: proto::Connection, endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, conn_events: mpsc::UnboundedReceiver, udp_state: Arc, runtime: Arc, ) -> Self { let (on_handshake_data_send, on_handshake_data_recv) = oneshot::channel(); let (on_connected_send, 
on_connected_recv) = oneshot::channel(); let conn = ConnectionRef::new( handle, conn, endpoint_events, conn_events, on_handshake_data_send, on_connected_send, udp_state, runtime.clone(), ); runtime.spawn(Box::pin(ConnectionDriver(conn.clone()))); Self { conn: Some(conn), connected: on_connected_recv, handshake_data_ready: Some(on_handshake_data_recv), } } /// Convert into a 0-RTT or 0.5-RTT connection at the cost of weakened security /// /// Opens up the connection for use before the handshake finishes, allowing the API user to /// send data with 0-RTT encryption if the necessary key material is available. This is useful /// for reducing start-up latency by beginning transmission of application data without waiting /// for the handshake's cryptographic security guarantees to be established. /// /// When the `ZeroRttAccepted` future completes, the connection has been fully established. /// /// # Security /// /// On outgoing connections, this enables transmission of 0-RTT data, which might be vulnerable /// to replay attacks, and should therefore never invoke non-idempotent operations. /// /// On incoming connections, this enables transmission of 0.5-RTT data, which might be /// intercepted by a man-in-the-middle. If this occurs, the handshake will not complete /// successfully. /// /// # Errors /// /// Outgoing connections are only 0-RTT-capable when a cryptographic session ticket cached from /// a previous connection to the same server is available, and includes a 0-RTT key. If no such /// ticket is found, `self` is returned unmodified. /// /// For incoming connections, a 0.5-RTT connection will always be successfully constructed. pub fn into_0rtt(mut self) -> Result<(Connection, ZeroRttAccepted), Self> { // This lock borrows `self` and would normally be dropped at the end of this scope, so we'll // have to release it explicitly before returning `self` by value. 
let conn = (self.conn.as_mut().unwrap()).state.lock("into_0rtt"); let is_ok = conn.inner.has_0rtt() || conn.inner.side().is_server(); drop(conn); if is_ok { let conn = self.conn.take().unwrap(); Ok((Connection(conn), ZeroRttAccepted(self.connected))) } else { Err(self) } } /// Parameters negotiated during the handshake /// /// The dynamic type returned is determined by the configured /// [`Session`](proto::crypto::Session). For the default `rustls` session, the return value can /// be [`downcast`](Box::downcast) to a /// [`crypto::rustls::HandshakeData`](crate::crypto::rustls::HandshakeData). pub async fn handshake_data(&mut self) -> Result, ConnectionError> { // Taking &mut self allows us to use a single oneshot channel rather than dealing with // potentially many tasks waiting on the same event. It's a bit of a hack, but keeps things // simple. if let Some(x) = self.handshake_data_ready.take() { let _ = x.await; } let conn = self.conn.as_ref().unwrap(); let inner = conn.state.lock("handshake"); inner .inner .crypto_session() .handshake_data() .ok_or_else(|| { inner .error .clone() .expect("spurious handshake data ready notification") }) } /// The local IP address which was used when the peer established /// the connection /// /// This can be different from the address the endpoint is bound to, in case /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`. /// /// This will return `None` for clients. /// /// Retrieving the local IP address is currently supported on the following /// platforms: /// - Linux /// - FreeBSD /// - macOS /// /// On all non-supported platforms the local IP address will not be available, /// and the method will return `None`. pub fn local_ip(&self) -> Option { let conn = self.conn.as_ref().unwrap(); let inner = conn.state.lock("local_ip"); inner.inner.local_ip() } /// The peer's UDP address. /// /// Will panic if called after `poll` has returned `Ready`. 
pub fn remote_address(&self) -> SocketAddr {
    let conn_ref: &ConnectionRef = self.conn.as_ref().expect("used after yielding Ready");
    conn_ref.state.lock("remote_address").inner.remote_address()
}
}

impl Future for Connecting {
    type Output = Result<Connection, ConnectionError>;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        Pin::new(&mut self.connected).poll(cx).map(|_| {
            let conn = self.conn.take().unwrap();
            let inner = conn.state.lock("connecting");
            if inner.connected {
                drop(inner);
                Ok(Connection(conn))
            } else {
                Err(inner
                    .error
                    .clone()
                    .expect("connected signaled without connection success or error"))
            }
        })
    }
}

/// Future that completes when a connection is fully established
///
/// For clients, the resulting value indicates if 0-RTT was accepted. For servers, the resulting
/// value is meaningless.
#[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"]
pub struct ZeroRttAccepted(oneshot::Receiver<bool>);

impl Future for ZeroRttAccepted {
    type Output = bool;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        Pin::new(&mut self.0).poll(cx).map(|x| x.unwrap_or(false))
    }
}

/// A future that drives protocol logic for a connection
///
/// This future handles the protocol logic for a single connection, routing events from the
/// `Connection` API object to the `Endpoint` task and the related stream-related interfaces.
/// It also keeps track of outstanding timeouts for the `Connection`.
///
/// If the connection encounters an error condition, this future will yield an error. It will
/// terminate (yielding `Ok(())`) if the connection was closed without error. Unlike other
/// connection-related futures, this waits for the draining period to complete to ensure that
/// packets still in flight from the peer are handled gracefully.
#[must_use = "connection drivers must be spawned for their connections to function"]
#[derive(Debug)]
struct ConnectionDriver(ConnectionRef);

impl Future for ConnectionDriver {
    type Output = ();

    #[allow(unused_mut)] // MSRV
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        let conn = &mut *self.0.state.lock("poll");

        let span = debug_span!("drive", id = conn.handle.0);
        let _guard = span.enter();

        if let Err(e) = conn.process_conn_events(&self.0.shared, cx) {
            conn.terminate(e, &self.0.shared);
            return Poll::Ready(());
        }
        let mut keep_going = conn.drive_transmit();
        // If a timer expires, there might be more to transmit. When we transmit something, we
        // might need to reset a timer. Hence, we must loop until neither happens.
        keep_going |= conn.drive_timer(cx);
        conn.forward_endpoint_events();
        conn.forward_app_events(&self.0.shared);

        if !conn.inner.is_drained() {
            if keep_going {
                // If the connection hasn't processed all tasks, schedule it again
                cx.waker().wake_by_ref();
            } else {
                conn.driver = Some(cx.waker().clone());
            }
            return Poll::Pending;
        }
        if conn.error.is_none() {
            unreachable!("drained connections always have an error");
        }
        Poll::Ready(())
    }
}

/// A QUIC connection.
///
/// If all references to a connection (including every clone of the `Connection` handle, streams of
/// incoming streams, and the various stream types) have been dropped, then the connection will be
/// automatically closed with an `error_code` of 0 and an empty `reason`. You can also close the
/// connection explicitly by calling [`Connection::close()`].
///
/// May be cloned to obtain another handle to the same connection.
///
/// [`Connection::close()`]: Connection::close
#[derive(Debug, Clone)]
pub struct Connection(ConnectionRef);

impl Connection {
    /// Initiate a new outgoing unidirectional stream.
    ///
    /// Streams are cheap and instantaneous to open unless blocked by flow control. As a
    /// consequence, the peer won't be notified that a stream has been opened until the stream is
    /// actually used.
    pub fn open_uni(&self) -> OpenUni<'_> {
        OpenUni {
            conn: &self.0,
            notify: self.0.shared.stream_budget_available[Dir::Uni as usize].notified(),
        }
    }

    /// Initiate a new outgoing bidirectional stream.
    ///
    /// Streams are cheap and instantaneous to open unless blocked by flow control. As a
    /// consequence, the peer won't be notified that a stream has been opened until the stream is
    /// actually used.
    pub fn open_bi(&self) -> OpenBi<'_> {
        OpenBi {
            conn: &self.0,
            notify: self.0.shared.stream_budget_available[Dir::Bi as usize].notified(),
        }
    }

    /// Accept the next incoming uni-directional stream
    pub fn accept_uni(&self) -> AcceptUni<'_> {
        AcceptUni {
            conn: &self.0,
            notify: self.0.shared.stream_incoming[Dir::Uni as usize].notified(),
        }
    }

    /// Accept the next incoming bidirectional stream
    pub fn accept_bi(&self) -> AcceptBi<'_> {
        AcceptBi {
            conn: &self.0,
            notify: self.0.shared.stream_incoming[Dir::Bi as usize].notified(),
        }
    }

    /// Receive an application datagram
    pub fn read_datagram(&self) -> ReadDatagram<'_> {
        ReadDatagram {
            conn: &self.0,
            notify: self.0.shared.datagrams.notified(),
        }
    }

    /// Wait for the connection to be closed for any reason
    ///
    /// Despite the return type's name, closed connections are often not an error condition at the
    /// application layer. Cases that might be routine include [`ConnectionError::LocallyClosed`]
    /// and [`ConnectionError::ApplicationClosed`].
    pub async fn closed(&self) -> ConnectionError {
        {
            let conn = self.0.state.lock("closed");
            if let Some(error) = conn.error.as_ref() {
                return error.clone();
            }
            // Construct the future while the lock is held to ensure we can't miss a wakeup if
            // the `Notify` is signaled immediately after we release the lock. `await` it after
            // the lock guard is out of scope.
            self.0.shared.closed.notified()
        }
        .await;

        self.0
            .state
            .lock("closed")
            .error
            .as_ref()
            .expect("closed without an error")
            .clone()
    }

    /// If the connection is closed, the reason why.
    ///
    /// Returns `None` if the connection is still open.
    pub fn close_reason(&self) -> Option<ConnectionError> {
        self.0.state.lock("close_reason").error.clone()
    }

    /// Close the connection immediately.
    ///
    /// Pending operations will fail immediately with [`ConnectionError::LocallyClosed`]. Delivery
    /// of data on unfinished streams is not guaranteed, so the application must call this only
    /// when all important communications have been completed, e.g. by calling [`finish`] on
    /// outstanding [`SendStream`]s and waiting for the resulting futures to complete.
    ///
    /// `error_code` and `reason` are not interpreted, and are provided directly to the peer.
    ///
    /// `reason` will be truncated to fit in a single packet with overhead; to improve odds that it
    /// is preserved in full, it should be kept under 1KiB.
    ///
    /// [`ConnectionError::LocallyClosed`]: crate::ConnectionError::LocallyClosed
    /// [`finish`]: crate::SendStream::finish
    /// [`SendStream`]: crate::SendStream
    pub fn close(&self, error_code: VarInt, reason: &[u8]) {
        let conn = &mut *self.0.state.lock("close");
        conn.close(error_code, Bytes::copy_from_slice(reason), &self.0.shared);
    }

    /// Transmit `data` as an unreliable, unordered application datagram
    ///
    /// Application datagrams are a low-level primitive. They may be lost or delivered out of order,
    /// and `data` must both fit inside a single QUIC packet and be smaller than the maximum
    /// dictated by the peer.
    pub fn send_datagram(&self, data: Bytes) -> Result<(), SendDatagramError> {
        let conn = &mut *self.0.state.lock("send_datagram");
        if let Some(ref x) = conn.error {
            return Err(SendDatagramError::ConnectionLost(x.clone()));
        }
        use proto::SendDatagramError::*;
        match conn.inner.datagrams().send(data) {
            Ok(()) => {
                conn.wake();
                Ok(())
            }
            Err(e) => Err(match e {
                UnsupportedByPeer => SendDatagramError::UnsupportedByPeer,
                Disabled => SendDatagramError::Disabled,
                TooLarge => SendDatagramError::TooLarge,
            }),
        }
    }

    /// Compute the maximum size of datagrams that may be passed to [`send_datagram()`].
    ///
    /// Returns `None` if datagrams are unsupported by the peer or disabled locally.
    ///
    /// This may change over the lifetime of a connection according to variation in the path MTU
    /// estimate. The peer can also enforce an arbitrarily small fixed limit, but if the peer's
    /// limit is large this is guaranteed to be a little over a kilobyte at minimum.
    ///
    /// Not necessarily the maximum size of received datagrams.
    ///
    /// [`send_datagram()`]: Connection::send_datagram
    pub fn max_datagram_size(&self) -> Option<usize> {
        self.0
            .state
            .lock("max_datagram_size")
            .inner
            .datagrams()
            .max_size()
    }

    /// Bytes available in the outgoing datagram buffer
    ///
    /// When greater than zero, calling [`send_datagram()`](Self::send_datagram) with a datagram of
    /// at most this size is guaranteed not to cause older datagrams to be dropped.
    pub fn datagram_send_buffer_space(&self) -> usize {
        self.0
            .state
            .lock("datagram_send_buffer_space")
            .inner
            .datagrams()
            .send_buffer_space()
    }

    /// The peer's UDP address
    ///
    /// If `ServerConfig::migration` is `true`, clients may change addresses at will, e.g. when
    /// switching to a cellular internet connection.
    pub fn remote_address(&self) -> SocketAddr {
        self.0.state.lock("remote_address").inner.remote_address()
    }

    /// The local IP address which was used when the peer established
    /// the connection
    ///
    /// This can be different from the address the endpoint is bound to, in case
    /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`.
    ///
    /// This will return `None` for clients.
    ///
    /// Retrieving the local IP address is currently supported on the following
    /// platforms:
    /// - Linux
    ///
    /// On all non-supported platforms the local IP address will not be available,
    /// and the method will return `None`.
    pub fn local_ip(&self) -> Option<IpAddr> {
        self.0.state.lock("local_ip").inner.local_ip()
    }

    /// Current best estimate of this connection's latency (round-trip-time)
    pub fn rtt(&self) -> Duration {
        self.0.state.lock("rtt").inner.rtt()
    }

    /// Returns connection statistics
    pub fn stats(&self) -> ConnectionStats {
        self.0.state.lock("stats").inner.stats()
    }

    /// Current state of the congestion control algorithm, for debugging purposes
    pub fn congestion_state(&self) -> Box<dyn Controller> {
        self.0
            .state
            .lock("congestion_state")
            .inner
            .congestion_state()
            .clone_box()
    }

    /// Parameters negotiated during the handshake
    ///
    /// Guaranteed to return `Some` on fully established connections or after
    /// [`Connecting::handshake_data()`] succeeds. See that method's documentations for details on
    /// the returned value.
    ///
    /// [`Connection::handshake_data()`]: crate::Connecting::handshake_data
    pub fn handshake_data(&self) -> Option<Box<dyn Any>> {
        self.0
            .state
            .lock("handshake_data")
            .inner
            .crypto_session()
            .handshake_data()
    }

    /// Cryptographic identity of the peer
    ///
    /// The dynamic type returned is determined by the configured
    /// [`Session`](proto::crypto::Session). For the default `rustls` session, the return value can
    /// be [`downcast`](Box::downcast) to a Vec<[rustls::Certificate](rustls::Certificate)>
    pub fn peer_identity(&self) -> Option<Box<dyn Any>> {
        self.0
            .state
            .lock("peer_identity")
            .inner
            .crypto_session()
            .peer_identity()
    }

    /// A stable identifier for this connection
    ///
    /// Peer addresses and connection IDs can change, but this value will remain
    /// fixed for the lifetime of the connection.
    pub fn stable_id(&self) -> usize {
        self.0.stable_id()
    }

    // Update traffic keys spontaneously for testing purposes.
    #[doc(hidden)]
    pub fn force_key_update(&self) {
        self.0
            .state
            .lock("force_key_update")
            .inner
            .initiate_key_update()
    }

    /// Derive keying material from this connection's TLS session secrets.
    ///
    /// When both peers call this method with the same `label` and `context`
    /// arguments and `output` buffers of equal length, they will get the
    /// same sequence of bytes in `output`. These bytes are cryptographically
    /// strong and pseudorandom, and are suitable for use as keying material.
    ///
    /// See [RFC5705](https://tools.ietf.org/html/rfc5705) for more information.
    pub fn export_keying_material(
        &self,
        output: &mut [u8],
        label: &[u8],
        context: &[u8],
    ) -> Result<(), proto::crypto::ExportKeyingMaterialError> {
        self.0
            .state
            .lock("export_keying_material")
            .inner
            .crypto_session()
            .export_keying_material(output, label, context)
    }

    /// Modify the number of remotely initiated unidirectional streams that may be concurrently open
    ///
    /// No streams may be opened by the peer unless fewer than `count` are already open. Large
    /// `count`s increase both minimum and worst-case memory consumption.
pub fn set_max_concurrent_uni_streams(&self, count: VarInt) { let mut conn = self.0.state.lock("set_max_concurrent_uni_streams"); conn.inner.set_max_concurrent_streams(Dir::Uni, count); // May need to send MAX_STREAMS to make progress conn.wake(); } /// See [`proto::TransportConfig::receive_window()`] pub fn set_receive_window(&self, receive_window: VarInt) { let mut conn = self.0.state.lock("set_receive_window"); conn.inner.set_receive_window(receive_window); conn.wake(); } /// Modify the number of remotely initiated bidirectional streams that may be concurrently open /// /// No streams may be opened by the peer unless fewer than `count` are already open. Large /// `count`s increase both minimum and worst-case memory consumption. pub fn set_max_concurrent_bi_streams(&self, count: VarInt) { let mut conn = self.0.state.lock("set_max_concurrent_bi_streams"); conn.inner.set_max_concurrent_streams(Dir::Bi, count); // May need to send MAX_STREAMS to make progress conn.wake(); } } pin_project! { /// Future produced by [`Connection::open_uni`] pub struct OpenUni<'a> { conn: &'a ConnectionRef, #[pin] notify: Notified<'a>, } } impl Future for OpenUni<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let this = self.project(); let (conn, id, is_0rtt) = ready!(poll_open(ctx, this.conn, this.notify, Dir::Uni))?; Poll::Ready(Ok(SendStream::new(conn, id, is_0rtt))) } } pin_project! 
{
    /// Future produced by [`Connection::open_bi`]
    pub struct OpenBi<'a> {
        conn: &'a ConnectionRef,
        #[pin]
        notify: Notified<'a>,
    }
}

impl Future for OpenBi<'_> {
    type Output = Result<(SendStream, RecvStream), ConnectionError>;

    fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let (conn, id, is_0rtt) = ready!(poll_open(ctx, this.conn, this.notify, Dir::Bi))?;
        // Both halves share the same stream ID; the send half gets a clone of the handle
        Poll::Ready(Ok((
            SendStream::new(conn.clone(), id, is_0rtt),
            RecvStream::new(conn, id, is_0rtt),
        )))
    }
}

/// Common implementation of `OpenUni`/`OpenBi`: try to open a stream of direction
/// `dir`, or register with the budget-available `Notify` and stay pending.
fn poll_open<'a>(
    ctx: &mut Context<'_>,
    conn: &'a ConnectionRef,
    mut notify: Pin<&mut Notified<'a>>,
    dir: Dir,
) -> Poll<Result<(ConnectionRef, StreamId, bool), ConnectionError>> {
    let mut state = conn.state.lock("poll_open");
    if let Some(ref e) = state.error {
        return Poll::Ready(Err(e.clone()));
    } else if let Some(id) = state.inner.streams().open(dir) {
        // Client-side streams opened before the handshake completes are 0-RTT streams
        let is_0rtt = state.inner.side().is_client() && state.inner.is_handshaking();
        drop(state); // Release the lock so clone can take it
        return Poll::Ready(Ok((conn.clone(), id, is_0rtt)));
    }
    loop {
        match notify.as_mut().poll(ctx) {
            // `state` lock ensures we didn't race with readiness
            Poll::Pending => return Poll::Pending,
            // Spurious wakeup, get a new future
            Poll::Ready(()) => {
                notify.set(conn.shared.stream_budget_available[dir as usize].notified())
            }
        }
    }
}

pin_project! {
    /// Future produced by [`Connection::accept_uni`]
    pub struct AcceptUni<'a> {
        conn: &'a ConnectionRef,
        #[pin]
        notify: Notified<'a>,
    }
}

impl Future for AcceptUni<'_> {
    type Output = Result<RecvStream, ConnectionError>;

    fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let (conn, id, is_0rtt) = ready!(poll_accept(ctx, this.conn, this.notify, Dir::Uni))?;
        Poll::Ready(Ok(RecvStream::new(conn, id, is_0rtt)))
    }
}

pin_project!
{
    /// Future produced by [`Connection::accept_bi`]
    pub struct AcceptBi<'a> {
        conn: &'a ConnectionRef,
        #[pin]
        notify: Notified<'a>,
    }
}

impl Future for AcceptBi<'_> {
    type Output = Result<(SendStream, RecvStream), ConnectionError>;

    fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let (conn, id, is_0rtt) = ready!(poll_accept(ctx, this.conn, this.notify, Dir::Bi))?;
        Poll::Ready(Ok((
            SendStream::new(conn.clone(), id, is_0rtt),
            RecvStream::new(conn, id, is_0rtt),
        )))
    }
}

/// Common implementation of `AcceptUni`/`AcceptBi`: drain an already-received
/// stream if one exists, otherwise wait on the incoming-stream `Notify`.
fn poll_accept<'a>(
    ctx: &mut Context<'_>,
    conn: &'a ConnectionRef,
    mut notify: Pin<&mut Notified<'a>>,
    dir: Dir,
) -> Poll<Result<(ConnectionRef, StreamId, bool), ConnectionError>> {
    let mut state = conn.state.lock("poll_accept");
    // Check for incoming streams before checking `state.error` so that already-received streams,
    // which are necessarily finite, can be drained from a closed connection.
    if let Some(id) = state.inner.streams().accept(dir) {
        let is_0rtt = state.inner.is_handshaking();
        state.wake(); // To send additional stream ID credit
        drop(state); // Release the lock so clone can take it
        return Poll::Ready(Ok((conn.clone(), id, is_0rtt)));
    } else if let Some(ref e) = state.error {
        return Poll::Ready(Err(e.clone()));
    }
    loop {
        match notify.as_mut().poll(ctx) {
            // `state` lock ensures we didn't race with readiness
            Poll::Pending => return Poll::Pending,
            // Spurious wakeup, get a new future
            Poll::Ready(()) => notify.set(conn.shared.stream_incoming[dir as usize].notified()),
        }
    }
}

pin_project! {
    /// Future produced by [`Connection::read_datagram`]
    pub struct ReadDatagram<'a> {
        conn: &'a ConnectionRef,
        #[pin]
        notify: Notified<'a>,
    }
}

impl Future for ReadDatagram<'_> {
    type Output = Result<Bytes, ConnectionError>;

    fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        let mut state = this.conn.state.lock("ReadDatagram::poll");
        // Check for buffered datagrams before checking `state.error` so that already-received
        // datagrams, which are necessarily finite, can be drained from a closed connection.
        if let Some(x) = state.inner.datagrams().recv() {
            return Poll::Ready(Ok(x));
        } else if let Some(ref e) = state.error {
            return Poll::Ready(Err(e.clone()));
        }
        loop {
            match this.notify.as_mut().poll(ctx) {
                // `state` lock ensures we didn't race with readiness
                Poll::Pending => return Poll::Pending,
                // Spurious wakeup, get a new future
                Poll::Ready(()) => this.notify.set(this.conn.shared.datagrams.notified()),
            }
        }
    }
}

/// Reference-counted handle to a connection's shared driver state.
#[derive(Debug)]
pub(crate) struct ConnectionRef(Arc<ConnectionInner>);

impl ConnectionRef {
    #[allow(clippy::too_many_arguments)]
    fn new(
        handle: ConnectionHandle,
        conn: proto::Connection,
        endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>,
        conn_events: mpsc::UnboundedReceiver<ConnectionEvent>,
        on_handshake_data: oneshot::Sender<()>,
        on_connected: oneshot::Sender<bool>,
        udp_state: Arc<UdpState>,
        runtime: Arc<dyn Runtime>,
    ) -> Self {
        Self(Arc::new(ConnectionInner {
            state: Mutex::new(State {
                inner: conn,
                driver: None,
                handle,
                on_handshake_data: Some(on_handshake_data),
                on_connected: Some(on_connected),
                connected: false,
                timer: None,
                timer_deadline: None,
                conn_events,
                endpoint_events,
                blocked_writers: FxHashMap::default(),
                blocked_readers: FxHashMap::default(),
                finishing: FxHashMap::default(),
                stopped: FxHashMap::default(),
                error: None,
                ref_count: 0,
                udp_state,
                runtime,
            }),
            shared: Shared::default(),
        }))
    }

    fn stable_id(&self) -> usize {
        // Address of the shared inner allocation: unique and fixed for this connection's lifetime
        &*self.0 as *const _ as usize
    }
}

impl Clone for ConnectionRef {
    fn clone(&self) -> Self {
        // Count live user handles so `Drop` below can tell when the last one goes away
        self.state.lock("clone").ref_count += 1;
        Self(self.0.clone())
    }
}

impl Drop for ConnectionRef {
    fn drop(&mut self) {
        let conn = &mut *self.state.lock("drop");
        if let Some(x) = conn.ref_count.checked_sub(1) {
            conn.ref_count = x;
            if x == 0 && !conn.inner.is_closed() {
                // If the driver is alive, it's just it and us, so we'd better shut it down. If it's
                // not, we can't do any harm. If there were any streams being opened, then either
                // the connection will be closed for an unrelated reason or a fresh reference will
                // be constructed for the newly opened stream.
                conn.implicit_close(&self.shared);
            }
        }
    }
}

impl std::ops::Deref for ConnectionRef {
    type Target = ConnectionInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[derive(Debug)]
pub(crate) struct ConnectionInner {
    pub(crate) state: Mutex<State>,
    pub(crate) shared: Shared,
}

/// Wakeup channels shared between the driver and user-facing futures;
/// accessible without taking the `state` mutex.
#[derive(Debug, Default)]
pub(crate) struct Shared {
    /// Notified when new streams may be locally initiated due to an increase in stream ID flow
    /// control budget
    stream_budget_available: [Notify; 2],
    /// Notified when the peer has initiated a new stream
    stream_incoming: [Notify; 2],
    datagrams: Notify,
    closed: Notify,
}

pub(crate) struct State {
    pub(crate) inner: proto::Connection,
    /// Waker for the driver task, set while the driver is pending
    driver: Option<Waker>,
    handle: ConnectionHandle,
    on_handshake_data: Option<oneshot::Sender<()>>,
    on_connected: Option<oneshot::Sender<bool>>,
    connected: bool,
    timer: Option<Pin<Box<dyn AsyncTimer>>>,
    timer_deadline: Option<Instant>,
    conn_events: mpsc::UnboundedReceiver<ConnectionEvent>,
    endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>,
    pub(crate) blocked_writers: FxHashMap<StreamId, Waker>,
    pub(crate) blocked_readers: FxHashMap<StreamId, Waker>,
    pub(crate) finishing: FxHashMap<StreamId, oneshot::Sender<Option<WriteError>>>,
    pub(crate) stopped: FxHashMap<StreamId, Waker>,
    /// Always set to Some before the connection becomes drained
    pub(crate) error: Option<ConnectionError>,
    /// Number of live handles that can be used to initiate or handle I/O; excludes the driver
    ref_count: usize,
    udp_state: Arc<UdpState>,
    runtime: Arc<dyn Runtime>,
}

impl State {
    /// Forward pending transmits to the endpoint driver; returns true if more work remains
    fn drive_transmit(&mut self) -> bool {
        let now = Instant::now();
        let mut transmits = 0;

        let max_datagrams = self.udp_state.max_gso_segments();

        while let Some(t) = self.inner.poll_transmit(now, max_datagrams) {
            transmits += match t.segment_size {
                None => 1,
                Some(s) => (t.contents.len() + s - 1) / s, // round up
            };
            // If the endpoint driver is gone, noop.
            let _ = self
                .endpoint_events
                .send((self.handle, EndpointEvent::Transmit(t)));

            if transmits >= MAX_TRANSMIT_DATAGRAMS {
                // TODO: What isn't ideal here yet is that if we don't poll all
                // datagrams that could be sent we don't go into the `app_limited`
                // state and CWND continues to grow until we get here the next time.
// See https://github.com/quinn-rs/quinn/issues/1126 return true; } } false } fn forward_endpoint_events(&mut self) { while let Some(event) = self.inner.poll_endpoint_events() { // If the endpoint driver is gone, noop. let _ = self .endpoint_events .send((self.handle, EndpointEvent::Proto(event))); } } /// If this returns `Err`, the endpoint is dead, so the driver should exit immediately. fn process_conn_events( &mut self, shared: &Shared, cx: &mut Context, ) -> Result<(), ConnectionError> { loop { match self.conn_events.poll_recv(cx) { Poll::Ready(Some(ConnectionEvent::Ping)) => { self.inner.ping(); } Poll::Ready(Some(ConnectionEvent::Proto(event))) => { self.inner.handle_event(event); } Poll::Ready(Some(ConnectionEvent::Close { reason, error_code })) => { self.close(error_code, reason, shared); } Poll::Ready(None) => { return Err(ConnectionError::TransportError(proto::TransportError { code: proto::TransportErrorCode::INTERNAL_ERROR, frame: None, reason: "endpoint driver future was dropped".to_string(), })); } Poll::Pending => { return Ok(()); } } } } fn forward_app_events(&mut self, shared: &Shared) { while let Some(event) = self.inner.poll() { use proto::Event::*; match event { HandshakeDataReady => { if let Some(x) = self.on_handshake_data.take() { let _ = x.send(()); } } Connected => { self.connected = true; if let Some(x) = self.on_connected.take() { // We don't care if the on-connected future was dropped let _ = x.send(self.inner.accepted_0rtt()); } } ConnectionLost { reason } => { self.terminate(reason, shared); } Stream(StreamEvent::Writable { id }) => { if let Some(writer) = self.blocked_writers.remove(&id) { writer.wake(); } } Stream(StreamEvent::Opened { dir: Dir::Uni }) => { shared.stream_incoming[Dir::Uni as usize].notify_waiters(); } Stream(StreamEvent::Opened { dir: Dir::Bi }) => { shared.stream_incoming[Dir::Bi as usize].notify_waiters(); } DatagramReceived => { shared.datagrams.notify_waiters(); } Stream(StreamEvent::Readable { id }) => { if let 
                        Some(reader) = self.blocked_readers.remove(&id)
                    {
                        reader.wake();
                    }
                }
                Stream(StreamEvent::Available { dir }) => {
                    // Might mean any number of streams are ready, so we wake up everyone
                    shared.stream_budget_available[dir as usize].notify_waiters();
                }
                Stream(StreamEvent::Finished { id }) => {
                    if let Some(finishing) = self.finishing.remove(&id) {
                        // If the finishing stream was already dropped, there's nothing more to do.
                        let _ = finishing.send(None);
                    }
                    if let Some(stopped) = self.stopped.remove(&id) {
                        stopped.wake();
                    }
                }
                Stream(StreamEvent::Stopped { id, error_code }) => {
                    if let Some(stopped) = self.stopped.remove(&id) {
                        stopped.wake();
                    }
                    if let Some(finishing) = self.finishing.remove(&id) {
                        let _ = finishing.send(Some(WriteError::Stopped(error_code)));
                    }
                    if let Some(writer) = self.blocked_writers.remove(&id) {
                        writer.wake();
                    }
                }
            }
        }
    }

    /// (Re)arm the runtime timer for the next protocol deadline; returns true
    /// if the timer has already expired and a timeout was handled.
    fn drive_timer(&mut self, cx: &mut Context) -> bool {
        // Check whether we need to (re)set the timer. If so, we must poll again to ensure the
        // timer is registered with the runtime (and check whether it's already
        // expired).
        match self.inner.poll_timeout() {
            Some(deadline) => {
                if let Some(delay) = &mut self.timer {
                    // There is no need to reset the tokio timer if the deadline
                    // did not change
                    if self
                        .timer_deadline
                        .map(|current_deadline| current_deadline != deadline)
                        .unwrap_or(true)
                    {
                        delay.as_mut().reset(deadline);
                    }
                } else {
                    self.timer = Some(self.runtime.new_timer(deadline));
                }
                // Store the actual expiration time of the timer
                self.timer_deadline = Some(deadline);
            }
            None => {
                self.timer_deadline = None;
                return false;
            }
        }

        if self.timer_deadline.is_none() {
            return false;
        }

        let delay = self
            .timer
            .as_mut()
            .expect("timer must exist in this state")
            .as_mut();
        if delay.poll(cx).is_pending() {
            // Since there wasn't a timeout event, there is nothing new
            // for the connection to do
            return false;
        }

        // A timer expired, so the caller needs to check for
        // new transmits, which might cause new timers to be set.
        self.inner.handle_timeout(Instant::now());
        self.timer_deadline = None;
        true
    }

    /// Wake up a blocked `Driver` task to process I/O
    pub(crate) fn wake(&mut self) {
        if let Some(x) = self.driver.take() {
            x.wake();
        }
    }

    /// Used to wake up all blocked futures when the connection becomes closed for any reason
    fn terminate(&mut self, reason: ConnectionError, shared: &Shared) {
        // Record the error first so racing futures observe it when they re-check state
        self.error = Some(reason.clone());
        if let Some(x) = self.on_handshake_data.take() {
            let _ = x.send(());
        }
        for (_, writer) in self.blocked_writers.drain() {
            writer.wake()
        }
        for (_, reader) in self.blocked_readers.drain() {
            reader.wake()
        }
        shared.stream_budget_available[Dir::Uni as usize].notify_waiters();
        shared.stream_budget_available[Dir::Bi as usize].notify_waiters();
        shared.stream_incoming[Dir::Uni as usize].notify_waiters();
        shared.stream_incoming[Dir::Bi as usize].notify_waiters();
        shared.datagrams.notify_waiters();
        for (_, x) in self.finishing.drain() {
            let _ = x.send(Some(WriteError::ConnectionLost(reason.clone())));
        }
        if let Some(x) = self.on_connected.take() {
            let _ = x.send(false);
        }
        for (_, waker) in self.stopped.drain() {
            waker.wake();
        }
        shared.closed.notify_waiters();
    }

    fn close(&mut self, error_code: VarInt, reason: Bytes, shared: &Shared) {
        self.inner.close(Instant::now(), error_code, reason);
        self.terminate(ConnectionError::LocallyClosed, shared);
        self.wake();
    }

    /// Close for a reason other than the application's explicit request
    pub(crate) fn implicit_close(&mut self, shared: &Shared) {
        self.close(0u32.into(), Bytes::new(), shared);
    }

    pub(crate) fn check_0rtt(&self) -> Result<(), ()> {
        // Err only for a client whose 0-RTT data was rejected after the handshake finished
        if self.inner.is_handshaking()
            || self.inner.accepted_0rtt()
            || self.inner.side().is_server()
        {
            Ok(())
        } else {
            Err(())
        }
    }
}

impl Drop for State {
    fn drop(&mut self) {
        if !self.inner.is_drained() {
            // Ensure the endpoint can tidy up
            let _ = self.endpoint_events.send((
                self.handle,
                EndpointEvent::Proto(proto::EndpointEvent::drained()),
            ));
        }
    }
}

impl fmt::Debug for State {
    fn fmt(&self, f: &mut
            fmt::Formatter) -> fmt::Result {
        f.debug_struct("State").field("inner", &self.inner).finish()
    }
}

/// Errors that can arise when sending a datagram
#[derive(Debug, Error, Clone, Eq, PartialEq)]
pub enum SendDatagramError {
    /// The peer does not support receiving datagram frames
    #[error("datagrams not supported by peer")]
    UnsupportedByPeer,
    /// Datagram support is disabled locally
    #[error("datagram support disabled")]
    Disabled,
    /// The datagram is larger than the connection can currently accommodate
    ///
    /// Indicates that the path MTU minus overhead or the limit advertised by the peer has been
    /// exceeded.
    #[error("datagram too large")]
    TooLarge,
    /// The connection was lost
    #[error("connection lost")]
    ConnectionLost(#[from] ConnectionError),
}

/// The maximum amount of datagrams which will be produced in a single `drive_transmit` call
///
/// This limits the amount of CPU resources consumed by datagram generation,
/// and allows other tasks (like receiving ACKs) to run in between.
const MAX_TRANSMIT_DATAGRAMS: usize = 20;

/// Error indicating that a stream has already been finished or reset
#[derive(Debug, Error, Clone, PartialEq, Eq)]
#[error("unknown stream")]
pub struct UnknownStream {
    _private: (),
}

impl From<proto::UnknownStream> for UnknownStream {
    fn from(_: proto::UnknownStream) -> Self {
        Self { _private: () }
    }
}
quinn-0.10.2/src/endpoint.rs000064400000000000000000000641551046102023000140260ustar 00000000000000use std::{
    collections::VecDeque,
    future::Future,
    io,
    io::IoSliceMut,
    mem::MaybeUninit,
    net::{SocketAddr, SocketAddrV6},
    pin::Pin,
    str,
    sync::{Arc, Mutex},
    task::{Context, Poll, Waker},
    time::Instant,
};

use crate::runtime::{default_runtime, AsyncUdpSocket, Runtime};
use bytes::{Bytes, BytesMut};
use pin_project_lite::pin_project;
use proto::{
    self as proto, ClientConfig, ConnectError, ConnectionHandle, DatagramEvent, ServerConfig,
};
use rustc_hash::FxHashMap;
use tokio::sync::{futures::Notified, mpsc, Notify};
use udp::{RecvMeta, UdpState, BATCH_SIZE};

use crate::{
    connection::Connecting, work_limiter::WorkLimiter, ConnectionEvent, EndpointConfig,
    EndpointEvent, VarInt, IO_LOOP_BOUND, RECV_TIME_BOUND, SEND_TIME_BOUND,
};

/// A QUIC endpoint.
///
/// An endpoint corresponds to a single UDP socket, may host many connections, and may act as both
/// client and server for different connections.
///
/// May be cloned to obtain another handle to the same endpoint.
#[derive(Debug, Clone)]
pub struct Endpoint {
    pub(crate) inner: EndpointRef,
    pub(crate) default_client_config: Option<ClientConfig>,
    runtime: Arc<dyn Runtime>,
}

impl Endpoint {
    /// Helper to construct an endpoint for use with outgoing connections only
    ///
    /// Note that `addr` is the *local* address to bind to, which should usually be a wildcard
    /// address like `0.0.0.0:0` or `[::]:0`, which allow communication with any reachable IPv4 or
    /// IPv6 address respectively from an OS-assigned port.
    ///
    /// Platform defaults for dual-stack sockets vary. For example, any socket bound to a wildcard
    /// IPv6 address on Windows will not by default be able to communicate with IPv4
    /// addresses. Portable applications should bind an address that matches the family they wish to
    /// communicate within.
    #[cfg(feature = "ring")]
    pub fn client(addr: SocketAddr) -> io::Result<Self> {
        let socket = std::net::UdpSocket::bind(addr)?;
        let runtime = default_runtime()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "no async runtime found"))?;
        // No server config: incoming connection attempts will be rejected
        Self::new_with_runtime(
            EndpointConfig::default(),
            None,
            runtime.wrap_udp_socket(socket)?,
            runtime,
        )
    }

    /// Helper to construct an endpoint for use with both incoming and outgoing connections
    ///
    /// Platform defaults for dual-stack sockets vary. For example, any socket bound to a wildcard
    /// IPv6 address on Windows will not by default be able to communicate with IPv4
    /// addresses. Portable applications should bind an address that matches the family they wish to
    /// communicate within.
    #[cfg(feature = "ring")]
    pub fn server(config: ServerConfig, addr: SocketAddr) -> io::Result<Self> {
        let socket = std::net::UdpSocket::bind(addr)?;
        let runtime = default_runtime()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "no async runtime found"))?;
        Self::new_with_runtime(
            EndpointConfig::default(),
            Some(config),
            runtime.wrap_udp_socket(socket)?,
            runtime,
        )
    }

    /// Construct an endpoint with arbitrary configuration and socket
    pub fn new(
        config: EndpointConfig,
        server_config: Option<ServerConfig>,
        socket: std::net::UdpSocket,
        runtime: Arc<dyn Runtime>,
    ) -> io::Result<Self> {
        let socket = runtime.wrap_udp_socket(socket)?;
        Self::new_with_runtime(config, server_config, socket, runtime)
    }

    /// Construct an endpoint with arbitrary configuration and pre-constructed abstract socket
    ///
    /// Useful when `socket` has additional state (e.g. sidechannels) attached for which shared
    /// ownership is needed.
    pub fn new_with_abstract_socket(
        config: EndpointConfig,
        server_config: Option<ServerConfig>,
        socket: impl AsyncUdpSocket,
        runtime: Arc<dyn Runtime>,
    ) -> io::Result<Self> {
        Self::new_with_runtime(config, server_config, Box::new(socket), runtime)
    }

    /// Common constructor: builds the shared endpoint state and spawns the I/O driver task
    fn new_with_runtime(
        config: EndpointConfig,
        server_config: Option<ServerConfig>,
        socket: Box<dyn AsyncUdpSocket>,
        runtime: Arc<dyn Runtime>,
    ) -> io::Result<Self> {
        let addr = socket.local_addr()?;
        // Path MTU discovery is only enabled when the socket sets the DF bit
        let allow_mtud = !socket.may_fragment();
        let rc = EndpointRef::new(
            socket,
            proto::Endpoint::new(Arc::new(config), server_config.map(Arc::new), allow_mtud),
            addr.is_ipv6(),
            runtime.clone(),
        );
        let driver = EndpointDriver(rc.clone());
        runtime.spawn(Box::pin(async {
            if let Err(e) = driver.await {
                tracing::error!("I/O error: {}", e);
            }
        }));
        Ok(Self {
            inner: rc,
            default_client_config: None,
            runtime,
        })
    }

    /// Get the next incoming connection attempt from a client
    ///
    /// Yields [`Connecting`] futures that must be `await`ed to obtain the final `Connection`, or
    /// `None` if the endpoint is [`close`](Self::close)d.
    pub fn accept(&self) -> Accept<'_> {
        Accept {
            endpoint: self,
            notify: self.inner.shared.incoming.notified(),
        }
    }

    /// Set the client configuration used by `connect`
    pub fn set_default_client_config(&mut self, config: ClientConfig) {
        self.default_client_config = Some(config);
    }

    /// Connect to a remote endpoint
    ///
    /// `server_name` must be covered by the certificate presented by the server. This prevents a
    /// connection from being intercepted by an attacker with a valid certificate for some other
    /// server.
    ///
    /// May fail immediately due to configuration errors, or in the future if the connection could
    /// not be established.
    pub fn connect(&self, addr: SocketAddr, server_name: &str) -> Result<Connecting, ConnectError> {
        let config = match &self.default_client_config {
            Some(config) => config.clone(),
            None => return Err(ConnectError::NoDefaultClientConfig),
        };

        self.connect_with(config, addr, server_name)
    }

    /// Connect to a remote endpoint using a custom configuration.
    ///
    /// See [`connect()`] for details.
    ///
    /// [`connect()`]: Endpoint::connect
    pub fn connect_with(
        &self,
        config: ClientConfig,
        addr: SocketAddr,
        server_name: &str,
    ) -> Result<Connecting, ConnectError> {
        let mut endpoint = self.inner.state.lock().unwrap();
        if endpoint.driver_lost {
            return Err(ConnectError::EndpointStopping);
        }
        if addr.is_ipv6() && !endpoint.ipv6 {
            return Err(ConnectError::InvalidRemoteAddress(addr));
        }
        // On a dual-stack (IPv6) socket, IPv4 destinations must be expressed as mapped addresses
        let addr = if endpoint.ipv6 {
            SocketAddr::V6(ensure_ipv6(addr))
        } else {
            addr
        };
        let (ch, conn) = endpoint.inner.connect(config, addr, server_name)?;
        let udp_state = endpoint.udp_state.clone();
        Ok(endpoint
            .connections
            .insert(ch, conn, udp_state, self.runtime.clone()))
    }

    /// Switch to a new UDP socket
    ///
    /// Allows the endpoint's address to be updated live, affecting all active connections. Incoming
    /// connections and connections to servers unreachable from the new address will be lost.
    ///
    /// On error, the old UDP socket is retained.
    pub fn rebind(&self, socket: std::net::UdpSocket) -> io::Result<()> {
        let addr = socket.local_addr()?;
        let socket = self.runtime.wrap_udp_socket(socket)?;
        let mut inner = self.inner.state.lock().unwrap();
        inner.socket = socket;
        inner.ipv6 = addr.is_ipv6();

        // Generate some activity so peers notice the rebind
        for sender in inner.connections.senders.values() {
            // Ignoring errors from dropped connections
            let _ = sender.send(ConnectionEvent::Ping);
        }

        Ok(())
    }

    /// Replace the server configuration, affecting new incoming connections only
    ///
    /// Useful for e.g. refreshing TLS certificates without disrupting existing connections.
    pub fn set_server_config(&self, server_config: Option<ServerConfig>) {
        self.inner
            .state
            .lock()
            .unwrap()
            .inner
            .set_server_config(server_config.map(Arc::new))
    }

    /// Get the local `SocketAddr` the underlying socket is bound to
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.inner.state.lock().unwrap().socket.local_addr()
    }

    /// Reject new incoming connections without affecting existing connections
    ///
    /// Convenience short-hand for using
    /// [`set_server_config`](Self::set_server_config) to update
    /// [`concurrent_connections`](ServerConfig::concurrent_connections) to
    /// zero.
    pub fn reject_new_connections(&self) {
        self.inner
            .state
            .lock()
            .unwrap()
            .inner
            .reject_new_connections();
    }

    /// Close all of this endpoint's connections immediately and cease accepting new connections.
    ///
    /// See [`Connection::close()`] for details.
    ///
    /// [`Connection::close()`]: crate::Connection::close
    pub fn close(&self, error_code: VarInt, reason: &[u8]) {
        let reason = Bytes::copy_from_slice(reason);
        let mut endpoint = self.inner.state.lock().unwrap();
        // Recorded so connections created after this call are closed immediately on insert
        endpoint.connections.close = Some((error_code, reason.clone()));
        for sender in endpoint.connections.senders.values() {
            // Ignoring errors from dropped connections
            let _ = sender.send(ConnectionEvent::Close {
                error_code,
                reason: reason.clone(),
            });
        }
        self.inner.shared.incoming.notify_waiters();
    }

    /// Wait for all connections on the endpoint to be cleanly shut down
    ///
    /// Waiting for this condition before exiting ensures that a good-faith effort is made to notify
    /// peers of recent connection closes, whereas exiting immediately could force them to wait out
    /// the idle timeout period.
    ///
    /// Does not proactively close existing connections or cause incoming connections to be
    /// rejected. Consider calling [`close()`] if that is desired.
    ///
    /// [`close()`]: Endpoint::close
    pub async fn wait_idle(&self) {
        loop {
            {
                let endpoint = &mut *self.inner.state.lock().unwrap();
                if endpoint.connections.is_empty() {
                    break;
                }
                // Construct future while lock is held to avoid race
                self.inner.shared.idle.notified()
            }
            .await;
        }
    }
}

/// A future that drives IO on an endpoint
///
/// This task functions as the switch point between the UDP socket object and the
/// `Endpoint` responsible for routing datagrams to their owning `Connection`.
/// In order to do so, it also facilitates the exchange of different types of events
/// flowing between the `Endpoint` and the tasks managing `Connection`s. As such,
/// running this task is necessary to keep the endpoint's connections running.
///
/// `EndpointDriver` futures terminate when all clones of the `Endpoint` have been dropped, or when
/// an I/O error occurs.
#[must_use = "endpoint drivers must be spawned for I/O to occur"]
#[derive(Debug)]
pub(crate) struct EndpointDriver(pub(crate) EndpointRef);

impl Future for EndpointDriver {
    type Output = Result<(), io::Error>;

    #[allow(unused_mut)] // MSRV
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        let mut endpoint = self.0.state.lock().unwrap();
        if endpoint.driver.is_none() {
            endpoint.driver = Some(cx.waker().clone());
        }

        let now = Instant::now();
        let mut keep_going = false;
        keep_going |= endpoint.drive_recv(cx, now)?;
        keep_going |= endpoint.handle_events(cx, &self.0.shared);
        keep_going |= endpoint.drive_send(cx)?;

        if !endpoint.incoming.is_empty() {
            self.0.shared.incoming.notify_waiters();
        }

        // Terminate once no user handles remain and all connections have drained
        if endpoint.ref_count == 0 && endpoint.connections.is_empty() {
            Poll::Ready(Ok(()))
        } else {
            drop(endpoint);
            // If there is more work to do schedule the endpoint task again.
            // `wake_by_ref()` is called outside the lock to minimize
            // lock contention on a multithreaded runtime.
            if keep_going {
                cx.waker().wake_by_ref();
            }
            Poll::Pending
        }
    }
}

impl Drop for EndpointDriver {
    fn drop(&mut self) {
        let mut endpoint = self.0.state.lock().unwrap();
        endpoint.driver_lost = true;
        self.0.shared.incoming.notify_waiters();
        // Drop all outgoing channels, signaling the termination of the endpoint to the associated
        // connections.
        endpoint.connections.senders.clear();
    }
}

#[derive(Debug)]
pub(crate) struct EndpointInner {
    pub(crate) state: Mutex<State>,
    pub(crate) shared: Shared,
}

#[derive(Debug)]
pub(crate) struct State {
    socket: Box<dyn AsyncUdpSocket>,
    udp_state: Arc<UdpState>,
    inner: proto::Endpoint,
    outgoing: VecDeque<udp::Transmit>,
    incoming: VecDeque<Connecting>,
    /// Waker for the endpoint driver task
    driver: Option<Waker>,
    ipv6: bool,
    connections: ConnectionSet,
    events: mpsc::UnboundedReceiver<(ConnectionHandle, EndpointEvent)>,
    /// Number of live handles that can be used to initiate or handle I/O; excludes the driver
    ref_count: usize,
    driver_lost: bool,
    recv_limiter: WorkLimiter,
    recv_buf: Box<[u8]>,
    send_limiter: WorkLimiter,
    runtime: Arc<dyn Runtime>,
    /// The packet contents length in the outgoing queue.
    outgoing_queue_contents_len: usize,
}

#[derive(Debug)]
pub(crate) struct Shared {
    incoming: Notify,
    idle: Notify,
}

impl State {
    /// Receive and dispatch inbound datagrams; returns Ok(true) if the work limit
    /// was hit and the driver should be re-polled promptly.
    fn drive_recv<'a>(&'a mut self, cx: &mut Context, now: Instant) -> Result<bool, io::Error> {
        self.recv_limiter.start_cycle();
        let mut metas = [RecvMeta::default(); BATCH_SIZE];
        let mut iovs = MaybeUninit::<[IoSliceMut<'a>; BATCH_SIZE]>::uninit();
        // Carve the receive buffer into one IoSliceMut per batch slot
        self.recv_buf
            .chunks_mut(self.recv_buf.len() / BATCH_SIZE)
            .enumerate()
            .for_each(|(i, buf)| unsafe {
                iovs.as_mut_ptr()
                    .cast::<IoSliceMut>()
                    .add(i)
                    .write(IoSliceMut::<'a>::new(buf));
            });
        let mut iovs = unsafe { iovs.assume_init() };
        loop {
            match self.socket.poll_recv(cx, &mut iovs, &mut metas) {
                Poll::Ready(Ok(msgs)) => {
                    self.recv_limiter.record_work(msgs);
                    for (meta, buf) in metas.iter().zip(iovs.iter()).take(msgs) {
                        let mut data: BytesMut = buf[0..meta.len].into();
                        // A GRO-coalesced receive may contain several datagrams of `stride` bytes
                        while !data.is_empty() {
                            let buf = data.split_to(meta.stride.min(data.len()));
                            match self.inner.handle(
                                now,
                                meta.addr,
                                meta.dst_ip,
                                meta.ecn.map(proto_ecn),
                                buf,
                            ) {
                                Some((handle, DatagramEvent::NewConnection(conn))) => {
                                    let conn = self.connections.insert(
                                        handle,
                                        conn,
                                        self.udp_state.clone(),
                                        self.runtime.clone(),
                                    );
                                    self.incoming.push_back(conn);
                                }
                                Some((handle, DatagramEvent::ConnectionEvent(event))) => {
                                    // Ignoring errors from dropped connections that haven't yet been cleaned up
                                    let _
                                        = self
                                        .connections
                                        .senders
                                        .get_mut(&handle)
                                        .unwrap()
                                        .send(ConnectionEvent::Proto(event));
                                }
                                None => {}
                            }
                        }
                    }
                }
                Poll::Pending => {
                    break;
                }
                // Ignore ECONNRESET as it's undefined in QUIC and may be injected by an
                // attacker
                Poll::Ready(Err(ref e)) if e.kind() == io::ErrorKind::ConnectionReset => {
                    continue;
                }
                Poll::Ready(Err(e)) => {
                    return Err(e);
                }
            }
            if !self.recv_limiter.allow_work() {
                self.recv_limiter.finish_cycle();
                return Ok(true);
            }
        }
        self.recv_limiter.finish_cycle();
        Ok(false)
    }

    /// Flush queued transmits to the socket; returns Ok(true) if the work limit
    /// was hit and the driver should be re-polled promptly.
    fn drive_send(&mut self, cx: &mut Context) -> Result<bool, io::Error> {
        self.send_limiter.start_cycle();

        let result = loop {
            // Top up the outgoing queue before each send attempt
            while self.outgoing.len() < BATCH_SIZE {
                match self.inner.poll_transmit() {
                    Some(t) => self.queue_transmit(t),
                    None => break,
                }
            }

            if self.outgoing.is_empty() {
                break Ok(false);
            }

            if !self.send_limiter.allow_work() {
                break Ok(true);
            }

            match self
                .socket
                .poll_send(&self.udp_state, cx, self.outgoing.as_slices().0)
            {
                Poll::Ready(Ok(n)) => {
                    let contents_len: usize =
                        self.outgoing.drain(..n).map(|t| t.contents.len()).sum();
                    self.decrement_outgoing_contents_len(contents_len);
                    // We count transmits instead of `poll_send` calls since the cost
                    // of a `sendmmsg` still linearily increases with number of packets.
                    self.send_limiter.record_work(n);
                }
                Poll::Pending => {
                    break Ok(false);
                }
                Poll::Ready(Err(e)) => {
                    break Err(e);
                }
            }
        };

        self.send_limiter.finish_cycle();
        result
    }

    /// Process events arriving from connection tasks; returns true if the
    /// per-poll event budget (`IO_LOOP_BOUND`) was exhausted.
    fn handle_events(&mut self, cx: &mut Context, shared: &Shared) -> bool {
        use EndpointEvent::*;

        for _ in 0..IO_LOOP_BOUND {
            match self.events.poll_recv(cx) {
                Poll::Ready(Some((ch, event))) => match event {
                    Proto(e) => {
                        if e.is_drained() {
                            self.connections.senders.remove(&ch);
                            if self.connections.is_empty() {
                                shared.idle.notify_waiters();
                            }
                        }
                        if let Some(event) = self.inner.handle_event(ch, e) {
                            // Ignoring errors from dropped connections that haven't yet been cleaned up
                            let _ = self
                                .connections
                                .senders
                                .get_mut(&ch)
                                .unwrap()
                                .send(ConnectionEvent::Proto(event));
                        }
                    }
                    Transmit(t) => self.queue_transmit(t),
                },
                Poll::Ready(None) => unreachable!("EndpointInner owns one sender"),
                Poll::Pending => {
                    return false;
                }
            }
        }

        true
    }

    fn queue_transmit(&mut self, t: proto::Transmit) {
        let contents_len = t.contents.len();
        self.increment_outgoing_queue_contents_len(contents_len);
        self.outgoing.push_back(udp::Transmit {
            destination: t.destination,
            ecn: t.ecn.map(udp_ecn),
            contents: t.contents,
            segment_size: t.segment_size,
            src_ip: t.src_ip,
        });
    }

    // Saturating arithmetic: the queue length feeds congestion feedback and must never wrap
    fn increment_outgoing_queue_contents_len(&mut self, contents_len: usize) {
        self.outgoing_queue_contents_len = self
            .outgoing_queue_contents_len
            .saturating_add(contents_len);
        self.inner
            .set_socket_buffer_fill(self.outgoing_queue_contents_len);
    }

    fn decrement_outgoing_contents_len(&mut self, contents_len: usize) {
        self.outgoing_queue_contents_len = self
            .outgoing_queue_contents_len
            .saturating_sub(contents_len);
        self.inner
            .set_socket_buffer_fill(self.outgoing_queue_contents_len);
    }
}

/// Convert a proto-level ECN codepoint to the udp crate's representation
#[inline]
fn udp_ecn(ecn: proto::EcnCodepoint) -> udp::EcnCodepoint {
    match ecn {
        proto::EcnCodepoint::Ect0 => udp::EcnCodepoint::Ect0,
        proto::EcnCodepoint::Ect1 => udp::EcnCodepoint::Ect1,
        proto::EcnCodepoint::Ce => udp::EcnCodepoint::Ce,
    }
}

/// Convert a udp-level ECN codepoint to the proto crate's representation
#[inline]
fn proto_ecn(ecn: udp::EcnCodepoint) ->
        proto::EcnCodepoint {
    match ecn {
        udp::EcnCodepoint::Ect0 => proto::EcnCodepoint::Ect0,
        udp::EcnCodepoint::Ect1 => proto::EcnCodepoint::Ect1,
        udp::EcnCodepoint::Ce => proto::EcnCodepoint::Ce,
    }
}

#[derive(Debug)]
struct ConnectionSet {
    /// Senders for communicating with the endpoint's connections
    senders: FxHashMap<ConnectionHandle, mpsc::UnboundedSender<ConnectionEvent>>,
    /// Stored to give out clones to new ConnectionInners
    sender: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>,
    /// Set if the endpoint has been manually closed
    close: Option<(VarInt, Bytes)>,
}

impl ConnectionSet {
    fn insert(
        &mut self,
        handle: ConnectionHandle,
        conn: proto::Connection,
        udp_state: Arc<UdpState>,
        runtime: Arc<dyn Runtime>,
    ) -> Connecting {
        let (send, recv) = mpsc::unbounded_channel();
        // If the endpoint was already closed, tell the new connection to close immediately
        if let Some((error_code, ref reason)) = self.close {
            send.send(ConnectionEvent::Close {
                error_code,
                reason: reason.clone(),
            })
            .unwrap();
        }
        self.senders.insert(handle, send);
        Connecting::new(handle, conn, self.sender.clone(), recv, udp_state, runtime)
    }

    fn is_empty(&self) -> bool {
        self.senders.is_empty()
    }
}

/// Express an IPv4 address as an IPv4-mapped IPv6 address for dual-stack sockets
fn ensure_ipv6(x: SocketAddr) -> SocketAddrV6 {
    match x {
        SocketAddr::V6(x) => x,
        SocketAddr::V4(x) => SocketAddrV6::new(x.ip().to_ipv6_mapped(), x.port(), 0, 0),
    }
}

pin_project!
{ /// Future produced by [`Endpoint::accept`] pub struct Accept<'a> { endpoint: &'a Endpoint, #[pin] notify: Notified<'a>, } } impl<'a> Future for Accept<'a> { type Output = Option; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let mut this = self.project(); let endpoint = &mut *this.endpoint.inner.state.lock().unwrap(); if endpoint.driver_lost { return Poll::Ready(None); } if let Some(conn) = endpoint.incoming.pop_front() { return Poll::Ready(Some(conn)); } if endpoint.connections.close.is_some() { return Poll::Ready(None); } loop { match this.notify.as_mut().poll(ctx) { // `state` lock ensures we didn't race with readiness Poll::Pending => return Poll::Pending, // Spurious wakeup, get a new future Poll::Ready(()) => this .notify .set(this.endpoint.inner.shared.incoming.notified()), } } } } #[derive(Debug)] pub(crate) struct EndpointRef(Arc); impl EndpointRef { pub(crate) fn new( socket: Box, inner: proto::Endpoint, ipv6: bool, runtime: Arc, ) -> Self { let udp_state = Arc::new(UdpState::new()); let recv_buf = vec![ 0; inner.config().get_max_udp_payload_size().min(64 * 1024) as usize * udp_state.gro_segments() * BATCH_SIZE ]; let (sender, events) = mpsc::unbounded_channel(); Self(Arc::new(EndpointInner { shared: Shared { incoming: Notify::new(), idle: Notify::new(), }, state: Mutex::new(State { socket, udp_state, inner, ipv6, events, outgoing: VecDeque::new(), incoming: VecDeque::new(), driver: None, connections: ConnectionSet { senders: FxHashMap::default(), sender, close: None, }, ref_count: 0, driver_lost: false, recv_buf: recv_buf.into(), recv_limiter: WorkLimiter::new(RECV_TIME_BOUND), send_limiter: WorkLimiter::new(SEND_TIME_BOUND), runtime, outgoing_queue_contents_len: 0, }), })) } } impl Clone for EndpointRef { fn clone(&self) -> Self { self.0.state.lock().unwrap().ref_count += 1; Self(self.0.clone()) } } impl Drop for EndpointRef { fn drop(&mut self) { let endpoint = &mut *self.0.state.lock().unwrap(); if let Some(x) = 
endpoint.ref_count.checked_sub(1) { endpoint.ref_count = x; if x == 0 { // If the driver is about to be on its own, ensure it can shut down if the last // connection is gone. if let Some(task) = endpoint.driver.take() { task.wake(); } } } } } impl std::ops::Deref for EndpointRef { type Target = EndpointInner; fn deref(&self) -> &Self::Target { &self.0 } } quinn-0.10.2/src/lib.rs000064400000000000000000000115271046102023000127470ustar 00000000000000//! QUIC transport protocol implementation //! //! [QUIC](https://en.wikipedia.org/wiki/QUIC) is a modern transport protocol addressing //! shortcomings of TCP, such as head-of-line blocking, poor security, slow handshakes, and //! inefficient congestion control. This crate provides a portable userspace implementation. It //! builds on top of quinn-proto, which implements protocol logic independent of any particular //! runtime. //! //! The entry point of this crate is the [`Endpoint`]. //! //! # About QUIC //! //! A QUIC connection is an association between two endpoints. The endpoint which initiates the //! connection is termed the client, and the endpoint which accepts it is termed the server. A //! single endpoint may function as both client and server for different connections, for example //! in a peer-to-peer application. To communicate application data, each endpoint may open streams //! up to a limit dictated by its peer. Typically, that limit is increased as old streams are //! finished. //! //! Streams may be unidirectional or bidirectional, and are cheap to create and disposable. For //! example, a traditionally datagram-oriented application could use a new stream for every //! message it wants to send, no longer needing to worry about MTUs. Bidirectional streams behave //! much like a traditional TCP connection, and are useful for sending messages that have an //! immediate response, such as an HTTP request. Stream data is delivered reliably, and there is no //! 
ordering enforced between data on different streams. //! //! By avoiding head-of-line blocking and providing unified congestion control across all streams //! of a connection, QUIC is able to provide higher throughput and lower latency than one or //! multiple TCP connections between the same two hosts, while providing more useful behavior than //! raw UDP sockets. //! //! Quinn also exposes unreliable datagrams, which are a low-level primitive preferred when //! automatic fragmentation and retransmission of certain data is not desired. //! //! QUIC uses encryption and identity verification built directly on TLS 1.3. Just as with a TLS //! server, it is useful for a QUIC server to be identified by a certificate signed by a trusted //! authority. If this is infeasible--for example, if servers are short-lived or not associated //! with a domain name--then as with TLS, self-signed certificates can be used to provide //! encryption alone. #![warn(missing_docs)] #![warn(unreachable_pub)] #![warn(clippy::use_self)] use std::time::Duration; macro_rules! ready { ($e:expr $(,)?) 
=> { match $e { std::task::Poll::Ready(t) => t, std::task::Poll::Pending => return std::task::Poll::Pending, } }; } mod connection; mod endpoint; mod mutex; mod recv_stream; mod runtime; mod send_stream; mod work_limiter; pub use proto::{ congestion, crypto, ApplicationClose, Chunk, ClientConfig, ConfigError, ConnectError, ConnectionClose, ConnectionError, EndpointConfig, IdleTimeout, MtuDiscoveryConfig, ServerConfig, StreamId, Transmit, TransportConfig, VarInt, }; pub use udp; pub use crate::connection::{ AcceptBi, AcceptUni, Connecting, Connection, OpenBi, OpenUni, ReadDatagram, SendDatagramError, UnknownStream, ZeroRttAccepted, }; pub use crate::endpoint::{Accept, Endpoint}; pub use crate::recv_stream::{ReadError, ReadExactError, ReadToEndError, RecvStream}; #[cfg(feature = "runtime-async-std")] pub use crate::runtime::AsyncStdRuntime; #[cfg(feature = "runtime-tokio")] pub use crate::runtime::TokioRuntime; pub use crate::runtime::{default_runtime, AsyncTimer, AsyncUdpSocket, Runtime}; pub use crate::send_stream::{SendStream, StoppedError, WriteError}; #[cfg(test)] mod tests; #[derive(Debug)] enum ConnectionEvent { Close { error_code: VarInt, reason: bytes::Bytes, }, Proto(proto::ConnectionEvent), Ping, } #[derive(Debug)] enum EndpointEvent { Proto(proto::EndpointEvent), Transmit(proto::Transmit), } /// Maximum number of datagrams processed in send/recv calls to make before moving on to other processing /// /// This helps ensure we don't starve anything when the CPU is slower than the link. /// Value is selected by picking a low number which didn't degrade throughput in benchmarks. const IO_LOOP_BOUND: usize = 160; /// The maximum amount of time that should be spent in `recvmsg()` calls per endpoint iteration /// /// 50us are chosen so that an endpoint iteration with a 50us sendmsg limit blocks /// the runtime for a maximum of about 100us. 
/// Going much lower does not yield any noticeable difference, since a single `recvmmsg` /// batch of size 32 was observed to take 30us on some systems. const RECV_TIME_BOUND: Duration = Duration::from_micros(50); /// The maximum amount of time that should be spent in `sendmsg()` calls per endpoint iteration const SEND_TIME_BOUND: Duration = Duration::from_micros(50); quinn-0.10.2/src/mutex.rs000064400000000000000000000103461046102023000133410ustar 00000000000000use std::{ fmt::Debug, ops::{Deref, DerefMut}, }; #[cfg(feature = "lock_tracking")] mod tracking { use super::*; use std::{ collections::VecDeque, time::{Duration, Instant}, }; use tracing::warn; #[derive(Debug)] struct Inner { last_lock_owner: VecDeque<(&'static str, Duration)>, value: T, } /// A Mutex which optionally allows to track the time a lock was held and /// emit warnings in case of excessive lock times pub(crate) struct Mutex { inner: std::sync::Mutex>, } impl std::fmt::Debug for Mutex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } } impl Mutex { pub(crate) fn new(value: T) -> Self { Self { inner: std::sync::Mutex::new(Inner { last_lock_owner: VecDeque::new(), value, }), } } /// Acquires the lock for a certain purpose /// /// The purpose will be recorded in the list of last lock owners pub(crate) fn lock(&self, purpose: &'static str) -> MutexGuard { let now = Instant::now(); let guard = self.inner.lock().unwrap(); let lock_time = Instant::now(); let elapsed = lock_time.duration_since(now); if elapsed > Duration::from_millis(1) { warn!( "Locking the connection for {} took {:?}. 
Last owners: {:?}", purpose, elapsed, guard.last_lock_owner ); } MutexGuard { guard, start_time: lock_time, purpose, } } } pub(crate) struct MutexGuard<'a, T> { guard: std::sync::MutexGuard<'a, Inner>, start_time: Instant, purpose: &'static str, } impl<'a, T> Drop for MutexGuard<'a, T> { fn drop(&mut self) { if self.guard.last_lock_owner.len() == MAX_LOCK_OWNERS { self.guard.last_lock_owner.pop_back(); } let duration = self.start_time.elapsed(); if duration > Duration::from_millis(1) { warn!( "Utilizing the connection for {} took {:?}", self.purpose, duration ); } self.guard .last_lock_owner .push_front((self.purpose, duration)); } } impl<'a, T> Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { &self.guard.value } } impl<'a, T> DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.guard.value } } const MAX_LOCK_OWNERS: usize = 20; } #[cfg(feature = "lock_tracking")] pub(crate) use tracking::Mutex; #[cfg(not(feature = "lock_tracking"))] mod non_tracking { use super::*; /// A Mutex which optionally allows to track the time a lock was held and /// emit warnings in case of excessive lock times #[derive(Debug)] pub(crate) struct Mutex { inner: std::sync::Mutex, } impl Mutex { pub(crate) fn new(value: T) -> Self { Self { inner: std::sync::Mutex::new(value), } } /// Acquires the lock for a certain purpose /// /// The purpose will be recorded in the list of last lock owners pub(crate) fn lock(&self, _purpose: &'static str) -> MutexGuard { MutexGuard { guard: self.inner.lock().unwrap(), } } } pub(crate) struct MutexGuard<'a, T> { guard: std::sync::MutexGuard<'a, T>, } impl<'a, T> Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { self.guard.deref() } } impl<'a, T> DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.guard.deref_mut() } } } #[cfg(not(feature = "lock_tracking"))] pub(crate) use non_tracking::Mutex; 
quinn-0.10.2/src/recv_stream.rs000064400000000000000000000500401046102023000145040ustar 00000000000000use std::{ future::Future, io, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; use proto::{Chunk, Chunks, ConnectionError, ReadableError, StreamId}; use thiserror::Error; use tokio::io::ReadBuf; use crate::{ connection::{ConnectionRef, UnknownStream}, VarInt, }; /// A stream that can only be used to receive data /// /// `stop(0)` is implicitly called on drop unless: /// - A variant of [`ReadError`] has been yielded by a read call /// - [`stop()`] was called explicitly /// /// # Closing a stream /// /// When a stream is expected to be closed gracefully the sender should call /// [`SendStream::finish`]. However there is no guarantee the connected [`RecvStream`] will /// receive the "finished" notification in the same QUIC frame as the last frame which /// carried data. /// /// Even if the application layer logic already knows it read all the data because it does /// its own framing, it should still read until it reaches the end of the [`RecvStream`]. /// Otherwise it risks inadvertently calling [`RecvStream::stop`] if it drops the stream. /// And calling [`RecvStream::stop`] could result in the connected [`SendStream::finish`] /// call failing with a [`WriteError::Stopped`] error. 
/// /// For example if exactly 10 bytes are to be read, you still need to explicitly read the /// end of the stream: /// /// ```no_run /// # use quinn::{SendStream, RecvStream}; /// # async fn func( /// # mut send_stream: SendStream, /// # mut recv_stream: RecvStream, /// # ) -> anyhow::Result<()> /// # { /// // In the sending task /// send_stream.write(&b"0123456789"[..]).await?; /// send_stream.finish().await?; /// /// // In the receiving task /// let mut buf = [0u8; 10]; /// let data = recv_stream.read_exact(&mut buf).await?; /// if recv_stream.read_to_end(0).await.is_err() { /// // Discard unexpected data and notify the peer to stop sending it /// let _ = recv_stream.stop(0u8.into()); /// } /// # Ok(()) /// # } /// ``` /// /// An alternative approach, used in HTTP/3, is to specify a particular error code used with `stop` /// that indicates graceful receiver-initiated stream shutdown, rather than a true error condition. /// /// [`RecvStream::read_chunk`] could be used instead which does not take ownership and /// allows using an explit call to [`RecvStream::stop`] with a custom error code. /// /// [`ReadError`]: crate::ReadError /// [`stop()`]: RecvStream::stop /// [`SendStream::finish`]: crate::SendStream::finish /// [`WriteError::Stopped`]: crate::WriteError::Stopped #[derive(Debug)] pub struct RecvStream { conn: ConnectionRef, stream: StreamId, is_0rtt: bool, all_data_read: bool, reset: Option, } impl RecvStream { pub(crate) fn new(conn: ConnectionRef, stream: StreamId, is_0rtt: bool) -> Self { Self { conn, stream, is_0rtt, all_data_read: false, reset: None, } } /// Read data contiguously from the stream. /// /// Yields the number of bytes read into `buf` on success, or `None` if the stream was finished. pub async fn read(&mut self, buf: &mut [u8]) -> Result, ReadError> { Read { stream: self, buf: ReadBuf::new(buf), } .await } /// Read an exact number of bytes contiguously from the stream. /// /// See [`read()`] for details. 
/// /// [`read()`]: RecvStream::read pub async fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), ReadExactError> { ReadExact { stream: self, buf: ReadBuf::new(buf), } .await } fn poll_read( &mut self, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } self.poll_read_generic(cx, true, |chunks| { let mut read = false; loop { if buf.remaining() == 0 { // We know `read` is `true` because `buf.remaining()` was not 0 before return ReadStatus::Readable(()); } match chunks.next(buf.remaining()) { Ok(Some(chunk)) => { buf.put_slice(&chunk.bytes); read = true; } res => return (if read { Some(()) } else { None }, res.err()).into(), } } }) .map(|res| res.map(|_| ())) } /// Read the next segment of data /// /// Yields `None` if the stream was finished. Otherwise, yields a segment of data and its /// offset in the stream. If `ordered` is `true`, the chunk's offset will be immediately after /// the last data yielded by `read()` or `read_chunk()`. If `ordered` is `false`, segments may /// be received in any order, and the `Chunk`'s `offset` field can be used to determine /// ordering in the caller. Unordered reads are less prone to head-of-line blocking within a /// stream, but require the application to manage reassembling the original data. /// /// Slightly more efficient than `read` due to not copying. Chunk boundaries do not correspond /// to peer writes, and hence cannot be used as framing. 
pub async fn read_chunk( &mut self, max_length: usize, ordered: bool, ) -> Result, ReadError> { ReadChunk { stream: self, max_length, ordered, } .await } /// Foundation of [`Self::read_chunk`] fn poll_read_chunk( &mut self, cx: &mut Context, max_length: usize, ordered: bool, ) -> Poll, ReadError>> { self.poll_read_generic(cx, ordered, |chunks| match chunks.next(max_length) { Ok(Some(chunk)) => ReadStatus::Readable(chunk), res => (None, res.err()).into(), }) } /// Read the next segments of data /// /// Fills `bufs` with the segments of data beginning immediately after the /// last data yielded by `read` or `read_chunk`, or `None` if the stream was /// finished. /// /// Slightly more efficient than `read` due to not copying. Chunk boundaries /// do not correspond to peer writes, and hence cannot be used as framing. pub async fn read_chunks(&mut self, bufs: &mut [Bytes]) -> Result, ReadError> { ReadChunks { stream: self, bufs }.await } /// Foundation of [`Self::read_chunks`] fn poll_read_chunks( &mut self, cx: &mut Context, bufs: &mut [Bytes], ) -> Poll, ReadError>> { if bufs.is_empty() { return Poll::Ready(Ok(Some(0))); } self.poll_read_generic(cx, true, |chunks| { let mut read = 0; loop { if read >= bufs.len() { // We know `read > 0` because `bufs` cannot be empty here return ReadStatus::Readable(read); } match chunks.next(usize::MAX) { Ok(Some(chunk)) => { bufs[read] = chunk.bytes; read += 1; } res => return (if read == 0 { None } else { Some(read) }, res.err()).into(), } } }) } /// Convenience method to read all remaining data into a buffer /// /// Fails with [`ReadToEndError::TooLong`] on reading more than `size_limit` bytes, discarding /// all data read. Uses unordered reads to be more efficient than using `AsyncRead` would /// allow. `size_limit` should be set to limit worst-case memory use. /// /// If unordered reads have already been made, the resulting buffer may have gaps containing /// arbitrary data. 
/// /// [`ReadToEndError::TooLong`]: crate::ReadToEndError::TooLong pub async fn read_to_end(&mut self, size_limit: usize) -> Result, ReadToEndError> { ReadToEnd { stream: self, size_limit, read: Vec::new(), start: u64::max_value(), end: 0, } .await } /// Stop accepting data /// /// Discards unread data and notifies the peer to stop transmitting. Once stopped, further /// attempts to operate on a stream will yield `UnknownStream` errors. pub fn stop(&mut self, error_code: VarInt) -> Result<(), UnknownStream> { let mut conn = self.conn.state.lock("RecvStream::stop"); if self.is_0rtt && conn.check_0rtt().is_err() { return Ok(()); } conn.inner.recv_stream(self.stream).stop(error_code)?; conn.wake(); self.all_data_read = true; Ok(()) } /// Check if this stream has been opened during 0-RTT. /// /// In which case any non-idempotent request should be considered dangerous at the application /// level. Because read data is subject to replay attacks. pub fn is_0rtt(&self) -> bool { self.is_0rtt } /// Get the identity of this stream pub fn id(&self) -> StreamId { self.stream } /// Handle common logic related to reading out of a receive stream /// /// This takes an `FnMut` closure that takes care of the actual reading process, matching /// the detailed read semantics for the calling function with a particular return type. /// The closure can read from the passed `&mut Chunks` and has to return the status after /// reading: the amount of data read, and the status after the final read call. fn poll_read_generic( &mut self, cx: &mut Context, ordered: bool, mut read_fn: T, ) -> Poll, ReadError>> where T: FnMut(&mut Chunks) -> ReadStatus, { use proto::ReadError::*; if self.all_data_read { return Poll::Ready(Ok(None)); } let mut conn = self.conn.state.lock("RecvStream::poll_read"); if self.is_0rtt { conn.check_0rtt().map_err(|()| ReadError::ZeroRttRejected)?; } // If we stored an error during a previous call, return it now. 
This can happen if a // `read_fn` both wants to return data and also returns an error in its final stream status. let status = match self.reset.take() { Some(code) => ReadStatus::Failed(None, Reset(code)), None => { let mut recv = conn.inner.recv_stream(self.stream); let mut chunks = recv.read(ordered)?; let status = read_fn(&mut chunks); if chunks.finalize().should_transmit() { conn.wake(); } status } }; match status { ReadStatus::Readable(read) => Poll::Ready(Ok(Some(read))), ReadStatus::Finished(read) => { self.all_data_read = true; Poll::Ready(Ok(read)) } ReadStatus::Failed(read, Blocked) => match read { Some(val) => Poll::Ready(Ok(Some(val))), None => { if let Some(ref x) = conn.error { return Poll::Ready(Err(ReadError::ConnectionLost(x.clone()))); } conn.blocked_readers.insert(self.stream, cx.waker().clone()); Poll::Pending } }, ReadStatus::Failed(read, Reset(error_code)) => match read { None => { self.all_data_read = true; Poll::Ready(Err(ReadError::Reset(error_code))) } done => { self.reset = Some(error_code); Poll::Ready(Ok(done)) } }, } } } enum ReadStatus { Readable(T), Finished(Option), Failed(Option, proto::ReadError), } impl From<(Option, Option)> for ReadStatus { fn from(status: (Option, Option)) -> Self { match status { (read, None) => Self::Finished(read), (read, Some(e)) => Self::Failed(read, e), } } } /// Future produced by [`RecvStream::read_to_end()`]. /// /// [`RecvStream::read_to_end()`]: crate::RecvStream::read_to_end #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadToEnd<'a> { stream: &'a mut RecvStream, read: Vec<(Bytes, u64)>, start: u64, end: u64, size_limit: usize, } impl Future for ReadToEnd<'_> { type Output = Result, ReadToEndError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { loop { match ready!(self.stream.poll_read_chunk(cx, usize::MAX, false))? 
{ Some(chunk) => { self.start = self.start.min(chunk.offset); let end = chunk.bytes.len() as u64 + chunk.offset; if (end - self.start) > self.size_limit as u64 { return Poll::Ready(Err(ReadToEndError::TooLong)); } self.end = self.end.max(end); self.read.push((chunk.bytes, chunk.offset)); } None => { if self.end == 0 { // Never received anything return Poll::Ready(Ok(Vec::new())); } let start = self.start; let mut buffer = vec![0; (self.end - start) as usize]; for (data, offset) in self.read.drain(..) { let offset = (offset - start) as usize; buffer[offset..offset + data.len()].copy_from_slice(&data); } return Poll::Ready(Ok(buffer)); } } } } } /// Errors from [`RecvStream::read_to_end`] #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadToEndError { /// An error occurred during reading #[error("read error: {0}")] Read(#[from] ReadError), /// The stream is larger than the user-supplied limit #[error("stream too long")] TooLong, } #[cfg(feature = "futures-io")] impl futures_io::AsyncRead for RecvStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8], ) -> Poll> { let mut buf = ReadBuf::new(buf); ready!(RecvStream::poll_read(self.get_mut(), cx, &mut buf))?; Poll::Ready(Ok(buf.filled().len())) } } impl tokio::io::AsyncRead for RecvStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { ready!(Self::poll_read(self.get_mut(), cx, buf))?; Poll::Ready(Ok(())) } } impl Drop for RecvStream { fn drop(&mut self) { let mut conn = self.conn.state.lock("RecvStream::drop"); // clean up any previously registered wakers conn.blocked_readers.remove(&self.stream); if conn.error.is_some() || (self.is_0rtt && conn.check_0rtt().is_err()) { return; } if !self.all_data_read { // Ignore UnknownStream errors let _ = conn.inner.recv_stream(self.stream).stop(0u32.into()); conn.wake(); } } } /// Errors that arise from reading from a stream. 
#[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadError { /// The peer abandoned transmitting data on this stream /// /// Carries an application-defined error code. #[error("stream reset by peer: error {0}")] Reset(VarInt), /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// The stream has already been stopped, finished, or reset #[error("unknown stream")] UnknownStream, /// Attempted an ordered read following an unordered read /// /// Performing an unordered read allows discontinuities to arise in the receive buffer of a /// stream which cannot be recovered, making further ordered reads impossible. #[error("ordered read after unordered read")] IllegalOrderedRead, /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. /// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for ReadError { fn from(e: ReadableError) -> Self { match e { ReadableError::UnknownStream => Self::UnknownStream, ReadableError::IllegalOrderedRead => Self::IllegalOrderedRead, } } } impl From for io::Error { fn from(x: ReadError) -> Self { use self::ReadError::*; let kind = match x { Reset { .. } | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | UnknownStream => io::ErrorKind::NotConnected, IllegalOrderedRead => io::ErrorKind::InvalidInput, }; Self::new(kind, x) } } /// Future produced by [`RecvStream::read()`]. 
/// /// [`RecvStream::read()`]: crate::RecvStream::read #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Read<'a> { stream: &'a mut RecvStream, buf: ReadBuf<'a>, } impl<'a> Future for Read<'a> { type Output = Result, ReadError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); ready!(this.stream.poll_read(cx, &mut this.buf))?; match this.buf.filled().len() { 0 if this.buf.capacity() != 0 => Poll::Ready(Ok(None)), n => Poll::Ready(Ok(Some(n))), } } } /// Future produced by [`RecvStream::read_exact()`]. /// /// [`RecvStream::read_exact()`]: crate::RecvStream::read_exact #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadExact<'a> { stream: &'a mut RecvStream, buf: ReadBuf<'a>, } impl<'a> Future for ReadExact<'a> { type Output = Result<(), ReadExactError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let mut remaining = this.buf.remaining(); while remaining > 0 { ready!(this.stream.poll_read(cx, &mut this.buf))?; let new = this.buf.remaining(); if new == remaining { return Poll::Ready(Err(ReadExactError::FinishedEarly)); } remaining = new; } Poll::Ready(Ok(())) } } /// Errors that arise from reading from a stream. #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum ReadExactError { /// The stream finished before all bytes were read #[error("stream finished early")] FinishedEarly, /// A read error occurred #[error(transparent)] ReadError(#[from] ReadError), } /// Future produced by [`RecvStream::read_chunk()`]. 
/// /// [`RecvStream::read_chunk()`]: crate::RecvStream::read_chunk #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadChunk<'a> { stream: &'a mut RecvStream, max_length: usize, ordered: bool, } impl<'a> Future for ReadChunk<'a> { type Output = Result, ReadError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let (max_length, ordered) = (self.max_length, self.ordered); self.stream.poll_read_chunk(cx, max_length, ordered) } } /// Future produced by [`RecvStream::read_chunks()`]. /// /// [`RecvStream::read_chunks()`]: crate::RecvStream::read_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct ReadChunks<'a> { stream: &'a mut RecvStream, bufs: &'a mut [Bytes], } impl<'a> Future for ReadChunks<'a> { type Output = Result, ReadError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); this.stream.poll_read_chunks(cx, this.bufs) } } quinn-0.10.2/src/runtime/async_std.rs000064400000000000000000000041331046102023000156460ustar 00000000000000use std::{ future::Future, io, pin::Pin, task::{Context, Poll}, time::Instant, }; use async_io::{Async, Timer}; use super::{AsyncTimer, AsyncUdpSocket, Runtime}; /// A Quinn runtime for async-std #[derive(Debug)] pub struct AsyncStdRuntime; impl Runtime for AsyncStdRuntime { fn new_timer(&self, t: Instant) -> Pin> { Box::pin(Timer::at(t)) } fn spawn(&self, future: Pin + Send>>) { async_std::task::spawn(future); } fn wrap_udp_socket(&self, sock: std::net::UdpSocket) -> io::Result> { udp::UdpSocketState::configure((&sock).into())?; Ok(Box::new(UdpSocket { io: Async::new(sock)?, inner: udp::UdpSocketState::new(), })) } } impl AsyncTimer for Timer { fn reset(mut self: Pin<&mut Self>, t: Instant) { self.set_at(t) } fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { Future::poll(self, cx).map(|_| ()) } } #[derive(Debug)] struct UdpSocket { io: Async, inner: udp::UdpSocketState, } impl AsyncUdpSocket 
for UdpSocket { fn poll_send( &self, state: &udp::UdpState, cx: &mut Context, transmits: &[udp::Transmit], ) -> Poll> { loop { ready!(self.io.poll_writable(cx))?; if let Ok(res) = self.inner.send((&self.io).into(), state, transmits) { return Poll::Ready(Ok(res)); } } } fn poll_recv( &self, cx: &mut Context, bufs: &mut [io::IoSliceMut<'_>], meta: &mut [udp::RecvMeta], ) -> Poll> { loop { ready!(self.io.poll_readable(cx))?; if let Ok(res) = self.inner.recv((&self.io).into(), bufs, meta) { return Poll::Ready(Ok(res)); } } } fn local_addr(&self) -> io::Result { self.io.as_ref().local_addr() } fn may_fragment(&self) -> bool { udp::may_fragment() } } quinn-0.10.2/src/runtime/tokio.rs000064400000000000000000000044631046102023000150120ustar 00000000000000use std::{ future::Future, io, pin::Pin, task::{Context, Poll}, time::Instant, }; use tokio::{ io::Interest, time::{sleep_until, Sleep}, }; use super::{AsyncTimer, AsyncUdpSocket, Runtime}; /// A Quinn runtime for Tokio #[derive(Debug)] pub struct TokioRuntime; impl Runtime for TokioRuntime { fn new_timer(&self, t: Instant) -> Pin> { Box::pin(sleep_until(t.into())) } fn spawn(&self, future: Pin + Send>>) { tokio::spawn(future); } fn wrap_udp_socket(&self, sock: std::net::UdpSocket) -> io::Result> { udp::UdpSocketState::configure((&sock).into())?; Ok(Box::new(UdpSocket { io: tokio::net::UdpSocket::from_std(sock)?, inner: udp::UdpSocketState::new(), })) } } impl AsyncTimer for Sleep { fn reset(self: Pin<&mut Self>, t: Instant) { Self::reset(self, t.into()) } fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { Future::poll(self, cx) } } #[derive(Debug)] struct UdpSocket { io: tokio::net::UdpSocket, inner: udp::UdpSocketState, } impl AsyncUdpSocket for UdpSocket { fn poll_send( &self, state: &udp::UdpState, cx: &mut Context, transmits: &[udp::Transmit], ) -> Poll> { let inner = &self.inner; let io = &self.io; loop { ready!(io.poll_send_ready(cx))?; if let Ok(res) = io.try_io(Interest::WRITABLE, || { 
inner.send(io.into(), state, transmits) }) { return Poll::Ready(Ok(res)); } } } fn poll_recv( &self, cx: &mut Context, bufs: &mut [std::io::IoSliceMut<'_>], meta: &mut [udp::RecvMeta], ) -> Poll> { loop { ready!(self.io.poll_recv_ready(cx))?; if let Ok(res) = self.io.try_io(Interest::READABLE, || { self.inner.recv((&self.io).into(), bufs, meta) }) { return Poll::Ready(Ok(res)); } } } fn local_addr(&self) -> io::Result { self.io.local_addr() } fn may_fragment(&self) -> bool { udp::may_fragment() } } quinn-0.10.2/src/runtime.rs000064400000000000000000000057741046102023000136730ustar 00000000000000use std::{ fmt::Debug, future::Future, io::{self, IoSliceMut}, net::SocketAddr, pin::Pin, sync::Arc, task::{Context, Poll}, time::Instant, }; use udp::{RecvMeta, Transmit, UdpState}; /// Abstracts I/O and timer operations for runtime independence pub trait Runtime: Send + Sync + Debug + 'static { /// Construct a timer that will expire at `i` fn new_timer(&self, i: Instant) -> Pin>; /// Drive `future` to completion in the background fn spawn(&self, future: Pin + Send>>); /// Convert `t` into the socket type used by this runtime fn wrap_udp_socket(&self, t: std::net::UdpSocket) -> io::Result>; } /// Abstract implementation of an async timer for runtime independence pub trait AsyncTimer: Send + Debug + 'static { /// Update the timer to expire at `i` fn reset(self: Pin<&mut Self>, i: Instant); /// Check whether the timer has expired, and register to be woken if not fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()>; } /// Abstract implementation of a UDP socket for runtime independence pub trait AsyncUdpSocket: Send + Debug + 'static { /// Send UDP datagrams from `transmits`, or register to be woken if sending may succeed in the /// future fn poll_send( &self, state: &UdpState, cx: &mut Context, transmits: &[Transmit], ) -> Poll>; /// Receive UDP datagrams, or register to be woken if receiving may succeed in the future fn poll_recv( &self, cx: &mut Context, bufs: &mut 
[IoSliceMut<'_>], meta: &mut [RecvMeta], ) -> Poll>; /// Look up the local IP address and port used by this socket fn local_addr(&self) -> io::Result; /// Whether datagrams might get fragmented into multiple parts /// /// Sockets should prevent this for best performance. See e.g. the `IPV6_DONTFRAG` socket /// option. fn may_fragment(&self) -> bool { true } } /// Automatically select an appropriate runtime from those enabled at compile time /// /// If `runtime-tokio` is enabled and this function is called from within a Tokio runtime context, /// then `TokioRuntime` is returned. Otherwise, if `runtime-async-std` is enabled, `AsyncStdRuntime` /// is returned. Otherwise, `None` is returned. pub fn default_runtime() -> Option> { #[cfg(feature = "runtime-tokio")] { if ::tokio::runtime::Handle::try_current().is_ok() { return Some(Arc::new(TokioRuntime)); } } #[cfg(feature = "runtime-async-std")] { return Some(Arc::new(AsyncStdRuntime)); } #[cfg(not(feature = "runtime-async-std"))] None } #[cfg(feature = "runtime-tokio")] mod tokio; #[cfg(feature = "runtime-tokio")] pub use self::tokio::TokioRuntime; #[cfg(feature = "runtime-async-std")] mod async_std; #[cfg(feature = "runtime-async-std")] pub use self::async_std::AsyncStdRuntime; quinn-0.10.2/src/send_stream.rs000064400000000000000000000401171046102023000145020ustar 00000000000000use std::{ future::Future, io, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; use proto::{ConnectionError, FinishError, StreamId, Written}; use thiserror::Error; use tokio::sync::oneshot; use crate::{ connection::{ConnectionRef, UnknownStream}, VarInt, }; /// A stream that can only be used to send data /// /// If dropped, streams that haven't been explicitly [`reset()`] will continue to (re)transmit /// previously written data until it has been fully acknowledged or the connection is closed. 
/// /// [`reset()`]: SendStream::reset #[derive(Debug)] pub struct SendStream { conn: ConnectionRef, stream: StreamId, is_0rtt: bool, finishing: Option>>, } impl SendStream { pub(crate) fn new(conn: ConnectionRef, stream: StreamId, is_0rtt: bool) -> Self { Self { conn, stream, is_0rtt, finishing: None, } } /// Write bytes to the stream /// /// Yields the number of bytes written on success. Congestion and flow control may cause this to /// be shorter than `buf.len()`, indicating that only a prefix of `buf` was written. pub async fn write(&mut self, buf: &[u8]) -> Result { Write { stream: self, buf }.await } /// Convenience method to write an entire buffer to the stream pub async fn write_all(&mut self, buf: &[u8]) -> Result<(), WriteError> { WriteAll { stream: self, buf }.await } /// Write chunks to the stream /// /// Yields the number of bytes and chunks written on success. /// Congestion and flow control may cause this to be shorter than `buf.len()`, /// indicating that only a prefix of `bufs` was written pub async fn write_chunks(&mut self, bufs: &mut [Bytes]) -> Result { WriteChunks { stream: self, bufs }.await } /// Convenience method to write a single chunk in its entirety to the stream pub async fn write_chunk(&mut self, buf: Bytes) -> Result<(), WriteError> { WriteChunk { stream: self, buf: [buf], } .await } /// Convenience method to write an entire list of chunks to the stream pub async fn write_all_chunks(&mut self, bufs: &mut [Bytes]) -> Result<(), WriteError> { WriteAllChunks { stream: self, bufs, offset: 0, } .await } fn execute_poll(&mut self, cx: &mut Context, write_fn: F) -> Poll> where F: FnOnce(&mut proto::SendStream) -> Result, { use proto::WriteError::*; let mut conn = self.conn.state.lock("SendStream::poll_write"); if self.is_0rtt { conn.check_0rtt() .map_err(|()| WriteError::ZeroRttRejected)?; } if let Some(ref x) = conn.error { return Poll::Ready(Err(WriteError::ConnectionLost(x.clone()))); } let result = match write_fn(&mut 
conn.inner.send_stream(self.stream)) { Ok(result) => result, Err(Blocked) => { conn.blocked_writers.insert(self.stream, cx.waker().clone()); return Poll::Pending; } Err(Stopped(error_code)) => { return Poll::Ready(Err(WriteError::Stopped(error_code))); } Err(UnknownStream) => { return Poll::Ready(Err(WriteError::UnknownStream)); } }; conn.wake(); Poll::Ready(Ok(result)) } /// Shut down the send stream gracefully. /// /// No new data may be written after calling this method. Completes when the peer has /// acknowledged all sent data, retransmitting data as needed. pub async fn finish(&mut self) -> Result<(), WriteError> { Finish { stream: self }.await } #[doc(hidden)] pub fn poll_finish(&mut self, cx: &mut Context) -> Poll> { let mut conn = self.conn.state.lock("poll_finish"); if self.is_0rtt { conn.check_0rtt() .map_err(|()| WriteError::ZeroRttRejected)?; } if self.finishing.is_none() { conn.inner .send_stream(self.stream) .finish() .map_err(|e| match e { FinishError::UnknownStream => WriteError::UnknownStream, FinishError::Stopped(error_code) => WriteError::Stopped(error_code), })?; let (send, recv) = oneshot::channel(); self.finishing = Some(recv); conn.finishing.insert(self.stream, send); conn.wake(); } match Pin::new(self.finishing.as_mut().unwrap()) .poll(cx) .map(|x| x.unwrap()) { Poll::Ready(x) => { self.finishing = None; Poll::Ready(x.map_or(Ok(()), Err)) } Poll::Pending => { // To ensure that finished streams can be detected even after the connection is // closed, we must only check for connection errors after determining that the // stream has not yet been finished. Note that this relies on holding the connection // lock so that it is impossible for the stream to become finished between the above // poll call and this check. if let Some(ref x) = conn.error { return Poll::Ready(Err(WriteError::ConnectionLost(x.clone()))); } Poll::Pending } } } /// Close the send stream immediately. /// /// No new data can be written after calling this method. 
Locally buffered data is dropped, and /// previously transmitted data will no longer be retransmitted if lost. If an attempt has /// already been made to finish the stream, the peer may still receive all written data. pub fn reset(&mut self, error_code: VarInt) -> Result<(), UnknownStream> { let mut conn = self.conn.state.lock("SendStream::reset"); if self.is_0rtt && conn.check_0rtt().is_err() { return Ok(()); } conn.inner.send_stream(self.stream).reset(error_code)?; conn.wake(); Ok(()) } /// Set the priority of the send stream /// /// Every send stream has an initial priority of 0. Locally buffered data from streams with /// higher priority will be transmitted before data from streams with lower priority. Changing /// the priority of a stream with pending data may only take effect after that data has been /// transmitted. Using many different priority levels per connection may have a negative /// impact on performance. pub fn set_priority(&self, priority: i32) -> Result<(), UnknownStream> { let mut conn = self.conn.state.lock("SendStream::set_priority"); conn.inner.send_stream(self.stream).set_priority(priority)?; Ok(()) } /// Get the priority of the send stream pub fn priority(&self) -> Result { let mut conn = self.conn.state.lock("SendStream::priority"); Ok(conn.inner.send_stream(self.stream).priority()?) 
} /// Completes if/when the peer stops the stream, yielding the error code pub async fn stopped(&mut self) -> Result { Stopped { stream: self }.await } #[doc(hidden)] pub fn poll_stopped(&mut self, cx: &mut Context) -> Poll> { let mut conn = self.conn.state.lock("SendStream::poll_stopped"); if self.is_0rtt { conn.check_0rtt() .map_err(|()| StoppedError::ZeroRttRejected)?; } match conn.inner.send_stream(self.stream).stopped() { Err(_) => Poll::Ready(Err(StoppedError::UnknownStream)), Ok(Some(error_code)) => Poll::Ready(Ok(error_code)), Ok(None) => { conn.stopped.insert(self.stream, cx.waker().clone()); Poll::Pending } } } /// Get the identity of this stream pub fn id(&self) -> StreamId { self.stream } } #[cfg(feature = "futures-io")] impl futures_io::AsyncWrite for SendStream { fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { SendStream::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { self.get_mut().poll_finish(cx).map_err(Into::into) } } #[cfg(feature = "runtime-tokio")] impl tokio::io::AsyncWrite for SendStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Self::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { self.get_mut().poll_finish(cx).map_err(Into::into) } } impl Drop for SendStream { fn drop(&mut self) { let mut conn = self.conn.state.lock("SendStream::drop"); // clean up any previously registered wakers conn.finishing.remove(&self.stream); conn.stopped.remove(&self.stream); conn.blocked_writers.remove(&self.stream); if conn.error.is_some() || (self.is_0rtt && conn.check_0rtt().is_err()) { return; } if 
self.finishing.is_none() { match conn.inner.send_stream(self.stream).finish() { Ok(()) => conn.wake(), Err(FinishError::Stopped(reason)) => { if conn.inner.send_stream(self.stream).reset(reason).is_ok() { conn.wake(); } } // Already finished or reset, which is fine. Err(FinishError::UnknownStream) => {} } } } } /// Future produced by `SendStream::finish` #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Finish<'a> { stream: &'a mut SendStream, } impl Future for Finish<'_> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { self.get_mut().stream.poll_finish(cx) } } /// Future produced by `SendStream::stopped` #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Stopped<'a> { stream: &'a mut SendStream, } impl Future for Stopped<'_> { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { self.get_mut().stream.poll_stopped(cx) } } /// Future produced by [`SendStream::write()`]. /// /// [`SendStream::write()`]: crate::SendStream::write #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct Write<'a> { stream: &'a mut SendStream, buf: &'a [u8], } impl<'a> Future for Write<'a> { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let buf = this.buf; this.stream.execute_poll(cx, |s| s.write(buf)) } } /// Future produced by [`SendStream::write_all()`]. 
/// /// [`SendStream::write_all()`]: crate::SendStream::write_all #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteAll<'a> { stream: &'a mut SendStream, buf: &'a [u8], } impl<'a> Future for WriteAll<'a> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.buf.is_empty() { return Poll::Ready(Ok(())); } let buf = this.buf; let n = ready!(this.stream.execute_poll(cx, |s| s.write(buf)))?; this.buf = &this.buf[n..]; } } } /// Future produced by [`SendStream::write_chunks()`]. /// /// [`SendStream::write_chunks()`]: crate::SendStream::write_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteChunks<'a> { stream: &'a mut SendStream, bufs: &'a mut [Bytes], } impl<'a> Future for WriteChunks<'a> { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); let bufs = &mut *this.bufs; this.stream.execute_poll(cx, |s| s.write_chunks(bufs)) } } /// Future produced by [`SendStream::write_chunk()`]. /// /// [`SendStream::write_chunk()`]: crate::SendStream::write_chunk #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteChunk<'a> { stream: &'a mut SendStream, buf: [Bytes; 1], } impl<'a> Future for WriteChunk<'a> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.buf[0].is_empty() { return Poll::Ready(Ok(())); } let bufs = &mut this.buf[..]; ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; } } } /// Future produced by [`SendStream::write_all_chunks()`]. 
/// /// [`SendStream::write_all_chunks()`]: crate::SendStream::write_all_chunks #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] struct WriteAllChunks<'a> { stream: &'a mut SendStream, bufs: &'a mut [Bytes], offset: usize, } impl<'a> Future for WriteAllChunks<'a> { type Output = Result<(), WriteError>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = self.get_mut(); loop { if this.offset == this.bufs.len() { return Poll::Ready(Ok(())); } let bufs = &mut this.bufs[this.offset..]; let written = ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; this.offset += written.chunks; } } } /// Errors that arise from writing to a stream #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum WriteError { /// The peer is no longer accepting data on this stream /// /// Carries an application-defined error code. #[error("sending stopped by peer: error {0}")] Stopped(VarInt), /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// The stream has already been finished or reset #[error("unknown stream")] UnknownStream, /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. /// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } /// Errors that arise while monitoring for a send stream stop from the peer #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum StoppedError { /// The connection was lost #[error("connection lost")] ConnectionLost(#[from] ConnectionError), /// The stream has already been finished or reset #[error("unknown stream")] UnknownStream, /// This was a 0-RTT stream and the server rejected it /// /// Can only occur on clients for 0-RTT streams, which can be opened using /// [`Connecting::into_0rtt()`]. 
/// /// [`Connecting::into_0rtt()`]: crate::Connecting::into_0rtt() #[error("0-RTT rejected")] ZeroRttRejected, } impl From for io::Error { fn from(x: WriteError) -> Self { use self::WriteError::*; let kind = match x { Stopped(_) | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | UnknownStream => io::ErrorKind::NotConnected, }; Self::new(kind, x) } } quinn-0.10.2/src/tests.rs000064400000000000000000000572161046102023000133500ustar 00000000000000#![cfg(feature = "rustls")] use std::{ convert::TryInto, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, str, sync::Arc, }; use crate::runtime::TokioRuntime; use bytes::Bytes; use rand::{rngs::StdRng, RngCore, SeedableRng}; use tokio::{ runtime::{Builder, Runtime}, time::{Duration, Instant}, }; use tracing::{info, info_span}; use tracing_futures::Instrument as _; use tracing_subscriber::EnvFilter; use super::{ClientConfig, Endpoint, RecvStream, SendStream, TransportConfig}; #[test] fn handshake_timeout() { let _guard = subscribe(); let runtime = rt_threaded(); let client = { let _guard = runtime.enter(); Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap() }; let mut client_config = crate::ClientConfig::with_root_certificates(rustls::RootCertStore::empty()); const IDLE_TIMEOUT: Duration = Duration::from_millis(500); let mut transport_config = crate::TransportConfig::default(); transport_config .max_idle_timeout(Some(IDLE_TIMEOUT.try_into().unwrap())) .initial_rtt(Duration::from_millis(10)); client_config.transport_config(Arc::new(transport_config)); let start = Instant::now(); runtime.block_on(async move { match client .connect_with( client_config, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1), "localhost", ) .unwrap() .await { Err(crate::ConnectionError::TimedOut) => {} Err(e) => panic!("unexpected error: {e:?}"), Ok(_) => panic!("unexpected success"), } }); let dt = start.elapsed(); assert!(dt > IDLE_TIMEOUT && dt < 2 * IDLE_TIMEOUT); } #[tokio::test] 
async fn close_endpoint() { let _guard = subscribe(); let mut endpoint = Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); endpoint.set_default_client_config(ClientConfig::with_root_certificates( rustls::RootCertStore::empty(), )); let conn = endpoint .connect( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap(); tokio::spawn(async move { let _ = conn.await; }); let conn = endpoint .connect( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap(); endpoint.close(0u32.into(), &[]); match conn.await { Err(crate::ConnectionError::LocallyClosed) => (), Err(e) => panic!("unexpected error: {e}"), Ok(_) => { panic!("unexpected success"); } } } #[test] fn local_addr() { let socket = UdpSocket::bind("[::1]:0").unwrap(); let addr = socket.local_addr().unwrap(); let runtime = rt_basic(); let ep = { let _guard = runtime.enter(); Endpoint::new(Default::default(), None, socket, Arc::new(TokioRuntime)).unwrap() }; assert_eq!( addr, ep.local_addr() .expect("Could not obtain our local endpoint") ); } #[test] fn read_after_close() { let _guard = subscribe(); let runtime = rt_basic(); let endpoint = { let _guard = runtime.enter(); endpoint() }; const MSG: &[u8] = b"goodbye!"; let endpoint2 = endpoint.clone(); runtime.spawn(async move { let new_conn = endpoint2 .accept() .await .expect("endpoint") .await .expect("connection"); let mut s = new_conn.open_uni().await.unwrap(); s.write_all(MSG).await.unwrap(); s.finish().await.unwrap(); }); runtime.block_on(async move { let new_conn = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .await .expect("connect"); tokio::time::sleep_until(Instant::now() + Duration::from_millis(100)).await; let mut stream = new_conn.accept_uni().await.expect("incoming streams"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG); }); } #[test] fn export_keying_material() { let _guard = subscribe(); let 
runtime = rt_basic(); let endpoint = { let _guard = runtime.enter(); endpoint() }; runtime.block_on(async move { let outgoing_conn = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .await .expect("connect"); let incoming_conn = endpoint .accept() .await .expect("endpoint") .await .expect("connection"); let mut i_buf = [0u8; 64]; incoming_conn .export_keying_material(&mut i_buf, b"asdf", b"qwer") .unwrap(); let mut o_buf = [0u8; 64]; outgoing_conn .export_keying_material(&mut o_buf, b"asdf", b"qwer") .unwrap(); assert_eq!(&i_buf[..], &o_buf[..]); }); } #[tokio::test] async fn accept_after_close() { let _guard = subscribe(); let endpoint = endpoint(); const MSG: &[u8] = b"goodbye!"; let sender = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .await .expect("connect"); let mut s = sender.open_uni().await.unwrap(); s.write_all(MSG).await.unwrap(); s.finish().await.unwrap(); sender.close(0u32.into(), b""); // Allow some time for the close to be sent and processed tokio::time::sleep(Duration::from_millis(100)).await; // Despite the connection having closed, we should be able to accept it... let receiver = endpoint .accept() .await .expect("endpoint") .await .expect("connection"); // ...and read what was sent. let mut stream = receiver.accept_uni().await.expect("incoming streams"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG); // But it's still definitely closed. 
assert!(receiver.open_uni().await.is_err()); } /// Construct an endpoint suitable for connecting to itself fn endpoint() -> Endpoint { endpoint_with_config(TransportConfig::default()) } fn endpoint_with_config(transport_config: TransportConfig) -> Endpoint { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = rustls::PrivateKey(cert.serialize_private_key_der()); let cert = rustls::Certificate(cert.serialize_der().unwrap()); let transport_config = Arc::new(transport_config); let mut server_config = crate::ServerConfig::with_single_cert(vec![cert.clone()], key).unwrap(); server_config.transport_config(transport_config.clone()); let mut roots = rustls::RootCertStore::empty(); roots.add(&cert).unwrap(); let mut endpoint = Endpoint::server( server_config, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), ) .unwrap(); let mut client_config = ClientConfig::with_root_certificates(roots); client_config.transport_config(transport_config); endpoint.set_default_client_config(client_config); endpoint } #[tokio::test] async fn zero_rtt() { let _guard = subscribe(); let endpoint = endpoint(); const MSG0: &[u8] = b"zero"; const MSG1: &[u8] = b"one"; let endpoint2 = endpoint.clone(); tokio::spawn(async move { for _ in 0..2 { let incoming = endpoint2.accept().await.unwrap(); let (connection, established) = incoming.into_0rtt().unwrap_or_else(|_| unreachable!()); let c = connection.clone(); tokio::spawn(async move { while let Ok(mut x) = c.accept_uni().await { let msg = x.read_to_end(usize::max_value()).await.unwrap(); assert_eq!(msg, MSG0); } }); info!("sending 0.5-RTT"); let mut s = connection.open_uni().await.expect("open_uni"); s.write_all(MSG0).await.expect("write"); s.finish().await.expect("finish"); established.await; info!("sending 1-RTT"); let mut s = connection.open_uni().await.expect("open_uni"); s.write_all(MSG1).await.expect("write"); // The peer might close the connection before ACKing let _ = s.finish().await; } }); let 
connection = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .err() .expect("0-RTT succeeded without keys") .await .expect("connect"); { let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG0); // Read a 1-RTT message to ensure the handshake completes fully, allowing the server's // NewSessionTicket frame to be received. let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG1); drop(connection); } info!("initial connection complete"); let (connection, zero_rtt) = endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .unwrap_or_else(|_| panic!("missing 0-RTT keys")); // Send something ASAP to use 0-RTT let c = connection.clone(); tokio::spawn(async move { let mut s = c.open_uni().await.expect("0-RTT open uni"); info!("sending 0-RTT"); s.write_all(MSG0).await.expect("0-RTT write"); s.finish().await.expect("0-RTT finish"); }); let mut stream = connection.accept_uni().await.expect("incoming streams"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG0); assert!(zero_rtt.await); drop((stream, connection)); endpoint.wait_idle().await; } #[test] fn echo_v6() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] fn echo_v4() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] #[cfg(any(target_os = "linux", 
target_os = "macos"))] // Dual-stack sockets aren't the default anywhere else. fn echo_dualstack() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 1, stream_size: 10 * 1024, receive_window: None, stream_receive_window: None, }); } #[test] #[cfg(not(tarpaulin))] fn stress_receive_window() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 50, stream_size: 25 * 1024 + 11, receive_window: Some(37), stream_receive_window: Some(100 * 1024 * 1024), }); } #[test] #[cfg(not(tarpaulin))] fn stress_stream_receive_window() { // Note that there is no point in runnning this with too many streams, // since the window is only active within a stream run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 2, stream_size: 250 * 1024 + 11, receive_window: Some(100 * 1024 * 1024), stream_receive_window: Some(37), }); } #[test] #[cfg(not(tarpaulin))] fn stress_both_windows() { run_echo(EchoArgs { client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), nr_streams: 50, stream_size: 25 * 1024 + 11, receive_window: Some(37), stream_receive_window: Some(37), }); } fn run_echo(args: EchoArgs) { let _guard = subscribe(); let runtime = rt_basic(); let handle = { // Use small receive windows let mut transport_config = TransportConfig::default(); if let Some(receive_window) = args.receive_window { transport_config.receive_window(receive_window.try_into().unwrap()); } if let Some(stream_receive_window) = args.stream_receive_window { transport_config.stream_receive_window(stream_receive_window.try_into().unwrap()); } transport_config.max_concurrent_bidi_streams(1_u8.into()); 
transport_config.max_concurrent_uni_streams(1_u8.into()); let transport_config = Arc::new(transport_config); // We don't use the `endpoint` helper here because we want two different endpoints with // different addresses. let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = rustls::PrivateKey(cert.serialize_private_key_der()); let cert_der = cert.serialize_der().unwrap(); let cert = rustls::Certificate(cert_der); let mut server_config = crate::ServerConfig::with_single_cert(vec![cert.clone()], key).unwrap(); server_config.transport = transport_config.clone(); let server_sock = UdpSocket::bind(args.server_addr).unwrap(); let server_addr = server_sock.local_addr().unwrap(); let server = { let _guard = runtime.enter(); Endpoint::new( Default::default(), Some(server_config), server_sock, Arc::new(TokioRuntime), ) .unwrap() }; let mut roots = rustls::RootCertStore::empty(); roots.add(&cert).unwrap(); let mut client_crypto = rustls::ClientConfig::builder() .with_safe_defaults() .with_root_certificates(roots) .with_no_client_auth(); client_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); let mut client = { let _guard = runtime.enter(); Endpoint::client(args.client_addr).unwrap() }; let mut client_config = ClientConfig::new(Arc::new(client_crypto)); client_config.transport_config(transport_config); client.set_default_client_config(client_config); let handle = runtime.spawn(async move { let incoming = server.accept().await.unwrap(); // Note for anyone modifying the platform support in this test: // If `local_ip` gets available on additional platforms - which // requires modifying this test - please update the list of supported // platforms in the doc comments of the various `local_ip` functions. 
if cfg!(target_os = "linux") || cfg!(target_os = "freebsd") || cfg!(target_os = "macos") { let local_ip = incoming.local_ip().expect("Local IP must be available"); assert!(local_ip.is_loopback()); } else { assert_eq!(None, incoming.local_ip()); } let new_conn = incoming.instrument(info_span!("server")).await.unwrap(); tokio::spawn(async move { while let Ok(stream) = new_conn.accept_bi().await { tokio::spawn(echo(stream)); } }); server.wait_idle().await; }); info!("connecting from {} to {}", args.client_addr, server_addr); runtime.block_on(async move { let new_conn = client .connect(server_addr, "localhost") .unwrap() .instrument(info_span!("client")) .await .expect("connect"); /// This is just an arbitrary number to generate deterministic test data const SEED: u64 = 0x12345678; for i in 0..args.nr_streams { println!("Opening stream {i}"); let (mut send, mut recv) = new_conn.open_bi().await.expect("stream open"); let msg = gen_data(args.stream_size, SEED); let send_task = async { send.write_all(&msg).await.expect("write"); send.finish().await.expect("finish"); }; let recv_task = async { recv.read_to_end(usize::max_value()).await.expect("read") }; let (_, data) = tokio::join!(send_task, recv_task); assert_eq!(data[..], msg[..], "Data mismatch"); } new_conn.close(0u32.into(), b"done"); client.wait_idle().await; }); handle }; runtime.block_on(handle).unwrap(); } struct EchoArgs { client_addr: SocketAddr, server_addr: SocketAddr, nr_streams: usize, stream_size: usize, receive_window: Option, stream_receive_window: Option, } async fn echo((mut send, mut recv): (SendStream, RecvStream)) { loop { // These are 32 buffers, for reading approximately 32kB at once #[rustfmt::skip] let mut bufs = [ Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), 
Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), ]; match recv.read_chunks(&mut bufs).await.expect("read chunks") { Some(n) => { send.write_all_chunks(&mut bufs[..n]) .await .expect("write chunks"); } None => break, } } let _ = send.finish().await; } fn gen_data(size: usize, seed: u64) -> Vec { let mut rng: StdRng = SeedableRng::seed_from_u64(seed); let mut buf = vec![0; size]; rng.fill_bytes(&mut buf); buf } fn subscribe() -> tracing::subscriber::DefaultGuard { let sub = tracing_subscriber::FmtSubscriber::builder() .with_env_filter(EnvFilter::from_default_env()) .with_writer(|| TestWriter) .finish(); tracing::subscriber::set_default(sub) } struct TestWriter; impl std::io::Write for TestWriter { fn write(&mut self, buf: &[u8]) -> io::Result { print!( "{}", str::from_utf8(buf).expect("tried to log invalid UTF-8") ); Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { io::stdout().flush() } } fn rt_basic() -> Runtime { Builder::new_current_thread().enable_all().build().unwrap() } fn rt_threaded() -> Runtime { Builder::new_multi_thread().enable_all().build().unwrap() } #[tokio::test] async fn rebind_recv() { let _guard = subscribe(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = rustls::PrivateKey(cert.serialize_private_key_der()); let cert = rustls::Certificate(cert.serialize_der().unwrap()); let mut roots = rustls::RootCertStore::empty(); roots.add(&cert).unwrap(); let mut client = Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); let mut client_config = ClientConfig::new(Arc::new( rustls::ClientConfig::builder() .with_safe_defaults() .with_root_certificates(roots) .with_no_client_auth(), )); client_config.transport_config(Arc::new({ let mut cfg = TransportConfig::default(); cfg.max_concurrent_uni_streams(1u32.into()); cfg })); 
client.set_default_client_config(client_config); let server_config = crate::ServerConfig::with_single_cert(vec![cert.clone()], key).unwrap(); let server = Endpoint::server( server_config, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), ) .unwrap(); let server_addr = server.local_addr().unwrap(); const MSG: &[u8; 5] = b"hello"; let write_send = Arc::new(tokio::sync::Notify::new()); let write_recv = write_send.clone(); let connected_send = Arc::new(tokio::sync::Notify::new()); let connected_recv = connected_send.clone(); let server = tokio::spawn(async move { let connection = server.accept().await.unwrap().await.unwrap(); info!("got conn"); connected_send.notify_one(); write_recv.notified().await; let mut stream = connection.open_uni().await.unwrap(); stream.write_all(MSG).await.unwrap(); stream.finish().await.unwrap(); }); let connection = client .connect(server_addr, "localhost") .unwrap() .await .unwrap(); info!("connected"); connected_recv.notified().await; client .rebind(UdpSocket::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap()) .unwrap(); info!("rebound"); write_send.notify_one(); let mut stream = connection.accept_uni().await.unwrap(); assert_eq!(stream.read_to_end(MSG.len()).await.unwrap(), MSG); server.await.unwrap(); } #[tokio::test] async fn stream_id_flow_control() { let _guard = subscribe(); let mut cfg = TransportConfig::default(); cfg.max_concurrent_uni_streams(1u32.into()); let endpoint = endpoint_with_config(cfg); let (client, server) = tokio::join!( endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap(), async { endpoint.accept().await.unwrap().await } ); let client = client.unwrap(); let server = server.unwrap(); // If `open_uni` doesn't get unblocked when the previous stream is dropped, this will time out. 
tokio::join!( async { client.open_uni().await.unwrap(); }, async { client.open_uni().await.unwrap(); }, async { client.open_uni().await.unwrap(); }, async { server.accept_uni().await.unwrap(); server.accept_uni().await.unwrap(); } ); } #[tokio::test] async fn two_datagram_readers() { let _guard = subscribe(); let endpoint = endpoint(); let (client, server) = tokio::join!( endpoint .connect(endpoint.local_addr().unwrap(), "localhost") .unwrap(), async { endpoint.accept().await.unwrap().await } ); let client = client.unwrap(); let server = server.unwrap(); let done = tokio::sync::Notify::new(); let (a, b, ()) = tokio::join!( async { let x = client.read_datagram().await.unwrap(); done.notify_waiters(); x }, async { let x = client.read_datagram().await.unwrap(); done.notify_waiters(); x }, async { server.send_datagram(b"one"[..].into()).unwrap(); done.notified().await; server.send_datagram(b"two"[..].into()).unwrap(); } ); assert!(*a == *b"one" || *b == *b"one"); assert!(*a == *b"two" || *b == *b"two"); } quinn-0.10.2/src/work_limiter.rs000064400000000000000000000201351046102023000147030ustar 00000000000000use std::time::{Duration, Instant}; /// Limits the amount of time spent on a certain type of work in a cycle /// /// The limiter works dynamically: For a sampled subset of cycles it measures /// the time that is approximately required for fulfilling 1 work item, and /// calculates the amount of allowed work items per cycle. /// The estimates are smoothed over all cycles where the exact duration is measured. /// /// In cycles where no measurement is performed the previously determined work limit /// is used. /// /// For the limiter the exact definition of a work item does not matter. /// It could for example track the amount of transmitted bytes per cycle, /// or the amount of transmitted datagrams per cycle. /// It will however work best if the required time to complete a work item is /// constant. 
#[derive(Debug)] pub(crate) struct WorkLimiter { /// Whether to measure the required work time, or to use the previous estimates mode: Mode, /// The current cycle number cycle: u16, /// The time the cycle started - only used in measurement mode start_time: Instant, /// How many work items have been completed in the cycle completed: usize, /// The amount of work items which are allowed for a cycle allowed: usize, /// The desired cycle time desired_cycle_time: Duration, /// The estimated and smoothed time per work item in nanoseconds smoothed_time_per_work_item_nanos: f64, /// Retrieves the current time for unit-test purposes #[cfg(test)] get_time: fn() -> Instant, } impl WorkLimiter { pub(crate) fn new(desired_cycle_time: Duration) -> Self { Self { mode: Mode::Measure, cycle: 0, start_time: Instant::now(), completed: 0, allowed: 0, desired_cycle_time, smoothed_time_per_work_item_nanos: 0.0, #[cfg(test)] get_time: std::time::Instant::now, } } /// Starts one work cycle pub(crate) fn start_cycle(&mut self) { self.completed = 0; if let Mode::Measure = self.mode { self.start_time = self.now(); } } /// Returns whether more work can be performed inside the `desired_cycle_time` /// /// Requires that previous work was tracked using `record_work`. pub(crate) fn allow_work(&mut self) -> bool { match self.mode { Mode::Measure => (self.now() - self.start_time) < self.desired_cycle_time, Mode::HistoricData => self.completed < self.allowed, } } /// Records that `work` additional work items have been completed inside the cycle /// /// Must be called between `start_cycle` and `finish_cycle`. pub(crate) fn record_work(&mut self, work: usize) { self.completed += work; } /// Finishes one work cycle /// /// For cycles where the exact duration is measured this will update the estimates /// for the time per work item and the limit of allowed work items per cycle. 
    /// The estimate is updated using the same exponential averaging (smoothing)
    /// mechanism which is used for determining QUIC path rtts: The last value is
    /// weighted by 1/8, and the previous average by 7/8.
    pub(crate) fn finish_cycle(&mut self) {
        // If no work was done in the cycle drop the measurement, it won't be useful
        if self.completed == 0 {
            return;
        }

        if let Mode::Measure = self.mode {
            let elapsed = self.now() - self.start_time;

            let time_per_work_item_nanos = (elapsed.as_nanos()) as f64 / self.completed as f64;

            // Calculate the time per work item. We set this to at least 1ns to avoid
            // dividing by 0 when calculating the allowed amount of work items.
            self.smoothed_time_per_work_item_nanos = if self.allowed == 0 {
                // Initial estimate
                time_per_work_item_nanos
            } else {
                // Smoothed estimate
                (7.0 * self.smoothed_time_per_work_item_nanos + time_per_work_item_nanos) / 8.0
            }
            .max(1.0);

            // Allow at least 1 work item in order to make progress
            self.allowed = (((self.desired_cycle_time.as_nanos()) as f64
                / self.smoothed_time_per_work_item_nanos) as usize)
                .max(1);
        }

        // Wrapping counter; every SAMPLING_INTERVAL-th cycle re-measures.
        self.cycle = self.cycle.wrapping_add(1);
        self.mode = match self.cycle % SAMPLING_INTERVAL {
            0 => Mode::Measure,
            _ => Mode::HistoricData,
        };
    }

    #[cfg(not(test))]
    fn now(&self) -> Instant {
        Instant::now()
    }

    // Test builds read the mocked clock instead of the real one.
    #[cfg(test)]
    fn now(&self) -> Instant {
        (self.get_time)()
    }
}

/// We take a measurement sample once every `SAMPLING_INTERVAL` cycles
const SAMPLING_INTERVAL: u16 = 256;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    Measure,
    HistoricData,
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::cell::RefCell;

    // Drives the limiter through measurement and historic-data cycles with a
    // mocked clock and checks the derived work limits.
    #[test]
    fn limit_work() {
        const CYCLE_TIME: Duration = Duration::from_millis(500);
        const BATCH_WORK_ITEMS: usize = 12;
        const BATCH_TIME: Duration = Duration::from_millis(100);

        const EXPECTED_INITIAL_BATCHES: usize =
            (CYCLE_TIME.as_nanos() / BATCH_TIME.as_nanos()) as usize;
        const EXPECTED_ALLOWED_WORK_ITEMS: usize = EXPECTED_INITIAL_BATCHES * BATCH_WORK_ITEMS;

        let mut limiter =
        WorkLimiter::new(CYCLE_TIME);
        limiter.get_time = get_time;
        reset_time();

        // The initial cycle is measuring
        limiter.start_cycle();
        let mut initial_batches = 0;
        while limiter.allow_work() {
            limiter.record_work(BATCH_WORK_ITEMS);
            advance_time(BATCH_TIME);
            initial_batches += 1;
        }
        limiter.finish_cycle();

        assert_eq!(initial_batches, EXPECTED_INITIAL_BATCHES);
        assert_eq!(limiter.allowed, EXPECTED_ALLOWED_WORK_ITEMS);
        let initial_time_per_work_item = limiter.smoothed_time_per_work_item_nanos;

        // The next cycles are using historic data
        const BATCH_SIZES: [usize; 4] = [1, 2, 3, 5];
        for &batch_size in &BATCH_SIZES {
            limiter.start_cycle();
            let mut allowed_work = 0;
            while limiter.allow_work() {
                limiter.record_work(batch_size);
                allowed_work += batch_size;
            }
            limiter.finish_cycle();

            // Historic-data cycles permit exactly the previously derived limit.
            assert_eq!(allowed_work, EXPECTED_ALLOWED_WORK_ITEMS);
        }

        // After `SAMPLING_INTERVAL`, we get into measurement mode again
        for _ in 0..(SAMPLING_INTERVAL as usize - BATCH_SIZES.len() - 1) {
            limiter.start_cycle();
            limiter.record_work(1);
            limiter.finish_cycle();
        }

        // We now do more work per cycle, and expect the estimate of allowed
        // work items to go up
        const BATCH_WORK_ITEMS_2: usize = 96;
        const TIME_PER_WORK_ITEMS_2_NANOS: f64 =
            CYCLE_TIME.as_nanos() as f64 / (EXPECTED_INITIAL_BATCHES * BATCH_WORK_ITEMS_2) as f64;
        // Mirrors the 7/8-old + 1/8-new smoothing in `finish_cycle`.
        let expected_updated_time_per_work_item =
            (initial_time_per_work_item * 7.0 + TIME_PER_WORK_ITEMS_2_NANOS) / 8.0;
        let expected_updated_allowed_work_items =
            (CYCLE_TIME.as_nanos() as f64 / expected_updated_time_per_work_item) as usize;

        limiter.start_cycle();
        let mut initial_batches = 0;
        while limiter.allow_work() {
            limiter.record_work(BATCH_WORK_ITEMS_2);
            advance_time(BATCH_TIME);
            initial_batches += 1;
        }
        limiter.finish_cycle();

        assert_eq!(initial_batches, EXPECTED_INITIAL_BATCHES);
        assert_eq!(limiter.allowed, expected_updated_allowed_work_items);
    }

    thread_local!
{ /// Mocked time pub static TIME: RefCell = RefCell::new(Instant::now()); } fn reset_time() { TIME.with(|t| { *t.borrow_mut() = Instant::now(); }) } fn get_time() -> Instant { TIME.with(|t| *t.borrow()) } fn advance_time(duration: Duration) { TIME.with(|t| { *t.borrow_mut() += duration; }) } } quinn-0.10.2/tests/many_connections.rs000064400000000000000000000145431046102023000161230ustar 00000000000000#![cfg(feature = "rustls")] use std::{ convert::TryInto, sync::{Arc, Mutex}, time::Duration, }; use crc::Crc; use quinn::{ConnectionError, ReadError, TransportConfig, WriteError}; use rand::{self, RngCore}; use tokio::runtime::Builder; struct Shared { errors: Vec, } #[test] #[ignore] fn connect_n_nodes_to_1_and_send_1mb_data() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let runtime = Builder::new_current_thread().enable_all().build().unwrap(); let _guard = runtime.enter(); let shared = Arc::new(Mutex::new(Shared { errors: vec![] })); let (cfg, listener_cert) = configure_listener(); let endpoint = quinn::Endpoint::server(cfg, "127.0.0.1:0".parse().unwrap()).unwrap(); let listener_addr = endpoint.local_addr().unwrap(); let expected_messages = 50; let crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); let shared2 = shared.clone(); let endpoint2 = endpoint.clone(); let read_incoming_data = async move { for _ in 0..expected_messages { let conn = endpoint2.accept().await.unwrap().await.unwrap(); let shared = shared2.clone(); let task = async move { while let Ok(stream) = conn.accept_uni().await { read_from_peer(stream).await?; conn.close(0u32.into(), &[]); } Ok(()) }; tokio::spawn(async move { if let Err(e) = task.await { shared.lock().unwrap().errors.push(e); } }); } }; runtime.spawn(read_incoming_data); let client_cfg = configure_connector(&listener_cert); for _ in 0..expected_messages { let data = random_data_with_hash(1024 * 1024, &crc); let 
shared = shared.clone(); let connecting = endpoint .connect_with(client_cfg.clone(), listener_addr, "localhost") .unwrap(); let task = async move { let conn = connecting.await.map_err(WriteError::ConnectionLost)?; write_to_peer(conn, data).await?; Ok(()) }; runtime.spawn(async move { if let Err(e) = task.await { use quinn::ConnectionError::*; match e { WriteError::ConnectionLost(ApplicationClosed { .. }) | WriteError::ConnectionLost(Reset) => {} WriteError::ConnectionLost(e) => shared.lock().unwrap().errors.push(e), _ => panic!("unexpected write error"), } } }); } runtime.block_on(endpoint.wait_idle()); let shared = shared.lock().unwrap(); if !shared.errors.is_empty() { panic!("some connections failed: {:?}", shared.errors); } } async fn read_from_peer(mut stream: quinn::RecvStream) -> Result<(), quinn::ConnectionError> { let crc = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); match stream.read_to_end(1024 * 1024 * 5).await { Ok(data) => { assert!(hash_correct(&data, &crc)); Ok(()) } Err(e) => { use quinn::ReadToEndError::*; use ReadError::*; match e { TooLong | Read(UnknownStream) | Read(ZeroRttRejected) | Read(IllegalOrderedRead) => unreachable!(), Read(Reset(error_code)) => panic!("unexpected stream reset: {error_code}"), Read(ConnectionLost(e)) => Err(e), } } } } async fn write_to_peer(conn: quinn::Connection, data: Vec) -> Result<(), WriteError> { let mut s = conn.open_uni().await.map_err(WriteError::ConnectionLost)?; s.write_all(&data).await?; // Suppress finish errors, since the peer may close before ACKing match s.finish().await { Ok(()) => Ok(()), Err(WriteError::ConnectionLost(ConnectionError::ApplicationClosed { .. })) => Ok(()), Err(e) => Err(e), } } /// Builds client configuration. Trusts given node certificate. 
fn configure_connector(node_cert: &rustls::Certificate) -> quinn::ClientConfig {
    let mut roots = rustls::RootCertStore::empty();
    roots.add(node_cert).unwrap();
    let mut transport_config = TransportConfig::default();
    // Generous idle timeout so slow CI machines don't drop connections mid-test.
    transport_config.max_idle_timeout(Some(Duration::from_secs(20).try_into().unwrap()));
    let mut peer_cfg = quinn::ClientConfig::with_root_certificates(roots);
    peer_cfg.transport_config(Arc::new(transport_config));
    peer_cfg
}

/// Builds listener configuration along with its certificate.
fn configure_listener() -> (quinn::ServerConfig, rustls::Certificate) {
    let (our_cert, our_priv_key) = gen_cert();
    let mut our_cfg =
        quinn::ServerConfig::with_single_cert(vec![our_cert.clone()], our_priv_key).unwrap();
    let transport_config = Arc::get_mut(&mut our_cfg.transport).unwrap();
    transport_config.max_idle_timeout(Some(Duration::from_secs(20).try_into().unwrap()));
    (our_cfg, our_cert)
}

/// Generates a self-signed certificate/key pair for "localhost".
fn gen_cert() -> (rustls::Certificate, rustls::PrivateKey) {
    let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]).unwrap();
    let key = rustls::PrivateKey(cert.serialize_private_key_der());
    (rustls::Certificate(cert.serialize_der().unwrap()), key)
}

/// Constructs a buffer with random bytes of given size prefixed with a hash of this data.
// Restored stripped type parameters: `&Crc<u32>` and `Vec<u8>`.
fn random_data_with_hash(size: usize, crc: &Crc<u32>) -> Vec<u8> {
    let mut data = random_vec(size + 4);
    let hash = crc.checksum(&data[4..]);
    // write hash in big endian
    data[0] = (hash >> 24) as u8;
    data[1] = ((hash >> 16) & 0xff) as u8;
    data[2] = ((hash >> 8) & 0xff) as u8;
    data[3] = (hash & 0xff) as u8;
    data
}

/// Checks if given data buffer hash is correct. Hash itself is a 4 byte prefix in the data.
fn hash_correct(data: &[u8], crc: &Crc) -> bool { let encoded_hash = ((data[0] as u32) << 24) | ((data[1] as u32) << 16) | ((data[2] as u32) << 8) | data[3] as u32; let actual_hash = crc.checksum(&data[4..]); encoded_hash == actual_hash } #[allow(unsafe_code)] fn random_vec(size: usize) -> Vec { let mut ret = vec![0; size]; rand::thread_rng().fill_bytes(&mut ret[..]); ret }