hickory-resolver-0.24.0/.cargo_vcs_info.json0000644000000001550000000000100144270ustar { "git": { "sha1": "408d0baca080d1b201cd33e616dc4abd160ef6c0" }, "path_in_vcs": "crates/resolver" }hickory-resolver-0.24.0/Cargo.lock0000644000001150240000000000100124040ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] [[package]] name = "async-trait" version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "base64" version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytes" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = 
"data-encoding" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "enum-as-inner" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2", "quote", "syn", ] [[package]] name = "errno" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", "windows-sys", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "fastrand" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ "foreign-types-shared", ] [[package]] name = "foreign-types-shared" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] [[package]] name = "futures" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "getrandom" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "h2" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "h3" version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6de6ca43eed186fd055214af06967b0a7a68336cefec7e8a4004e96efeaccb9e" dependencies = [ "bytes", "fastrand 1.9.0", "futures-util", "http", "tokio", "tracing", ] [[package]] name = "h3-quinn" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d4a1a1763e4f3e82ee9f1ecf2cf862b22cc7316ebe14684e42f94532b5ec64d" dependencies = [ "bytes", "futures", "h3", "quinn", "quinn-proto", "tokio-util", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hickory-proto" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" dependencies = [ "async-trait", "bytes", "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", "futures-io", "futures-util", "h2", "h3", "h3-quinn", "http", "idna", "ipnet", "native-tls", "once_cell", "openssl", "quinn", "rand", "ring", "rustls", "rustls-native-certs", "rustls-pemfile", "serde", "thiserror", "tinyvec", "tokio", "tokio-native-tls", "tokio-openssl", "tokio-rustls", "tracing", "url", "webpki-roots", ] [[package]] name = "hickory-resolver" version = "0.24.0" dependencies = [ "cfg-if", "futures-executor", "futures-util", "hickory-proto", "ipconfig", "lru-cache", "once_cell", 
"parking_lot", "rand", "resolv-conf", "rustls", "rustls-native-certs", "serde", "smallvec", "thiserror", "tokio", "tokio-native-tls", "tokio-openssl", "tokio-rustls", "tracing", "tracing-subscriber", "webpki-roots", ] [[package]] name = "hostname" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", "winapi", ] [[package]] name = "http" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "idna" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "ipconfig" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2", "widestring", "windows-sys", "winreg", ] [[package]] name = "ipnet" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itoa" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "linked-hash-map" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "lock_api" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru-cache" version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ "linked-hash-map", ] [[package]] name = "match_cfg" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata 0.1.10", ] [[package]] name = "memchr" version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "wasi", "windows-sys", ] [[package]] name = "native-tls" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", "log", "openssl", "openssl-probe", "openssl-sys", "schannel", "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ "overload", "winapi", ] [[package]] name = "num_cpus" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "object" version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ "bitflags 2.4.0", "cfg-if", "foreign-types", "libc", "once_cell", "openssl-macros", "openssl-sys", ] [[package]] name = "openssl-macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "overload" version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets", ] [[package]] name = "percent-encoding" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pin-project-lite" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", "rustls", "thiserror", "tokio", "tracing", ] [[package]] name = "quinn-proto" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand", "ring", "rustc-hash", "rustls", "slab", "thiserror", "tinyvec", "tracing", ] [[package]] name = "quinn-udp" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", "socket2", "tracing", "windows-sys", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", "regex-automata 0.3.8", "regex-syntax 0.7.5", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] [[package]] name = "regex-automata" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", "regex-syntax 0.7.5", ] [[package]] name = "regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "resolv-conf" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", "quick-error", ] [[package]] name = "ring" version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", "once_cell", "spin", "untrusted", "web-sys", "winapi", ] [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" version = "0.38.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" dependencies = [ "bitflags 2.4.0", "errno", "libc", "linux-raw-sys", "windows-sys", ] [[package]] name = "rustls" version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", "rustls-webpki", "sct", ] [[package]] name = "rustls-native-certs" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" version = "1.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ "base64", ] [[package]] name = "rustls-webpki" version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", ] [[package]] name = "schannel" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ "windows-sys", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", ] [[package]] name = "security-framework" version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "serde" version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sharded-slab" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "syn" version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand 2.0.1", "redox_syscall", "rustix", "windows-sys", ] [[package]] name = "thiserror" version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thread_local" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", "pin-project-lite", "socket2", "tokio-macros", "windows-sys", ] [[package]] name = "tokio-macros" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-native-tls" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] [[package]] name = "tokio-openssl" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" dependencies = [ "futures-util", "openssl", "openssl-sys", "tokio", ] [[package]] name = "tokio-rustls" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", ] [[package]] name = "tokio-util" version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tracing-core" version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", ] [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", ] [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = 
"wasm-bindgen-shared" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki-roots" version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "widestring" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = 
"winreg" version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", "windows-sys", ] hickory-resolver-0.24.0/Cargo.toml0000644000000113070000000000100124260ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.67.0" name = "hickory-resolver" version = "0.24.0" authors = ["The contributors to Hickory DNS"] description = """ Hickory DNS is a safe and secure DNS library. This Resolver library uses the Client library to perform all DNS queries. The Resolver is intended to be a high-level library for any DNS record resolution see Resolver and AsyncResolver for supported resolution types. The Client can be used for other queries. """ homepage = "https://hickory-dns.org/" documentation = "https://docs.rs/hickory-resolver" readme = "README.md" keywords = [ "DNS", "BIND", "dig", "named", "dnssec", ] categories = ["network-programming"] license = "MIT OR Apache-2.0" repository = "https://github.com/hickory-dns/hickory-dns" [package.metadata.docs.rs] all-features = true default-target = "x86_64-unknown-linux-gnu" rustdoc-args = [ "--cfg", "docsrs", ] targets = [ "x86_64-apple-darwin", "x86_64-pc-windows-msvc", ] [lib] name = "hickory_resolver" path = "src/lib.rs" [[example]] name = "custom_provider" required-features = ["tokio-runtime"] [[example]] name = "flush_cache" required-features = [ "tokio-runtime", "system-config", ] [[example]] name = "global_resolver" required-features = [ "tokio-runtime", "system-config", ] [[example]] name = "multithreaded_runtime" required-features = [ "tokio-runtime", "system-config", ] [dependencies.cfg-if] version = "1" [dependencies.futures-util] version = "0.3.5" features = ["std"] default-features = false [dependencies.hickory-proto] version = "0.24.0" default-features = false [dependencies.lru-cache] version = "0.1.2" [dependencies.once_cell] version = "1.18.0" [dependencies.parking_lot] version = "0.12" [dependencies.rand] version = "0.8" [dependencies.resolv-conf] version = "0.7.0" features = ["system"] optional = true [dependencies.rustls] version = "0.21.6" optional = true [dependencies.rustls-native-certs] version = "0.6.3" optional = true [dependencies.serde] version = "1.0" features = ["derive"] optional = true [dependencies.smallvec] version = "1.6" [dependencies.thiserror] version = "1.0.20" [dependencies.tokio] version = "1.21" optional = true [dependencies.tokio-native-tls] version = "0.3.0" optional = true [dependencies.tokio-openssl] version = "0.6.0" optional = true [dependencies.tokio-rustls] version = "0.24.0" optional = true [dependencies.tracing] version = "0.1.30" [dependencies.webpki-roots] version = "0.25.0" optional = true [dev-dependencies.futures-executor] version = "0.3.5" features = ["std"] default-features = false [dev-dependencies.tokio] version = "1.21" features = [ "macros", "test-util", ] [dev-dependencies.tracing-subscriber] version = "0.3" features = [ "std", "fmt", "env-filter", ] [features] default = [ "system-config", "tokio-runtime", ] 
dns-over-h3 = [ "dns-over-rustls", "hickory-proto/dns-over-h3", ] dns-over-https = ["hickory-proto/dns-over-https"] dns-over-https-rustls = [ "hickory-proto/dns-over-https-rustls", "dns-over-rustls", "dns-over-https", ] dns-over-native-tls = [ "dns-over-tls", "tokio-native-tls", "hickory-proto/dns-over-native-tls", ] dns-over-openssl = [ "dns-over-tls", "hickory-proto/dns-over-openssl", "tokio-openssl", ] dns-over-quic = [ "rustls/quic", "dns-over-rustls", "hickory-proto/dns-over-quic", ] dns-over-rustls = [ "dns-over-tls", "rustls", "tokio-rustls", "hickory-proto/dns-over-rustls", ] dns-over-tls = ["tokio-runtime"] dnssec = [] dnssec-openssl = [ "dnssec", "hickory-proto/dnssec-openssl", ] dnssec-ring = [ "dnssec", "hickory-proto/dnssec-ring", ] native-certs = [ "dep:rustls-native-certs", "hickory-proto/native-certs", ] serde-config = [ "serde", "hickory-proto/serde-config", ] system-config = [ "ipconfig", "resolv-conf", ] testing = [] tokio-runtime = [ "tokio/rt", "hickory-proto/tokio-runtime", ] webpki-roots = [ "dep:webpki-roots", "hickory-proto/webpki-roots", ] [target."cfg(windows)".dependencies.ipconfig] version = "0.3.0" optional = true [badges.codecov] branch = "main" repository = "hickory-dns/hickory-dns" service = "github" [badges.maintenance] status = "actively-developed" hickory-resolver-0.24.0/Cargo.toml.orig000064400000000000000000000111531046102023000161060ustar 00000000000000[package] name = "hickory-resolver" # A short blurb about the package. This is not rendered in any format when # uploaded to crates.io (aka this is not markdown) description = """ Hickory DNS is a safe and secure DNS library. This Resolver library uses the Client library to perform all DNS queries. The Resolver is intended to be a high-level library for any DNS record resolution see Resolver and AsyncResolver for supported resolution types. The Client can be used for other queries. """ # These URLs point to more information about the repository documentation = "https://docs.rs/hickory-resolver" # This points to a file in the repository (relative to this Cargo.toml). The # contents of this file are stored and indexed in the registry. 
readme = "README.md" version.workspace = true authors.workspace = true edition.workspace = true rust-version.workspace = true homepage.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [badges] #github-actions = { repository = "bluejekyll/hickory", branch = "main", workflow = "test" } codecov = { repository = "hickory-dns/hickory-dns", branch = "main", service = "github" } maintenance = { status = "actively-developed" } [features] default = ["system-config", "tokio-runtime"] #backtrace = ["dep:backtrace", "hickory-proto/backtrace"] dns-over-native-tls = [ "dns-over-tls", "tokio-native-tls", "hickory-proto/dns-over-native-tls", ] # DNS over TLS with OpenSSL currently needs a good way to set default CAs, use rustls or native-tls dns-over-openssl = [ "dns-over-tls", "hickory-proto/dns-over-openssl", "tokio-openssl", ] dns-over-rustls = [ "dns-over-tls", "rustls", "tokio-rustls", "hickory-proto/dns-over-rustls", ] dns-over-tls = ["tokio-runtime"] # This requires some TLS library, currently only rustls is supported dns-over-https-rustls = [ "hickory-proto/dns-over-https-rustls", "dns-over-rustls", "dns-over-https", ] dns-over-https = ["hickory-proto/dns-over-https"] dns-over-quic = [ "rustls/quic", "dns-over-rustls", "hickory-proto/dns-over-quic", ] dns-over-h3 = ["dns-over-rustls", "hickory-proto/dns-over-h3"] webpki-roots = ["dep:webpki-roots", "hickory-proto/webpki-roots"] native-certs = ["dep:rustls-native-certs", "hickory-proto/native-certs"] dnssec-openssl = ["dnssec", "hickory-proto/dnssec-openssl"] dnssec-ring = ["dnssec", "hickory-proto/dnssec-ring"] dnssec = [] serde-config = ["serde", "hickory-proto/serde-config"] system-config = ["ipconfig", "resolv-conf"] # # enables experimental the mDNS (multicast) feature # TODO: we will be revisiting how mdns is built into the resolver... 
#mdns = ["hickory-proto/mdns"] testing = [] tokio-runtime = ["tokio/rt", "hickory-proto/tokio-runtime"] [lib] name = "hickory_resolver" path = "src/lib.rs" [dependencies] #backtrace = { version = "0.3.50", optional = true } cfg-if.workspace = true futures-util = { workspace = true, default-features = false, features = [ "std", ] } lru-cache.workspace = true once_cell.workspace = true parking_lot.workspace = true rand.workspace = true resolv-conf = { workspace = true, optional = true, features = ["system"] } rustls = { workspace = true, optional = true } rustls-native-certs = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"], optional = true } smallvec.workspace = true thiserror.workspace = true tracing.workspace = true tokio = { workspace = true, optional = true } tokio-native-tls = { workspace = true, optional = true } tokio-openssl = { workspace = true, optional = true } tokio-rustls = { workspace = true, optional = true } hickory-proto = { workspace = true, default-features = false } webpki-roots = { workspace = true, optional = true } [target.'cfg(windows)'.dependencies] ipconfig = { workspace = true, optional = true } [dev-dependencies] futures-executor = { workspace = true, default-features = false, features = [ "std", ] } tokio = { workspace = true, features = ["macros", "test-util"] } tracing-subscriber = { workspace = true, features = [ "std", "fmt", "env-filter", ] } [package.metadata.docs.rs] all-features = true default-target = "x86_64-unknown-linux-gnu" targets = ["x86_64-apple-darwin", "x86_64-pc-windows-msvc"] rustdoc-args = ["--cfg", "docsrs"] [[example]] name = "custom_provider" required-features = ["tokio-runtime"] [[example]] name = "flush_cache" required-features = ["tokio-runtime", "system-config"] [[example]] name = "global_resolver" required-features = ["tokio-runtime", "system-config"] [[example]] name = "multithreaded_runtime" required-features = ["tokio-runtime", "system-config"] hickory-resolver-0.24.0/LICENSE-APACHE000064400000000000000000000261401046102023000151450ustar 00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. hickory-resolver-0.24.0/LICENSE-MIT000064400000000000000000000021151046102023000146510ustar 00000000000000Copyright (c) 2015 The Hickory DNS Developers Copyright (c) 2017 Google LLC. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
hickory-resolver-0.24.0/README.md000064400000000000000000000133341046102023000145010ustar 00000000000000
# Overview

Hickory DNS Resolver is a library which implements the DNS resolver using the Hickory DNS Proto library. This library contains implementations for IPv4 (A) and IPv6 (AAAA) resolution; more features are in the works. It is built on top of the [tokio](https://tokio.rs) async-io project, which allows it to be integrated into other systems using the tokio and futures libraries.

The Hickory DNS [project](https://github.com/hickory-dns/hickory-dns) contains other libraries for DNS: a [client library](https://crates.io/crates/hickory-client) for raw protocol usage, a [server library](https://crates.io/crates/hickory-server) for hosting zones, and variations on the TLS implementation over [rustls](https://crates.io/crates/hickory-dns-rustls) and [native-tls](https://crates.io/crates/hickory-dns-native-tls).

**NOTICE** This project was rebranded from Trust-DNS to Hickory DNS and has been moved to the https://github.com/hickory-dns/hickory-dns organization and repo; this crate/binary has been moved to [hickory-resolver](https://crates.io/crates/hickory-resolver) from `0.24` and onward. For prior versions see [trust-dns-resolver](https://crates.io/crates/trust-dns-resolver).

## Features

- Various IPv4 and IPv6 lookup strategies
- `/etc/resolv.conf` based configuration on Unix/Posix systems
- NameServer pools with performance based priority usage
- Caching of query results
- NxDomain/NoData caching (negative caching)
- DNSSEC validation
- Generic Record Type Lookup
- CNAME chain resolution
- _experimental_ mDNS support (enable with `mdns` feature)
- DNS over TLS (utilizing `native-tls`, `rustls`, and `openssl`; `native-tls` or `rustls` are recommended)
- DNS over HTTPS (currently only supports `rustls`)

## Example

```rust
use std::net::*;
use hickory_resolver::Resolver;
use hickory_resolver::config::*;

// Construct a new Resolver with default configuration options
let mut resolver = Resolver::new(ResolverConfig::default(), ResolverOpts::default()).unwrap();

// On Unix/Posix systems, this will read the /etc/resolv.conf
// let mut resolver = Resolver::from_system_conf().unwrap();

// Lookup the IP addresses associated with a name.
let mut response = resolver.lookup_ip("www.example.com.").unwrap();

// There can be many addresses associated with the name,
//  this can return IPv4 and/or IPv6 addresses
let address = response.iter().next().expect("no addresses returned!");
if address.is_ipv4() {
    assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34)));
} else {
    assert_eq!(address, IpAddr::V6(Ipv6Addr::new(0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946)));
}
```
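### Async example

A minimal async sketch using `TokioAsyncResolver` (the Tokio-based `AsyncResolver` alias exported by this crate) might look roughly like the following; it assumes the default `tokio-runtime` feature and a Tokio runtime provided by `#[tokio::main]`:

```rust
use hickory_resolver::TokioAsyncResolver;
use hickory_resolver::config::{ResolverConfig, ResolverOpts};

#[tokio::main]
async fn main() {
    // Construct a new AsyncResolver backed by the Tokio runtime
    let resolver = TokioAsyncResolver::tokio(ResolverConfig::default(), ResolverOpts::default());

    // Lookups are futures; awaiting them drives the query against the name server pool
    let response = resolver.lookup_ip("www.example.com.").await.unwrap();
    let address = response.iter().next().expect("no addresses returned!");
    println!("resolved to {address}");
}
```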
## DNS-over-TLS and DNS-over-HTTPS

DoT and DoH are supported. This is accomplished through the use of one of `native-tls`, `openssl`, or `rustls` (only `rustls` is currently supported for DoH). The Resolver only requires valid DoT or DoH resolvers to be registered in order to be used. To use with the `Client`, the `TlsClientConnection` or `HttpsClientConnection` should be used. Similarly, to use with the tokio `AsyncClient`, the `TlsClientStream` or `HttpsClientStream` should be used.

ClientAuth (mTLS) is currently not supported; there are some issues still being worked on. TLS is useful for server authentication and connection privacy.

To enable DoT, one of the features `dns-over-native-tls`, `dns-over-openssl`, or `dns-over-rustls` must be enabled; `dns-over-https-rustls` is used for DoH.

### Example

Enable the TLS library through the dependency on `hickory-resolver`:

```toml
hickory-resolver = { version = "*", features = ["dns-over-rustls"] }
```

A default TLS configuration is available for Cloudflare's `1.1.1.1` DNS service (Quad9 as well):

```rust
// Construct a new Resolver with default configuration options
let mut resolver = Resolver::new(ResolverConfig::cloudflare_tls(), ResolverOpts::default()).unwrap();

/// see example above...
```

## DNSSEC status

Currently the root key is hardcoded into the system. This gives validation of DNSKEY and DS records back to the root. NSEC is implemented, but not NSEC3. Because caching is not yet enabled, validating RRSIG records back to the root can require a significant number of additional queries, and it has been noticed that some DNS servers appear to rate limit the connections. Zones will be automatically resigned on any record updates via dynamic DNS.

To enable DNSSEC, one of the features `dnssec-openssl` or `dnssec-ring` must be enabled. A short sketch of turning validation on in the resolver options appears at the end of this README.

## Testing the resolver via CLI with resolve

Useful for testing hickory-resolver and its features via an independent CLI.

```shell
cargo install --bin resolve hickory-util
```

### example

```shell
$ resolve www.example.com.
Querying for www.example.com. A from udp:8.8.8.8:53, tcp:8.8.8.8:53, udp:8.8.4.4:53, tcp:8.8.4.4:53, udp:[2001:4860:4860::8888]:53, tcp:[2001:4860:4860::8888]:53, udp:[2001:4860:4860::8844]:53, tcp:[2001:4860:4860::8844]:53
Success for query name: www.example.com. type: A class: IN
        www.example.com. 21063 IN A 93.184.216.34
```

## Minimum Rust Version

The current minimum rustc version for this project is `1.67`.

## Versioning

Hickory DNS does its best to follow semver. Hickory DNS will be promoted to 1.0 upon stabilization of the publicly exposed APIs. This does not mean that Hickory DNS will necessarily break on upgrades between 0.x updates. Whenever possible, old APIs will be deprecated with notes on what replaced those deprecations. Hickory DNS will make a best effort to never break software which depends on it due to API changes, though this cannot be guaranteed. Deprecated interfaces will be maintained for at minimum one major release after the one in which they were deprecated (where possible), with the exception of the upgrade to 1.0, where all deprecated interfaces will be planned to be removed.
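As noted in the DNSSEC status section above, validation is switched on through the `validate` field of `ResolverOpts` (the same flag exercised by this crate's own tests in `src/async_resolver.rs`). The sketch below is illustrative only and assumes one of the `dnssec-openssl`/`dnssec-ring` features is enabled; without a `dnssec` feature the flag only logs a warning and lookups proceed unvalidated:

```rust
use hickory_resolver::TokioAsyncResolver;
use hickory_resolver::config::{ResolverConfig, ResolverOpts};

#[tokio::main]
async fn main() {
    // Turn on DNSSEC validation (effective only when a `dnssec-*` feature is enabled)
    let mut opts = ResolverOpts::default();
    opts.validate = true;

    let resolver = TokioAsyncResolver::tokio(ResolverConfig::default(), opts);

    // Answers from signed zones are validated back to the hardcoded root key;
    // unsigned zones surface as errors (see `sec_lookup_fails_test` in the sources).
    match resolver.lookup_ip("www.example.com.").await {
        Ok(lookup) => println!("validated addresses: {:?}", lookup.iter().collect::<Vec<_>>()),
        Err(err) => println!("lookup failed: {err}"),
    }
}
```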
hickory-resolver-0.24.0/examples/custom_provider.rs000064400000000000000000000066131046102023000206340ustar 00000000000000#![recursion_limit = "128"] #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] use { hickory_resolver::config::{ResolverConfig, ResolverOpts}, hickory_resolver::name_server::{ConnectionProvider, GenericConnector, RuntimeProvider}, hickory_resolver::proto::iocompat::AsyncIoTokioAsStd, hickory_resolver::proto::TokioTime, hickory_resolver::{AsyncResolver, TokioHandle}, std::future::Future, std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, std::pin::Pin, tokio::net::{TcpStream, UdpSocket}, }; #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] #[derive(Clone, Default)] struct PrintProvider { handle: TokioHandle, } #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] impl RuntimeProvider for PrintProvider { type Handle = TokioHandle; type Timer = TokioTime; type Udp = UdpSocket; type Tcp = AsyncIoTokioAsStd; fn create_handle(&self) -> Self::Handle { self.handle.clone() } fn connect_tcp( &self, server_addr: SocketAddr, ) -> Pin>>> { println!("Create tcp server_addr: {}", server_addr); Box::pin(async move { let tcp = TcpStream::connect(server_addr).await?; Ok(AsyncIoTokioAsStd(tcp)) }) } fn bind_udp( &self, local_addr: SocketAddr, server_addr: SocketAddr, ) -> Pin>>> { // The server_addr parameter is used only when you need to establish a tunnel or something similar. // For example, you try to use a http proxy and encapsulate UDP packets inside a TCP stream. println!( "Create udp local_addr: {}, server_addr: {}", local_addr, server_addr ); Box::pin(UdpSocket::bind(local_addr)) } } #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] async fn lookup_test(resolver: AsyncResolver) { let response = resolver.lookup_ip("www.example.com.").await.unwrap(); // There can be many addresses associated with the name, // this can return IPv4 and/or IPv6 addresses let address = response.iter().next().expect("no addresses returned!"); if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946 )) ); } } #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] #[tokio::main] async fn main() { let resolver = AsyncResolver::new( ResolverConfig::google(), ResolverOpts::default(), GenericConnector::new(PrintProvider::default()), ); lookup_test(resolver).await; #[cfg(feature = "dns-over-https-rustls")] { let resolver2 = AsyncResolver::new( ResolverConfig::cloudflare_https(), ResolverOpts::default(), GenericConnector::new(PrintProvider::default()), ); lookup_test(resolver2).await; } println!("Hello, world!"); } #[cfg(not(any(feature = "webpki-roots", feature = "native-certs")))] fn main() { println!("either `webpki-roots` or `native-certs` feature must be enabled") } #[test] fn test_custom_provider() { main() } hickory-resolver-0.24.0/examples/flush_cache.rs000064400000000000000000000057441046102023000176600ustar 00000000000000#![recursion_limit = "128"] fn main() { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(async { tokio_main().await; }); } async fn tokio_main() { use hickory_resolver::{name_server::TokioConnectionProvider, TokioAsyncResolver}; let resolver = { // To make this independent, if targeting macOS, BSD, Linux, or Windows, we can use the system's configuration: #[cfg(any(unix, windows))] { // use the system resolver configuration 
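            // Note: `from_system_conf` requires the `system-config` feature; it reads
            // /etc/resolv.conf on Unix OSes and the registry on Windows, and it returns
            // a `Result`, which is why this block is followed by `.expect(..)` below.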
TokioAsyncResolver::from_system_conf(TokioConnectionProvider::default()) } // For other operating systems, we can use one of the preconfigured definitions #[cfg(not(any(unix, windows)))] { // Directly reference the config types use hickory_resolver::config::{ResolverConfig, ResolverOpts}; // Get a new resolver with the google nameservers as the upstream recursive resolvers AsyncResolver::tokio( ResolverConfig::quad9(), ResolverOpts::default(), //runtime.handle().clone(), ) } } .map(std::sync::Arc::new) .expect("failed to create resolver"); // Create some futures representing name lookups. let names = ["hickory-dns.org.", "estada.ch.", "wikipedia.org."]; let first_resolve = resolve_list(&names, &*resolver).await; let cached_resolve = resolve_list(&names, &*resolver).await; resolver.clear_cache(); let second_resolve = resolve_list(&names, &*resolver).await; println!("first_resolve: {first_resolve:?}"); println!("cached_resolve: {cached_resolve:?}"); println!("second_resolve: {second_resolve:?}"); // Drop the resolver, which means that the runtime will become idle. drop(resolver); } async fn resolve_list( names: &[&str], resolver: &hickory_resolver::AsyncResolver
    <hickory_resolver::name_server::TokioConnectionProvider>
, ) -> tokio::time::Duration { use tokio::time::Instant; let start_time = Instant::now(); // Create the resolve requests first let futures = names .iter() .map(|name: &&str| { let name: String = name.to_string(); let resolver = resolver.clone(); let future = { let name = name.clone(); tokio::spawn(async move { resolver.txt_lookup(name).await }) }; (name, future) }) .collect::>(); // Go through the list of resolution operations in parallel and wait for them to complete. for (name, lookup) in futures { let txts = lookup.await.expect("unable to spawn resolver").map(|txt| { txt.iter() .map(|rdata| rdata.to_string()) .collect::>() }); println!(" {name} returned to {txts:?}"); } println!(); start_time.elapsed() } #[test] fn test_flush_cache() { main() } hickory-resolver-0.24.0/examples/global_resolver.rs000064400000000000000000000116571046102023000205750ustar 00000000000000#![recursion_limit = "128"] use { futures_util::future, hickory_resolver::name_server::TokioConnectionProvider, hickory_resolver::TokioAsyncResolver, hickory_resolver::{IntoName, TryParseIp}, once_cell::sync::Lazy, std::fmt::Display, std::io, std::net::SocketAddr, std::task::Poll, }; // This is an example of registering a static global resolver into any system. // // We may want to create a GlobalResolver as part of the Resolver library // in the mean time, this example has the necessary steps to do so. // // Thank you to @zonyitoo for the original example. // TODO: this example can probably be made much simpler with the new // `AsyncResolver`. // First we need to setup the global Resolver static GLOBAL_DNS_RESOLVER: Lazy = Lazy::new(|| { use std::sync::{Arc, Condvar, Mutex}; use std::thread; // We'll be using this condvar to get the Resolver from the thread... let pair = Arc::new((Mutex::new(None::), Condvar::new())); let pair2 = pair.clone(); // Spawn the runtime to a new thread... // // This thread will manage the actual resolution runtime thread::spawn(move || { // A runtime for this new thread let runtime = tokio::runtime::Runtime::new().expect("failed to launch Runtime"); // our platform independent future, result, see next blocks let resolver = { // To make this independent, if targeting macOS, BSD, Linux, or Windows, we can use the system's configuration: #[cfg(any(unix, windows))] { // use the system resolver configuration TokioAsyncResolver::from_system_conf(TokioConnectionProvider::default()) } // For other operating systems, we can use one of the preconfigured definitions #[cfg(not(any(unix, windows)))] { // Directly reference the config types use hickory_resolver::config::{ResolverConfig, ResolverOpts}; // Get a new resolver with the google nameservers as the upstream recursive resolvers TokioAsyncResolver::new( ResolverConfig::google(), ResolverOpts::default(), runtime.handle().clone(), ) } }; let (lock, cvar) = &*pair2; let mut started = lock.lock().unwrap(); let resolver = resolver.expect("failed to create hickory-resolver"); *started = Some(resolver); cvar.notify_one(); drop(started); runtime.block_on(future::poll_fn(|_cx| Poll::<()>::Pending)) }); // Wait for the thread to start up. let (lock, cvar) = &*pair; let mut resolver = lock.lock().unwrap(); while resolver.is_none() { resolver = cvar.wait(resolver).unwrap(); } // take the started resolver let resolver = resolver.take(); // set the global resolver resolver.expect("resolver should not be none") }); /// Provide a general purpose resolution function. 
/// /// This looks up the `host` (a `&str` or `String` is good), and combines that with the provided port /// this mimics the lookup functions of `std::net`. pub async fn resolve( host: N, port: u16, ) -> io::Result> { // Now we use the global resolver to perform a lookup_ip. let name = host.to_string(); let result = GLOBAL_DNS_RESOLVER.lookup_ip(host).await; // map the result into what we want... result .map_err(move |err| { // we transform the error into a standard IO error for convenience io::Error::new( io::ErrorKind::AddrNotAvailable, format!("dns resolution error for {name}: {err}"), ) }) .map(move |lookup_ip| { // we take all the IPs returned, and then send back the set of IPs lookup_ip .iter() .map(|ip| SocketAddr::new(ip, port)) .collect::>() }) } fn main() { use std::thread; // Let's resolve some names, we should be able to do it across threads let names = &["www.google.com", "www.reddit.com", "www.wikipedia.org"]; // spawn all the threads to do the lookups let threads = names .iter() .map(|name| { let join = thread::spawn(move || { let runtime = tokio::runtime::Runtime::new().expect("failed to launch Runtime"); runtime.block_on(resolve(*name, 443)) }); (name, join) }) .collect::>(); // print the resolved IPs for (name, join) in threads { let result = join .join() .expect("resolution thread failed") .expect("resolution failed"); println!("{name} resolved to {result:?}"); } } #[test] fn test_global_resolver() { main() } hickory-resolver-0.24.0/examples/multithreaded_runtime.rs000064400000000000000000000042521046102023000220030ustar 00000000000000#![recursion_limit = "128"] //! This example shows how to create a resolver that uses the tokio multithreaded runtime. This is how //! you might integrate the resolver into a more complex application. fn main() { use hickory_resolver::{name_server::TokioConnectionProvider, TokioAsyncResolver}; use tokio::runtime::Runtime; tracing_subscriber::fmt::init(); // Set up the standard tokio runtime (multithreaded by default). let runtime = Runtime::new().expect("Failed to create runtime"); let resolver = { // To make this independent, if targeting macOS, BSD, Linux, or Windows, we can use the system's configuration: #[cfg(any(unix, windows))] { // use the system resolver configuration TokioAsyncResolver::from_system_conf(TokioConnectionProvider::default()) } // For other operating systems, we can use one of the preconfigured definitions #[cfg(not(any(unix, windows)))] { // Directly reference the config types use hickory_resolver::config::{ResolverConfig, ResolverOpts}; // Get a new resolver with the google nameservers as the upstream recursive resolvers AsyncResolver::new( ResolverConfig::google(), ResolverOpts::default(), runtime.handle().clone(), ) } } .expect("failed to create resolver"); // Create some futures representing name lookups. let names = &["www.google.com", "www.reddit.com", "www.wikipedia.org"]; let mut futures = names .iter() .map(|name| (name, resolver.lookup_ip(*name))) .collect::>(); // Go through the list of resolution operations and wait for them to complete. for (name, lookup) in futures.drain(..) { let ips = runtime .block_on(lookup) .expect("Failed completing lookup future") .iter() .collect::>(); println!("{name} resolved to {ips:?}"); } // Drop the resolver, which means that the runtime will become idle. 
drop(futures); drop(resolver); } #[test] fn test_multithreaded_runtime() { main() } hickory-resolver-0.24.0/src/async_resolver.rs000064400000000000000000001335211046102023000174160ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Structs for creating and using a AsyncResolver use std::fmt; use std::net::IpAddr; use std::sync::Arc; use proto::error::ProtoResult; use proto::op::Query; use proto::rr::domain::usage::ONION; use proto::rr::domain::TryParseIp; use proto::rr::{IntoName, Name, Record, RecordType}; use proto::xfer::{DnsRequestOptions, RetryDnsHandle}; use tracing::{debug, trace}; use crate::caching_client::CachingClient; use crate::config::{ResolverConfig, ResolverOpts}; use crate::dns_lru::{self, DnsLru}; use crate::error::*; use crate::lookup::{self, Lookup, LookupEither, LookupFuture}; use crate::lookup_ip::{LookupIp, LookupIpFuture}; #[cfg(feature = "tokio-runtime")] use crate::name_server::TokioConnectionProvider; use crate::name_server::{ConnectionProvider, NameServerPool}; use crate::Hosts; /// An asynchronous resolver for DNS generic over async Runtimes. /// /// Creating a `AsyncResolver` returns a new handle and a future that should /// be spawned on an executor to drive the background work. The lookup methods /// on `AsyncResolver` request lookups from the background task. /// /// The futures returned by a `AsyncResolver` and the corresponding background /// task need not be spawned on the same executor, or be in the same thread. /// Additionally, one background task may have any number of handles; calling /// `clone()` on a handle will create a new handle linked to the same /// background task. /// /// *NOTE* If lookup futures returned by a `AsyncResolver` and the background /// future are spawned on two separate `CurrentThread` executors, one thread /// cannot run both executors simultaneously, so the `run` or `block_on` /// functions will cause the thread to deadlock. If both the background work /// and the lookup futures are intended to be run on the same thread, they /// should be spawned on the same executor. /// /// The background task manages the name server pool and other state used /// to drive lookups. When this future is spawned on an executor, it will /// first construct and configure the necessary client state, before checking /// for any incoming lookup requests, handling them, and yielding. It will /// continue to do so as long as there are still any [`AsyncResolver`] handle /// linked to it. When all of its [`AsyncResolver`]s have been dropped, the /// background future will finish. #[derive(Clone)] pub struct AsyncResolver { config: ResolverConfig, options: ResolverOpts, client_cache: CachingClient, ResolveError>, hosts: Option>, } /// An AsyncResolver used with Tokio #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub type TokioAsyncResolver = AsyncResolver; macro_rules! lookup_fn { ($p:ident, $l:ty, $r:path) => { /// Performs a lookup for the associated type. /// /// *hint* queries that end with a '.' 
are fully qualified names and are cheaper lookups /// /// # Arguments /// /// * `query` - a string which parses to a domain name, failure to parse will return an error pub async fn $p(&self, query: N) -> Result<$l, ResolveError> { let name = match query.into_name() { Ok(name) => name, Err(err) => { return Err(err.into()); } }; self.inner_lookup(name, $r, self.request_options()).await } }; ($p:ident, $l:ty, $r:path, $t:ty) => { /// Performs a lookup for the associated type. /// /// # Arguments /// /// * `query` - a type which can be converted to `Name` via `From`. pub async fn $p(&self, query: $t) -> Result<$l, ResolveError> { let name = Name::from(query); self.inner_lookup(name, $r, self.request_options()).await } }; } #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] impl TokioAsyncResolver { /// Construct a new Tokio based `AsyncResolver` with the provided configuration. /// /// # Arguments /// /// * `config` - configuration, name_servers, etc. for the Resolver /// * `options` - basic lookup options for the resolver /// /// # Returns /// /// A tuple containing the new `AsyncResolver` and a future that drives the /// background task that runs resolutions for the `AsyncResolver`. See the /// documentation for `AsyncResolver` for more information on how to use /// the background future. pub fn tokio(config: ResolverConfig, options: ResolverOpts) -> Self { Self::new(config, options, TokioConnectionProvider::default()) } /// Constructs a new Tokio based Resolver with the system configuration. /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] #[cfg_attr( docsrs, doc(cfg(all(feature = "system-config", any(unix, target_os = "windows")))) )] pub fn tokio_from_system_conf() -> Result { Self::from_system_conf(TokioConnectionProvider::default()) } } impl AsyncResolver { /// Construct a new generic `AsyncResolver` with the provided configuration. /// /// see [TokioAsyncResolver::tokio(..)] instead. /// /// # Arguments /// /// * `config` - configuration, name_servers, etc. for the Resolver /// * `options` - basic lookup options for the resolver /// /// # Returns /// /// A tuple containing the new `AsyncResolver` and a future that drives the /// background task that runs resolutions for the `AsyncResolver`. See the /// documentation for `AsyncResolver` for more information on how to use /// the background future. pub fn new(config: ResolverConfig, options: ResolverOpts, provider: R) -> Self { Self::new_with_conn(config, options, provider) } /// Constructs a new Resolver with the system configuration. /// /// see [TokioAsyncResolver::tokio_from_system_conf(..)] instead. /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] #[cfg_attr( docsrs, doc(cfg(all(feature = "system-config", any(unix, target_os = "windows")))) )] pub fn from_system_conf(runtime: R) -> Result { Self::from_system_conf_with_provider(runtime) } /// Flushes/Removes all entries from the cache pub fn clear_cache(&self) { self.client_cache.clear_cache(); } } impl AsyncResolver

{ /// Construct a new `AsyncResolver` with the provided configuration. /// /// # Arguments /// /// * `config` - configuration, name_servers, etc. for the Resolver /// * `options` - basic lookup options for the resolver /// /// # Returns /// /// A tuple containing the new `AsyncResolver` and a future that drives the /// background task that runs resolutions for the `AsyncResolver`. See the /// documentation for `AsyncResolver` for more information on how to use /// the background future. pub fn new_with_conn(config: ResolverConfig, options: ResolverOpts, conn_provider: P) -> Self { let pool = NameServerPool::from_config_with_provider(&config, options.clone(), conn_provider); let either; let client = RetryDnsHandle::new(pool, options.attempts); if options.validate { #[cfg(feature = "dnssec")] { use proto::xfer::DnssecDnsHandle; either = LookupEither::Secure(DnssecDnsHandle::new(client)); } #[cfg(not(feature = "dnssec"))] { // TODO: should this just be a panic, or a pinned error? tracing::warn!("validate option is only available with 'dnssec' feature"); either = LookupEither::Retry(client); } } else { either = LookupEither::Retry(client); } let hosts = if options.use_hosts_file { Some(Arc::new(Hosts::new())) } else { None }; trace!("handle passed back"); let lru = DnsLru::new(options.cache_size, dns_lru::TtlConfig::from_opts(&options)); Self { config, client_cache: CachingClient::with_cache(lru, either, options.preserve_intermediates), options, hosts, } } /// Constructs a new Resolver with the system configuration. /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] #[cfg_attr( docsrs, doc(cfg(all(feature = "system-config", any(unix, target_os = "windows")))) )] pub fn from_system_conf_with_provider(conn_provider: P) -> Result { let (config, options) = super::system_conf::read_system_conf()?; Ok(Self::new_with_conn(config, options, conn_provider)) } /// Per request options based on the ResolverOpts pub(crate) fn request_options(&self) -> DnsRequestOptions { let mut request_opts = DnsRequestOptions::default(); request_opts.recursion_desired = self.options.recursion_desired; request_opts.use_edns = self.options.edns0; request_opts } /// Generic lookup for any RecordType /// /// *WARNING* this interface may change in the future, see if one of the specializations would be better. 
/// /// # Arguments /// /// * `name` - name of the record to lookup, if name is not a valid domain name, an error will be returned /// * `record_type` - type of record to lookup, all RecordData responses will be filtered to this type /// /// # Returns /// // A future for the returned Lookup RData pub async fn lookup( &self, name: N, record_type: RecordType, ) -> Result { let name = match name.into_name() { Ok(name) => name, Err(err) => return Err(err.into()), }; self.inner_lookup(name, record_type, self.request_options()) .await } fn push_name(name: Name, names: &mut Vec) { if !names.contains(&name) { names.push(name); } } fn build_names(&self, name: Name) -> Vec { // if it's fully qualified, we can short circuit the lookup logic if name.is_fqdn() || ONION.zone_of(&name) && name .trim_to(2) .iter() .next() .map(|name| name.len() == 56) // size of onion v3 address .unwrap_or(false) { // if already fully qualified, or if onion address, don't assume it might be a // sub-domain vec![name] } else { // Otherwise we have to build the search list // Note: the vec is built in reverse order of precedence, for stack semantics let mut names = Vec::::with_capacity(1 /*FQDN*/ + 1 /*DOMAIN*/ + self.config.search().len()); // if not meeting ndots, we always do the raw name in the final lookup, or it's a localhost... let raw_name_first: bool = name.num_labels() as usize > self.options.ndots || name.is_localhost(); // if not meeting ndots, we always do the raw name in the final lookup if !raw_name_first { names.push(name.clone()); } for search in self.config.search().iter().rev() { let name_search = name.clone().append_domain(search); match name_search { Ok(name_search) => Self::push_name(name_search, &mut names), Err(e) => debug!( "Not adding {} to {} for search due to error: {}", search, name, e ), } } if let Some(domain) = self.config.domain() { let name_search = name.clone().append_domain(domain); match name_search { Ok(name_search) => Self::push_name(name_search, &mut names), Err(e) => debug!( "Not adding {} to {} for search due to error: {}", domain, name, e ), } } // this is the direct name lookup if raw_name_first { // adding the name as though it's an FQDN for lookup names.push(name); } names } } pub(crate) async fn inner_lookup( &self, name: Name, record_type: RecordType, options: DnsRequestOptions, ) -> Result where L: From + Send + 'static, { let names = self.build_names(name); LookupFuture::lookup(names, record_type, options, self.client_cache.clone()) .await .map(L::from) } /// Performs a dual-stack DNS lookup for the IP for the given hostname. /// /// See the configuration and options parameters for controlling the way in which A(Ipv4) and AAAA(Ipv6) lookups will be performed. For the least expensive query a fully-qualified-domain-name, FQDN, which ends in a final `.`, e.g. `www.example.com.`, will only issue one query. Anything else will always incur the cost of querying the `ResolverConfig::domain` and `ResolverConfig::search`. /// /// # Arguments /// * `host` - string hostname, if this is an invalid hostname, an error will be returned. pub async fn lookup_ip( &self, host: N, ) -> Result { let mut finally_ip_addr: Option = None; let maybe_ip = host.try_parse_ip(); let maybe_name: ProtoResult = host.into_name(); // if host is a ip address, return directly. 
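        // `maybe_ip` comes from `try_parse_ip()` just above: for an IP literal with
        // ndots <= 4 a synthetic record is returned immediately below (no network query);
        // with ndots > 4 the literal is only kept as a fallback (`finally_ip_addr`) while
        // the search list is consulted first.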
if let Some(ip_addr) = maybe_ip { let name = maybe_name.clone().unwrap_or_default(); let record = Record::from_rdata(name.clone(), dns_lru::MAX_TTL, ip_addr.clone()); // if ndots are greater than 4, then we can't assume the name is an IpAddr // this accepts IPv6 as well, b/c IPv6 can take the form: 2001:db8::198.51.100.35 // but `:` is not a valid DNS character, so technically this will fail parsing. // TODO: should we always do search before returning this? if self.options.ndots > 4 { finally_ip_addr = Some(record); } else { let query = Query::query(name, ip_addr.record_type()); let lookup = Lookup::new_with_max_ttl(query, Arc::from([record])); return Ok(lookup.into()); } } let name = match (maybe_name, finally_ip_addr.as_ref()) { (Ok(name), _) => name, (Err(_), Some(ip_addr)) => { // it was a valid IP, return that... let query = Query::query(ip_addr.name().clone(), ip_addr.record_type()); let lookup = Lookup::new_with_max_ttl(query, Arc::from([ip_addr.clone()])); return Ok(lookup.into()); } (Err(err), None) => { return Err(err.into()); } }; let names = self.build_names(name); let hosts = self.hosts.as_ref().cloned(); LookupIpFuture::lookup( names, self.options.ip_strategy, self.client_cache.clone(), self.request_options(), hosts, finally_ip_addr.and_then(Record::into_data), ) .await } /// Customizes the static hosts used in this resolver. pub fn set_hosts(&mut self, hosts: Option) { self.hosts = hosts.map(Arc::new); } lookup_fn!( reverse_lookup, lookup::ReverseLookup, RecordType::PTR, IpAddr ); lookup_fn!(ipv4_lookup, lookup::Ipv4Lookup, RecordType::A); lookup_fn!(ipv6_lookup, lookup::Ipv6Lookup, RecordType::AAAA); lookup_fn!(mx_lookup, lookup::MxLookup, RecordType::MX); lookup_fn!(ns_lookup, lookup::NsLookup, RecordType::NS); lookup_fn!(soa_lookup, lookup::SoaLookup, RecordType::SOA); lookup_fn!(srv_lookup, lookup::SrvLookup, RecordType::SRV); lookup_fn!(tlsa_lookup, lookup::TlsaLookup, RecordType::TLSA); lookup_fn!(txt_lookup, lookup::TxtLookup, RecordType::TXT); } impl fmt::Debug for AsyncResolver

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("AsyncResolver") .field("request_tx", &"...") .finish() } } /// Unit tests compatible with different runtime. #[cfg(any(test, feature = "testing"))] #[cfg_attr(docsrs, doc(cfg(feature = "testing")))] #[allow(dead_code, unreachable_pub)] pub mod testing { use std::{net::*, str::FromStr}; use crate::config::{LookupIpStrategy, NameServerConfig, ResolverConfig, ResolverOpts}; use crate::name_server::ConnectionProvider; use crate::AsyncResolver; use proto::{rr::Name, Executor}; /// Test IP lookup from URLs. pub fn lookup_test( config: ResolverConfig, mut exec: E, handle: R, ) { let resolver = AsyncResolver::::new(config, ResolverOpts::default(), handle); let response = exec .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } /// Test IP lookup from IP literals. pub fn ip_lookup_test(mut exec: E, handle: R) { let resolver = AsyncResolver::::new(ResolverConfig::default(), ResolverOpts::default(), handle); let response = exec .block_on(resolver.lookup_ip("10.1.0.2")) .expect("failed to run lookup"); assert_eq!( Some(IpAddr::V4(Ipv4Addr::new(10, 1, 0, 2))), response.iter().next() ); let response = exec .block_on(resolver.lookup_ip("2606:2800:220:1:248:1893:25c8:1946")) .expect("failed to run lookup"); assert_eq!( Some(IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, ))), response.iter().next() ); } /// Test IP lookup from IP literals across threads. pub fn ip_lookup_across_threads_test( handle: R, ) { // Test ensuring that running the background task on a separate // executor in a separate thread from the futures returned by the // AsyncResolver works correctly. use std::thread; let resolver = AsyncResolver::::new(ResolverConfig::default(), ResolverOpts::default(), handle); let resolver_one = resolver.clone(); let resolver_two = resolver; let test_fn = |resolver: AsyncResolver| { let mut exec = E::new(); let response = exec .block_on(resolver.lookup_ip("10.1.0.2")) .expect("failed to run lookup"); assert_eq!( Some(IpAddr::V4(Ipv4Addr::new(10, 1, 0, 2))), response.iter().next() ); let response = exec .block_on(resolver.lookup_ip("2606:2800:220:1:248:1893:25c8:1946")) .expect("failed to run lookup"); assert_eq!( Some(IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, ))), response.iter().next() ); }; let thread_one = thread::spawn(move || { test_fn(resolver_one); }); let thread_two = thread::spawn(move || { test_fn(resolver_two); }); thread_one.join().expect("thread_one failed"); thread_two.join().expect("thread_two failed"); } /// Test IP lookup from URLs with DNSSEC validation. #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn sec_lookup_test( mut exec: E, handle: R, ) { //env_logger::try_init().ok(); let resolver = AsyncResolver::new( ResolverConfig::default(), ResolverOpts { validate: true, try_tcp_on_error: true, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); // TODO: this test is flaky, sometimes 1 is returned, sometimes 2... 
//assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } /// Test IP lookup from domains that exist but unsigned with DNSSEC validation. #[allow(deprecated)] #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn sec_lookup_fails_test( mut exec: E, handle: R, ) { use crate::error::*; use proto::rr::RecordType; let resolver = AsyncResolver::new( ResolverConfig::default(), ResolverOpts { validate: true, ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); // needs to be a domain that exists, but is not signed (eventually this will be) let response = exec.block_on(resolver.lookup_ip("hickory-dns.org.")); assert!(response.is_err()); let error = response.unwrap_err(); use proto::error::{ProtoError, ProtoErrorKind}; let error_str = format!("{error}"); let name = Name::from_str("hickory-dns.org.").unwrap(); let expected_str = format!( "{}", ResolveError::from(ProtoError::from(ProtoErrorKind::RrsigsNotPresent { name, record_type: RecordType::A })) ); assert_eq!(error_str, expected_str); if let ResolveErrorKind::Proto(_) = *error.kind() { } else { panic!("wrong error") } } /// Test AsyncResolver created from system configuration with IP lookup. #[cfg(feature = "system-config")] #[cfg_attr(docsrs, doc(cfg(feature = "system-config")))] pub fn system_lookup_test( mut exec: E, handle: R, ) { let resolver = AsyncResolver::::from_system_conf(handle).expect("failed to create resolver"); let response = exec .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 2); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } /// Test AsyncResolver created from system configuration with host lookups. #[cfg(feature = "system-config")] #[cfg_attr(docsrs, doc(cfg(feature = "system-config")))] pub fn hosts_lookup_test( mut exec: E, handle: R, ) { let resolver = AsyncResolver::::from_system_conf(handle).expect("failed to create resolver"); let response = exec .block_on(resolver.lookup_ip("a.com")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(10, 1, 0, 104))); } else { panic!("failed to run lookup"); } } } /// Test fqdn. pub fn fqdn_test(mut exec: E, handle: R) { let domain = Name::from_str("incorrect.example.com.").unwrap(); let search = vec![ Name::from_str("bad.example.com.").unwrap(), Name::from_str("wrong.example.com.").unwrap(), ]; let name_servers: Vec = ResolverConfig::default().name_servers().to_owned(); let resolver = AsyncResolver::::new( ResolverConfig::from_parts(Some(domain), search, name_servers), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { panic!("should only be looking up IPv4"); } } } /// Test ndots with non-fqdn. 
pub fn ndots_test(mut exec: E, handle: R) { let domain = Name::from_str("incorrect.example.com.").unwrap(); let search = vec![ Name::from_str("bad.example.com.").unwrap(), Name::from_str("wrong.example.com.").unwrap(), ]; let name_servers: Vec = ResolverConfig::default().name_servers().to_owned(); let resolver = AsyncResolver::::new( ResolverConfig::from_parts(Some(domain), search, name_servers), ResolverOpts { // our name does have 2, the default should be fine, let's just narrow the test criteria a bit. ndots: 2, ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); // notice this is not a FQDN, no trailing dot. let response = exec .block_on(resolver.lookup_ip("www.example.com")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { panic!("should only be looking up IPv4"); } } } /// Test large ndots with non-fqdn. pub fn large_ndots_test( mut exec: E, handle: R, ) { let domain = Name::from_str("incorrect.example.com.").unwrap(); let search = vec![ Name::from_str("bad.example.com.").unwrap(), Name::from_str("wrong.example.com.").unwrap(), ]; let name_servers: Vec = ResolverConfig::default().name_servers().to_owned(); let resolver = AsyncResolver::::new( ResolverConfig::from_parts(Some(domain), search, name_servers), ResolverOpts { // matches kubernetes default ndots: 5, ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); // notice this is not a FQDN, no trailing dot. let response = exec .block_on(resolver.lookup_ip("www.example.com")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { panic!("should only be looking up IPv4"); } } } /// Test domain search. pub fn domain_search_test( mut exec: E, handle: R, ) { //env_logger::try_init().ok(); // domain is good now, should be combined with the name to form www.example.com let domain = Name::from_str("example.com.").unwrap(); let search = vec![ Name::from_str("bad.example.com.").unwrap(), Name::from_str("wrong.example.com.").unwrap(), ]; let name_servers: Vec = ResolverConfig::default().name_servers().to_owned(); let resolver = AsyncResolver::::new( ResolverConfig::from_parts(Some(domain), search, name_servers), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); // notice no dots, should not trigger ndots rule let response = exec .block_on(resolver.lookup_ip("www")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { panic!("should only be looking up IPv4"); } } } /// Test search lists. pub fn search_list_test( mut exec: E, handle: R, ) { let domain = Name::from_str("incorrect.example.com.").unwrap(); let search = vec![ // let's skip one search domain to test the loop... 
Name::from_str("bad.example.com.").unwrap(), // this should combine with the search name to form www.example.com Name::from_str("example.com.").unwrap(), ]; let name_servers: Vec = ResolverConfig::default().name_servers().to_owned(); let resolver = AsyncResolver::::new( ResolverConfig::from_parts(Some(domain), search, name_servers), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ..ResolverOpts::default() }, handle, ); // notice no dots, should not trigger ndots rule let response = exec .block_on(resolver.lookup_ip("www")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { panic!("should only be looking up IPv4"); } } } /// Test idna. pub fn idna_test(mut exec: E, handle: R) { let resolver = AsyncResolver::::new(ResolverConfig::default(), ResolverOpts::default(), handle); let response = exec .block_on(resolver.lookup_ip("中国.icom.museum.")) .expect("failed to run lookup"); // we just care that the request succeeded, not about the actual content // it's not certain that the ip won't change. assert!(response.iter().next().is_some()); } /// Test ipv4 localhost. pub fn localhost_ipv4_test( mut exec: E, handle: R, ) { let resolver = AsyncResolver::::new( ResolverConfig::default(), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4thenIpv6, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("localhost")) .expect("failed to run lookup"); let mut iter = response.iter(); assert_eq!( iter.next().expect("no A"), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)) ); } /// Test ipv6 localhost. pub fn localhost_ipv6_test( mut exec: E, handle: R, ) { let resolver = AsyncResolver::::new( ResolverConfig::default(), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv6thenIpv4, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("localhost")) .expect("failed to run lookup"); let mut iter = response.iter(); assert_eq!( iter.next().expect("no AAAA"), IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1,)) ); } /// Test ipv4 search with large ndots. pub fn search_ipv4_large_ndots_test( mut exec: E, handle: R, ) { let mut config = ResolverConfig::default(); config.add_search(Name::from_str("example.com").unwrap()); let resolver = AsyncResolver::::new( config, ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ndots: 5, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("198.51.100.35")) .expect("failed to run lookup"); let mut iter = response.iter(); assert_eq!( iter.next().expect("no rdatas"), IpAddr::V4(Ipv4Addr::new(198, 51, 100, 35)) ); } /// Test ipv6 search with large ndots. pub fn search_ipv6_large_ndots_test( mut exec: E, handle: R, ) { let mut config = ResolverConfig::default(); config.add_search(Name::from_str("example.com").unwrap()); let resolver = AsyncResolver::::new( config, ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ndots: 5, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("2001:db8::c633:6423")) .expect("failed to run lookup"); let mut iter = response.iter(); assert_eq!( iter.next().expect("no rdatas"), IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0xc633, 0x6423)) ); } /// Test ipv6 name parse fails. 
pub fn search_ipv6_name_parse_fails_test< E: Executor + Send + 'static, R: ConnectionProvider, >( mut exec: E, handle: R, ) { let mut config = ResolverConfig::default(); config.add_search(Name::from_str("example.com").unwrap()); let resolver = AsyncResolver::::new( config, ResolverOpts { ip_strategy: LookupIpStrategy::Ipv4Only, ndots: 5, ..ResolverOpts::default() }, handle, ); let response = exec .block_on(resolver.lookup_ip("2001:db8::198.51.100.35")) .expect("failed to run lookup"); let mut iter = response.iter(); assert_eq!( iter.next().expect("no rdatas"), IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0xc633, 0x6423)) ); } } #[cfg(test)] #[cfg(feature = "tokio-runtime")] #[allow(clippy::extra_unused_type_parameters)] mod tests { use proto::xfer::DnsRequest; use tokio::runtime::Runtime; use crate::config::{ResolverConfig, ResolverOpts}; use crate::name_server::GenericConnection; use super::*; fn is_send_t() -> bool { true } fn is_sync_t() -> bool { true } #[test] fn test_send_sync() { assert!(is_send_t::()); assert!(is_sync_t::()); assert!(is_send_t::()); assert!(is_sync_t::()); assert!(is_send_t::>()); assert!(is_sync_t::>()); assert!(is_send_t::()); assert!(is_send_t::>()); assert!(is_send_t::>()); } #[test] fn test_lookup_google() { use super::testing::lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime"); let handle = TokioConnectionProvider::default(); lookup_test::(ResolverConfig::google(), io_loop, handle) } #[test] fn test_lookup_cloudflare() { use super::testing::lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime"); let handle = TokioConnectionProvider::default(); lookup_test::( ResolverConfig::cloudflare(), io_loop, handle, ) } #[test] fn test_lookup_quad9() { use super::testing::lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime"); let handle = TokioConnectionProvider::default(); lookup_test::(ResolverConfig::quad9(), io_loop, handle) } #[test] fn test_ip_lookup() { use super::testing::ip_lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime"); let handle = TokioConnectionProvider::default(); ip_lookup_test::(io_loop, handle) } #[test] fn test_ip_lookup_across_threads() { use super::testing::ip_lookup_across_threads_test; let _io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); ip_lookup_across_threads_test::(handle) } #[test] #[cfg(feature = "dnssec")] fn test_sec_lookup() { use super::testing::sec_lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); sec_lookup_test::(io_loop, handle); } #[test] #[cfg(feature = "dnssec")] fn test_sec_lookup_fails() { use super::testing::sec_lookup_fails_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); sec_lookup_fails_test::(io_loop, handle); } #[test] #[ignore] #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] fn test_system_lookup() { use super::testing::system_lookup_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); system_lookup_test::(io_loop, handle); } #[test] #[ignore] // these appear to not work on CI, test on macos with `10.1.0.104 a.com` #[cfg(unix)] #[cfg(feature = "system-config")] fn test_hosts_lookup() { use super::testing::hosts_lookup_test; let io_loop = 
Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); hosts_lookup_test::(io_loop, handle); } #[test] fn test_fqdn() { use super::testing::fqdn_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); fqdn_test::(io_loop, handle); } #[test] fn test_ndots() { use super::testing::ndots_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); ndots_test::(io_loop, handle); } #[test] fn test_large_ndots() { use super::testing::large_ndots_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); large_ndots_test::(io_loop, handle); } #[test] fn test_domain_search() { use super::testing::domain_search_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); domain_search_test::(io_loop, handle); } #[test] fn test_search_list() { use super::testing::search_list_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); search_list_test::(io_loop, handle); } #[test] fn test_idna() { use super::testing::idna_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); idna_test::(io_loop, handle); } #[test] fn test_localhost_ipv4() { use super::testing::localhost_ipv4_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); localhost_ipv4_test::(io_loop, handle); } #[test] fn test_localhost_ipv6() { use super::testing::localhost_ipv6_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); localhost_ipv6_test::(io_loop, handle); } #[test] fn test_search_ipv4_large_ndots() { use super::testing::search_ipv4_large_ndots_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); search_ipv4_large_ndots_test::(io_loop, handle); } #[test] fn test_search_ipv6_large_ndots() { use super::testing::search_ipv6_large_ndots_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); search_ipv6_large_ndots_test::(io_loop, handle); } #[test] fn test_search_ipv6_name_parse_fails() { use super::testing::search_ipv6_name_parse_fails_test; let io_loop = Runtime::new().expect("failed to create tokio runtime io_loop"); let handle = TokioConnectionProvider::default(); search_ipv6_name_parse_fails_test::(io_loop, handle); } #[test] fn test_build_names_onion() { let handle = TokioConnectionProvider::default(); let mut config = ResolverConfig::default(); config.add_search(Name::from_ascii("example.com.").unwrap()); let resolver = AsyncResolver::::new(config, ResolverOpts::default(), handle); let tor_address = [ Name::from_ascii("2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion") .unwrap(), Name::from_ascii("www.2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion") .unwrap(), // subdomain are allowed too ]; let not_tor_address = [ Name::from_ascii("onion").unwrap(), Name::from_ascii("www.onion").unwrap(), Name::from_ascii("2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.www.onion") 
.unwrap(), // www before key Name::from_ascii("2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion.to") .unwrap(), // Tor2web ]; for name in &tor_address { assert_eq!(resolver.build_names(name.clone()).len(), 1); } for name in ¬_tor_address { assert_eq!(resolver.build_names(name.clone()).len(), 2); } } } hickory-resolver-0.24.0/src/caching_client.rs000064400000000000000000001125421046102023000173120ustar 00000000000000// Copyright 2015-2023 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Caching related functionality for the Resolver. use std::{ borrow::Cow, error::Error, pin::Pin, sync::{ atomic::{AtomicU8, Ordering}, Arc, }, time::Instant, }; use futures_util::future::Future; use once_cell::sync::Lazy; use crate::{ dns_lru::{self, DnsLru, TtlConfig}, error::{ResolveError, ResolveErrorKind}, lookup::Lookup, proto::{ error::ProtoError, op::{Query, ResponseCode}, rr::{ domain::usage::{ ResolverUsage, DEFAULT, INVALID, IN_ADDR_ARPA_127, IP6_ARPA_1, LOCAL, LOCALHOST as LOCALHOST_usage, ONION, }, rdata::{A, AAAA, CNAME, PTR, SOA}, resource::RecordRef, DNSClass, Name, RData, Record, RecordType, }, xfer::{DnsHandle, DnsRequestOptions, DnsResponse, FirstAnswer}, }, }; const MAX_QUERY_DEPTH: u8 = 8; // arbitrarily chosen number... static LOCALHOST: Lazy = Lazy::new(|| RData::PTR(PTR(Name::from_ascii("localhost.").unwrap()))); static LOCALHOST_V4: Lazy = Lazy::new(|| RData::A(A::new(127, 0, 0, 1))); static LOCALHOST_V6: Lazy = Lazy::new(|| RData::AAAA(AAAA::new(0, 0, 0, 0, 0, 0, 0, 1))); struct DepthTracker { query_depth: Arc, } impl DepthTracker { fn track(query_depth: Arc) -> Self { query_depth.fetch_add(1, Ordering::Release); Self { query_depth } } } impl Drop for DepthTracker { fn drop(&mut self) { self.query_depth.fetch_sub(1, Ordering::Release); } } // TODO: need to consider this storage type as it compares to Authority in server... // should it just be an variation on Authority? #[derive(Clone, Debug)] #[doc(hidden)] pub struct CachingClient where C: DnsHandle, E: Into + From + Error + Clone + Send + Unpin + 'static, { lru: DnsLru, client: C, query_depth: Arc, preserve_intermediates: bool, } impl CachingClient where C: DnsHandle + Send + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { #[doc(hidden)] pub fn new(max_size: usize, client: C, preserve_intermediates: bool) -> Self { Self::with_cache( DnsLru::new(max_size, TtlConfig::default()), client, preserve_intermediates, ) } pub(crate) fn with_cache(lru: DnsLru, client: C, preserve_intermediates: bool) -> Self { let query_depth = Arc::new(AtomicU8::new(0)); Self { lru, client, query_depth, preserve_intermediates, } } /// Perform a lookup against this caching client, looking first in the cache for a result pub fn lookup( &mut self, query: Query, options: DnsRequestOptions, ) -> Pin> + Send>> { Box::pin(Self::inner_lookup(query, options, self.clone(), vec![])) } async fn inner_lookup( query: Query, options: DnsRequestOptions, mut client: Self, preserved_records: Vec<(Record, u32)>, ) -> Result { // see https://tools.ietf.org/html/rfc6761 // // ```text // Name resolution APIs and libraries SHOULD recognize localhost // names as special and SHOULD always return the IP loopback address // for address queries and negative responses for all other query // types. Name resolution APIs SHOULD NOT send queries for // localhost names to their configured caching DNS server(s). 
// ``` // special use rules only apply to the IN Class if query.query_class() == DNSClass::IN { let usage = match query.name() { n if LOCALHOST_usage.zone_of(n) => &*LOCALHOST_usage, n if IN_ADDR_ARPA_127.zone_of(n) => &*LOCALHOST_usage, n if IP6_ARPA_1.zone_of(n) => &*LOCALHOST_usage, n if INVALID.zone_of(n) => &*INVALID, n if LOCAL.zone_of(n) => &*LOCAL, n if ONION.zone_of(n) => &*ONION, _ => &*DEFAULT, }; match usage.resolver() { ResolverUsage::Loopback => match query.query_type() { // TODO: look in hosts for these ips/names first... RecordType::A => return Ok(Lookup::from_rdata(query, LOCALHOST_V4.clone())), RecordType::AAAA => return Ok(Lookup::from_rdata(query, LOCALHOST_V6.clone())), RecordType::PTR => return Ok(Lookup::from_rdata(query, LOCALHOST.clone())), _ => { return Err(ResolveError::nx_error( query, None, None, ResponseCode::NoError, false, )) } // Are there any other types we can use? }, // when mdns is enabled we will follow a standard query path #[cfg(feature = "mdns")] ResolverUsage::LinkLocal => (), // TODO: this requires additional config, as Kubernetes and other systems misuse the .local. zone. // when mdns is not enabled we will return errors on LinkLocal ("*.local.") names #[cfg(not(feature = "mdns"))] ResolverUsage::LinkLocal => (), ResolverUsage::NxDomain => { return Err(ResolveError::nx_error( query, None, None, ResponseCode::NXDomain, false, )) } ResolverUsage::Normal => (), } } let _tracker = DepthTracker::track(client.query_depth.clone()); let is_dnssec = client.client.is_verifying_dnssec(); // first transition any polling that is needed (mutable refs...) if let Some(cached_lookup) = client.lookup_from_cache(&query) { return cached_lookup; }; let response_message = client .client .lookup(query.clone(), options) .first_answer() .await .map_err(E::into); // TODO: technically this might be duplicating work, as name_server already performs this evaluation. // we may want to create a new type, if evaluated... but this is most generic to support any impl in LookupState... let response_message = if let Ok(response) = response_message { ResolveError::from_response(response, false) } else { response_message }; // TODO: take all records and cache them? // if it's DNSSEC they must be signed, otherwise? let records: Result = match response_message { // this is the only cacheable form Err(ResolveError { kind: ResolveErrorKind::NoRecordsFound { query, soa, negative_ttl, response_code, trusted, }, .. 
}) => { Err(Self::handle_nxdomain( is_dnssec, false, /*tbd*/ *query, soa.map(|v| *v), negative_ttl, response_code, trusted, )) } Err(e) => return Err(e), Ok(response_message) => { // allow the handle_noerror function to deal with any error codes let records = Self::handle_noerror( &mut client, options, is_dnssec, &query, response_message, preserved_records, )?; Ok(records) } }; // after the request, evaluate if we have additional queries to perform match records { Ok(Records::CnameChain { next: future, min_ttl: ttl, }) => match future.await { Ok(lookup) => client.cname(lookup, query, ttl), Err(e) => client.cache(query, Err(e)), }, Ok(Records::Exists(rdata)) => client.cache(query, Ok(rdata)), Err(e) => client.cache(query, Err(e)), } } /// Check if this query is already cached fn lookup_from_cache(&self, query: &Query) -> Option> { self.lru.get(query, Instant::now()) } /// See https://tools.ietf.org/html/rfc2308 /// /// For now we will regard NXDomain to strictly mean the query failed /// and a record for the name, regardless of CNAME presence, what have you /// ultimately does not exist. /// /// This also handles empty responses in the same way. When performing DNSSEC enabled queries, we should /// never enter here, and should never cache unless verified requests. /// /// TODO: should this should be expanded to do a forward lookup? Today, this will fail even if there are /// forwarding options. /// /// # Arguments /// /// * `message` - message to extract SOA, etc, from for caching failed requests /// * `valid_nsec` - species that in DNSSEC mode, this request is safe to cache /// * `negative_ttl` - this should be the SOA minimum for negative ttl fn handle_nxdomain( is_dnssec: bool, valid_nsec: bool, query: Query, soa: Option>, negative_ttl: Option, response_code: ResponseCode, trusted: bool, ) -> ResolveError { if valid_nsec || !is_dnssec { // only trust if there were validated NSEC records ResolveErrorKind::NoRecordsFound { query: Box::new(query), soa: soa.map(Box::new), negative_ttl, response_code, trusted: true, } .into() } else { // not cacheable, no ttl... ResolveErrorKind::NoRecordsFound { query: Box::new(query), soa: soa.map(Box::new), negative_ttl: None, response_code, trusted, } .into() } } /// Handle the case where there is no error returned fn handle_noerror( client: &mut Self, options: DnsRequestOptions, is_dnssec: bool, query: &Query, response: DnsResponse, mut preserved_records: Vec<(Record, u32)>, ) -> Result { // initial ttl is what CNAMES for min usage const INITIAL_TTL: u32 = dns_lru::MAX_TTL; // need to capture these before the subsequent and destructive record processing let soa = response.soa().as_ref().map(RecordRef::to_owned); let negative_ttl = response.negative_ttl(); let response_code = response.response_code(); // seek out CNAMES, this is only performed if the query is not a CNAME, ANY, or SRV // FIXME: for SRV this evaluation is inadequate. CNAME is a single chain to a single record // for SRV, there could be many different targets. The search_name needs to be enhanced to // be a list of names found for SRV records. 
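// Worked example (illustrative names and TTLs, not taken from the test suite): for an A
// query on "www.example.com." whose answer section contains, in order,
//
//     www.example.com.   300  CNAME  cdn.example.net.
//     cdn.example.net.    60  CNAME  edge.example.net.
//     edge.example.net.  600  A      192.0.2.7
//
// the fold below walks the chain record by record, ending with
// search_name = "edge.example.net." and cname_ttl = min(INITIAL_TTL, 300, 60) = 60.
// The A record is then matched against that final name and is collected with
// ttl = min(cname_ttl, 600) = 60, so the whole chain is capped by its smallest TTL.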
let (search_name, cname_ttl, was_cname, preserved_records) = { // this will only search for CNAMEs if the request was not meant to be for one of the triggers for recursion let (search_name, cname_ttl, was_cname) = if query.query_type().is_any() || query.query_type().is_cname() { (Cow::Borrowed(query.name()), INITIAL_TTL, false) } else { // Folds any cnames from the answers section, into the final cname in the answers section // this works by folding the last CNAME found into the final folded result. // it assumes that the CNAMEs are in chained order in the DnsResponse Message... // For SRV, the name added for the search becomes the target name. // // TODO: should this include the additionals? response.answers().iter().fold( (Cow::Borrowed(query.name()), INITIAL_TTL, false), |(search_name, cname_ttl, was_cname), r| { match r.data() { Some(RData::CNAME(CNAME(ref cname))) => { // take the minimum TTL of the cname_ttl and the next record in the chain let ttl = cname_ttl.min(r.ttl()); debug_assert_eq!(r.record_type(), RecordType::CNAME); if search_name.as_ref() == r.name() { return (Cow::Owned(cname.clone()), ttl, true); } } Some(RData::SRV(ref srv)) => { // take the minimum TTL of the cname_ttl and the next record in the chain let ttl = cname_ttl.min(r.ttl()); debug_assert_eq!(r.record_type(), RecordType::SRV); // the search name becomes the srv.target return (Cow::Owned(srv.target().clone()), ttl, true); } _ => (), } (search_name, cname_ttl, was_cname) }, ) }; // take all answers. // TODO: following CNAMES? let mut response = response.into_message(); let answers = response.take_answers(); let additionals = response.take_additionals(); let name_servers = response.take_name_servers(); // set of names that still require resolution // TODO: this needs to be enhanced for SRV let mut found_name = false; // After following all the CNAMES to the last one, try and lookup the final name let records = answers .into_iter() // Chained records will generally exist in the additionals section .chain(additionals) .chain(name_servers) .filter_map(|r| { // because this resolved potentially recursively, we want the min TTL from the chain let ttl = cname_ttl.min(r.ttl()); // TODO: disable name validation with ResolverOpts? glibc feature... // restrict to the RData type requested if query.query_class() == r.dns_class() { // standard evaluation, it's an any type or it's the requested type and the search_name matches #[allow(clippy::suspicious_operation_groupings)] if (query.query_type().is_any() || query.query_type() == r.record_type()) && (search_name.as_ref() == r.name() || query.name() == r.name()) { found_name = true; return Some((r, ttl)); } // CNAME evaluation, the record is from the CNAME lookup chain. 
if client.preserve_intermediates && r.record_type() == RecordType::CNAME { return Some((r, ttl)); } // srv evaluation, it's an srv lookup and the srv_search_name/target matches this name // and it's an IP if query.query_type().is_srv() && r.record_type().is_ip_addr() && search_name.as_ref() == r.name() { found_name = true; Some((r, ttl)) } else if query.query_type().is_ns() && r.record_type().is_ip_addr() { Some((r, ttl)) } else { None } } else { None } }) .collect::>(); // adding the newly collected records to the preserved records preserved_records.extend(records); if !preserved_records.is_empty() && found_name { return Ok(Records::Exists(preserved_records)); } ( search_name.into_owned(), cname_ttl, was_cname, preserved_records, ) }; // TODO: for SRV records we *could* do an implicit lookup, but, this requires knowing the type of IP desired // for now, we'll make the API require the user to perform a follow up to the lookups. // It was a CNAME, but not included in the request... if was_cname && client.query_depth.load(Ordering::Acquire) < MAX_QUERY_DEPTH { let next_query = Query::query(search_name, query.query_type()); Ok(Records::CnameChain { next: Box::pin(Self::inner_lookup( next_query, options, client.clone(), preserved_records, )), min_ttl: cname_ttl, }) } else { // TODO: review See https://tools.ietf.org/html/rfc2308 for NoData section // Note on DNSSEC, in secure_client_handle, if verify_nsec fails then the request fails. // this will mean that no unverified negative caches will make it to this point and be stored Err(Self::handle_nxdomain( is_dnssec, true, query.clone(), soa, negative_ttl, response_code, false, )) } } #[allow(clippy::unnecessary_wraps)] fn cname(&self, lookup: Lookup, query: Query, cname_ttl: u32) -> Result { // this duplicates the cache entry under the original query Ok(self.lru.duplicate(query, lookup, cname_ttl, Instant::now())) } fn cache( &self, query: Query, records: Result, ResolveError>, ) -> Result { // this will put this object into an inconsistent state, but no one should call poll again... match records { Ok(rdata) => Ok(self.lru.insert(query, rdata, Instant::now())), Err(err) => Err(self.lru.negative(query, err, Instant::now())), } } /// Flushes/Removes all entries from the cache pub fn clear_cache(&self) { self.lru.clear(); } } enum Records { /// The records exists, a vec of rdata with ttl Exists(Vec<(Record, u32)>), /// Future lookup for recursive cname records CnameChain { next: Pin> + Send>>, min_ttl: u32, }, } // see also the lookup_tests.rs in integration-tests crate #[cfg(test)] mod tests { use std::net::*; use std::str::FromStr; use std::time::*; use futures_executor::block_on; use proto::op::{Message, Query}; use proto::rr::rdata::{NS, SRV}; use proto::rr::{Name, Record}; use super::*; use crate::lookup_ip::tests::*; #[test] fn test_empty_cache() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); let client = mock(vec![empty()]); let client = CachingClient::with_cache(cache, client, false); if let ResolveErrorKind::NoRecordsFound { query, negative_ttl, .. 
} = block_on(CachingClient::inner_lookup( Query::new(), DnsRequestOptions::default(), client, vec![], )) .unwrap_err() .kind() { assert_eq!(**query, Query::new()); assert_eq!(*negative_ttl, None); } else { panic!("wrong error received") } } #[test] fn test_from_cache() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); let query = Query::new(); cache.insert( query.clone(), vec![( Record::from_rdata( query.name().clone(), u32::max_value(), RData::A(A::new(127, 0, 0, 1)), ), u32::max_value(), )], Instant::now(), ); let client = mock(vec![empty()]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::new(), DnsRequestOptions::default(), client, vec![], )) .unwrap(); assert_eq!( ips.iter().cloned().collect::>(), vec![RData::A(A::new(127, 0, 0, 1))] ); } #[test] fn test_no_cache_insert() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); // first should come from client... let client = mock(vec![v4_message()]); let client = CachingClient::with_cache(cache.clone(), client, false); let ips = block_on(CachingClient::inner_lookup( Query::new(), DnsRequestOptions::default(), client, vec![], )) .unwrap(); assert_eq!( ips.iter().cloned().collect::>(), vec![RData::A(A::new(127, 0, 0, 1))] ); // next should come from cache... let client = mock(vec![empty()]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::new(), DnsRequestOptions::default(), client, vec![], )) .unwrap(); assert_eq!( ips.iter().cloned().collect::>(), vec![RData::A(A::new(127, 0, 0, 1))] ); } #[allow(clippy::unnecessary_wraps)] pub(crate) fn cname_message() -> Result { let mut message = Message::new(); message.add_query(Query::query( Name::from_str("www.example.com.").unwrap(), RecordType::A, )); message.insert_answers(vec![Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 86400, RData::CNAME(CNAME(Name::from_str("actual.example.com.").unwrap())), )]); Ok(DnsResponse::from_message(message).unwrap()) } #[allow(clippy::unnecessary_wraps)] pub(crate) fn srv_message() -> Result { let mut message = Message::new(); message.add_query(Query::query( Name::from_str("_443._tcp.www.example.com.").unwrap(), RecordType::SRV, )); message.insert_answers(vec![Record::from_rdata( Name::from_str("_443._tcp.www.example.com.").unwrap(), 86400, RData::SRV(SRV::new( 1, 2, 443, Name::from_str("www.example.com.").unwrap(), )), )]); Ok(DnsResponse::from_message(message).unwrap()) } #[allow(clippy::unnecessary_wraps)] pub(crate) fn ns_message() -> Result { let mut message = Message::new(); message.add_query(Query::query( Name::from_str("www.example.com.").unwrap(), RecordType::NS, )); message.insert_answers(vec![Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 86400, RData::NS(NS(Name::from_str("www.example.com.").unwrap())), )]); Ok(DnsResponse::from_message(message).unwrap()) } fn no_recursion_on_query_test(query_type: RecordType) { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); // the cname should succeed, we shouldn't query again after that, which would cause an error... 
let client = mock(vec![error(), cname_message()]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::query(Name::from_str("www.example.com.").unwrap(), query_type), DnsRequestOptions::default(), client, vec![], )) .expect("lookup failed"); assert_eq!( ips.iter().cloned().collect::>(), vec![RData::CNAME(CNAME( Name::from_str("actual.example.com.").unwrap() ))] ); } #[test] fn test_no_recursion_on_cname_query() { no_recursion_on_query_test(RecordType::CNAME); } #[test] fn test_no_recursion_on_all_query() { no_recursion_on_query_test(RecordType::ANY); } #[test] fn test_non_recursive_srv_query() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); // the cname should succeed, we shouldn't query again after that, which would cause an error... let client = mock(vec![error(), srv_message()]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::query( Name::from_str("_443._tcp.www.example.com.").unwrap(), RecordType::SRV, ), DnsRequestOptions::default(), client, vec![], )) .expect("lookup failed"); assert_eq!( ips.iter().cloned().collect::>(), vec![RData::SRV(SRV::new( 1, 2, 443, Name::from_str("www.example.com.").unwrap(), ))] ); } #[test] fn test_single_srv_query_response() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); let mut message = srv_message().unwrap().into_message(); message.add_answer(Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 86400, RData::CNAME(CNAME(Name::from_str("actual.example.com.").unwrap())), )); message.insert_additionals(vec![ Record::from_rdata( Name::from_str("actual.example.com.").unwrap(), 86400, RData::A(A::new(127, 0, 0, 1)), ), Record::from_rdata( Name::from_str("actual.example.com.").unwrap(), 86400, RData::AAAA(AAAA::new(0, 0, 0, 0, 0, 0, 0, 1)), ), ]); let client = mock(vec![ error(), Ok(DnsResponse::from_message(message).unwrap()), ]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::query( Name::from_str("_443._tcp.www.example.com.").unwrap(), RecordType::SRV, ), DnsRequestOptions::default(), client, vec![], )) .expect("lookup failed"); assert_eq!( ips.iter().cloned().collect::>(), vec![ RData::SRV(SRV::new( 1, 2, 443, Name::from_str("www.example.com.").unwrap(), )), RData::A(A::new(127, 0, 0, 1)), RData::AAAA(AAAA::new(0, 0, 0, 0, 0, 0, 0, 1)), ] ); } // TODO: if we ever enable recursive lookups for SRV, here are the tests... 
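// (The commented-out tests below cover that hypothetical recursive mode. Until it exists,
// resolving SRV targets is left to the caller; a rough, untested sketch of that follow-up,
// using the same `CachingClient::lookup` API as the tests above with a mutable `client`
// inside a function returning `Result`, might look like:)
//
//     let srv = block_on(client.lookup(
//         Query::query(
//             Name::from_str("_443._tcp.www.example.com.").unwrap(),
//             RecordType::SRV,
//         ),
//         DnsRequestOptions::default(),
//     ))?;
//     for target in srv.iter().filter_map(|r| match r {
//         RData::SRV(srv) => Some(srv.target().clone()),
//         _ => None,
//     }) {
//         // one address query per SRV target; a real caller may want AAAA as well
//         let _addrs = block_on(client.lookup(
//             Query::query(target, RecordType::A),
//             DnsRequestOptions::default(),
//         ))?;
//     }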
// #[test] // fn test_recursive_srv_query() { // let cache = Arc::new(Mutex::new(DnsLru::new(1))); // let mut message = Message::new(); // message.add_answer(Record::from_rdata( // Name::from_str("www.example.com.").unwrap(), // 86400, // RecordType::CNAME, // RData::CNAME(Name::from_str("actual.example.com.").unwrap()), // )); // message.insert_additionals(vec![ // Record::from_rdata( // Name::from_str("actual.example.com.").unwrap(), // 86400, // RecordType::A, // RData::A(Ipv4Addr::new(127, 0, 0, 1)), // ), // ]); // let mut client = mock(vec![error(), Ok(DnsResponse::from_message(message).unwrap()), srv_message()]); // let ips = QueryState::lookup( // Query::query( // Name::from_str("_443._tcp.www.example.com.").unwrap(), // RecordType::SRV, // ), // Default::default(), // &mut client, // cache.clone(), // ).wait() // .expect("lookup failed"); // assert_eq!( // ips.iter().cloned().collect::>(), // vec![ // RData::SRV(SRV::new( // 1, // 2, // 443, // Name::from_str("www.example.com.").unwrap(), // )), // RData::A(Ipv4Addr::new(127, 0, 0, 1)), // //RData::AAAA(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), // ] // ); // } #[test] fn test_single_ns_query_response() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); let mut message = ns_message().unwrap().into_message(); message.add_answer(Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 86400, RData::CNAME(CNAME(Name::from_str("actual.example.com.").unwrap())), )); message.insert_additionals(vec![ Record::from_rdata( Name::from_str("actual.example.com.").unwrap(), 86400, RData::A(A::new(127, 0, 0, 1)), ), Record::from_rdata( Name::from_str("actual.example.com.").unwrap(), 86400, RData::AAAA(AAAA::new(0, 0, 0, 0, 0, 0, 0, 1)), ), ]); let client = mock(vec![ error(), Ok(DnsResponse::from_message(message).unwrap()), ]); let client = CachingClient::with_cache(cache, client, false); let ips = block_on(CachingClient::inner_lookup( Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::NS), DnsRequestOptions::default(), client, vec![], )) .expect("lookup failed"); assert_eq!( ips.iter().cloned().collect::>(), vec![ RData::NS(NS(Name::from_str("www.example.com.").unwrap())), RData::A(A::new(127, 0, 0, 1)), RData::AAAA(AAAA::new(0, 0, 0, 0, 0, 0, 0, 1)), ] ); } fn cname_ttl_test(first: u32, second: u32) { let lru = DnsLru::new(1, dns_lru::TtlConfig::default()); // expecting no queries to be performed let mut client = CachingClient::with_cache(lru, mock(vec![error()]), false); let mut message = Message::new(); message.insert_answers(vec![Record::from_rdata( Name::from_str("ttl.example.com.").unwrap(), first, RData::CNAME(CNAME(Name::from_str("actual.example.com.").unwrap())), )]); message.insert_additionals(vec![Record::from_rdata( Name::from_str("actual.example.com.").unwrap(), second, RData::A(A::new(127, 0, 0, 1)), )]); let records = CachingClient::handle_noerror( &mut client, DnsRequestOptions::default(), false, &Query::query(Name::from_str("ttl.example.com.").unwrap(), RecordType::A), DnsResponse::from_message(message).unwrap(), vec![], ); if let Ok(records) = records { if let Records::Exists(records) = records { for (record, ttl) in records.iter() { if record.record_type() == RecordType::CNAME { continue; } assert_eq!(ttl, &1); } } else { panic!("records don't exist"); } } else { panic!("error getting records"); } } #[test] fn test_cname_ttl() { cname_ttl_test(1, 2); cname_ttl_test(2, 1); } #[test] fn test_early_return_localhost() { let cache = DnsLru::new(0, dns_lru::TtlConfig::default()); let client = 
mock(vec![empty()]); let mut client = CachingClient::with_cache(cache, client, false); { let query = Query::query(Name::from_ascii("localhost.").unwrap(), RecordType::A); let lookup = block_on(client.lookup(query.clone(), DnsRequestOptions::default())) .expect("should have returned localhost"); assert_eq!(lookup.query(), &query); assert_eq!( lookup.iter().cloned().collect::>(), vec![LOCALHOST_V4.clone()] ); } { let query = Query::query(Name::from_ascii("localhost.").unwrap(), RecordType::AAAA); let lookup = block_on(client.lookup(query.clone(), DnsRequestOptions::default())) .expect("should have returned localhost"); assert_eq!(lookup.query(), &query); assert_eq!( lookup.iter().cloned().collect::>(), vec![LOCALHOST_V6.clone()] ); } { let query = Query::query(Name::from(Ipv4Addr::new(127, 0, 0, 1)), RecordType::PTR); let lookup = block_on(client.lookup(query.clone(), DnsRequestOptions::default())) .expect("should have returned localhost"); assert_eq!(lookup.query(), &query); assert_eq!( lookup.iter().cloned().collect::>(), vec![LOCALHOST.clone()] ); } { let query = Query::query( Name::from(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), RecordType::PTR, ); let lookup = block_on(client.lookup(query.clone(), DnsRequestOptions::default())) .expect("should have returned localhost"); assert_eq!(lookup.query(), &query); assert_eq!( lookup.iter().cloned().collect::>(), vec![LOCALHOST.clone()] ); } assert!(block_on(client.lookup( Query::query(Name::from_ascii("localhost.").unwrap(), RecordType::MX), DnsRequestOptions::default() )) .is_err()); assert!(block_on(client.lookup( Query::query(Name::from(Ipv4Addr::new(127, 0, 0, 1)), RecordType::MX), DnsRequestOptions::default() )) .is_err()); assert!(block_on(client.lookup( Query::query( Name::from(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), RecordType::MX ), DnsRequestOptions::default() )) .is_err()); } #[test] fn test_early_return_invalid() { let cache = DnsLru::new(0, dns_lru::TtlConfig::default()); let client = mock(vec![empty()]); let mut client = CachingClient::with_cache(cache, client, false); assert!(block_on(client.lookup( Query::query( Name::from_ascii("horrible.invalid.").unwrap(), RecordType::A, ), DnsRequestOptions::default() )) .is_err()); } #[test] fn test_no_error_on_dot_local_no_mdns() { let cache = DnsLru::new(1, dns_lru::TtlConfig::default()); let mut message = srv_message().unwrap().into_message(); message.add_query(Query::query( Name::from_ascii("www.example.local.").unwrap(), RecordType::A, )); message.add_answer(Record::from_rdata( Name::from_str("www.example.local.").unwrap(), 86400, RData::A(A::new(127, 0, 0, 1)), )); let client = mock(vec![ error(), Ok(DnsResponse::from_message(message).unwrap()), ]); let mut client = CachingClient::with_cache(cache, client, false); assert!(block_on(client.lookup( Query::query( Name::from_ascii("www.example.local.").unwrap(), RecordType::A, ), DnsRequestOptions::default() )) .is_ok()); } } hickory-resolver-0.24.0/src/config.rs000064400000000000000000001207301046102023000156230ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! 
Configuration for a resolver #![allow(clippy::use_self)] use std::fmt; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::ops::{Deref, DerefMut}; use std::time::Duration; #[cfg(feature = "dns-over-rustls")] use std::sync::Arc; use proto::rr::Name; #[cfg(feature = "dns-over-rustls")] use rustls::ClientConfig; #[cfg(all(feature = "serde-config", feature = "dns-over-rustls"))] use serde::{ de::{Deserialize as DeserializeT, Deserializer}, ser::{Serialize as SerializeT, Serializer}, }; /// Configuration for the upstream nameservers to use for resolution #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde-config", derive(Serialize, Deserialize))] pub struct ResolverConfig { // base search domain #[cfg_attr(feature = "serde-config", serde(default))] domain: Option, // search domains #[cfg_attr(feature = "serde-config", serde(default))] search: Vec, // nameservers to use for resolution. name_servers: NameServerConfigGroup, } impl ResolverConfig { /// Creates a new empty configuration pub fn new() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::new(), } } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` pub fn google() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::google(), } } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just TLS lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn google_tls() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::google_tls(), } } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just HTTPS lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. 
To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn google_https() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::google_https(), } } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just HTTP/3 lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-h3")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-h3")))] pub fn google_h3() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::google_h3(), } } /// Creates a default configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). /// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` pub fn cloudflare() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::cloudflare(), } } /// Creates a configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). This limits the registered connections to just TLS lookups /// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn cloudflare_tls() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::cloudflare_tls(), } } /// Creates a configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). This limits the registered connections to just HTTPS lookups /// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn cloudflare_https() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::cloudflare_https(), } } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings (thank you, Quad9). 
/// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` pub fn quad9() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::quad9(), } } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings. This limits the registered connections to just TLS lookups /// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn quad9_tls() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::quad9_tls(), } } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings. This limits the registered connections to just HTTPS lookups /// /// Please see: /// /// NameServerConfigGroups can be combined to use a set of different providers, see `NameServerConfigGroup` and `ResolverConfig::from_parts` #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn quad9_https() -> Self { Self { // TODO: this should get the hostname and use the basename as the default domain: None, search: vec![], name_servers: NameServerConfigGroup::quad9_https(), } } /// Create a ResolverConfig with all parts specified /// /// # Arguments /// /// * `domain` - domain of the entity querying results. If the `Name` being looked up is not an FQDN, then this is the first part appended to attempt a lookup. `ndots` in the `ResolverOption` does take precedence over this. /// * `search` - additional search domains that are attempted if the `Name` is not found in `domain`, defaults to `vec![]` /// * `name_servers` - set of name servers to use for lookups, defaults are Google: `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` pub fn from_parts>( domain: Option, search: Vec, name_servers: G, ) -> Self { Self { domain, search, name_servers: name_servers.into(), } } /// Returns the local domain /// /// By default any names will be appended to all non-fully-qualified-domain names, and searched for after any ndots rules pub fn domain(&self) -> Option<&Name> { self.domain.as_ref() } /// Set the domain of the entity querying results. pub fn set_domain(&mut self, domain: Name) { self.domain = Some(domain.clone()); self.search = vec![domain]; } /// Returns the search domains /// /// These will be queried after any local domain and then in the order of the set of search domains pub fn search(&self) -> &[Name] { &self.search } /// Add a search domain pub fn add_search(&mut self, search: Name) { self.search.push(search) } // TODO: consider allowing options per NameServer... like different timeouts? 
/// Add the configuration for a name server pub fn add_name_server(&mut self, name_server: NameServerConfig) { self.name_servers.push(name_server); } /// Returns a reference to the name servers pub fn name_servers(&self) -> &[NameServerConfig] { &self.name_servers } /// return the associated TlsClientConfig #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] pub fn client_config(&self) -> &Option { &self.name_servers.1 } /// adds the `rustls::ClientConf` for every configured NameServer /// of the Resolver. /// /// ``` /// use std::sync::Arc; /// /// use rustls::{ClientConfig, ProtocolVersion, RootCertStore, OwnedTrustAnchor}; /// use hickory_resolver::config::ResolverConfig; /// # #[cfg(feature = "webpki-roots")] /// use webpki_roots; /// /// let mut root_store = RootCertStore::empty(); /// # #[cfg(feature = "webpki-roots")] /// root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { /// OwnedTrustAnchor::from_subject_spki_name_constraints( /// ta.subject, /// ta.spki, /// ta.name_constraints, /// ) /// })); /// /// let mut client_config = ClientConfig::builder() /// .with_safe_default_cipher_suites() /// .with_safe_default_kx_groups() /// .with_protocol_versions(&[&rustls::version::TLS12]) /// .unwrap() /// .with_root_certificates(root_store) /// .with_no_client_auth(); /// /// let mut resolver_config = ResolverConfig::quad9_tls(); /// resolver_config.set_tls_client_config(Arc::new(client_config)); /// ``` #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] pub fn set_tls_client_config(&mut self, client_config: Arc) { self.name_servers = self.name_servers.clone().with_client_config(client_config); } } impl Default for ResolverConfig { /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. 
To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` fn default() -> Self { Self::google() } } /// The protocol on which a NameServer should be communicated with #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[cfg_attr( feature = "serde-config", derive(Serialize, Deserialize), serde(rename_all = "lowercase") )] #[non_exhaustive] pub enum Protocol { /// UDP is the traditional DNS port, this is generally the correct choice Udp, /// TCP can be used for large queries, but not all NameServers support it Tcp, /// Tls for DNS over TLS #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] Tls, /// Https for DNS over HTTPS #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] Https, /// QUIC for DNS over QUIC #[cfg(feature = "dns-over-quic")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-quic")))] Quic, /// HTTP/3 for DNS over HTTP/3 #[cfg(feature = "dns-over-h3")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-h3")))] H3, /// mDNS protocol for performing multicast lookups #[cfg(feature = "mdns")] #[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] Mdns, } impl fmt::Display for Protocol { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let protocol = match self { Self::Udp => "udp", Self::Tcp => "tcp", #[cfg(feature = "dns-over-tls")] Self::Tls => "tls", #[cfg(feature = "dns-over-https")] Self::Https => "https", #[cfg(feature = "dns-over-quic")] Self::Quic => "quic", #[cfg(feature = "dns-over-h3")] Self::H3 => "h3", #[cfg(feature = "mdns")] Self::Mdns => "mdns", }; f.write_str(protocol) } } impl Protocol { /// Returns true if this is a datagram oriented protocol, e.g. UDP pub fn is_datagram(self) -> bool { match self { Self::Udp => true, Self::Tcp => false, #[cfg(feature = "dns-over-tls")] Self::Tls => false, #[cfg(feature = "dns-over-https")] Self::Https => false, // TODO: if you squint, this is true... #[cfg(feature = "dns-over-quic")] Self::Quic => true, #[cfg(feature = "dns-over-h3")] Self::H3 => true, #[cfg(feature = "mdns")] Self::Mdns => true, } } /// Returns true if this is a stream oriented protocol, e.g. TCP pub fn is_stream(self) -> bool { !self.is_datagram() } /// Is this an encrypted protocol, i.e. 
TLS or HTTPS pub fn is_encrypted(self) -> bool { match self { Self::Udp => false, Self::Tcp => false, #[cfg(feature = "dns-over-tls")] Self::Tls => true, #[cfg(feature = "dns-over-https")] Self::Https => true, #[cfg(feature = "dns-over-quic")] Self::Quic => true, #[cfg(feature = "dns-over-h3")] Self::H3 => true, #[cfg(feature = "mdns")] Self::Mdns => false, } } } impl Default for Protocol { /// Default protocol should be UDP, which is supported by all DNS servers fn default() -> Self { Self::Udp } } /// a compatibility wrapper around rustls /// ClientConfig #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] #[derive(Clone)] pub struct TlsClientConfig(pub Arc); #[cfg(feature = "dns-over-rustls")] impl std::cmp::PartialEq for TlsClientConfig { fn eq(&self, other: &Self) -> bool { Arc::ptr_eq(&self.0, &other.0) } } #[cfg(feature = "dns-over-rustls")] impl std::cmp::Eq for TlsClientConfig {} #[cfg(feature = "dns-over-rustls")] impl std::fmt::Debug for TlsClientConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "rustls client config") } } /// Configuration for the NameServer #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "serde-config", derive(Serialize, Deserialize))] pub struct NameServerConfig { /// The address which the DNS NameServer is registered at. pub socket_addr: SocketAddr, /// The protocol to use when communicating with the NameServer. #[cfg_attr(feature = "serde-config", serde(default))] pub protocol: Protocol, /// SPKI name, only relevant for TLS connections #[cfg_attr(feature = "serde-config", serde(default))] pub tls_dns_name: Option, /// Whether to trust `NXDOMAIN` responses from upstream nameservers. /// /// When this is `true`, and an empty `NXDOMAIN` response or `NOERROR` /// with an empty answers set is received, the /// query will not be retried against other configured name servers if /// the response has the Authoritative flag set. /// /// (On a response with any other error /// response code, the query will still be retried regardless of this /// configuration setting.) /// /// Defaults to false. #[cfg_attr(feature = "serde-config", serde(default))] pub trust_negative_responses: bool, #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] #[cfg_attr(feature = "serde-config", serde(skip))] /// Optional configuration for the TLS client. /// /// The correct ALPN for the corresponding protocol is automatically /// inserted if none was specificed. pub tls_config: Option, /// The client address (IP and port) to use for connecting to the server. pub bind_addr: Option, } impl NameServerConfig { /// Constructs a Nameserver configuration with some basic defaults pub fn new(socket_addr: SocketAddr, protocol: Protocol) -> Self { Self { socket_addr, protocol, trust_negative_responses: true, tls_dns_name: None, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, } } } impl fmt::Display for NameServerConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}:", self.protocol)?; if let Some(ref tls_dns_name) = self.tls_dns_name { write!(f, "{tls_dns_name}@")?; } write!(f, "{}", self.socket_addr) } } /// A set of name_servers to associate with a [`ResolverConfig`]. 
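///
/// For example (a minimal sketch; `192.0.2.53` is a reserved documentation address standing
/// in for a local forwarder), a custom group can be built and merged with one of the presets:
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr};
/// use hickory_resolver::config::NameServerConfigGroup;
///
/// let mut group = NameServerConfigGroup::from_ips_clear(
///     &[IpAddr::V4(Ipv4Addr::new(192, 0, 2, 53))],
///     53,
///     true, // trust negative responses from this server
/// );
/// group.merge(NameServerConfigGroup::google());
///
/// // `from_ips_clear` adds one UDP and one TCP entry per IP address
/// assert!(group.len() >= 2);
/// ```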
#[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr( all(feature = "serde-config", not(feature = "dns-over-rustls")), derive(Serialize, Deserialize) )] pub struct NameServerConfigGroup( Vec, #[cfg(feature = "dns-over-rustls")] Option, ); #[cfg(all(feature = "serde-config", feature = "dns-over-rustls"))] impl SerializeT for NameServerConfigGroup { fn serialize(&self, serializer: S) -> Result where S: Serializer, { self.0.serialize(serializer) } } #[cfg(all(feature = "serde-config", feature = "dns-over-rustls"))] impl<'de> DeserializeT<'de> for NameServerConfigGroup { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { Vec::deserialize(deserializer).map(|nameservers| Self(nameservers, None)) } } impl NameServerConfigGroup { /// Creates a new `NameServerConfigGroup` with a default size of 2 pub fn new() -> Self { // this might be a nice opportunity for SmallVec // most name_server configs will be 2. Self::with_capacity(2) } /// Creates a new `NameServiceConfigGroup` with the specified capacity pub fn with_capacity(capacity: usize) -> Self { Self( Vec::with_capacity(capacity), #[cfg(feature = "dns-over-rustls")] None, ) } /// Returns the inner vec of configs pub fn into_inner(self) -> Vec { self.0 } /// Configure a NameServer address and port /// /// This will create UDP and TCP connections, using the same port. pub fn from_ips_clear(ips: &[IpAddr], port: u16, trust_negative_responses: bool) -> Self { let mut name_servers = Self::with_capacity(ips.len()); for ip in ips { let udp = NameServerConfig { socket_addr: SocketAddr::new(*ip, port), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let tcp = NameServerConfig { socket_addr: SocketAddr::new(*ip, port), protocol: Protocol::Tcp, tls_dns_name: None, trust_negative_responses, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; name_servers.push(udp); name_servers.push(tcp); } name_servers } #[cfg(any(feature = "dns-over-tls", feature = "dns-over-https"))] fn from_ips_encrypted( ips: &[IpAddr], port: u16, tls_dns_name: String, protocol: Protocol, trust_negative_responses: bool, ) -> Self { assert!(protocol.is_encrypted()); let mut name_servers = Self::with_capacity(ips.len()); for ip in ips { let config = NameServerConfig { socket_addr: SocketAddr::new(*ip, port), protocol, tls_dns_name: Some(tls_dns_name.clone()), trust_negative_responses, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; name_servers.push(config); } name_servers } /// Configure a NameServer address and port for DNS-over-TLS /// /// This will create a TLS connections. #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn from_ips_tls( ips: &[IpAddr], port: u16, tls_dns_name: String, trust_negative_responses: bool, ) -> Self { Self::from_ips_encrypted( ips, port, tls_dns_name, Protocol::Tls, trust_negative_responses, ) } /// Configure a NameServer address and port for DNS-over-HTTPS /// /// This will create a HTTPS connections. #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn from_ips_https( ips: &[IpAddr], port: u16, tls_dns_name: String, trust_negative_responses: bool, ) -> Self { Self::from_ips_encrypted( ips, port, tls_dns_name, Protocol::Https, trust_negative_responses, ) } /// Configure a NameServer address and port for DNS-over-QUIC /// /// This will create a QUIC connections. 
#[cfg(feature = "dns-over-quic")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-quic")))] pub fn from_ips_quic( ips: &[IpAddr], port: u16, tls_dns_name: String, trust_negative_responses: bool, ) -> Self { Self::from_ips_encrypted( ips, port, tls_dns_name, Protocol::Quic, trust_negative_responses, ) } /// Configure a NameServer address and port for DNS-over-HTTP/3 /// /// This will create a HTTP/3 connection. #[cfg(feature = "dns-over-h3")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-h3")))] pub fn from_ips_h3( ips: &[IpAddr], port: u16, tls_dns_name: String, trust_negative_responses: bool, ) -> Self { Self::from_ips_encrypted( ips, port, tls_dns_name, Protocol::H3, trust_negative_responses, ) } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` pub fn google() -> Self { Self::from_ips_clear(GOOGLE_IPS, 53, true) } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just TLS lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn google_tls() -> Self { Self::from_ips_tls(GOOGLE_IPS, 853, "dns.google".to_string(), true) } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just HTTPS lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn google_https() -> Self { Self::from_ips_https(GOOGLE_IPS, 443, "dns.google".to_string(), true) } /// Creates a default configuration, using `8.8.8.8`, `8.8.4.4` and `2001:4860:4860::8888`, `2001:4860:4860::8844` (thank you, Google). This limits the registered connections to just HTTP/3 lookups /// /// Please see Google's [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important information about what they track, many ISP's track similar information in DNS. To use the system configuration see: `Resolver::from_system_conf` and `AsyncResolver::from_system_conf` #[cfg(feature = "dns-over-h3")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-h3")))] pub fn google_h3() -> Self { Self::from_ips_h3(GOOGLE_IPS, 443, "dns.google".to_string(), true) } /// Creates a default configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). 
/// /// Please see: pub fn cloudflare() -> Self { Self::from_ips_clear(CLOUDFLARE_IPS, 53, true) } /// Creates a configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). This limits the registered connections to just TLS lookups /// /// Please see: #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn cloudflare_tls() -> Self { Self::from_ips_tls(CLOUDFLARE_IPS, 853, "cloudflare-dns.com".to_string(), true) } /// Creates a configuration, using `1.1.1.1`, `1.0.0.1` and `2606:4700:4700::1111`, `2606:4700:4700::1001` (thank you, Cloudflare). This limits the registered connections to just HTTPS lookups /// /// Please see: #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn cloudflare_https() -> Self { Self::from_ips_https(CLOUDFLARE_IPS, 443, "cloudflare-dns.com".to_string(), true) } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings (thank you, Quad9). /// /// Please see: pub fn quad9() -> Self { Self::from_ips_clear(QUAD9_IPS, 53, true) } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings. This limits the registered connections to just TLS lookups /// /// Please see: #[cfg(feature = "dns-over-tls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-tls")))] pub fn quad9_tls() -> Self { Self::from_ips_tls(QUAD9_IPS, 853, "dns.quad9.net".to_string(), true) } /// Creates a configuration, using `9.9.9.9`, `149.112.112.112` and `2620:fe::fe`, `2620:fe::fe:9`, the "secure" variants of the quad9 settings. This limits the registered connections to just HTTPS lookups /// /// Please see: #[cfg(feature = "dns-over-https")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https")))] pub fn quad9_https() -> Self { Self::from_ips_https(QUAD9_IPS, 443, "dns.quad9.net".to_string(), true) } /// Merges this set of [`NameServerConfig`]s with the other /// /// ``` /// use std::net::{SocketAddr, Ipv4Addr}; /// use hickory_resolver::config::NameServerConfigGroup; /// /// let mut group = NameServerConfigGroup::google(); /// group.merge(NameServerConfigGroup::cloudflare()); /// group.merge(NameServerConfigGroup::quad9()); /// /// assert!(group.iter().any(|c| c.socket_addr == SocketAddr::new(Ipv4Addr::new(8, 8, 8, 8).into(), 53))); /// assert!(group.iter().any(|c| c.socket_addr == SocketAddr::new(Ipv4Addr::new(1, 1, 1, 1).into(), 53))); /// assert!(group.iter().any(|c| c.socket_addr == SocketAddr::new(Ipv4Addr::new(9, 9, 9, 9).into(), 53))); /// ``` pub fn merge(&mut self, mut other: Self) { #[cfg(not(feature = "dns-over-rustls"))] { self.append(&mut other); } #[cfg(feature = "dns-over-rustls")] { self.0.append(&mut other); } } /// add a [`rustls::ClientConfig`] #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] pub fn with_client_config(self, client_config: Arc) -> Self { Self(self.0, Some(TlsClientConfig(client_config))) } /// Sets the client address (IP and port) to connect from on all name servers. 
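///
/// For example (a sketch; `192.0.2.10` is an illustrative local address, and port `0` asks the
/// OS for an ephemeral port):
///
/// ```
/// use std::net::SocketAddr;
/// use hickory_resolver::config::NameServerConfigGroup;
///
/// let bind = SocketAddr::from(([192, 0, 2, 10], 0));
/// let group = NameServerConfigGroup::cloudflare().with_bind_addr(Some(bind));
/// assert!(group.iter().all(|c| c.bind_addr == Some(bind)));
/// ```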
pub fn with_bind_addr(mut self, bind_addr: Option) -> Self { for server in &mut self.0 { server.bind_addr = bind_addr; } self } } impl Default for NameServerConfigGroup { fn default() -> Self { Self::new() } } impl Deref for NameServerConfigGroup { type Target = Vec; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for NameServerConfigGroup { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From> for NameServerConfigGroup { fn from(configs: Vec) -> Self { #[cfg(not(feature = "dns-over-rustls"))] { Self(configs) } #[cfg(feature = "dns-over-rustls")] { Self(configs, None) } } } /// The lookup ip strategy #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "serde-config", derive(Serialize, Deserialize))] pub enum LookupIpStrategy { /// Only query for A (Ipv4) records Ipv4Only, /// Only query for AAAA (Ipv6) records Ipv6Only, /// Query for A and AAAA in parallel Ipv4AndIpv6, /// Query for Ipv6 if that fails, query for Ipv4 Ipv6thenIpv4, /// Query for Ipv4 if that fails, query for Ipv6 (default) Ipv4thenIpv6, } impl Default for LookupIpStrategy { /// Returns [`LookupIpStrategy::Ipv4thenIpv6`] as the default. fn default() -> Self { Self::Ipv4thenIpv6 } } /// The strategy for establishing the query order of name servers in a pool. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "serde-config", derive(Serialize, Deserialize))] pub enum ServerOrderingStrategy { /// Servers are ordered based on collected query statistics. The ordering /// may vary over time. QueryStatistics, /// The order provided to the resolver is used. The ordering does not vary /// over time. UserProvidedOrder, } impl Default for ServerOrderingStrategy { /// Returns [`ServerOrderingStrategy::QueryStatistics`] as the default. fn default() -> Self { Self::QueryStatistics } } /// Configuration for the Resolver #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr( feature = "serde-config", derive(Serialize, Deserialize), serde(default) )] #[allow(missing_copy_implementations)] #[non_exhaustive] pub struct ResolverOpts { /// Sets the number of dots that must appear (unless it's a final dot representing the root) /// before a query is assumed to include the TLD. The default is one, which means that `www` /// would never be assumed to be a TLD, and would always be appended to either the search pub ndots: usize, /// Specify the timeout for a request. Defaults to 5 seconds pub timeout: Duration, /// Number of retries after lookup failure before giving up. Defaults to 2 pub attempts: usize, /// Rotate through the resource records in the response (if there is more than one for a given name) pub rotate: bool, /// Validate the names in the response, not implemented don't really see the point unless you need to support /// badly configured DNS pub check_names: bool, /// Enable edns, for larger records pub edns0: bool, /// Use DNSSEC to validate the request pub validate: bool, /// The ip_strategy for the Resolver to use when lookup Ipv4 or Ipv6 addresses pub ip_strategy: LookupIpStrategy, /// Cache size is in number of records (some records can be large) pub cache_size: usize, /// Check /ect/hosts file before dns requery (only works for unix like OS) pub use_hosts_file: bool, /// Optional minimum TTL for positive responses. /// /// If this is set, any positive responses with a TTL lower than this value will have a TTL of /// `positive_min_ttl` instead. Otherwise, this will default to 0 seconds. pub positive_min_ttl: Option, /// Optional minimum TTL for negative (`NXDOMAIN`) responses. 
/// /// If this is set, any negative responses with a TTL lower than this value will have a TTL of /// `negative_min_ttl` instead. Otherwise, this will default to 0 seconds. pub negative_min_ttl: Option, /// Optional maximum TTL for positive responses. /// /// If this is set, any positive responses with a TTL higher than this value will have a TTL of /// `positive_max_ttl` instead. Otherwise, this will default to [`MAX_TTL`] seconds. /// /// [`MAX_TTL`]: ../dns_lru/const.MAX_TTL.html pub positive_max_ttl: Option, /// Optional maximum TTL for negative (`NXDOMAIN`) responses. /// /// If this is set, any negative responses with a TTL higher than this value will have a TTL of /// `negative_max_ttl` instead. Otherwise, this will default to [`MAX_TTL`] seconds. /// /// [`MAX_TTL`]: ../dns_lru/const.MAX_TTL.html pub negative_max_ttl: Option, /// Number of concurrent requests per query /// /// Where more than one nameserver is configured, this configures the resolver to send queries /// to a number of servers in parallel. Defaults to 2; 0 or 1 will execute requests serially. pub num_concurrent_reqs: usize, /// Preserve all intermediate records in the lookup response, such as CNAME records pub preserve_intermediates: bool, /// Try queries over TCP if they fail over UDP. pub try_tcp_on_error: bool, /// The server ordering strategy that the resolver should use. pub server_ordering_strategy: ServerOrderingStrategy, /// Request upstream recursive resolvers to not perform any recursion. /// /// This is true by default, disabling this is useful for requesting single records, but may prevent successful resolution. pub recursion_desired: bool, /// This is true by default, disabling this is useful for requesting single records, but may prevent successful resolution. pub authentic_data: bool, /// Shuffle DNS servers before each query. pub shuffle_dns_servers: bool, } impl Default for ResolverOpts { /// Default values for the Resolver configuration. /// /// This follows the resolv.conf defaults as defined in the [Linux man pages](https://man7.org/linux/man-pages/man5/resolv.conf.5.html) fn default() -> Self { Self { ndots: 1, timeout: Duration::from_secs(5), attempts: 2, rotate: false, check_names: true, edns0: false, validate: false, ip_strategy: LookupIpStrategy::default(), cache_size: 32, use_hosts_file: true, positive_min_ttl: None, negative_min_ttl: None, positive_max_ttl: None, negative_max_ttl: None, num_concurrent_reqs: 2, // Defaults to `true` to match the behavior of dig and nslookup. 
preserve_intermediates: true, try_tcp_on_error: false, server_ordering_strategy: ServerOrderingStrategy::default(), recursion_desired: true, authentic_data: false, shuffle_dns_servers: false, } } } /// IP addresses for Google Public DNS pub const GOOGLE_IPS: &[IpAddr] = &[ IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), IpAddr::V4(Ipv4Addr::new(8, 8, 4, 4)), IpAddr::V6(Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888)), IpAddr::V6(Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8844)), ]; /// IP addresses for Cloudflare's 1.1.1.1 DNS service pub const CLOUDFLARE_IPS: &[IpAddr] = &[ IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), IpAddr::V4(Ipv4Addr::new(1, 0, 0, 1)), IpAddr::V6(Ipv6Addr::new(0x2606, 0x4700, 0x4700, 0, 0, 0, 0, 0x1111)), IpAddr::V6(Ipv6Addr::new(0x2606, 0x4700, 0x4700, 0, 0, 0, 0, 0x1001)), ]; /// IP address for the Quad9 DNS service pub const QUAD9_IPS: &[IpAddr] = &[ IpAddr::V4(Ipv4Addr::new(9, 9, 9, 9)), IpAddr::V4(Ipv4Addr::new(149, 112, 112, 112)), IpAddr::V6(Ipv6Addr::new(0x2620, 0x00fe, 0, 0, 0, 0, 0, 0x00fe)), IpAddr::V6(Ipv6Addr::new(0x2620, 0x00fe, 0, 0, 0, 0, 0x00fe, 0x0009)), ]; hickory-resolver-0.24.0/src/dns_lru.rs000064400000000000000000000652161046102023000160330ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! An LRU cache designed for work with DNS lookups use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; use lru_cache::LruCache; use parking_lot::Mutex; use proto::op::Query; use proto::rr::Record; use crate::config; use crate::error::*; use crate::lookup::Lookup; /// Maximum TTL as defined in https://tools.ietf.org/html/rfc2181, 2147483647 /// Setting this to a value of 1 day, in seconds pub(crate) const MAX_TTL: u32 = 86400_u32; #[derive(Debug)] struct LruValue { // In the None case, this represents an NXDomain lookup: Result, valid_until: Instant, } impl LruValue { /// Returns true if this set of ips is still valid fn is_current(&self, now: Instant) -> bool { now <= self.valid_until } /// Returns the ttl as a Duration of time remaining. fn ttl(&self, now: Instant) -> Duration { self.valid_until.saturating_duration_since(now) } fn with_updated_ttl(&self, now: Instant) -> Self { let lookup = match self.lookup { Ok(ref lookup) => { let records = lookup .records() .iter() .map(|record| { let mut record = record.clone(); record.set_ttl(self.ttl(now).as_secs() as u32); record }) .collect::>(); Ok(Lookup::new_with_deadline( lookup.query().clone(), Arc::from(records), self.valid_until, )) } Err(ref e) => Err(e.clone()), }; Self { lookup, valid_until: self.valid_until, } } } /// And LRU eviction cache specifically for storing DNS records #[derive(Clone, Debug)] pub struct DnsLru { cache: Arc>>, /// A minimum TTL value for positive responses. /// /// Positive responses with TTLs under `positive_max_ttl` will use /// `positive_max_ttl` instead. /// /// If this value is not set on the `TtlConfig` used to construct this /// `DnsLru`, it will default to 0. positive_min_ttl: Duration, /// A minimum TTL value for negative (`NXDOMAIN`) responses. /// /// `NXDOMAIN` responses with TTLs under `negative_min_ttl` will use /// `negative_min_ttl` instead. /// /// If this value is not set on the `TtlConfig` used to construct this /// `DnsLru`, it will default to 0. negative_min_ttl: Duration, /// A maximum TTL value for positive responses. 
    ///
    /// Positive responses with TTLs over `positive_max_ttl` will use
    /// `positive_max_ttl` instead.
    ///
    /// If this value is not set on the `TtlConfig` used to construct this
    /// `DnsLru`, it will default to [`MAX_TTL`] seconds.
    ///
    /// [`MAX_TTL`]: const.MAX_TTL.html
    positive_max_ttl: Duration,
    /// A maximum TTL value for negative (`NXDOMAIN`) responses.
    ///
    /// `NXDOMAIN` responses with TTLs over `negative_max_ttl` will use
    /// `negative_max_ttl` instead.
    ///
    /// If this value is not set on the `TtlConfig` used to construct this
    /// `DnsLru`, it will default to [`MAX_TTL`] seconds.
    ///
    /// [`MAX_TTL`]: const.MAX_TTL.html
    negative_max_ttl: Duration,
}

/// The time-to-live (TTL) configuration for use by the cache.
///
/// The TTL in DNS is expressed as a `u32`. We use `Duration` here for tracking, which can express
/// larger values than the DNS standard allows. Generally a `Duration` greater than `u32::MAX`
/// seconds shouldn't cause any issue, as this will never be used in serialization, but understand
/// that this would be outside the standard range.
#[derive(Copy, Clone, Debug, Default)]
pub struct TtlConfig {
    /// An optional minimum TTL value for positive responses.
    ///
    /// Positive responses with TTLs under `positive_min_ttl` will use
    /// `positive_min_ttl` instead.
    pub(crate) positive_min_ttl: Option<Duration>,
    /// An optional minimum TTL value for negative (`NXDOMAIN`) responses.
    ///
    /// `NXDOMAIN` responses with TTLs under `negative_min_ttl` will use
    /// `negative_min_ttl` instead.
    pub(crate) negative_min_ttl: Option<Duration>,
    /// An optional maximum TTL value for positive responses.
    ///
    /// Positive responses with TTLs over `positive_max_ttl` will use
    /// `positive_max_ttl` instead.
    pub(crate) positive_max_ttl: Option<Duration>,
    /// An optional maximum TTL value for negative (`NXDOMAIN`) responses.
    ///
    /// `NXDOMAIN` responses with TTLs over `negative_max_ttl` will use
    /// `negative_max_ttl` instead.
    pub(crate) negative_max_ttl: Option<Duration>,
}

impl TtlConfig {
    /// Construct the LRU based on the ResolverOpts configuration
    pub fn from_opts(opts: &config::ResolverOpts) -> Self {
        Self {
            positive_min_ttl: opts.positive_min_ttl,
            negative_min_ttl: opts.negative_min_ttl,
            positive_max_ttl: opts.positive_max_ttl,
            negative_max_ttl: opts.negative_max_ttl,
        }
    }
}

impl DnsLru {
    /// Construct a new cache
    ///
    /// # Arguments
    ///
    /// * `capacity` - size in number of records, this can be the max size of 2048 (record size) * `capacity`
    /// * `ttl_cfg` - force minimums and maximums for cached records
    pub fn new(capacity: usize, ttl_cfg: TtlConfig) -> Self {
        let TtlConfig {
            positive_min_ttl,
            negative_min_ttl,
            positive_max_ttl,
            negative_max_ttl,
        } = ttl_cfg;
        let cache = Arc::new(Mutex::new(LruCache::new(capacity)));
        Self {
            cache,
            positive_min_ttl: positive_min_ttl.unwrap_or_else(|| Duration::from_secs(0)),
            negative_min_ttl: negative_min_ttl.unwrap_or_else(|| Duration::from_secs(0)),
            positive_max_ttl: positive_max_ttl
                .unwrap_or_else(|| Duration::from_secs(u64::from(MAX_TTL))),
            negative_max_ttl: negative_max_ttl
                .unwrap_or_else(|| Duration::from_secs(u64::from(MAX_TTL))),
        }
    }

    pub(crate) fn clear(&self) {
        self.cache.lock().clear();
    }

    pub(crate) fn insert(
        &self,
        query: Query,
        records_and_ttl: Vec<(Record, u32)>,
        now: Instant,
    ) -> Lookup {
        let len = records_and_ttl.len();
        // collapse the values, we're going to take the Minimum TTL as the correct one
        let (records, ttl): (Vec<Record>, Duration) = records_and_ttl.into_iter().fold(
            (Vec::with_capacity(len), self.positive_max_ttl),
            |(mut records, mut min_ttl), (record, ttl)| {
                records.push(record);
                let ttl = Duration::from_secs(u64::from(ttl));
                min_ttl = min_ttl.min(ttl);
                (records, min_ttl)
            },
        );

        // If the cache was configured with a minimum TTL, and that value is higher
        // than the minimum TTL in the values, use it instead.
        let ttl = self.positive_min_ttl.max(ttl);
        let valid_until = now + ttl;

        // insert into the LRU
        let lookup = Lookup::new_with_deadline(query.clone(), Arc::from(records), valid_until);
        self.cache.lock().insert(
            query,
            LruValue {
                lookup: Ok(lookup.clone()),
                valid_until,
            },
        );

        lookup
    }

    /// inserts a record based on the name and type.
    ///
    /// # Arguments
    ///
    /// * `original_query` - is used for matching the records that should be returned
    /// * `records` - the records will be partitioned by type and name for storage in the cache
    /// * `now` - current time for use in associating TTLs
    ///
    /// # Return
    ///
    /// This should always return some records, but will be None if there are no records or the original_query matches none
    pub fn insert_records(
        &self,
        original_query: Query,
        records: impl Iterator<Item = Record>,
        now: Instant,
    ) -> Option<Lookup> {
        // collect all records by name
        let records = records.fold(
            HashMap::<Query, Vec<(Record, u32)>>::new(),
            |mut map, record| {
                let mut query = Query::query(record.name().clone(), record.record_type());
                query.set_query_class(record.dns_class());

                let ttl = record.ttl();

                map.entry(query).or_default().push((record, ttl));

                map
            },
        );

        // now insert by record type and name
        let mut lookup = None;
        for (query, records_and_ttl) in records {
            let is_query = original_query == query;
            let inserted = self.insert(query, records_and_ttl, now);

            if is_query {
                lookup = Some(inserted)
            }
        }

        lookup
    }

    /// Generally for inserting a set of records that have already been cached, but with a different Query.
pub(crate) fn duplicate(&self, query: Query, lookup: Lookup, ttl: u32, now: Instant) -> Lookup { let ttl = Duration::from_secs(u64::from(ttl)); let valid_until = now + ttl; self.cache.lock().insert( query, LruValue { lookup: Ok(lookup.clone()), valid_until, }, ); lookup } /// This converts the ResolveError to set the inner negative_ttl value to be the /// current expiration ttl. fn nx_error_with_ttl(error: &mut ResolveError, new_ttl: Duration) { if let ResolveError { kind: ResolveErrorKind::NoRecordsFound { ref mut negative_ttl, .. }, .. } = error { *negative_ttl = Some(u32::try_from(new_ttl.as_secs()).unwrap_or(MAX_TTL)); } } pub(crate) fn negative( &self, query: Query, mut error: ResolveError, now: Instant, ) -> ResolveError { // TODO: if we are getting a negative response, should we instead fallback to cache? // this would cache indefinitely, probably not correct if let ResolveError { kind: ResolveErrorKind::NoRecordsFound { negative_ttl: Some(ttl), .. }, .. } = error { let ttl_duration = Duration::from_secs(u64::from(ttl)) // Clamp the TTL so that it's between the cache's configured // minimum and maximum TTLs for negative responses. .clamp(self.negative_min_ttl, self.negative_max_ttl); let valid_until = now + ttl_duration; { let error = error.clone(); self.cache.lock().insert( query, LruValue { lookup: Err(error), valid_until, }, ); } Self::nx_error_with_ttl(&mut error, ttl_duration); } error } /// Based on the query, see if there are any records available pub fn get(&self, query: &Query, now: Instant) -> Option> { let mut out_of_date = false; let mut cache = self.cache.lock(); let lookup = cache.get_mut(query).and_then(|value| { if value.is_current(now) { out_of_date = false; let mut result = value.with_updated_ttl(now).lookup; if let Err(ref mut err) = result { Self::nx_error_with_ttl(err, value.ttl(now)); } Some(result) } else { out_of_date = true; None } }); // in this case, we can preemptively remove out of data elements // this assumes time is always moving forward, this would only not be true in contrived situations where now // is not current time, like tests... if out_of_date { cache.remove(query); } lookup } } // see also the lookup_tests.rs in integration-tests crate #[cfg(test)] mod tests { use std::str::FromStr; use std::time::*; use proto::op::{Query, ResponseCode}; use proto::rr::rdata::A; use proto::rr::{Name, RData, RecordType}; use super::*; #[test] fn test_is_current() { let now = Instant::now(); let not_the_future = now + Duration::from_secs(4); let future = now + Duration::from_secs(5); let past_the_future = now + Duration::from_secs(6); let value = LruValue { lookup: Err(ResolveErrorKind::Message("test error").into()), valid_until: future, }; assert!(value.is_current(now)); assert!(value.is_current(not_the_future)); assert!(value.is_current(future)); assert!(!value.is_current(past_the_future)); } #[test] fn test_lookup_uses_positive_min_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); // record should have TTL of 1 second. let ips_ttl = vec![( Record::from_rdata(name.clone(), 1, RData::A(A::new(127, 0, 0, 1))), 1, )]; let ips = vec![RData::A(A::new(127, 0, 0, 1))]; // configure the cache with a minimum TTL of 2 seconds. 
let ttls = TtlConfig { positive_min_ttl: Some(Duration::from_secs(2)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); let rc_ips = lru.insert(query.clone(), ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); // the returned lookup should use the cache's min TTL, since the // query's TTL was below the minimum. assert_eq!(rc_ips.valid_until(), now + Duration::from_secs(2)); // record should have TTL of 3 seconds. let ips_ttl = vec![( Record::from_rdata(name, 3, RData::A(A::new(127, 0, 0, 1))), 3, )]; let rc_ips = lru.insert(query, ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); // the returned lookup should use the record's TTL, since it's // greater than the cache's minimum. assert_eq!(rc_ips.valid_until(), now + Duration::from_secs(3)); } #[test] fn test_error_uses_negative_min_ttl() { let now = Instant::now(); let name = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A); // configure the cache with a maximum TTL of 2 seconds. let ttls = TtlConfig { negative_min_ttl: Some(Duration::from_secs(2)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); // neg response should have TTL of 1 seconds. let err = ResolveErrorKind::NoRecordsFound { query: Box::new(name.clone()), soa: None, negative_ttl: Some(1), response_code: ResponseCode::NoError, trusted: false, }; let nx_error = lru.negative(name.clone(), err.into(), now); match nx_error.kind() { &ResolveErrorKind::NoRecordsFound { negative_ttl, .. } => { let valid_until = negative_ttl.expect("resolve error should have a deadline"); // the error's `valid_until` field should have been limited to 2 seconds. assert_eq!(valid_until, 2); } other => panic!("expected ResolveErrorKind::NoRecordsFound, got {:?}", other), } // neg response should have TTL of 3 seconds. let err = ResolveErrorKind::NoRecordsFound { query: Box::new(name.clone()), soa: None, negative_ttl: Some(3), response_code: ResponseCode::NoError, trusted: false, }; let nx_error = lru.negative(name, err.into(), now); match nx_error.kind() { &ResolveErrorKind::NoRecordsFound { negative_ttl, .. } => { let negative_ttl = negative_ttl.expect("ResolveError should have a deadline"); // the error's `valid_until` field should not have been limited, as it was // over the min TTL. assert_eq!(negative_ttl, 3); } other => panic!("expected ResolveErrorKind::NoRecordsFound, got {:?}", other), } } #[test] fn test_lookup_uses_positive_max_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); // record should have TTL of 62 seconds. let ips_ttl = vec![( Record::from_rdata(name.clone(), 62, RData::A(A::new(127, 0, 0, 1))), 62, )]; let ips = vec![RData::A(A::new(127, 0, 0, 1))]; // configure the cache with a maximum TTL of 60 seconds. let ttls = TtlConfig { positive_max_ttl: Some(Duration::from_secs(60)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); let rc_ips = lru.insert(query.clone(), ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); // the returned lookup should use the cache's min TTL, since the // query's TTL was above the maximum. assert_eq!(rc_ips.valid_until(), now + Duration::from_secs(60)); // record should have TTL of 59 seconds. 
let ips_ttl = vec![( Record::from_rdata(name, 59, RData::A(A::new(127, 0, 0, 1))), 59, )]; let rc_ips = lru.insert(query, ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); // the returned lookup should use the record's TTL, since it's // below than the cache's maximum. assert_eq!(rc_ips.valid_until(), now + Duration::from_secs(59)); } #[test] fn test_error_uses_negative_max_ttl() { let now = Instant::now(); let name = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A); // configure the cache with a maximum TTL of 60 seconds. let ttls = TtlConfig { negative_max_ttl: Some(Duration::from_secs(60)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); // neg response should have TTL of 62 seconds. let err = ResolveErrorKind::NoRecordsFound { query: Box::new(name.clone()), soa: None, negative_ttl: Some(62), response_code: ResponseCode::NoError, trusted: false, }; let nx_error = lru.negative(name.clone(), err.into(), now); match nx_error.kind() { &ResolveErrorKind::NoRecordsFound { negative_ttl, .. } => { let negative_ttl = negative_ttl.expect("resolve error should have a deadline"); // the error's `valid_until` field should have been limited to 60 seconds. assert_eq!(negative_ttl, 60); } other => panic!("expected ResolveErrorKind::NoRecordsFound, got {:?}", other), } // neg response should have TTL of 59 seconds. let err = ResolveErrorKind::NoRecordsFound { query: Box::new(name.clone()), soa: None, negative_ttl: Some(59), response_code: ResponseCode::NoError, trusted: false, }; let nx_error = lru.negative(name, err.into(), now); match nx_error.kind() { &ResolveErrorKind::NoRecordsFound { negative_ttl, .. } => { let negative_ttl = negative_ttl.expect("resolve error should have a deadline"); // the error's `valid_until` field should not have been limited, as it was // under the max TTL. 
assert_eq!(negative_ttl, 59); } other => panic!("expected ResolveErrorKind::NoRecordsFound, got {:?}", other), } } #[test] fn test_insert() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); let ips_ttl = vec![( Record::from_rdata(name, 1, RData::A(A::new(127, 0, 0, 1))), 1, )]; let ips = vec![RData::A(A::new(127, 0, 0, 1))]; let lru = DnsLru::new(1, TtlConfig::default()); let rc_ips = lru.insert(query.clone(), ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); let rc_ips = lru.get(&query, now).unwrap().expect("records should exist"); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); } #[test] fn test_update_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); let ips_ttl = vec![( Record::from_rdata(name, 10, RData::A(A::new(127, 0, 0, 1))), 10, )]; let ips = vec![RData::A(A::new(127, 0, 0, 1))]; let lru = DnsLru::new(1, TtlConfig::default()); let rc_ips = lru.insert(query.clone(), ips_ttl, now); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); let ttl = lru .get(&query, now + Duration::from_secs(2)) .unwrap() .expect("records should exist") .record_iter() .next() .unwrap() .ttl(); assert!(ttl <= 8); } #[test] fn test_insert_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); // TTL should be 1 let ips_ttl = vec![ ( Record::from_rdata(name.clone(), 1, RData::A(A::new(127, 0, 0, 1))), 1, ), ( Record::from_rdata(name, 2, RData::A(A::new(127, 0, 0, 2))), 2, ), ]; let ips = vec![ RData::A(A::new(127, 0, 0, 1)), RData::A(A::new(127, 0, 0, 2)), ]; let lru = DnsLru::new(1, TtlConfig::default()); lru.insert(query.clone(), ips_ttl, now); // still valid let rc_ips = lru .get(&query, now + Duration::from_secs(1)) .unwrap() .expect("records should exist"); assert_eq!(*rc_ips.iter().next().unwrap(), ips[0]); // 2 should be one too far let rc_ips = lru.get(&query, now + Duration::from_secs(2)); assert!(rc_ips.is_none()); } #[test] fn test_insert_positive_min_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); // TTL should be 1 let ips_ttl = vec![ ( Record::from_rdata(name.clone(), 1, RData::A(A::new(127, 0, 0, 1))), 1, ), ( Record::from_rdata(name, 2, RData::A(A::new(127, 0, 0, 2))), 2, ), ]; let ips = vec![ RData::A(A::new(127, 0, 0, 1)), RData::A(A::new(127, 0, 0, 2)), ]; // this cache should override the TTL of 1 seconds with the configured // minimum TTL of 3 seconds. let ttls = TtlConfig { positive_min_ttl: Some(Duration::from_secs(3)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); lru.insert(query.clone(), ips_ttl, now); // still valid let rc_ips = lru .get(&query, now + Duration::from_secs(1)) .unwrap() .expect("records should exist"); for (rc_ip, ip) in rc_ips.iter().zip(ips.iter()) { assert_eq!(rc_ip, ip, "after 1 second"); } let rc_ips = lru .get(&query, now + Duration::from_secs(2)) .unwrap() .expect("records should exist"); for (rc_ip, ip) in rc_ips.iter().zip(ips.iter()) { assert_eq!(rc_ip, ip, "after 2 seconds"); } let rc_ips = lru .get(&query, now + Duration::from_secs(3)) .unwrap() .expect("records should exist"); for (rc_ip, ip) in rc_ips.iter().zip(ips.iter()) { assert_eq!(rc_ip, ip, "after 3 seconds"); } // after 4 seconds, the records should be invalid. 
let rc_ips = lru.get(&query, now + Duration::from_secs(4)); assert!(rc_ips.is_none()); } #[test] fn test_insert_positive_max_ttl() { let now = Instant::now(); let name = Name::from_str("www.example.com.").unwrap(); let query = Query::query(name.clone(), RecordType::A); // TTL should be 500 let ips_ttl = vec![ ( Record::from_rdata(name.clone(), 400, RData::A(A::new(127, 0, 0, 1))), 400, ), ( Record::from_rdata(name, 500, RData::A(A::new(127, 0, 0, 2))), 500, ), ]; let ips = vec![ RData::A(A::new(127, 0, 0, 1)), RData::A(A::new(127, 0, 0, 2)), ]; // this cache should override the TTL of 500 seconds with the configured // minimum TTL of 2 seconds. let ttls = TtlConfig { positive_max_ttl: Some(Duration::from_secs(2)), ..TtlConfig::default() }; let lru = DnsLru::new(1, ttls); lru.insert(query.clone(), ips_ttl, now); // still valid let rc_ips = lru .get(&query, now + Duration::from_secs(1)) .unwrap() .expect("records should exist"); for (rc_ip, ip) in rc_ips.iter().zip(ips.iter()) { assert_eq!(rc_ip, ip, "after 1 second"); } let rc_ips = lru .get(&query, now + Duration::from_secs(2)) .unwrap() .expect("records should exist"); for (rc_ip, ip) in rc_ips.iter().zip(ips.iter()) { assert_eq!(rc_ip, ip, "after 2 seconds"); } // after 3 seconds, the records should be invalid. let rc_ips = lru.get(&query, now + Duration::from_secs(3)); assert!(rc_ips.is_none()); } } hickory-resolver-0.24.0/src/dns_sd.rs000064400000000000000000000147161046102023000156360ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! DNS Service Discovery #![cfg(feature = "mdns")] use std::borrow::Cow; use std::collections::HashMap; use std::pin::Pin; use std::task::{Context, Poll}; use futures_util::future::Future; use proto::rr::rdata::TXT; use proto::rr::{Name, RecordType}; use proto::xfer::DnsRequestOptions; use proto::DnsHandle; use crate::error::*; use crate::lookup::{ReverseLookup, ReverseLookupIter, TxtLookup}; use crate::name_server::ConnectionProvider; use crate::AsyncResolver; /// An extension for the Resolver to perform DNS Service Discovery pub trait DnsSdHandle { /// List all services available /// /// /// /// For registered service types, see: fn list_services(&self, name: Name) -> ListServicesFuture; /// Retrieve service information /// /// fn service_info(&self, name: Name) -> ServiceInfoFuture; } impl, P: ConnectionProvider> DnsSdHandle for AsyncResolver { fn list_services(&self, name: Name) -> ListServicesFuture { let this = self.clone(); let ptr_future = async move { let mut options = DnsRequestOptions::default(); options.expects_multiple_responses = true; // TODO: This should use the AsyncResolver's options.edns0 // setting, but options is private. 
options.use_edns = false; this.inner_lookup(name, RecordType::PTR, options).await }; ListServicesFuture(Box::pin(ptr_future)) } fn service_info(&self, name: Name) -> ServiceInfoFuture { let this = self.clone(); let ptr_future = async move { this.txt_lookup(name).await }; ServiceInfoFuture(Box::pin(ptr_future)) } } /// A DNS Service Discovery future of Services discovered through the list operation pub struct ListServicesFuture( Pin> + Send + 'static>>, ); impl Future for ListServicesFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.0.as_mut().poll(cx) { Poll::Ready(Ok(lookup)) => Poll::Ready(Ok(ListServices(lookup))), Poll::Pending => Poll::Pending, Poll::Ready(Err(e)) => Poll::Ready(Err(e)), } } } /// The list of Services discovered pub struct ListServices(ReverseLookup); impl ListServices { /// Returns an iterator over the list of returned names of services. /// /// Each name can be queried for additional information. To lookup service entries see [AsyncResolver::lookup_srv(..)]. To get parameters associated with the service, see `DnsSdFuture::service_info`. pub fn iter(&self) -> ListServicesIter<'_> { ListServicesIter(self.0.iter()) } } /// An iterator over the Lookup type pub struct ListServicesIter<'i>(ReverseLookupIter<'i>); impl<'i> Iterator for ListServicesIter<'i> { type Item = &'i Name; fn next(&mut self) -> Option { self.0.next() } } /// A Future that resolves to the TXT information for a service pub struct ServiceInfoFuture( Pin> + Send + 'static>>, ); impl Future for ServiceInfoFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.0.as_mut().poll(cx) { Poll::Ready(Ok(lookup)) => Poll::Ready(Ok(ServiceInfo(lookup))), Poll::Pending => Poll::Pending, Poll::Ready(Err(e)) => Poll::Ready(Err(e)), } } } /// The list of Services discovered pub struct ServiceInfo(TxtLookup); impl ServiceInfo { /// Returns this as a map, this allocates a new hashmap /// /// This converts the DNS-SD TXT record into a map following the rules specified in pub fn to_map(&self) -> HashMap, Option>> { self.0 .iter() .flat_map(TXT::iter) .filter_map(|bytes| { let mut split = bytes.split(|byte| *byte == b'='); let key = split.next().map(String::from_utf8_lossy); let value = split.next().map(String::from_utf8_lossy); if let Some(key) = key { Some((key, value)) } else { None } }) .collect() } } #[cfg(test)] mod tests { #![allow(clippy::dbg_macro, clippy::print_stdout)] use std::str::FromStr; use tokio::runtime::Runtime; use crate::config::*; use crate::{TokioAsyncResolver, TokioHandle}; use super::*; #[test] #[ignore] fn test_list_services() { let io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new( ResolverConfig::default(), ResolverOpts { ip_strategy: LookupIpStrategy::Ipv6thenIpv4, ..ResolverOpts::default() }, TokioHandle::default(), ) .expect("failed to create resolver"); let response = io_loop .block_on(resolver.list_services(Name::from_str("_http._tcp.local.").unwrap())) .expect("failed to run lookup"); for name in response.iter() { println!("service: {}", name); let srvs = io_loop .block_on(resolver.srv_lookup(name.clone())) .expect("failed to lookup name"); for srv in srvs.iter() { println!("service: {:#?}", srv); let info = io_loop .block_on(resolver.service_info(name.clone())) .expect("info failed"); let info = info.to_map(); println!("info: {:#?}", info); } for ip in srvs.ip_iter() { println!("ip: {}", ip); } } } } 
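
// A minimal sketch (illustrative, not part of the upstream test suite) of how
// `ServiceInfo::to_map` turns DNS-SD TXT entries of the form `key=value` into a map. The service
// name and TXT contents below are made-up example values; the `ServiceInfo` is built from this
// crate's own constructors (`Lookup::from_rdata`, `TxtLookup::from`).
#[cfg(test)]
mod service_info_sketch {
    use std::str::FromStr;

    use proto::op::Query;
    use proto::rr::rdata::TXT;
    use proto::rr::{Name, RData, RecordType};

    use crate::lookup::{Lookup, TxtLookup};

    use super::ServiceInfo;

    #[test]
    fn txt_key_value_to_map() {
        let name = Name::from_str("_http._tcp.local.").unwrap();
        let query = Query::query(name, RecordType::TXT);

        // DNS-SD conventionally stores one `key=value` pair per TXT character-string.
        let rdata = RData::TXT(TXT::new(vec![
            "path=/api".to_string(),
            "bare-flag".to_string(),
        ]));
        let info = ServiceInfo(TxtLookup::from(Lookup::from_rdata(query, rdata)));

        let map = info.to_map();
        // `key=value` entries are split on `=` into a key and a value...
        assert_eq!(map.get("path").cloned().flatten().as_deref(), Some("/api"));
        // ...while entries without `=` become keys with no value.
        assert_eq!(map.get("bare-flag").cloned().flatten(), None);
    }
}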
hickory-resolver-0.24.0/src/error.rs000064400000000000000000000303141046102023000155050ustar 00000000000000// Copyright 2015-2023 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Error types for the crate use std::{cmp::Ordering, fmt, io, sync}; use thiserror::Error; use tracing::debug; use crate::proto::{ error::{ProtoError, ProtoErrorKind}, op::{Query, ResponseCode}, rr::{ rdata::SOA, resource::{Record, RecordRef}, }, xfer::{retry_dns_handle::RetryableError, DnsResponse}, }; #[cfg(feature = "backtrace")] use crate::proto::{trace, ExtBacktrace}; /// An alias for results returned by functions of this crate pub type ResolveResult = ::std::result::Result; #[allow(clippy::large_enum_variant)] /// The error kind for errors that get returned in the crate #[derive(Debug, Error)] #[non_exhaustive] pub enum ResolveErrorKind { /// An error with an arbitrary message, referenced as &'static str #[error("{0}")] Message(&'static str), /// An error with an arbitrary message, stored as String #[error("{0}")] Msg(String), /// No resolvers available #[error("No connections available")] NoConnections, /// No records were found for a query #[error("no record found for {:?}", query)] NoRecordsFound { /// The query for which no records were found. query: Box, /// If an SOA is present, then this is an authoritative response or a referral to another nameserver, see the negative_type field. soa: Option>>, /// negative ttl, as determined from DnsResponse::negative_ttl /// this will only be present if the SOA was also present. negative_ttl: Option, /// ResponseCode, if `NXDOMAIN`, the domain does not exist (and no other types). /// If `NoError`, then the domain exists but there exist either other types at the same label, or subzones of that label. 
response_code: ResponseCode, /// If we trust `NXDOMAIN` errors from this server trusted: bool, }, // foreign /// An error got returned from IO #[error("io error: {0}")] Io(#[from] std::io::Error), /// An error got returned by the hickory-proto crate #[error("proto error: {0}")] Proto(#[from] ProtoError), /// A request timed out #[error("request timed out")] Timeout, } impl Clone for ResolveErrorKind { fn clone(&self) -> Self { use self::ResolveErrorKind::*; match self { NoConnections => NoConnections, Message(msg) => Message(msg), Msg(ref msg) => Msg(msg.clone()), NoRecordsFound { ref query, ref soa, negative_ttl, response_code, trusted, } => NoRecordsFound { query: query.clone(), soa: soa.clone(), negative_ttl: *negative_ttl, response_code: *response_code, trusted: *trusted, }, // foreign Io(io) => Self::from(std::io::Error::from(io.kind())), Proto(proto) => Self::from(proto.clone()), Timeout => Timeout, } } } /// The error type for errors that get returned in the crate #[derive(Debug, Clone, Error)] pub struct ResolveError { pub(crate) kind: ResolveErrorKind, #[cfg(feature = "backtrace")] backtrack: Option, } impl ResolveError { pub(crate) fn nx_error( query: Query, soa: Option>, negative_ttl: Option, response_code: ResponseCode, trusted: bool, ) -> Self { ResolveErrorKind::NoRecordsFound { query: Box::new(query), soa: soa.map(Box::new), negative_ttl, response_code, trusted, } .into() } /// Get the kind of the error pub fn kind(&self) -> &ResolveErrorKind { &self.kind } pub(crate) fn no_connections() -> Self { Self { kind: ResolveErrorKind::NoConnections, #[cfg(feature = "backtrace")] backtrack: trace!(), } } pub(crate) fn is_no_connections(&self) -> bool { matches!(self.kind, ResolveErrorKind::NoConnections) } /// A conversion to determine if the response is an error pub fn from_response(response: DnsResponse, trust_nx: bool) -> Result { debug!("Response:{}", *response); match response.response_code() { response_code @ ResponseCode::ServFail | response_code @ ResponseCode::Refused | response_code @ ResponseCode::FormErr | response_code @ ResponseCode::NotImp | response_code @ ResponseCode::YXDomain | response_code @ ResponseCode::YXRRSet | response_code @ ResponseCode::NXRRSet | response_code @ ResponseCode::NotAuth | response_code @ ResponseCode::NotZone | response_code @ ResponseCode::BADVERS | response_code @ ResponseCode::BADSIG | response_code @ ResponseCode::BADKEY | response_code @ ResponseCode::BADTIME | response_code @ ResponseCode::BADMODE | response_code @ ResponseCode::BADNAME | response_code @ ResponseCode::BADALG | response_code @ ResponseCode::BADTRUNC | response_code @ ResponseCode::BADCOOKIE => { let response = response; let soa = response.soa().as_ref().map(RecordRef::to_owned); let query = response.queries().iter().next().cloned().unwrap_or_default(); let error_kind = ResolveErrorKind::NoRecordsFound { query: Box::new(query), soa: soa.map(Box::new), negative_ttl: None, response_code, trusted: false, }; Err(Self::from(error_kind)) } // Some NXDOMAIN responses contain CNAME referrals, that will not be an error response_code @ ResponseCode::NXDomain | // No answers are available, CNAME referrals are not failures response_code @ ResponseCode::NoError if !response.contains_answer() && !response.truncated() => { // TODO: if authoritative, this is cacheable, store a TTL (currently that requires time, need a "now" here) // let valid_until = if response.authoritative() { now + response.negative_ttl() }; let response = response; let soa = 
response.soa().as_ref().map(RecordRef::to_owned); let negative_ttl = response.negative_ttl(); // Note: improperly configured servers may do recursive lookups and return bad SOA // records here via AS112 (blackhole-1.iana.org. etc) // Such servers should be marked not trusted, as they may break reverse lookups // for local hosts. let trusted = trust_nx && soa.is_some(); let query = response.into_message().take_queries().drain(..).next().unwrap_or_default(); let error_kind = ResolveErrorKind::NoRecordsFound { query: Box::new(query), soa: soa.map(Box::new), negative_ttl, response_code, trusted, }; Err(Self::from(error_kind)) } ResponseCode::NXDomain | ResponseCode::NoError | ResponseCode::Unknown(_) => Ok(response), } } /// Compare two errors to see if one contains a server response. pub(crate) fn cmp_specificity(&self, other: &Self) -> Ordering { let kind = self.kind(); let other = other.kind(); match (kind, other) { (ResolveErrorKind::NoRecordsFound { .. }, ResolveErrorKind::NoRecordsFound { .. }) => { return Ordering::Equal } (ResolveErrorKind::NoRecordsFound { .. }, _) => return Ordering::Greater, (_, ResolveErrorKind::NoRecordsFound { .. }) => return Ordering::Less, _ => (), } match (kind, other) { (ResolveErrorKind::Io { .. }, ResolveErrorKind::Io { .. }) => return Ordering::Equal, (ResolveErrorKind::Io { .. }, _) => return Ordering::Greater, (_, ResolveErrorKind::Io { .. }) => return Ordering::Less, _ => (), } match (kind, other) { (ResolveErrorKind::Proto { .. }, ResolveErrorKind::Proto { .. }) => { return Ordering::Equal } (ResolveErrorKind::Proto { .. }, _) => return Ordering::Greater, (_, ResolveErrorKind::Proto { .. }) => return Ordering::Less, _ => (), } match (kind, other) { (ResolveErrorKind::Timeout, ResolveErrorKind::Timeout) => return Ordering::Equal, (ResolveErrorKind::Timeout, _) => return Ordering::Greater, (_, ResolveErrorKind::Timeout) => return Ordering::Less, _ => (), } Ordering::Equal } } impl RetryableError for ResolveError { fn should_retry(&self) -> bool { match self.kind() { ResolveErrorKind::Message(_) | ResolveErrorKind::Msg(_) | ResolveErrorKind::NoConnections | ResolveErrorKind::NoRecordsFound { .. } => false, ResolveErrorKind::Io(_) | ResolveErrorKind::Proto(_) | ResolveErrorKind::Timeout => { true } } } fn attempted(&self) -> bool { match self.kind() { ResolveErrorKind::Proto(e) => !matches!(e.kind(), ProtoErrorKind::Busy), _ => true, } } } impl fmt::Display for ResolveError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { cfg_if::cfg_if! 
{ if #[cfg(feature = "backtrace")] { if let Some(ref backtrace) = self.backtrack { fmt::Display::fmt(&self.kind, f)?; fmt::Debug::fmt(backtrace, f) } else { fmt::Display::fmt(&self.kind, f) } } else { fmt::Display::fmt(&self.kind, f) } } } } impl From for ResolveError { fn from(kind: ResolveErrorKind) -> Self { Self { kind, #[cfg(feature = "backtrace")] backtrack: trace!(), } } } impl From<&'static str> for ResolveError { fn from(msg: &'static str) -> Self { ResolveErrorKind::Message(msg).into() } } #[cfg(target_os = "windows")] #[cfg(feature = "system-config")] #[cfg_attr(docsrs, doc(cfg(all(feature = "system-config", windows))))] impl From for ResolveError { fn from(e: ipconfig::error::Error) -> ResolveError { ResolveErrorKind::Msg(format!("failed to read from registry: {}", e)).into() } } impl From for ResolveError { fn from(msg: String) -> Self { ResolveErrorKind::Msg(msg).into() } } impl From for ResolveError { fn from(e: io::Error) -> Self { match e.kind() { io::ErrorKind::TimedOut => ResolveErrorKind::Timeout.into(), _ => ResolveErrorKind::from(e).into(), } } } impl From for ResolveError { fn from(e: ProtoError) -> Self { match *e.kind() { ProtoErrorKind::Timeout => ResolveErrorKind::Timeout.into(), _ => ResolveErrorKind::from(e).into(), } } } impl From for io::Error { fn from(e: ResolveError) -> Self { match e.kind() { ResolveErrorKind::Timeout => Self::new(io::ErrorKind::TimedOut, e), _ => Self::new(io::ErrorKind::Other, e), } } } impl From> for ResolveError { fn from(e: sync::PoisonError) -> Self { ResolveErrorKind::Msg(format!("lock was poisoned, this is non-recoverable: {e}")).into() } } hickory-resolver-0.24.0/src/h2.rs000064400000000000000000000106421046102023000146670ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
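//! DNS-over-HTTPS (HTTP/2) connection support for the resolver: `new_https_stream` and
//! `new_https_stream_with_future` build `HttpsClientStream`-backed `DnsExchange`s, using either a
//! caller-supplied `TlsClientConfig` or the shared rustls client configuration.
//!
//! A minimal sketch of resolving over DoH with one of the preconfigured providers, assuming the
//! `dns-over-https-rustls` and `tokio-runtime` features are enabled:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "dns-over-https-rustls", feature = "tokio-runtime"))]
//! # {
//! use hickory_resolver::config::{ResolverConfig, ResolverOpts};
//! use hickory_resolver::Resolver;
//!
//! // Cloudflare's DoH endpoints; `google_https()` works the same way.
//! let resolver = Resolver::new(ResolverConfig::cloudflare_https(), ResolverOpts::default()).unwrap();
//! let _response = resolver.lookup_ip("www.example.com.").unwrap();
//! # }
//! ```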
use std::future::Future; use std::net::SocketAddr; use crate::tls::CLIENT_CONFIG; use proto::h2::{HttpsClientConnect, HttpsClientStream, HttpsClientStreamBuilder}; use proto::tcp::{Connect, DnsTcpStream}; use proto::xfer::{DnsExchange, DnsExchangeConnect}; use proto::TokioTime; use crate::config::TlsClientConfig; #[allow(clippy::type_complexity)] #[allow(unused)] pub(crate) fn new_https_stream( socket_addr: SocketAddr, bind_addr: Option, dns_name: String, client_config: Option, ) -> DnsExchangeConnect, HttpsClientStream, TokioTime> where S: Connect, { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; let mut https_builder = HttpsClientStreamBuilder::with_client_config(client_config); if let Some(bind_addr) = bind_addr { https_builder.bind_addr(bind_addr); } DnsExchange::connect(https_builder.build::(socket_addr, dns_name)) } #[allow(clippy::type_complexity)] pub(crate) fn new_https_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, client_config: Option, ) -> DnsExchangeConnect, HttpsClientStream, TokioTime> where S: DnsTcpStream, F: Future> + Send + Unpin + 'static, { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; DnsExchange::connect(HttpsClientStreamBuilder::build_with_future( future, client_config, socket_addr, dns_name, )) } #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tokio::runtime::Runtime; use crate::config::{ResolverConfig, ResolverOpts}; use crate::name_server::TokioConnectionProvider; use crate::TokioAsyncResolver; fn https_test(config: ResolverConfig) { let io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new( config, ResolverOpts { try_tcp_on_error: true, ..ResolverOpts::default() }, TokioConnectionProvider::default(), ); let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } // check if there is another connection created let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } #[test] fn test_google_https() { https_test(ResolverConfig::google_https()) } #[test] fn test_cloudflare_https() { https_test(ResolverConfig::cloudflare_https()) } } hickory-resolver-0.24.0/src/h3.rs000064400000000000000000000110421046102023000146630ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
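//! DNS-over-HTTP/3 connection support for the resolver: `new_h3_stream` and
//! `new_h3_stream_with_future` build `H3ClientStream`-backed `DnsExchange`s, cloning the shared
//! rustls client configuration into the QUIC crypto settings. This path is enabled by the
//! `dns-over-h3` feature and exercised with a preset such as `ResolverConfig::google_h3()` in the
//! tests below; usage from the public API mirrors the DoH example in `h2.rs`.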
use std::future::Future; use std::net::SocketAddr; use crate::config::TlsClientConfig; use crate::tls::CLIENT_CONFIG; use proto::h3::{H3ClientConnect, H3ClientStream}; use proto::xfer::{DnsExchange, DnsExchangeConnect}; use proto::TokioTime; use hickory_proto::udp::{DnsUdpSocket, QuicLocalAddr}; use rustls::ClientConfig as CryptoConfig; #[allow(clippy::type_complexity)] #[allow(unused)] pub(crate) fn new_h3_stream( socket_addr: SocketAddr, bind_addr: Option, dns_name: String, client_config: Option, ) -> DnsExchangeConnect { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; let mut h3_builder = H3ClientStream::builder(); // TODO: normalize the crypto config settings, can we just use common ALPN settings? let crypto_config: CryptoConfig = (*client_config).clone(); h3_builder.crypto_config(crypto_config); if let Some(bind_addr) = bind_addr { h3_builder.bind_addr(bind_addr); } DnsExchange::connect(h3_builder.build(socket_addr, dns_name)) } #[allow(clippy::type_complexity)] pub(crate) fn new_h3_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, client_config: Option, ) -> DnsExchangeConnect where S: DnsUdpSocket + QuicLocalAddr + 'static, F: Future> + Send + Unpin + 'static, { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; let mut h3_builder = H3ClientStream::builder(); // TODO: normalize the crypto config settings, can we just use common ALPN settings? let crypto_config: CryptoConfig = (*client_config).clone(); h3_builder.crypto_config(crypto_config); DnsExchange::connect(h3_builder.build_with_future(future, socket_addr, dns_name)) } #[cfg(all(test, any(feature = "native-certs", feature = "webpki-roots")))] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tokio::runtime::Runtime; use crate::config::{ResolverConfig, ResolverOpts}; use crate::name_server::TokioConnectionProvider; use crate::TokioAsyncResolver; fn h3_test(config: ResolverConfig) { let io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new( config, ResolverOpts::default(), TokioConnectionProvider::default(), ); let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } // check if there is another connection created let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } #[test] fn test_google_h3() { h3_test(ResolverConfig::google_h3()) } } hickory-resolver-0.24.0/src/hosts.rs000064400000000000000000000210701046102023000155130ustar 00000000000000//! 
Hosts result from a configuration of the system hosts file use std::collections::HashMap; use std::io; use std::path::Path; use std::str::FromStr; use std::sync::Arc; use proto::op::Query; use proto::rr::{Name, RecordType}; use proto::rr::{RData, Record}; use tracing::warn; use crate::dns_lru; use crate::lookup::Lookup; #[derive(Debug, Default)] struct LookupType { /// represents the A record type a: Option, /// represents the AAAA record type aaaa: Option, } /// Configuration for the local hosts file #[derive(Debug, Default)] pub struct Hosts { /// Name -> RDatas map by_name: HashMap, } impl Hosts { /// Creates a new configuration from the system hosts file, /// only works for Windows and Unix-like OSes, /// will return empty configuration on others #[cfg(any(unix, windows))] pub fn new() -> Self { read_hosts_conf(hosts_path()).unwrap_or_default() } /// Creates a default configuration for non Windows or Unix-like OSes #[cfg(not(any(unix, windows)))] pub fn new() -> Self { Hosts::default() } /// Look up the addresses for the given host from the system hosts file. pub fn lookup_static_host(&self, query: &Query) -> Option { if !self.by_name.is_empty() { if let Some(val) = self.by_name.get(query.name()) { let result = match query.query_type() { RecordType::A => val.a.clone(), RecordType::AAAA => val.aaaa.clone(), _ => None, }; return result; } } None } /// Insert a new Lookup for the associated `Name` and `RecordType` pub fn insert(&mut self, name: Name, record_type: RecordType, lookup: Lookup) { assert!(record_type == RecordType::A || record_type == RecordType::AAAA); let lookup_type = self.by_name.entry(name.clone()).or_default(); let new_lookup = { let old_lookup = match record_type { RecordType::A => lookup_type.a.get_or_insert_with(|| { let query = Query::query(name.clone(), record_type); Lookup::new_with_max_ttl(query, Arc::from([])) }), RecordType::AAAA => lookup_type.aaaa.get_or_insert_with(|| { let query = Query::query(name.clone(), record_type); Lookup::new_with_max_ttl(query, Arc::from([])) }), _ => { tracing::warn!("unsupported IP type from Hosts file: {:#?}", record_type); return; } }; old_lookup.append(lookup) }; // replace the appended version match record_type { RecordType::A => lookup_type.a = Some(new_lookup), RecordType::AAAA => lookup_type.aaaa = Some(new_lookup), _ => tracing::warn!("unsupported IP type from Hosts file"), } } /// parse configuration from `src` pub fn read_hosts_conf(mut self, src: impl io::Read) -> io::Result { use std::io::{BufRead, BufReader}; use proto::rr::domain::TryParseIp; // lines in the src should have the form `addr host1 host2 host3 ...` // line starts with `#` will be regarded with comments and ignored, // also empty line also will be ignored, // if line only include `addr` without `host` will be ignored, // the src will be parsed to map in the form `Name -> LookUp`. for line in BufReader::new(src).lines() { // Remove comments from the line let line = line?; let line = line.split('#').next().unwrap().trim(); if line.is_empty() { continue; } let fields: Vec<_> = line.split_whitespace().collect(); if fields.len() < 2 { continue; } let addr = if let Some(a) = fields[0].try_parse_ip() { a } else { warn!("could not parse an IP from hosts file"); continue; }; for domain in fields.iter().skip(1).map(|domain| domain.to_lowercase()) { if let Ok(name) = Name::from_str(&domain) { let record = Record::from_rdata(name.clone(), dns_lru::MAX_TTL, addr.clone()); match addr { RData::A(..) 
=> { let query = Query::query(name.clone(), RecordType::A); let lookup = Lookup::new_with_max_ttl(query, Arc::from([record])); self.insert(name.clone(), RecordType::A, lookup); } RData::AAAA(..) => { let query = Query::query(name.clone(), RecordType::AAAA); let lookup = Lookup::new_with_max_ttl(query, Arc::from([record])); self.insert(name.clone(), RecordType::AAAA, lookup); } _ => { warn!("unsupported IP type from Hosts file: {:#?}", addr); continue; } }; // TODO: insert reverse lookup as well. }; } } Ok(self) } } #[cfg(unix)] fn hosts_path() -> &'static str { "/etc/hosts" } #[cfg(windows)] fn hosts_path() -> std::path::PathBuf { let system_root = std::env::var_os("SystemRoot").expect("Environtment variable SystemRoot not found"); let system_root = Path::new(&system_root); system_root.join("System32\\drivers\\etc\\hosts") } /// parse configuration from `path` #[cfg(any(unix, windows))] #[cfg_attr(docsrs, doc(cfg(any(unix, windows))))] pub(crate) fn read_hosts_conf>(path: P) -> io::Result { use std::fs::File; let file = File::open(path)?; Hosts::default().read_hosts_conf(file) } #[cfg(any(unix, windows))] #[cfg(test)] mod tests { use super::*; use std::env; use std::net::{Ipv4Addr, Ipv6Addr}; fn tests_dir() -> String { let server_path = env::var("TDNS_WORKSPACE_ROOT").unwrap_or_else(|_| "../..".to_owned()); format! {"{server_path}/crates/resolver/tests"} } #[test] fn test_read_hosts_conf() { let path = format!("{}/hosts", tests_dir()); let hosts = read_hosts_conf(path).unwrap(); let name = Name::from_str("localhost").unwrap(); let rdatas = hosts .lookup_static_host(&Query::query(name.clone(), RecordType::A)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!(rdatas, vec![RData::A(Ipv4Addr::new(127, 0, 0, 1).into())]); let rdatas = hosts .lookup_static_host(&Query::query(name, RecordType::AAAA)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!( rdatas, vec![RData::AAAA(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into())] ); let name = Name::from_str("broadcasthost").unwrap(); let rdatas = hosts .lookup_static_host(&Query::query(name, RecordType::A)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!( rdatas, vec![RData::A(Ipv4Addr::new(255, 255, 255, 255).into())] ); let name = Name::from_str("example.com").unwrap(); let rdatas = hosts .lookup_static_host(&Query::query(name, RecordType::A)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!(rdatas, vec![RData::A(Ipv4Addr::new(10, 0, 1, 102).into())]); let name = Name::from_str("a.example.com").unwrap(); let rdatas = hosts .lookup_static_host(&Query::query(name, RecordType::A)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!(rdatas, vec![RData::A(Ipv4Addr::new(10, 0, 1, 111).into())]); let name = Name::from_str("b.example.com").unwrap(); let rdatas = hosts .lookup_static_host(&Query::query(name, RecordType::A)) .unwrap() .iter() .map(ToOwned::to_owned) .collect::>(); assert_eq!(rdatas, vec![RData::A(Ipv4Addr::new(10, 0, 1, 111).into())]); } } hickory-resolver-0.24.0/src/lib.rs000064400000000000000000000277431046102023000151360ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! The Resolver is responsible for performing recursive queries to lookup domain names. //! //! This is a 100% in process DNS resolver. It *does not* use the Host OS' resolver. If what is //! 
desired is to use the Host OS' resolver, generally in the system's libc, then the //! `std::net::ToSocketAddrs` variant over `&str` should be used. //! //! Unlike the `hickory-client`, this tries to provide a simpler interface to perform DNS //! queries. For update options, i.e. Dynamic DNS, the `hickory-client` crate must be used //! instead. The Resolver library is capable of searching multiple domains (this can be disabled by //! using an FQDN during lookup), dual-stack IPv4/IPv6 lookups, performing chained CNAME lookups, //! and features connection metric tracking for attempting to pick the best upstream DNS resolver. //! //! There are two types for performing DNS queries, [`Resolver`] and [`AsyncResolver`]. `Resolver` //! is the easiest to work with, it is a wrapper around [`AsyncResolver`]. `AsyncResolver` is a //! `Tokio` based async resolver, and can be used inside any `Tokio` based system. //! //! This as best as possible attempts to abide by the DNS RFCs, please file issues at //! . //! //! # Usage //! //! ## Declare dependency //! //! ```toml //! [dependency] //! hickory-resolver = "*" //! ``` //! //! ## Using the Synchronous Resolver //! //! This uses the default configuration, which sets the [Google Public //! DNS](https://developers.google.com/speed/public-dns/) as the upstream resolvers. Please see //! their [privacy statement](https://developers.google.com/speed/public-dns/privacy) for important //! information about what they track, many ISP's track similar information in DNS. //! //! ```rust //! # fn main() { //! # #[cfg(feature = "tokio-runtime")] //! # { //! use std::net::*; //! use hickory_resolver::Resolver; //! use hickory_resolver::config::*; //! //! // Construct a new Resolver with default configuration options //! let resolver = Resolver::new(ResolverConfig::default(), ResolverOpts::default()).unwrap(); //! //! // Lookup the IP addresses associated with a name. //! // The final dot forces this to be an FQDN, otherwise the search rules as specified //! // in `ResolverOpts` will take effect. FQDN's are generally cheaper queries. //! let response = resolver.lookup_ip("www.example.com.").unwrap(); //! //! // There can be many addresses associated with the name, //! // this can return IPv4 and/or IPv6 addresses //! let address = response.iter().next().expect("no addresses returned!"); //! if address.is_ipv4() { //! assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); //! } else { //! assert_eq!(address, IpAddr::V6(Ipv6Addr::new(0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946))); //! } //! # } //! # } //! ``` //! //! ## Using the host system config //! //! On Unix systems, the `/etc/resolv.conf` can be used for configuration. Not all options //! specified in the host systems `resolv.conf` are applicable or compatible with this software. In //! addition there may be additional options supported which the host system does not. Example: //! //! ```rust,no_run //! # fn main() { //! # #[cfg(feature = "tokio-runtime")] //! # { //! # use std::net::*; //! # use hickory_resolver::Resolver; //! // Use the host OS'es `/etc/resolv.conf` //! # #[cfg(unix)] //! let resolver = Resolver::from_system_conf().unwrap(); //! # #[cfg(unix)] //! let response = resolver.lookup_ip("www.example.com.").unwrap(); //! # } //! # } //! ``` //! //! ## Using the Tokio/Async Resolver //! //! For more advanced asynchronous usage, the `AsyncResolver`] is integrated with Tokio. In fact, //! the [`AsyncResolver`] is used by the synchronous Resolver for all lookups. //! //! ```rust //! 
# fn main() { //! # #[cfg(feature = "tokio-runtime")] //! # { //! use std::net::*; //! use tokio::runtime::Runtime; //! use hickory_resolver::TokioAsyncResolver; //! use hickory_resolver::config::*; //! //! // We need a Tokio Runtime to run the resolver //! // this is responsible for running all Future tasks and registering interest in IO channels //! let mut io_loop = Runtime::new().unwrap(); //! //! // Construct a new Resolver with default configuration options //! let resolver = io_loop.block_on(async { //! TokioAsyncResolver::tokio( //! ResolverConfig::default(), //! ResolverOpts::default()) //! }); //! //! // Lookup the IP addresses associated with a name. //! // This returns a future that will lookup the IP addresses, it must be run in the Core to //! // to get the actual result. //! let lookup_future = resolver.lookup_ip("www.example.com."); //! //! // Run the lookup until it resolves or errors //! let mut response = io_loop.block_on(lookup_future).unwrap(); //! //! // There can be many addresses associated with the name, //! // this can return IPv4 and/or IPv6 addresses //! let address = response.iter().next().expect("no addresses returned!"); //! if address.is_ipv4() { //! assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); //! } else { //! assert_eq!(address, IpAddr::V6(Ipv6Addr::new(0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946))); //! } //! # } //! # } //! ``` //! //! Generally after a lookup in an asynchronous context, there would probably be a connection made //! to a server, for example: //! //! ```rust,no_run //! # fn main() { //! # #[cfg(feature = "tokio-runtime")] //! # { //! # use std::net::*; //! # use tokio::runtime::Runtime; //! # use hickory_resolver::TokioAsyncResolver; //! # use hickory_resolver::config::*; //! # use futures_util::TryFutureExt; //! # //! # let mut io_loop = Runtime::new().unwrap(); //! # //! # let resolver = io_loop.block_on(async { //! # TokioAsyncResolver::tokio( //! # ResolverConfig::default(), //! # ResolverOpts::default()) //! # }); //! # //! let ips = io_loop.block_on(resolver.lookup_ip("www.example.com.")).unwrap(); //! //! let result = io_loop.block_on(async { //! let ip = ips.iter().next().unwrap(); //! TcpStream::connect((ip, 443)) //! }) //! .and_then(|conn| Ok(conn) /* do something with the connection... */) //! .unwrap(); //! # } //! # } //! ``` //! //! It's beyond the scope of these examples to show how to deal with connection failures and //! looping etc. But if you wanted to say try a different address from the result set after a //! connection failure, it will be necessary to create a type that implements the `Future` trait. //! Inside the `Future::poll` method would be the place to implement a loop over the different IP //! addresses. //! //! ## DNS-over-TLS and DNS-over-HTTPS //! //! DNS-over-TLS and DNS-over-HTTPS are supported in the Hickory DNS Resolver library. The underlying //! implementations are available as addon libraries. *WARNING* The hickory-dns developers make no //! claims on the security and/or privacy guarantees of this implementation. //! //! To use DNS-over-TLS one of the `dns-over-tls` features must be enabled at compile time. There //! are three: `dns-over-openssl`, `dns-over-native-tls`, and `dns-over-rustls`. For DNS-over-HTTPS //! only rustls is supported with the `dns-over-https-rustls`, this implicitly enables support for //! DNS-over-TLS as well. The reason for each is to make the Hickory DNS libraries flexible for //! different deployments, and/or security concerns. 
The easiest to use will generally be //! `dns-over-rustls` which utilizes the `*ring*` Rust cryptography library (a rework of the //! `boringssl` project), this should compile and be usable on most ARM and x86 platforms. //! `dns-over-native-tls` will utilize the hosts TLS implementation where available or fallback to //! `openssl` where not supported. `dns-over-openssl` will specify that `openssl` should be used //! (which is a perfectly fine option if required). If more than one is specified, the precedence //! will be in this order (i.e. only one can be used at a time) `dns-over-rustls`, //! `dns-over-native-tls`, and then `dns-over-openssl`. **NOTICE** the Hickory DNS developers are not //! responsible for any choice of library that does not meet required security requirements. //! //! ### Example //! //! Enable the TLS library through the dependency on `hickory-resolver`: //! //! ```toml //! hickory-resolver = { version = "*", features = ["dns-over-rustls"] } //! ``` //! //! A default TLS configuration is available for Cloudflare's `1.1.1.1` DNS service (Quad9 as //! well): //! //! ```rust,no_run //! # fn main() { //! # #[cfg(feature = "tokio-runtime")] //! # { //! use hickory_resolver::Resolver; //! use hickory_resolver::config::*; //! //! // Construct a new Resolver with default configuration options //! # #[cfg(feature = "dns-over-tls")] //! let mut resolver = Resolver::new(ResolverConfig::cloudflare_tls(), ResolverOpts::default()).unwrap(); //! //! // see example above... //! # } //! # } //! ``` //! //! ## mDNS (multicast DNS) //! //! Multicast DNS is an experimental feature in Hickory DNS at the moment. Its support on different //! platforms is not yet ideal. Initial support is only for IPv4 mDNS, as there are some //! complexities to figure out with IPv6. Once enabled, an mDNS `NameServer` will automatically be //! added to the `Resolver` and used for any lookups performed in the `.local.` zone. 
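//!
//! ## Customizing resolver options
//!
//! Most of the behavior above is tuned through [`config::ResolverOpts`]. The following is a
//! minimal sketch (illustrative values only) of overriding a few of its public fields, such as
//! the request timeout, the cache size, and the cap applied to positive-response TTLs, starting
//! from the defaults:
//!
//! ```rust,no_run
//! # fn main() {
//! # #[cfg(feature = "tokio-runtime")]
//! # {
//! use std::time::Duration;
//! use hickory_resolver::config::{ResolverConfig, ResolverOpts};
//! use hickory_resolver::Resolver;
//!
//! let mut opts = ResolverOpts::default();
//! // Allow a little longer than the 5 second default per request.
//! opts.timeout = Duration::from_secs(10);
//! // Cache more than the default 32 responses.
//! opts.cache_size = 128;
//! // Never cache a positive answer for longer than an hour.
//! opts.positive_max_ttl = Some(Duration::from_secs(3600));
//!
//! let resolver = Resolver::new(ResolverConfig::default(), opts).unwrap();
//! let _response = resolver.lookup_ip("www.example.com.").unwrap();
//! # }
//! # }
//! ```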
// LIBRARY WARNINGS #![warn( clippy::default_trait_access, clippy::dbg_macro, clippy::print_stdout, clippy::unimplemented, clippy::use_self, missing_copy_implementations, missing_docs, non_snake_case, non_upper_case_globals, rust_2018_idioms, unreachable_pub )] #![recursion_limit = "128"] #![allow(clippy::needless_doctest_main, clippy::single_component_path_imports)] #![cfg_attr(docsrs, feature(doc_cfg))] #[cfg(feature = "dns-over-tls")] #[macro_use] extern crate cfg_if; #[cfg(feature = "serde-config")] #[macro_use] extern crate serde; pub extern crate hickory_proto as proto; mod async_resolver; pub mod caching_client; pub mod config; pub mod dns_lru; pub mod dns_sd; pub mod error; #[cfg(feature = "dns-over-https")] mod h2; #[cfg(feature = "dns-over-h3")] mod h3; mod hosts; pub mod lookup; pub mod lookup_ip; // TODO: consider #[doc(hidden)] pub mod name_server; #[cfg(feature = "dns-over-quic")] mod quic; #[cfg(feature = "tokio-runtime")] mod resolver; pub mod system_conf; #[cfg(feature = "dns-over-tls")] mod tls; // reexports from proto pub use self::proto::rr::{IntoName, Name, TryParseIp}; #[cfg(feature = "testing")] #[cfg_attr(docsrs, doc(cfg(feature = "testing")))] pub use async_resolver::testing; pub use async_resolver::AsyncResolver; #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub use async_resolver::TokioAsyncResolver; pub use hosts::Hosts; #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub use name_server::TokioHandle; #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub use resolver::Resolver; /// This is an alias for [`AsyncResolver`], which replaced the type previously /// called `ResolverFuture`. /// /// # Note /// /// For users of `ResolverFuture`, the return type for `ResolverFuture::new` /// has changed since version 0.9 of `hickory-resolver`. It now returns /// a tuple of an [`AsyncResolver`] _and_ a background future, which must /// be spawned on a reactor before any lookup futures will run. /// /// See the [`AsyncResolver`] documentation for more information on how to /// use the background future. #[deprecated(note = "use [`hickory_resolver::AsyncResolver`] instead")] #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub type ResolverFuture = TokioAsyncResolver; /// returns a version as specified in Cargo.toml pub fn version() -> &'static str { env!("CARGO_PKG_VERSION") } hickory-resolver-0.24.0/src/lookup.rs000064400000000000000000000516221046102023000156720ustar 00000000000000// Copyright 2015-2023 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Lookup result from a resolution of ipv4 and ipv6 records with a Resolver. 
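//!
//! A minimal sketch using only the constructors and accessors defined in this module
//! (`Lookup::from_rdata`, `Lookup::records`, `Lookup::iter`); the name and address are
//! placeholder values:
//!
//! ```rust
//! use std::str::FromStr;
//!
//! use hickory_resolver::lookup::Lookup;
//! use hickory_resolver::proto::op::Query;
//! use hickory_resolver::proto::rr::{rdata::A, Name, RData, RecordType};
//!
//! let query = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A);
//! let lookup = Lookup::from_rdata(query, RData::A(A::new(127, 0, 0, 1)));
//!
//! // A `Lookup` exposes both the raw `Record`s and their `RData`.
//! assert_eq!(lookup.records().len(), 1);
//! assert_eq!(lookup.iter().next(), Some(&RData::A(A::new(127, 0, 0, 1))));
//! ```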
use std::{ cmp::min, error::Error, pin::Pin, slice::Iter, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, }; use futures_util::{ future::{self, Future}, stream::Stream, FutureExt, }; use crate::{ caching_client::CachingClient, dns_lru::MAX_TTL, error::*, lookup_ip::LookupIpIter, name_server::{ConnectionProvider, NameServerPool}, proto::{ error::ProtoError, op::Query, rr::{ rdata::{self, A, AAAA, NS, PTR}, Name, RData, Record, RecordType, }, xfer::{DnsRequest, DnsRequestOptions, DnsResponse}, DnsHandle, RetryDnsHandle, }, }; #[cfg(feature = "dnssec")] use proto::DnssecDnsHandle; /// Result of a DNS query when querying for any record type supported by the Hickory DNS Proto library. /// /// For IP resolution see LookupIp, as it has more features for A and AAAA lookups. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Lookup { query: Query, records: Arc<[Record]>, valid_until: Instant, } impl Lookup { /// Return new instance with given rdata and the maximum TTL. pub fn from_rdata(query: Query, rdata: RData) -> Self { let record = Record::from_rdata(query.name().clone(), MAX_TTL, rdata); Self::new_with_max_ttl(query, Arc::from([record])) } /// Return new instance with given records and the maximum TTL. pub fn new_with_max_ttl(query: Query, records: Arc<[Record]>) -> Self { let valid_until = Instant::now() + Duration::from_secs(u64::from(MAX_TTL)); Self { query, records, valid_until, } } /// Return a new instance with the given records and deadline. pub fn new_with_deadline(query: Query, records: Arc<[Record]>, valid_until: Instant) -> Self { Self { query, records, valid_until, } } /// Returns a reference to the `Query` that was used to produce this result. pub fn query(&self) -> &Query { &self.query } /// Returns a borrowed iterator of the returned IPs pub fn iter(&self) -> LookupIter<'_> { LookupIter(self.records.iter()) } /// Returns a borrowed iterator of the returned IPs pub fn record_iter(&self) -> LookupRecordIter<'_> { LookupRecordIter(self.records.iter()) } /// Returns the `Instant` at which this `Lookup` is no longer valid. pub fn valid_until(&self) -> Instant { self.valid_until } #[doc(hidden)] pub fn is_empty(&self) -> bool { self.records.is_empty() } pub(crate) fn len(&self) -> usize { self.records.len() } /// Returns the records list pub fn records(&self) -> &[Record] { self.records.as_ref() } /// Clones the inner vec, appends the other vec pub(crate) fn append(&self, other: Self) -> Self { let mut records = Vec::with_capacity(self.len() + other.len()); records.extend_from_slice(&self.records); records.extend_from_slice(&other.records); // Choose the sooner deadline of the two lookups. 
let valid_until = min(self.valid_until(), other.valid_until()); Self::new_with_deadline(self.query.clone(), Arc::from(records), valid_until) } } /// Borrowed view of set of [`RData`]s returned from a Lookup pub struct LookupIter<'a>(Iter<'a, Record>); impl<'a> Iterator for LookupIter<'a> { type Item = &'a RData; fn next(&mut self) -> Option { self.0.next().and_then(Record::data) } } /// Borrowed view of set of [`Record`]s returned from a Lookup pub struct LookupRecordIter<'a>(Iter<'a, Record>); impl<'a> Iterator for LookupRecordIter<'a> { type Item = &'a Record; fn next(&mut self) -> Option { self.0.next() } } // TODO: consider removing this as it's not a zero-cost abstraction impl IntoIterator for Lookup { type Item = RData; type IntoIter = LookupIntoIter; /// This is most likely not a free conversion, the `RData`s will be cloned if data is /// held behind an Arc with more than one reference (which is most likely the case coming from cache) fn into_iter(self) -> Self::IntoIter { LookupIntoIter { records: Arc::clone(&self.records), index: 0, } } } /// Borrowed view of set of [`RData`]s returned from a [`Lookup`]. /// /// This is not usually a zero overhead `Iterator`, it may result in clones of the [`RData`]. pub struct LookupIntoIter { // the result of the try_unwrap on Arc records: Arc<[Record]>, index: usize, } impl Iterator for LookupIntoIter { type Item = RData; fn next(&mut self) -> Option { let rdata = self.records.get(self.index).and_then(Record::data); self.index += 1; rdata.cloned() } } /// Different lookup options for the lookup attempts and validation #[derive(Clone)] #[doc(hidden)] pub enum LookupEither { Retry(RetryDnsHandle>), #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] Secure(DnssecDnsHandle>>), } impl DnsHandle for LookupEither

{ type Response = Pin> + Send>>; type Error = ResolveError; fn is_verifying_dnssec(&self) -> bool { match *self { Self::Retry(ref c) => c.is_verifying_dnssec(), #[cfg(feature = "dnssec")] Self::Secure(ref c) => c.is_verifying_dnssec(), } } fn send + Unpin + Send + 'static>(&self, request: R) -> Self::Response { match *self { Self::Retry(ref c) => c.send(request), #[cfg(feature = "dnssec")] Self::Secure(ref c) => c.send(request), } } } /// The Future returned from [`AsyncResolver`] when performing a lookup. #[doc(hidden)] pub struct LookupFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { client_cache: CachingClient, names: Vec, record_type: RecordType, options: DnsRequestOptions, query: Pin> + Send>>, } impl LookupFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { /// Perform a lookup from a name and type to a set of RDatas /// /// # Arguments /// /// * `names` - a set of DNS names to attempt to resolve, they will be attempted in queue order, i.e. the first is `names.pop()`. Upon each failure, the next will be attempted. /// * `record_type` - type of record being sought /// * `client_cache` - cache with a connection to use for performing all lookups #[doc(hidden)] pub fn lookup( mut names: Vec, record_type: RecordType, options: DnsRequestOptions, mut client_cache: CachingClient, ) -> Self { let name = names.pop().ok_or_else(|| { ResolveError::from(ResolveErrorKind::Message("can not lookup for no names")) }); let query: Pin> + Send>> = match name { Ok(name) => client_cache .lookup(Query::query(name, record_type), options) .boxed(), Err(err) => future::err(err).boxed(), }; Self { client_cache, names, record_type, options, query, } } } impl Future for LookupFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { // Try polling the underlying DNS query. let query = self.query.as_mut().poll_unpin(cx); // Determine whether or not we will attempt to retry the query. let should_retry = match query { // If the query is NotReady, yield immediately. Poll::Pending => return Poll::Pending, // If the query returned a successful lookup, we will attempt // to retry if the lookup is empty. Otherwise, we will return // that lookup. Poll::Ready(Ok(ref lookup)) => lookup.records.len() == 0, // If the query failed, we will attempt to retry. Poll::Ready(Err(_)) => true, }; if should_retry { if let Some(name) = self.names.pop() { let record_type = self.record_type; let options = self.options; // If there's another name left to try, build a new query // for that next name and continue looping. self.query = self .client_cache .lookup(Query::query(name, record_type), options); // Continue looping with the new query. It will be polled // on the next iteration of the loop. continue; } } // If we didn't have to retry the query, or we weren't able to // retry because we've exhausted the names to search, return the // current query. return query; // If we skipped retrying the query, this will return the // successful lookup, otherwise, if the retry failed, this will // return the last query result --- either an empty lookup or the // last error we saw. 
} } } /// The result of an SRV lookup #[derive(Debug, Clone)] pub struct SrvLookup(Lookup); impl SrvLookup { /// Returns an iterator over the SRV RData pub fn iter(&self) -> SrvLookupIter<'_> { SrvLookupIter(self.0.iter()) } /// Returns a reference to the Query that was used to produce this result. pub fn query(&self) -> &Query { self.0.query() } /// Returns the list of IPs associated with the SRV record. /// /// *Note*: That Hickory DNS performs a recursive lookup on SRV records for IPs if they were not included in the original request. If there are no IPs associated to the result, a subsequent query for the IPs via the `srv.target()` should not resolve to the IPs. pub fn ip_iter(&self) -> LookupIpIter<'_> { LookupIpIter(self.0.iter()) } /// Return a reference to the inner lookup /// /// This can be useful for getting all records from the request pub fn as_lookup(&self) -> &Lookup { &self.0 } } impl From for SrvLookup { fn from(lookup: Lookup) -> Self { Self(lookup) } } /// An iterator over the Lookup type pub struct SrvLookupIter<'i>(LookupIter<'i>); impl<'i> Iterator for SrvLookupIter<'i> { type Item = &'i rdata::SRV; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match *rdata { RData::SRV(ref data) => Some(data), _ => None, }) .next() } } impl IntoIterator for SrvLookup { type Item = rdata::SRV; type IntoIter = SrvLookupIntoIter; /// This is most likely not a free conversion, the RDatas will be cloned if data is /// held behind an Arc with more than one reference (which is most likely the case coming from cache) fn into_iter(self) -> Self::IntoIter { SrvLookupIntoIter(self.0.into_iter()) } } /// Borrowed view of set of RDatas returned from a Lookup pub struct SrvLookupIntoIter(LookupIntoIter); impl Iterator for SrvLookupIntoIter { type Item = rdata::SRV; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match rdata { RData::SRV(data) => Some(data), _ => None, }) .next() } } /// Creates a Lookup result type from the specified components macro_rules! lookup_type { ($l:ident, $i:ident, $ii:ident, $r:path, $t:path) => { /// Contains the results of a lookup for the associated RecordType #[derive(Debug, Clone)] pub struct $l(Lookup); impl $l { /// Returns an iterator over the RData pub fn iter(&self) -> $i<'_> { $i(self.0.iter()) } /// Returns a reference to the Query that was used to produce this result. pub fn query(&self) -> &Query { self.0.query() } /// Returns the `Instant` at which this result is no longer valid. 
pub fn valid_until(&self) -> Instant { self.0.valid_until() } /// Return a reference to the inner lookup /// /// This can be useful for getting all records from the request pub fn as_lookup(&self) -> &Lookup { &self.0 } } impl From for $l { fn from(lookup: Lookup) -> Self { $l(lookup) } } impl From<$l> for Lookup { fn from(revlookup: $l) -> Self { revlookup.0 } } /// An iterator over the Lookup type pub struct $i<'i>(LookupIter<'i>); impl<'i> Iterator for $i<'i> { type Item = &'i $t; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match *rdata { $r(ref data) => Some(data), _ => None, }) .next() } } impl IntoIterator for $l { type Item = $t; type IntoIter = $ii; /// This is most likely not a free conversion, the RDatas will be cloned if data is /// held behind an Arc with more than one reference (which is most likely the case coming from cache) fn into_iter(self) -> Self::IntoIter { $ii(self.0.into_iter()) } } /// Borrowed view of set of RDatas returned from a Lookup pub struct $ii(LookupIntoIter); impl Iterator for $ii { type Item = $t; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match rdata { $r(data) => Some(data), _ => None, }) .next() } } }; } // Generate all Lookup record types lookup_type!( ReverseLookup, ReverseLookupIter, ReverseLookupIntoIter, RData::PTR, PTR ); lookup_type!(Ipv4Lookup, Ipv4LookupIter, Ipv4LookupIntoIter, RData::A, A); lookup_type!( Ipv6Lookup, Ipv6LookupIter, Ipv6LookupIntoIter, RData::AAAA, AAAA ); lookup_type!( MxLookup, MxLookupIter, MxLookupIntoIter, RData::MX, rdata::MX ); lookup_type!( TlsaLookup, TlsaLookupIter, TlsaLookupIntoIter, RData::TLSA, rdata::TLSA ); lookup_type!( TxtLookup, TxtLookupIter, TxtLookupIntoIter, RData::TXT, rdata::TXT ); lookup_type!( SoaLookup, SoaLookupIter, SoaLookupIntoIter, RData::SOA, rdata::SOA ); lookup_type!(NsLookup, NsLookupIter, NsLookupIntoIter, RData::NS, NS); #[cfg(test)] pub mod tests { use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use std::sync::{Arc, Mutex}; use futures_executor::block_on; use futures_util::future; use futures_util::stream::once; use proto::op::{Message, Query}; use proto::rr::{Name, RData, Record, RecordType}; use proto::xfer::{DnsRequest, DnsRequestOptions}; use super::*; use crate::error::ResolveError; #[derive(Clone)] pub struct MockDnsHandle { messages: Arc>>>, } impl DnsHandle for MockDnsHandle { type Response = Pin> + Send>>; type Error = ResolveError; fn send>(&self, _: R) -> Self::Response { Box::pin(once( future::ready(self.messages.lock().unwrap().pop().unwrap_or_else(empty)).boxed(), )) } } pub fn v4_message() -> Result { let mut message = Message::new(); message.add_query(Query::query(Name::root(), RecordType::A)); message.insert_answers(vec![Record::from_rdata( Name::root(), 86400, RData::A(A::new(127, 0, 0, 1)), )]); let resp = DnsResponse::from_message(message).unwrap(); assert!(resp.contains_answer()); Ok(resp) } pub fn empty() -> Result { Ok(DnsResponse::from_message(Message::new()).unwrap()) } pub fn error() -> Result { Err(ResolveError::from(ProtoError::from(std::io::Error::from( std::io::ErrorKind::Other, )))) } pub fn mock(messages: Vec>) -> MockDnsHandle { MockDnsHandle { messages: Arc::new(Mutex::new(messages)), } } #[test] fn test_lookup() { assert_eq!( block_on(LookupFuture::lookup( vec![Name::root()], RecordType::A, DnsRequestOptions::default(), CachingClient::new(0, mock(vec![v4_message()]), false), )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), 
vec![Ipv4Addr::new(127, 0, 0, 1)] ); } #[test] fn test_lookup_slice() { assert_eq!( Record::data( &block_on(LookupFuture::lookup( vec![Name::root()], RecordType::A, DnsRequestOptions::default(), CachingClient::new(0, mock(vec![v4_message()]), false), )) .unwrap() .records()[0] ) .unwrap() .ip_addr() .unwrap(), Ipv4Addr::new(127, 0, 0, 1) ); } #[test] fn test_lookup_into_iter() { assert_eq!( block_on(LookupFuture::lookup( vec![Name::root()], RecordType::A, DnsRequestOptions::default(), CachingClient::new(0, mock(vec![v4_message()]), false), )) .unwrap() .into_iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv4Addr::new(127, 0, 0, 1)] ); } #[test] fn test_error() { assert!(block_on(LookupFuture::lookup( vec![Name::root()], RecordType::A, DnsRequestOptions::default(), CachingClient::new(0, mock(vec![error()]), false), )) .is_err()); } #[test] fn test_empty_no_response() { if let ResolveErrorKind::NoRecordsFound { query, negative_ttl, .. } = block_on(LookupFuture::lookup( vec![Name::root()], RecordType::A, DnsRequestOptions::default(), CachingClient::new(0, mock(vec![empty()]), false), )) .unwrap_err() .kind() { assert_eq!(**query, Query::query(Name::root(), RecordType::A)); assert_eq!(*negative_ttl, None); } else { panic!("wrong error received"); } } #[test] fn test_lookup_into_iter_arc() { let mut lookup = LookupIntoIter { records: Arc::from([ Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 80, RData::A(A::new(127, 0, 0, 1)), ), Record::from_rdata( Name::from_str("www.example.com.").unwrap(), 80, RData::A(A::new(127, 0, 0, 2)), ), ]), index: 0, }; assert_eq!(lookup.next().unwrap(), RData::A(A::new(127, 0, 0, 1))); assert_eq!(lookup.next().unwrap(), RData::A(A::new(127, 0, 0, 2))); assert_eq!(lookup.next(), None); } } hickory-resolver-0.24.0/src/lookup_ip.rs000064400000000000000000000564451046102023000163720ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! LookupIp result from a resolution of ipv4 and ipv6 records with a Resolver. //! //! At it's heart LookupIp uses Lookup for performing all lookups. It is unlike other standard lookups in that there are customizations around A and AAAA resolutions. use std::error::Error; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Instant; use futures_util::{future, future::Either, future::Future, FutureExt}; use proto::error::ProtoError; use proto::op::Query; use proto::rr::{Name, RData, Record, RecordType}; use proto::xfer::{DnsHandle, DnsRequestOptions}; use tracing::debug; use crate::caching_client::CachingClient; use crate::config::LookupIpStrategy; use crate::dns_lru::MAX_TTL; use crate::error::*; use crate::hosts::Hosts; use crate::lookup::{Lookup, LookupIntoIter, LookupIter}; /// Result of a DNS query when querying for A or AAAA records. /// /// When resolving IP records, there can be many IPs that match a given name. A consumer of this should expect that there are more than a single address potentially returned. Generally there are multiple IPs stored for a given service in DNS so that there is a form of high availability offered for a given name. The service implementation is responsible for the semantics around which IP should be used and when, but in general if a connection fails to one, the next in the list should be attempted. 
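///
/// A minimal sketch of that "try each address in order" pattern (`TcpStream` and port `443` are
/// illustrative assumptions rather than part of this API, and the sync `Resolver` requires the
/// `tokio-runtime` feature):
///
/// ```rust,no_run
/// # #[cfg(feature = "tokio-runtime")]
/// # {
/// use std::net::TcpStream;
/// use hickory_resolver::Resolver;
/// use hickory_resolver::config::{ResolverConfig, ResolverOpts};
///
/// let resolver = Resolver::new(ResolverConfig::default(), ResolverOpts::default()).unwrap();
/// let response = resolver.lookup_ip("www.example.com.").unwrap();
/// // Attempt each returned address in turn until one accepts the connection.
/// let stream = response
///     .iter()
///     .find_map(|ip| TcpStream::connect((ip, 443)).ok());
/// # let _ = stream;
/// # }
/// ```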
#[derive(Debug, Clone)] pub struct LookupIp(Lookup); impl LookupIp { /// Returns a borrowed iterator of the returned IPs pub fn iter(&self) -> LookupIpIter<'_> { LookupIpIter(self.0.iter()) } /// Returns a reference to the `Query` that was used to produce this result. pub fn query(&self) -> &Query { self.0.query() } /// Returns the `Instant` at which this lookup is no longer valid. pub fn valid_until(&self) -> Instant { self.0.valid_until() } /// Return a reference to the inner lookup /// /// This can be useful for getting all records from the request pub fn as_lookup(&self) -> &Lookup { &self.0 } } impl From for LookupIp { fn from(lookup: Lookup) -> Self { Self(lookup) } } impl From for Lookup { fn from(lookup: LookupIp) -> Self { lookup.0 } } /// Borrowed view of set of IPs returned from a LookupIp pub struct LookupIpIter<'i>(pub(crate) LookupIter<'i>); impl<'i> Iterator for LookupIpIter<'i> { type Item = IpAddr; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match *rdata { RData::A(ip) => Some(IpAddr::from(Ipv4Addr::from(ip))), RData::AAAA(ip) => Some(IpAddr::from(Ipv6Addr::from(ip))), _ => None, }) .next() } } impl IntoIterator for LookupIp { type Item = IpAddr; type IntoIter = LookupIpIntoIter; /// This is most likely not a free conversion, the RDatas will be cloned if data is /// held behind an Arc with more than one reference (which is most likely the case coming from cache) fn into_iter(self) -> Self::IntoIter { LookupIpIntoIter(self.0.into_iter()) } } /// Borrowed view of set of RDatas returned from a Lookup pub struct LookupIpIntoIter(LookupIntoIter); impl Iterator for LookupIpIntoIter { type Item = IpAddr; fn next(&mut self) -> Option { let iter: &mut _ = &mut self.0; iter.filter_map(|rdata| match rdata { RData::A(ip) => Some(IpAddr::from(Ipv4Addr::from(ip))), RData::AAAA(ip) => Some(IpAddr::from(Ipv6Addr::from(ip))), _ => None, }) .next() } } /// The Future returned from [crate::AsyncResolver] when performing an A or AAAA lookup. /// /// This type isn't necessarily something that should be used by users, see the default TypeParameters are generally correct pub struct LookupIpFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { client_cache: CachingClient, names: Vec, strategy: LookupIpStrategy, options: DnsRequestOptions, query: Pin> + Send>>, hosts: Option>, finally_ip_addr: Option, } impl Future for LookupIpFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { // Try polling the underlying DNS query. let query = self.query.as_mut().poll(cx); // Determine whether or not we will attempt to retry the query. let should_retry = match query { // If the query is NotReady, yield immediately. Poll::Pending => return Poll::Pending, // If the query returned a successful lookup, we will attempt // to retry if the lookup is empty. Otherwise, we will return // that lookup. Poll::Ready(Ok(ref lookup)) => lookup.is_empty(), // If the query failed, we will attempt to retry. Poll::Ready(Err(_)) => true, }; if should_retry { if let Some(name) = self.names.pop() { // If there's another name left to try, build a new query // for that next name and continue looping. self.query = strategic_lookup( name, self.strategy, self.client_cache.clone(), self.options, self.hosts.clone(), ) .boxed(); // Continue looping with the new query. 
It will be polled // on the next iteration of the loop. continue; } else if let Some(ip_addr) = self.finally_ip_addr.take() { // Otherwise, if there's an IP address to fall back to, // we'll return it. let record = Record::from_rdata(Name::new(), MAX_TTL, ip_addr); let lookup = Lookup::new_with_max_ttl(Query::new(), Arc::from([record])); return Poll::Ready(Ok(lookup.into())); } }; // If we didn't have to retry the query, or we weren't able to // retry because we've exhausted the names to search and have no // fallback IP address, return the current query. return query.map(|f| f.map(LookupIp::from)); // If we skipped retrying the query, this will return the // successful lookup, otherwise, if the retry failed, this will // return the last query result --- either an empty lookup or the // last error we saw. } } } impl LookupIpFuture where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { /// Perform a lookup from a hostname to a set of IPs /// /// # Arguments /// /// * `names` - a set of DNS names to attempt to resolve, they will be attempted in queue order, i.e. the first is `names.pop()`. Upon each failure, the next will be attempted. /// * `strategy` - the lookup IP strategy to use /// * `client_cache` - cache with a connection to use for performing all lookups pub fn lookup( names: Vec, strategy: LookupIpStrategy, client_cache: CachingClient, options: DnsRequestOptions, hosts: Option>, finally_ip_addr: Option, ) -> Self { let empty = ResolveError::from(ResolveErrorKind::Message("can not lookup IPs for no names")); Self { names, strategy, client_cache, // If there are no names remaining, this will be returned immediately, // otherwise, it will be retried. query: future::err(empty).boxed(), options, hosts, finally_ip_addr, } } } /// returns a new future for lookup async fn strategic_lookup( name: Name, strategy: LookupIpStrategy, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { match strategy { LookupIpStrategy::Ipv4Only => ipv4_only(name, client, options, hosts).await, LookupIpStrategy::Ipv6Only => ipv6_only(name, client, options, hosts).await, LookupIpStrategy::Ipv4AndIpv6 => ipv4_and_ipv6(name, client, options, hosts).await, LookupIpStrategy::Ipv6thenIpv4 => ipv6_then_ipv4(name, client, options, hosts).await, LookupIpStrategy::Ipv4thenIpv6 => ipv4_then_ipv6(name, client, options, hosts).await, } } /// first lookups in hosts, then performs the query async fn hosts_lookup( query: Query, mut client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { if let Some(hosts) = hosts { if let Some(lookup) = hosts.lookup_static_host(&query) { return Ok(lookup); }; } client.lookup(query, options).await } /// queries only for A records async fn ipv4_only( name: Name, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { hosts_lookup(Query::query(name, RecordType::A), client, options, hosts).await } /// queries only for AAAA records async fn ipv6_only( name: Name, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { hosts_lookup(Query::query(name, RecordType::AAAA), client, options, hosts).await } // TODO: this really 
needs to have a stream interface /// queries only for A and AAAA in parallel async fn ipv4_and_ipv6( name: Name, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { let sel_res = future::select( hosts_lookup( Query::query(name.clone(), RecordType::A), client.clone(), options, hosts.clone(), ) .boxed(), hosts_lookup(Query::query(name, RecordType::AAAA), client, options, hosts).boxed(), ) .await; let (ips, remaining_query) = match sel_res { Either::Left(ips_and_remaining) => ips_and_remaining, Either::Right(ips_and_remaining) => ips_and_remaining, }; let next_ips = remaining_query.await; match (ips, next_ips) { (Ok(ips), Ok(next_ips)) => { // TODO: create a LookupIp enum with the ability to chain these together let ips = ips.append(next_ips); Ok(ips) } (Ok(ips), Err(e)) | (Err(e), Ok(ips)) => { debug!( "one of ipv4 or ipv6 lookup failed in ipv4_and_ipv6 strategy: {}", e ); Ok(ips) } (Err(e1), Err(e2)) => { debug!( "both of ipv4 or ipv6 lookup failed in ipv4_and_ipv6 strategy e1: {}, e2: {}", e1, e2 ); Err(e1) } } } /// queries only for AAAA and on no results queries for A async fn ipv6_then_ipv4( name: Name, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { rt_then_swap( name, client, RecordType::AAAA, RecordType::A, options, hosts, ) .await } /// queries only for A and on no results queries for AAAA async fn ipv4_then_ipv6( name: Name, client: CachingClient, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { rt_then_swap( name, client, RecordType::A, RecordType::AAAA, options, hosts, ) .await } /// queries only for first_type and on no results queries for second_type async fn rt_then_swap( name: Name, client: CachingClient, first_type: RecordType, second_type: RecordType, options: DnsRequestOptions, hosts: Option>, ) -> Result where C: DnsHandle + 'static, E: Into + From + Error + Clone + Send + Unpin + 'static, { let or_client = client.clone(); let res = hosts_lookup( Query::query(name.clone(), first_type), client, options, hosts.clone(), ) .await; match res { Ok(ips) => { if ips.is_empty() { // no ips returns, NXDomain or Otherwise, doesn't matter hosts_lookup( Query::query(name.clone(), second_type), or_client, options, hosts, ) .await } else { Ok(ips) } } Err(_) => { hosts_lookup( Query::query(name.clone(), second_type), or_client, options, hosts, ) .await } } } #[cfg(test)] pub mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::{Arc, Mutex}; use futures_executor::block_on; use futures_util::future; use proto::op::Message; use proto::rr::{Name, RData, Record}; use proto::xfer::{DnsHandle, DnsRequest, DnsResponse}; use futures_util::stream::{once, Stream}; use super::*; use crate::error::ResolveError; #[derive(Clone)] pub struct MockDnsHandle { messages: Arc>>>, } impl DnsHandle for MockDnsHandle { type Response = Pin> + Send + Unpin>>; type Error = ResolveError; fn send>(&self, _: R) -> Self::Response { Box::pin(once(future::ready( self.messages.lock().unwrap().pop().unwrap_or_else(empty), ))) } } pub fn v4_message() -> Result { let mut message = Message::new(); message.add_query(Query::query(Name::root(), RecordType::A)); message.insert_answers(vec![Record::from_rdata( Name::root(), 86400, RData::A(Ipv4Addr::new(127, 0, 0, 1).into()), )]); let 
resp = DnsResponse::from_message(message).unwrap(); assert!(resp.contains_answer()); Ok(resp) } pub fn v6_message() -> Result { let mut message = Message::new(); message.add_query(Query::query(Name::root(), RecordType::AAAA)); message.insert_answers(vec![Record::from_rdata( Name::root(), 86400, RData::AAAA(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into()), )]); let resp = DnsResponse::from_message(message).unwrap(); assert!(resp.contains_answer()); Ok(resp) } pub fn empty() -> Result { Ok(DnsResponse::from_message(Message::new()).unwrap()) } pub fn error() -> Result { Err(ResolveError::from("forced test failure")) } pub fn mock(messages: Vec>) -> MockDnsHandle { MockDnsHandle { messages: Arc::new(Mutex::new(messages)), } } #[test] fn test_ipv4_only_strategy() { assert_eq!( block_on(ipv4_only( Name::root(), CachingClient::new(0, mock(vec![v4_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv4Addr::new(127, 0, 0, 1)] ); } #[test] fn test_ipv6_only_strategy() { assert_eq!( block_on(ipv6_only( Name::root(), CachingClient::new(0, mock(vec![v6_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)] ); } #[test] fn test_ipv4_and_ipv6_strategy() { // ipv6 is consistently queried first (even though the select has it second) // both succeed assert_eq!( block_on(ipv4_and_ipv6( Name::root(), CachingClient::new(0, mock(vec![v6_message(), v4_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![ IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), ] ); // only ipv4 available assert_eq!( block_on(ipv4_and_ipv6( Name::root(), CachingClient::new(0, mock(vec![empty(), v4_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))] ); // error then ipv4 assert_eq!( block_on(ipv4_and_ipv6( Name::root(), CachingClient::new(0, mock(vec![error(), v4_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))] ); // only ipv6 available assert_eq!( block_on(ipv4_and_ipv6( Name::root(), CachingClient::new(0, mock(vec![v6_message(), empty()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))] ); // error, then only ipv6 available assert_eq!( block_on(ipv4_and_ipv6( Name::root(), CachingClient::new(0, mock(vec![v6_message(), error()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))] ); } #[test] fn test_ipv6_then_ipv4_strategy() { // ipv6 first assert_eq!( block_on(ipv6_then_ipv4( Name::root(), CachingClient::new(0, mock(vec![v6_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)] ); // nothing then ipv4 assert_eq!( block_on(ipv6_then_ipv4( Name::root(), CachingClient::new(0, mock(vec![v4_message(), empty()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv4Addr::new(127, 0, 0, 1)] ); // 
ipv4 and error assert_eq!( block_on(ipv6_then_ipv4( Name::root(), CachingClient::new(0, mock(vec![v4_message(), error()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv4Addr::new(127, 0, 0, 1)] ); } #[test] fn test_ipv4_then_ipv6_strategy() { // ipv6 first assert_eq!( block_on(ipv4_then_ipv6( Name::root(), CachingClient::new(0, mock(vec![v4_message()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv4Addr::new(127, 0, 0, 1)] ); // nothing then ipv6 assert_eq!( block_on(ipv4_then_ipv6( Name::root(), CachingClient::new(0, mock(vec![v6_message(), empty()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)] ); // error then ipv6 assert_eq!( block_on(ipv4_then_ipv6( Name::root(), CachingClient::new(0, mock(vec![v6_message(), error()]), false), DnsRequestOptions::default(), None, )) .unwrap() .iter() .map(|r| r.ip_addr().unwrap()) .collect::>(), vec![Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)] ); } } hickory-resolver-0.24.0/src/name_server/connection_provider.rs000064400000000000000000000460171046102023000227420ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::io; use std::marker::Unpin; use std::net::SocketAddr; #[cfg(any(feature = "dns-over-quic", feature = "dns-over-h3"))] use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use futures_util::future::{Future, FutureExt}; use futures_util::ready; use futures_util::stream::{Stream, StreamExt}; #[cfg(feature = "tokio-runtime")] use tokio::net::TcpStream as TokioTcpStream; #[cfg(all(feature = "dns-over-native-tls", not(feature = "dns-over-rustls")))] use tokio_native_tls::TlsStream as TokioTlsStream; #[cfg(all( feature = "dns-over-openssl", not(feature = "dns-over-rustls"), not(feature = "dns-over-native-tls") ))] use tokio_openssl::SslStream as TokioTlsStream; #[cfg(feature = "dns-over-rustls")] use tokio_rustls::client::TlsStream as TokioTlsStream; use crate::config::{NameServerConfig, Protocol, ResolverOpts}; #[cfg(any(feature = "dns-over-quic", feature = "dns-over-h3"))] use hickory_proto::udp::QuicLocalAddr; #[cfg(feature = "dns-over-https")] use proto::h2::{HttpsClientConnect, HttpsClientStream}; #[cfg(feature = "dns-over-h3")] use proto::h3::{H3ClientConnect, H3ClientStream}; #[cfg(feature = "mdns")] use proto::multicast::{MdnsClientConnect, MdnsClientStream, MdnsQueryType}; #[cfg(feature = "dns-over-quic")] use proto::quic::{QuicClientConnect, QuicClientStream}; use proto::tcp::DnsTcpStream; use proto::udp::DnsUdpSocket; use proto::{ self, error::ProtoError, op::NoopMessageFinalizer, tcp::TcpClientConnect, tcp::TcpClientStream, udp::UdpClientConnect, udp::UdpClientStream, xfer::{ DnsExchange, DnsExchangeConnect, DnsExchangeSend, DnsHandle, DnsMultiplexer, DnsMultiplexerConnect, DnsRequest, DnsResponse, }, Time, }; #[cfg(feature = "tokio-runtime")] use proto::{iocompat::AsyncIoTokioAsStd, TokioTime}; use crate::error::ResolveError; /// RuntimeProvider defines which async runtime that handles IO and timers. 
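///
/// Most users will not implement this trait themselves: with the `tokio-runtime` feature enabled,
/// the bundled `TokioRuntimeProvider` is used via `TokioConnectionProvider`. As a hedged sketch
/// (the default config values are illustrative), a provider is plugged into the resolver through
/// [`crate::AsyncResolver`]:
///
/// ```rust,no_run
/// # #[cfg(feature = "tokio-runtime")]
/// # {
/// use hickory_resolver::AsyncResolver;
/// use hickory_resolver::config::{ResolverConfig, ResolverOpts};
/// use hickory_resolver::name_server::TokioConnectionProvider;
///
/// // Any type implementing `ConnectionProvider` (and, through it, `RuntimeProvider`)
/// // can be supplied here; the Tokio provider is the stock implementation.
/// let resolver = AsyncResolver::new(
///     ResolverConfig::default(),
///     ResolverOpts::default(),
///     TokioConnectionProvider::default(),
/// );
/// # drop(resolver);
/// # }
/// ```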
pub trait RuntimeProvider: Clone + Send + Sync + Unpin + 'static { /// Handle to the executor; type Handle: Clone + Send + Spawn + Sync + Unpin; /// Timer type Timer: Time + Send + Unpin; #[cfg(not(any(feature = "dns-over-quic", feature = "dns-over-h3")))] /// UdpSocket type Udp: DnsUdpSocket + Send; #[cfg(any(feature = "dns-over-quic", feature = "dns-over-h3"))] /// UdpSocket, where `QuicLocalAddr` is for `quinn` crate. type Udp: DnsUdpSocket + QuicLocalAddr + Send; /// TcpStream type Tcp: DnsTcpStream; /// Create a runtime handle fn create_handle(&self) -> Self::Handle; /// Create a TCP connection with custom configuration. fn connect_tcp( &self, server_addr: SocketAddr, ) -> Pin>>>; /// Create a UDP socket bound to `local_addr`. The returned value should **not** be connected to `server_addr`. /// *Notice: the future should be ready once returned at best effort. Otherwise UDP DNS may need much more retries.* fn bind_udp( &self, local_addr: SocketAddr, server_addr: SocketAddr, ) -> Pin>>>; } /// Create `DnsHandle` with the help of `RuntimeProvider`. /// This trait is designed for customization. pub trait ConnectionProvider: 'static + Clone + Send + Sync + Unpin { /// The handle to the connect for sending DNS requests. type Conn: DnsHandle + Clone + Send + Sync + 'static; /// Ths future is responsible for spawning any background tasks as necessary. type FutureConn: Future> + Send + 'static; /// Provider that handles the underlying I/O and timing. type RuntimeProvider: RuntimeProvider; /// Create a new connection. fn new_connection(&self, config: &NameServerConfig, options: &ResolverOpts) -> Self::FutureConn; } /// A type defines the Handle which can spawn future. pub trait Spawn { /// Spawn a future in the background fn spawn_bg(&mut self, future: F) where F: Future> + Send + 'static; } #[cfg(feature = "dns-over-tls")] /// Predefined type for TLS client stream type TlsClientStream = TcpClientStream>>>; /// The variants of all supported connections for the Resolver #[allow(clippy::large_enum_variant, clippy::type_complexity)] pub(crate) enum ConnectionConnect { Udp(DnsExchangeConnect, UdpClientStream, R::Timer>), Tcp( DnsExchangeConnect< DnsMultiplexerConnect< TcpClientConnect<::Tcp>, TcpClientStream<::Tcp>, NoopMessageFinalizer, >, DnsMultiplexer::Tcp>, NoopMessageFinalizer>, R::Timer, >, ), #[cfg(all(feature = "dns-over-tls", feature = "tokio-runtime"))] Tls( DnsExchangeConnect< DnsMultiplexerConnect< Pin< Box< dyn Future< Output = Result< TlsClientStream<::Tcp>, ProtoError, >, > + Send + 'static, >, >, TlsClientStream<::Tcp>, NoopMessageFinalizer, >, DnsMultiplexer::Tcp>, NoopMessageFinalizer>, TokioTime, >, ), #[cfg(all(feature = "dns-over-https", feature = "tokio-runtime"))] Https(DnsExchangeConnect, HttpsClientStream, TokioTime>), #[cfg(all(feature = "dns-over-quic", feature = "tokio-runtime"))] Quic(DnsExchangeConnect), #[cfg(all(feature = "dns-over-h3", feature = "tokio-runtime"))] H3(DnsExchangeConnect), #[cfg(feature = "mdns")] Mdns( DnsExchangeConnect< DnsMultiplexerConnect, DnsMultiplexer, TokioTime, >, ), } /// Resolves to a new Connection #[must_use = "futures do nothing unless polled"] pub struct ConnectionFuture { pub(crate) connect: ConnectionConnect, pub(crate) spawner: R::Handle, } impl Future for ConnectionFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Poll::Ready(Ok(match &mut self.connect { ConnectionConnect::Udp(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); 
GenericConnection(conn) } ConnectionConnect::Tcp(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } #[cfg(feature = "dns-over-tls")] ConnectionConnect::Tls(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } #[cfg(feature = "dns-over-https")] ConnectionConnect::Https(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } #[cfg(feature = "dns-over-quic")] ConnectionConnect::Quic(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } #[cfg(feature = "dns-over-h3")] ConnectionConnect::H3(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } #[cfg(feature = "mdns")] ConnectionConnect::Mdns(ref mut conn) => { let (conn, bg) = ready!(conn.poll_unpin(cx))?; self.spawner.spawn_bg(bg); GenericConnection(conn) } })) } } /// A connected DNS handle #[derive(Clone)] pub struct GenericConnection(DnsExchange); impl DnsHandle for GenericConnection { type Response = ConnectionResponse; type Error = ResolveError; fn send + Unpin + Send + 'static>(&self, request: R) -> Self::Response { ConnectionResponse(self.0.send(request)) } } /// Default connector for `GenericConnection` #[derive(Clone)] pub struct GenericConnector { runtime_provider: P, } impl GenericConnector

{ /// Create a new instance. pub fn new(runtime_provider: P) -> Self { Self { runtime_provider } } } impl Default for GenericConnector

{ fn default() -> Self { Self { runtime_provider: P::default(), } } } impl ConnectionProvider for GenericConnector

{ type Conn = GenericConnection; type FutureConn = ConnectionFuture

; type RuntimeProvider = P; fn new_connection( &self, config: &NameServerConfig, options: &ResolverOpts, ) -> Self::FutureConn { let dns_connect = match config.protocol { Protocol::Udp => { let provider_handle = self.runtime_provider.clone(); let closure = move |local_addr: SocketAddr, server_addr: SocketAddr| { provider_handle.bind_udp(local_addr, server_addr) }; let stream = UdpClientStream::with_creator( config.socket_addr, None, options.timeout, Arc::new(closure), ); let exchange = DnsExchange::connect(stream); ConnectionConnect::Udp(exchange) } Protocol::Tcp => { let socket_addr = config.socket_addr; let timeout = options.timeout; let tcp_future = self.runtime_provider.connect_tcp(socket_addr); let (stream, handle) = TcpClientStream::with_future(tcp_future, socket_addr, timeout); // TODO: need config for Signer... let dns_conn = DnsMultiplexer::with_timeout( stream, handle, timeout, NoopMessageFinalizer::new(), ); let exchange = DnsExchange::connect(dns_conn); ConnectionConnect::Tcp(exchange) } #[cfg(feature = "dns-over-tls")] Protocol::Tls => { let socket_addr = config.socket_addr; let timeout = options.timeout; let tls_dns_name = config.tls_dns_name.clone().unwrap_or_default(); let tcp_future = self.runtime_provider.connect_tcp(socket_addr); #[cfg(feature = "dns-over-rustls")] let client_config = config.tls_config.clone(); #[cfg(feature = "dns-over-rustls")] let (stream, handle) = { crate::tls::new_tls_stream_with_future( tcp_future, socket_addr, tls_dns_name, client_config, ) }; #[cfg(not(feature = "dns-over-rustls"))] let (stream, handle) = { crate::tls::new_tls_stream_with_future(tcp_future, socket_addr, tls_dns_name) }; let dns_conn = DnsMultiplexer::with_timeout( stream, handle, timeout, NoopMessageFinalizer::new(), ); let exchange = DnsExchange::connect(dns_conn); ConnectionConnect::Tls(exchange) } #[cfg(feature = "dns-over-https")] Protocol::Https => { let socket_addr = config.socket_addr; let tls_dns_name = config.tls_dns_name.clone().unwrap_or_default(); #[cfg(feature = "dns-over-rustls")] let client_config = config.tls_config.clone(); let tcp_future = self.runtime_provider.connect_tcp(socket_addr); let exchange = crate::h2::new_https_stream_with_future( tcp_future, socket_addr, tls_dns_name, client_config, ); ConnectionConnect::Https(exchange) } #[cfg(feature = "dns-over-quic")] Protocol::Quic => { let socket_addr = config.socket_addr; let bind_addr = config.bind_addr.unwrap_or(match socket_addr { SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0), SocketAddr::V6(_) => { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0) } }); let tls_dns_name = config.tls_dns_name.clone().unwrap_or_default(); #[cfg(feature = "dns-over-rustls")] let client_config = config.tls_config.clone(); let udp_future = self.runtime_provider.bind_udp(bind_addr, socket_addr); let exchange = crate::quic::new_quic_stream_with_future( udp_future, socket_addr, tls_dns_name, client_config, ); ConnectionConnect::Quic(exchange) } #[cfg(feature = "dns-over-h3")] Protocol::H3 => { let socket_addr = config.socket_addr; let bind_addr = config.bind_addr.unwrap_or(match socket_addr { SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0), SocketAddr::V6(_) => { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0) } }); let tls_dns_name = config.tls_dns_name.clone().unwrap_or_default(); let client_config = config.tls_config.clone(); let udp_future = self.runtime_provider.bind_udp(bind_addr, socket_addr); let exchange = 
crate::h3::new_h3_stream_with_future( udp_future, socket_addr, tls_dns_name, client_config, ); ConnectionConnect::H3(exchange) } #[cfg(feature = "mdns")] Protocol::Mdns => { let socket_addr = config.socket_addr; let timeout = options.timeout; let (stream, handle) = MdnsClientStream::new(socket_addr, MdnsQueryType::OneShot, None, None, None); // TODO: need config for Signer... let dns_conn = DnsMultiplexer::with_timeout( stream, handle, timeout, NoopMessageFinalizer::new(), ); let exchange = DnsExchange::connect(dns_conn); ConnectionConnect::Mdns(exchange) } }; ConnectionFuture::

{ connect: dns_connect, spawner: self.runtime_provider.create_handle(), } } } /// A stream of response to a DNS request. #[must_use = "steam do nothing unless polled"] pub struct ConnectionResponse(DnsExchangeSend); impl Stream for ConnectionResponse { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Poll::Ready(ready!(self.0.poll_next_unpin(cx)).map(|r| r.map_err(ResolveError::from))) } } #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] #[allow(unreachable_pub)] pub mod tokio_runtime { use super::*; use std::sync::{Arc, Mutex}; use tokio::net::UdpSocket as TokioUdpSocket; use tokio::task::JoinSet; /// A handle to the Tokio runtime #[derive(Clone, Default)] pub struct TokioHandle { join_set: Arc>>>, } impl Spawn for TokioHandle { fn spawn_bg(&mut self, future: F) where F: Future> + Send + 'static, { let mut join_set = self.join_set.lock().unwrap(); join_set.spawn(future); reap_tasks(&mut join_set); } } /// The Tokio Runtime for async execution #[derive(Clone, Default)] pub struct TokioRuntimeProvider(TokioHandle); impl TokioRuntimeProvider { /// Create a Tokio runtime pub fn new() -> Self { Self::default() } } impl RuntimeProvider for TokioRuntimeProvider { type Handle = TokioHandle; type Timer = TokioTime; type Udp = TokioUdpSocket; type Tcp = AsyncIoTokioAsStd; fn create_handle(&self) -> Self::Handle { self.0.clone() } fn connect_tcp( &self, server_addr: SocketAddr, ) -> Pin>>> { Box::pin(async move { TokioTcpStream::connect(server_addr) .await .map(AsyncIoTokioAsStd) }) } fn bind_udp( &self, local_addr: SocketAddr, _server_addr: SocketAddr, ) -> Pin>>> { Box::pin(tokio::net::UdpSocket::bind(local_addr)) } } /// Reap finished tasks from a `JoinSet`, without awaiting or blocking. fn reap_tasks(join_set: &mut JoinSet>) { while FutureExt::now_or_never(join_set.join_next()) .flatten() .is_some() {} } /// Default ConnectionProvider with `GenericConnection`. pub type TokioConnectionProvider = GenericConnector; } hickory-resolver-0.24.0/src/name_server/mod.rs000064400000000000000000000024031046102023000174370ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! A module with associated items for working with nameservers mod connection_provider; #[allow(clippy::module_inception)] mod name_server; mod name_server_pool; mod name_server_state; mod name_server_stats; pub use self::connection_provider::{ConnectionProvider, RuntimeProvider, Spawn}; pub use self::connection_provider::{GenericConnection, GenericConnector}; #[cfg(feature = "mdns")] #[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] pub(crate) use self::name_server::mdns_nameserver; pub use self::name_server::{GenericNameServer, NameServer}; pub use self::name_server_pool::{GenericNameServerPool, NameServerPool}; use self::name_server_state::NameServerState; use self::name_server_stats::NameServerStats; #[cfg(feature = "tokio-runtime")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-runtime")))] pub use self::connection_provider::tokio_runtime::{ TokioConnectionProvider, TokioHandle, TokioRuntimeProvider, }; hickory-resolver-0.24.0/src/name_server/name_server.rs000064400000000000000000000251571046102023000212010ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be // copied, modified, or distributed except according to those terms. use std::cmp::Ordering; use std::fmt::{self, Debug, Formatter}; use std::pin::Pin; use std::sync::Arc; use std::time::Instant; use futures_util::lock::Mutex; use futures_util::stream::{once, Stream}; #[cfg(feature = "mdns")] use proto::multicast::MDNS_IPV4; use proto::xfer::{DnsHandle, DnsRequest, DnsResponse, FirstAnswer}; use tracing::debug; use crate::config::{NameServerConfig, ResolverOpts}; use crate::error::ResolveError; use crate::name_server::connection_provider::{ConnectionProvider, GenericConnector}; use crate::name_server::{NameServerState, NameServerStats}; #[cfg(feature = "mdns")] use proto::multicast::{MdnsClientConnect, MdnsClientStream, MdnsQueryType}; /// This struct is used to create `DnsHandle` with the help of `P`. #[derive(Clone)] pub struct NameServer { config: NameServerConfig, options: ResolverOpts, client: Arc>>, state: Arc, stats: Arc, connection_provider: P, } /// Specifies the details of a remote NameServer used for lookups pub type GenericNameServer = NameServer>; impl

Debug for NameServer

where P: ConnectionProvider + Send, { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { write!(f, "config: {:?}, options: {:?}", self.config, self.options) } } impl

NameServer

where P: ConnectionProvider + Send, { /// Construct a new Nameserver with the configuration and options. The connection provider will create UDP and TCP sockets pub fn new(config: NameServerConfig, options: ResolverOpts, connection_provider: P) -> Self { Self { config, options, client: Arc::new(Mutex::new(None)), state: Arc::new(NameServerState::init(None)), stats: Arc::new(NameServerStats::default()), connection_provider, } } #[doc(hidden)] pub fn from_conn( config: NameServerConfig, options: ResolverOpts, client: P::Conn, connection_provider: P, ) -> Self { Self { config, options, client: Arc::new(Mutex::new(Some(client))), state: Arc::new(NameServerState::init(None)), stats: Arc::new(NameServerStats::default()), connection_provider, } } #[cfg(test)] #[allow(dead_code)] pub(crate) fn is_connected(&self) -> bool { !self.state.is_failed() && if let Some(client) = self.client.try_lock() { client.is_some() } else { // assuming that if someone has it locked it will be or is connected true } } /// This will return a mutable client to allows for sending messages. /// /// If the connection is in a failed state, then this will establish a new connection async fn connected_mut_client(&mut self) -> Result { let mut client = self.client.lock().await; // if this is in a failure state if self.state.is_failed() || client.is_none() { debug!("reconnecting: {:?}", self.config); // TODO: we need the local EDNS options self.state.reinit(None); let new_client = Box::pin( self.connection_provider .new_connection(&self.config, &self.options), ) .await?; // establish a new connection *client = Some(new_client); } else { debug!("existing connection: {:?}", self.config); } Ok((*client) .clone() .expect("bad state, client should be connected")) } async fn inner_send + Unpin + Send + 'static>( mut self, request: R, ) -> Result { let client = self.connected_mut_client().await?; let now = Instant::now(); let response = client.send(request).first_answer().await; let rtt = now.elapsed(); match response { Ok(response) => { // Record the measured latency. self.stats.record_rtt(rtt); // First evaluate if the message succeeded. let response = ResolveError::from_response(response, self.config.trust_negative_responses)?; // TODO: consider making message::take_edns... let remote_edns = response.extensions().clone(); // take the remote edns options and store them self.state.establish(remote_edns); Ok(response) } Err(error) => { debug!("name_server connection failure: {}", error); // this transitions the state to failure self.state.fail(Instant::now()); // record the failure self.stats.record_connection_failure(); // These are connection failures, not lookup failures, that is handled in the resolver layer Err(error) } } } /// Specifies that this NameServer will treat negative responses as permanent failures and will not retry pub fn trust_nx_responses(&self) -> bool { self.config.trust_negative_responses } } impl

DnsHandle for NameServer

where P: ConnectionProvider + Clone, { type Response = Pin> + Send>>; type Error = ResolveError; fn is_verifying_dnssec(&self) -> bool { self.options.validate } // TODO: there needs to be some way of customizing the connection based on EDNS options from the server side... fn send + Unpin + Send + 'static>(&self, request: R) -> Self::Response { let this = self.clone(); // if state is failed, return future::err(), unless retry delay expired.. Box::pin(once(this.inner_send(request))) } } impl

Ord for NameServer

where P: ConnectionProvider + Send, { /// Custom implementation of Ord for NameServer which incorporates the performance of the connection into it's ranking fn cmp(&self, other: &Self) -> Ordering { // if they are literally equal, just return if self == other { return Ordering::Equal; } self.stats.cmp(&other.stats) } } impl

PartialOrd for NameServer

where P: ConnectionProvider + Send, { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl

PartialEq for NameServer

where P: ConnectionProvider + Send, { /// NameServers are equal if the config (connection information) are equal fn eq(&self, other: &Self) -> bool { self.config == other.config } } impl

Eq for NameServer

where P: ConnectionProvider + Send {} // TODO: once IPv6 is better understood, also make this a binary keep. #[cfg(feature = "mdns")] pub(crate) fn mdns_nameserver

( options: ResolverOpts, conn_provider: P, trust_negative_responses: bool, ) -> GenericNameServer

where P: ConnectionProvider, { let config = NameServerConfig { socket_addr: *MDNS_IPV4, protocol: Protocol::Mdns, tls_dns_name: None, trust_negative_responses, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; GenericNameServer::new_with_provider(config, options, conn_provider) } #[cfg(test)] #[cfg(feature = "tokio-runtime")] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; use futures_util::{future, FutureExt}; use tokio::runtime::Runtime; use proto::op::{Query, ResponseCode}; use proto::rr::{Name, RecordType}; use proto::xfer::{DnsHandle, DnsRequestOptions, FirstAnswer}; use super::*; use crate::config::Protocol; use crate::name_server::TokioConnectionProvider; #[test] fn test_name_server() { //env_logger::try_init().ok(); let config = NameServerConfig { socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 53), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let io_loop = Runtime::new().unwrap(); let name_server = future::lazy(|_| { GenericNameServer::new( config, ResolverOpts::default(), TokioConnectionProvider::default(), ) }); let name = Name::parse("www.example.com.", None).unwrap(); let response = io_loop .block_on(name_server.then(|name_server| { name_server .lookup( Query::query(name.clone(), RecordType::A), DnsRequestOptions::default(), ) .first_answer() })) .expect("query failed"); assert_eq!(response.response_code(), ResponseCode::NoError); } #[test] fn test_failed_name_server() { let options = ResolverOpts { timeout: Duration::from_millis(1), // this is going to fail, make it fail fast... ..ResolverOpts::default() }; let config = NameServerConfig { socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 252)), 252), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let io_loop = Runtime::new().unwrap(); let name_server = future::lazy(|_| { GenericNameServer::new(config, options, TokioConnectionProvider::default()) }); let name = Name::parse("www.example.com.", None).unwrap(); assert!(io_loop .block_on(name_server.then(|name_server| { name_server .lookup( Query::query(name.clone(), RecordType::A), DnsRequestOptions::default(), ) .first_answer() })) .is_err()); } } hickory-resolver-0.24.0/src/name_server/name_server_pool.rs000064400000000000000000000527351046102023000222340ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
use std::cmp::Ordering; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; use futures_util::future::FutureExt; use futures_util::stream::{once, FuturesUnordered, Stream, StreamExt}; use smallvec::SmallVec; use proto::xfer::{DnsHandle, DnsRequest, DnsResponse, FirstAnswer}; use proto::Time; use tracing::debug; use rand::thread_rng as rng; use rand::Rng; use crate::config::{NameServerConfigGroup, ResolverConfig, ResolverOpts, ServerOrderingStrategy}; use crate::error::{ResolveError, ResolveErrorKind}; #[cfg(feature = "mdns")] use crate::name_server; use crate::name_server::connection_provider::{ConnectionProvider, GenericConnector}; use crate::name_server::name_server::NameServer; use crate::name_server::RuntimeProvider; #[cfg(test)] #[cfg(feature = "tokio-runtime")] use crate::name_server::TokioRuntimeProvider; /// Abstract interface for mocking purpose #[derive(Clone)] pub struct NameServerPool { // TODO: switch to FuturesMutex (Mutex will have some undesirable locking) datagram_conns: Arc<[NameServer
<P>
]>, /* All NameServers must be the same type */ stream_conns: Arc<[NameServer
<P>
]>, /* All NameServers must be the same type */ #[cfg(feature = "mdns")] mdns_conns: NameServer
<P>
, /* All NameServers must be the same type */ options: ResolverOpts, } /// A pool of NameServers /// /// This is not expected to be used directly, see [crate::AsyncResolver]. pub type GenericNameServerPool
<R>
= NameServerPool<GenericConnector<R>>; #[cfg(test)] #[cfg(feature = "tokio-runtime")] impl GenericNameServerPool<TokioRuntimeProvider> { pub(crate) fn tokio_from_config( config: &ResolverConfig, options: ResolverOpts, runtime: TokioRuntimeProvider, ) -> Self { Self::from_config_with_provider(config, options, GenericConnector::new(runtime)) } } impl
<P>
NameServerPool
<P>
where P: ConnectionProvider + 'static, { pub(crate) fn from_config_with_provider( config: &ResolverConfig, options: ResolverOpts, conn_provider: P, ) -> Self { let datagram_conns: Vec> = config .name_servers() .iter() .filter(|ns_config| ns_config.protocol.is_datagram()) .map(|ns_config| { #[cfg(feature = "dns-over-rustls")] let ns_config = { let mut ns_config = ns_config.clone(); ns_config.tls_config = config.client_config().clone(); ns_config }; #[cfg(not(feature = "dns-over-rustls"))] let ns_config = { ns_config.clone() }; NameServer::new(ns_config, options.clone(), conn_provider.clone()) }) .collect(); let stream_conns: Vec> = config .name_servers() .iter() .filter(|ns_config| ns_config.protocol.is_stream()) .map(|ns_config| { #[cfg(feature = "dns-over-rustls")] let ns_config = { let mut ns_config = ns_config.clone(); ns_config.tls_config = config.client_config().clone(); ns_config }; #[cfg(not(feature = "dns-over-rustls"))] let ns_config = { ns_config.clone() }; NameServer::new(ns_config, options.clone(), conn_provider.clone()) }) .collect(); Self { datagram_conns: Arc::from(datagram_conns), stream_conns: Arc::from(stream_conns), #[cfg(feature = "mdns")] mdns_conns: name_server::mdns_nameserver(options, conn_provider.clone(), false), options, } } /// Construct a NameServerPool from a set of name server configs pub fn from_config( name_servers: NameServerConfigGroup, options: ResolverOpts, conn_provider: P, ) -> Self { let map_config_to_ns = |ns_config| NameServer::new(ns_config, options.clone(), conn_provider.clone()); let (datagram, stream): (Vec<_>, Vec<_>) = name_servers .into_inner() .into_iter() .partition(|ns| ns.protocol.is_datagram()); let datagram_conns: Vec<_> = datagram.into_iter().map(map_config_to_ns).collect(); let stream_conns: Vec<_> = stream.into_iter().map(map_config_to_ns).collect(); Self { datagram_conns: Arc::from(datagram_conns), stream_conns: Arc::from(stream_conns), #[cfg(feature = "mdns")] mdns_conns: name_server::mdns_nameserver(*options, conn_provider.clone(), false), options, } } #[doc(hidden)] #[cfg(not(feature = "mdns"))] pub fn from_nameservers( options: ResolverOpts, datagram_conns: Vec>, stream_conns: Vec>, ) -> Self { Self { datagram_conns: Arc::from(datagram_conns), stream_conns: Arc::from(stream_conns), options, } } #[doc(hidden)] #[cfg(feature = "mdns")] pub fn from_nameservers( options: ResolverOpts, datagram_conns: Vec>, stream_conns: Vec>, mdns_conns: NameServer
<P>
, ) -> Self { GenericNameServerPool { datagram_conns: Arc::from(datagram_conns), stream_conns: Arc::from(stream_conns), mdns_conns, options, } } #[cfg(test)] #[cfg(not(feature = "mdns"))] #[allow(dead_code)] fn from_nameservers_test( options: ResolverOpts, datagram_conns: Arc<[NameServer
<P>
]>, stream_conns: Arc<[NameServer
<P>
]>, ) -> Self { Self { datagram_conns, stream_conns, options, } } #[cfg(test)] #[cfg(feature = "mdns")] fn from_nameservers_test( options: &ResolverOpts, datagram_conns: Arc<[NameServer
<P>
]>, stream_conns: Arc<[NameServer
<P>
]>, mdns_conns: NameServer
<P>
, ) -> Self { GenericNameServerPool { datagram_conns, stream_conns, mdns_conns, options: *options, } } async fn try_send( opts: ResolverOpts, conns: Arc<[NameServer
<P>
]>, request: DnsRequest, ) -> Result<DnsResponse, ResolveError> { let mut conns: Vec<NameServer<P>> = conns.to_vec(); match opts.server_ordering_strategy { // select the highest priority connection // reorder the connections based on current view... // this reorders the inner set ServerOrderingStrategy::QueryStatistics => conns.sort_unstable(), ServerOrderingStrategy::UserProvidedOrder => {} } let request_loop = request.clone(); parallel_conn_loop(conns, request_loop, opts).await } } impl
<P>
DnsHandle for NameServerPool
<P>
where P: ConnectionProvider + 'static, { type Response = Pin> + Send>>; type Error = ResolveError; fn send>(&self, request: R) -> Self::Response { let opts = self.options.clone(); let request = request.into(); let datagram_conns = Arc::clone(&self.datagram_conns); let stream_conns = Arc::clone(&self.stream_conns); // TODO: remove this clone, return the Message in the error? let tcp_message = request.clone(); // if it's a .local. query, then we *only* query mDNS, these should never be sent on to upstream resolvers #[cfg(feature = "mdns")] let mdns = mdns::maybe_local(&mut self.mdns_conns, request); // TODO: limited to only when mDNS is enabled, but this should probably always be enforced? #[cfg(not(feature = "mdns"))] let mdns = Local::NotMdns(request); // local queries are queried through mDNS if mdns.is_local() { return mdns.take_stream(); } // TODO: should we allow mDNS to be used for standard lookups as well? // it wasn't a local query, continue with standard lookup path let request = mdns.take_request(); Box::pin(once(async move { debug!("sending request: {:?}", request.queries()); // First try the UDP connections let udp_res = match Self::try_send(opts.clone(), datagram_conns, request).await { Ok(response) if response.truncated() => { debug!("truncated response received, retrying over TCP"); Ok(response) } Err(e) if opts.try_tcp_on_error || e.is_no_connections() => { debug!("error from UDP, retrying over TCP: {}", e); Err(e) } result => return result, }; if stream_conns.is_empty() { debug!("no TCP connections available"); return udp_res; } // Try query over TCP, as response to query over UDP was either truncated or was an // error. let tcp_res = Self::try_send(opts, stream_conns, tcp_message).await; let tcp_err = match tcp_res { res @ Ok(..) => return res, Err(e) => e, }; // Even if the UDP result was truncated, return that let udp_err = match udp_res { Ok(response) => return Ok(response), Err(e) => e, }; match udp_err.cmp_specificity(&tcp_err) { Ordering::Greater => Err(udp_err), _ => Err(tcp_err), } })) } } // TODO: we should be able to have a self-referential future here with Pin and not require cloned conns /// An async function that will loop over all the conns with a max parallel request count of ops.num_concurrent_req async fn parallel_conn_loop
<P>
( mut conns: Vec>, request: DnsRequest, opts: ResolverOpts, ) -> Result where P: ConnectionProvider + 'static, { let mut err = ResolveError::no_connections(); // If the name server we're trying is giving us backpressure by returning ProtoErrorKind::Busy, // we will first try the other name servers (as for other error types). However, if the other // servers are also busy, we're going to wait for a little while and then retry each server that // returned Busy in the previous round. If the server is still Busy, this continues, while // the backoff increases exponentially (by a factor of 2), until it hits 300ms, in which case we // give up. The request might still be retried by the caller (likely the DnsRetryHandle). // // TODO: more principled handling of timeouts. Currently, timeouts appear to be handled mostly // close to the connection, which means the top level resolution might take substantially longer // to fire than the timeout configured in `ResolverOpts`. let mut backoff = Duration::from_millis(20); let mut busy = SmallVec::<[NameServer
<P>
; 2]>::new(); loop { let request_cont = request.clone(); // construct the parallel requests, 2 is the default let mut par_conns = SmallVec::<[NameServer
<P>
; 2]>::new(); let count = conns.len().min(opts.num_concurrent_reqs.max(1)); // Shuffle DNS NameServers to avoid overloading the first configured ones if opts.shuffle_dns_servers { for _ in 0..count { let idx = rng().gen_range(0..conns.len()); // UNWRAP: swap_remove has an implicit panicking bounds check. This should // never fail because we check that conns is not empty and generate the idx // to explicitly be in range. par_conns.push(conns.swap_remove(idx)); } } else { for conn in conns.drain(..count) { par_conns.push(conn); } } if par_conns.is_empty() { if !busy.is_empty() && backoff < Duration::from_millis(300) { <
<P as ConnectionProvider>
::RuntimeProvider as RuntimeProvider>::Timer::delay_for( backoff, ) .await; conns.extend(busy.drain(..)); backoff *= 2; continue; } return Err(err); } let mut requests = par_conns .into_iter() .map(move |conn| { conn.send(request_cont.clone()) .first_answer() .map(|result| result.map_err(|e| (conn, e))) }) .collect::>(); while let Some(result) = requests.next().await { let (conn, e) = match result { Ok(sent) => return Ok(sent), Err((conn, e)) => (conn, e), }; match e.kind() { ResolveErrorKind::NoRecordsFound { trusted, .. } if *trusted => { return Err(e); } ResolveErrorKind::Proto(e) if e.is_busy() => { busy.push(conn); } _ if err.cmp_specificity(&e) == Ordering::Less => { err = e; } _ => {} } } } } #[cfg(feature = "mdns")] mod mdns { use super::*; use proto::rr::domain::usage; use proto::DnsHandle; /// Returns true pub(crate) fn maybe_local( name_server: &mut NameServer, request: DnsRequest, ) -> Local where C: DnsHandle + 'static, P: ConnectionProvider + 'static, P: ConnectionProvider, { if request .queries() .iter() .any(|query| usage::LOCAL.name().zone_of(query.name())) { Local::ResolveStream(name_server.send(request)) } else { Local::NotMdns(request) } } } #[allow(clippy::large_enum_variant)] pub(crate) enum Local { #[allow(dead_code)] ResolveStream(Pin> + Send>>), NotMdns(DnsRequest), } impl Local { fn is_local(&self) -> bool { matches!(*self, Self::ResolveStream(..)) } /// Takes the stream /// /// # Panics /// /// Panics if this is in fact a Local::NotMdns fn take_stream(self) -> Pin> + Send>> { match self { Self::ResolveStream(future) => future, _ => panic!("non Local queries have no future, see take_message()"), } } /// Takes the message /// /// # Panics /// /// Panics if this is in fact a Local::ResolveStream fn take_request(self) -> DnsRequest { match self { Self::NotMdns(request) => request, _ => panic!("Local queries must be polled, see take_future()"), } } } impl Stream for Local { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match *self { Self::ResolveStream(ref mut ns) => ns.as_mut().poll_next(cx), // TODO: making this a panic for now Self::NotMdns(..) 
=> panic!("Local queries that are not mDNS should not be polled"), //Local::NotMdns(message) => return Err(ResolveErrorKind::Message("not mDNS")), } } } #[cfg(test)] #[cfg(feature = "tokio-runtime")] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use tokio::runtime::Runtime; use hickory_proto::rr::RData; use proto::op::Query; use proto::rr::{Name, RecordType}; use proto::xfer::{DnsHandle, DnsRequestOptions}; use super::*; use crate::config::NameServerConfig; use crate::config::Protocol; use crate::name_server::TokioRuntimeProvider; use crate::name_server::{GenericNameServer, TokioConnectionProvider}; #[ignore] // because of there is a real connection that needs a reasonable timeout #[test] #[allow(clippy::uninlined_format_args)] fn test_failed_then_success_pool() { let config1 = NameServerConfig { socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 252)), 253), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let config2 = NameServerConfig { socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 53), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let mut resolver_config = ResolverConfig::new(); resolver_config.add_name_server(config1); resolver_config.add_name_server(config2); let io_loop = Runtime::new().unwrap(); let pool = GenericNameServerPool::tokio_from_config( &resolver_config, ResolverOpts::default(), TokioRuntimeProvider::new(), ); let name = Name::parse("www.example.com.", None).unwrap(); // TODO: it's not clear why there are two failures before the success for i in 0..2 { assert!( io_loop .block_on( pool.lookup( Query::query(name.clone(), RecordType::A), DnsRequestOptions::default() ) .first_answer() ) .is_err(), "iter: {}", i ); } for i in 0..10 { assert!( io_loop .block_on( pool.lookup( Query::query(name.clone(), RecordType::A), DnsRequestOptions::default() ) .first_answer() ) .is_ok(), "iter: {}", i ); } } #[test] fn test_multi_use_conns() { let io_loop = Runtime::new().unwrap(); let conn_provider = TokioConnectionProvider::default(); let tcp = NameServerConfig { socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 53), protocol: Protocol::Tcp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }; let opts = ResolverOpts { try_tcp_on_error: true, ..ResolverOpts::default() }; let ns_config = { tcp }; let name_server = GenericNameServer::new(ns_config, opts.clone(), conn_provider); let name_servers: Arc<[_]> = Arc::from([name_server]); #[cfg(not(feature = "mdns"))] let pool = GenericNameServerPool::from_nameservers_test( opts, Arc::from([]), Arc::clone(&name_servers), ); #[cfg(feature = "mdns")] let mut pool = GenericNameServerPool::from_nameservers_test( &opts, Arc::from([]), Arc::clone(&name_servers), name_server::mdns_nameserver(opts, TokioConnectionProvider::default(), false), ); let name = Name::from_str("www.example.com.").unwrap(); // first lookup let response = io_loop .block_on( pool.lookup( Query::query(name.clone(), RecordType::A), DnsRequestOptions::default(), ) .first_answer(), ) .expect("lookup failed"); assert_eq!( *response.answers()[0] .data() .and_then(RData::as_a) .expect("no a record available"), Ipv4Addr::new(93, 184, 216, 34).into() ); assert!( name_servers[0].is_connected(), "if this is failing 
then the NameServers aren't being properly shared." ); // first lookup let response = io_loop .block_on( pool.lookup( Query::query(name, RecordType::AAAA), DnsRequestOptions::default(), ) .first_answer(), ) .expect("lookup failed"); assert_eq!( *response.answers()[0] .data() .and_then(RData::as_aaaa) .expect("no aaaa record available"), Ipv6Addr::new(0x2606, 0x2800, 0x0220, 0x0001, 0x0248, 0x1893, 0x25c8, 0x1946).into() ); assert!( name_servers[0].is_connected(), "if this is failing then the NameServers aren't being properly shared." ); } } hickory-resolver-0.24.0/src/name_server/name_server_state.rs000064400000000000000000000124341046102023000223730ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::cmp::Ordering; use std::sync::atomic::{self, AtomicU8}; use std::sync::Arc; use std::time::Instant; use futures_util::lock::Mutex; use proto::op::Edns; pub(crate) struct NameServerState { conn_state: AtomicU8, remote_edns: Mutex>>, } /// State of a connection with a remote NameServer. #[derive(Debug, Eq, PartialEq, Copy, Clone)] #[repr(u8)] enum NameServerStateInner { /// For some reason the connection failed. For UDP this would generally be a timeout /// for TCP this could be either Connection could never be established, or it /// failed at some point after. The Failed state should *not* be entered due to an /// error contained in a Message received from the server. In All cases to reestablish /// a new connection will need to be created. Failed = 0, /// Initial state, if Edns is not none, then Edns will be requested Init = 1, /// There has been successful communication with the remote. /// if no Edns is associated, then the remote does not support Edns Established = 2, } impl From for u8 { /// used for ordering purposes. The highest priority is placed on open connections fn from(val: NameServerStateInner) -> Self { val as Self } } impl From for NameServerStateInner { fn from(val: u8) -> Self { match val { 2 => Self::Established, 1 => Self::Init, _ => Self::Failed, } } } impl NameServerState { fn store(&self, conn_state: NameServerStateInner) { self.conn_state .store(conn_state.into(), atomic::Ordering::Release); } fn load(&self) -> NameServerStateInner { NameServerStateInner::from(self.conn_state.load(atomic::Ordering::Acquire)) } /// Set at the new Init state /// /// If send_dns is some, this will be sent on the first request when it is established pub(crate) fn init(_send_edns: Option) -> Self { // TODO: need to track send_edns Self { conn_state: AtomicU8::new(NameServerStateInner::Init.into()), remote_edns: Mutex::new(Arc::new(None)), } } /// Set at the new Init state /// /// If send_dns is some, this will be sent on the first request when it is established pub(crate) fn reinit(&self, _send_edns: Option) { // eventually do this // self.send_edns.lock() = send_edns; self.store(NameServerStateInner::Init); } /// Transition to the Established state /// /// If remote_edns is Some, then it will be used to effect things like buffer sizes based on /// the remote's support. pub(crate) fn establish(&self, remote_edns: Option) { if remote_edns.is_some() { // best effort locking, we'll assume a different user of this connection is storing the same thing... 
if let Some(mut current_edns) = self.remote_edns.try_lock() { *current_edns = Arc::new(remote_edns) } } self.store(NameServerStateInner::Established); } /// transition to the Failed state /// /// when is the time of the failure /// /// * when - deprecated pub(crate) fn fail(&self, _when: /* FIXME: remove in 0.20 */ Instant) { self.store(NameServerStateInner::Failed); } /// True if this is in the Failed state pub(crate) fn is_failed(&self) -> bool { NameServerStateInner::Failed == self.load() } } impl Ord for NameServerStateInner { fn cmp(&self, other: &Self) -> Ordering { let (self_num, other_num) = (u8::from(*self), u8::from(*other)); self_num.cmp(&other_num) } } impl PartialOrd for NameServerStateInner { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl Ord for NameServerState { fn cmp(&self, other: &Self) -> Ordering { let other = other.load(); self.load().cmp(&other) } } impl PartialOrd for NameServerState { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl PartialEq for NameServerState { fn eq(&self, other: &Self) -> bool { self.load() == other.load() } } impl Eq for NameServerState {} #[cfg(test)] mod tests { use super::*; use crate::name_server::NameServerState; #[test] fn test_state_cmp() { let init = NameServerState::init(None); let established = NameServerState::init(None); established.establish(None); let failed = NameServerState::init(None); failed.fail(Instant::now()); assert_eq!(init.cmp(&init), Ordering::Equal); assert_eq!(init.cmp(&established), Ordering::Less); assert_eq!(init.cmp(&failed), Ordering::Greater); assert_eq!(established.cmp(&established), Ordering::Equal); assert_eq!(established.cmp(&failed), Ordering::Greater); assert_eq!(failed.cmp(&failed), Ordering::Equal); } } hickory-resolver-0.24.0/src/name_server/name_server_stats.rs000064400000000000000000000326701046102023000224150ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::cmp::Ordering; use std::sync::{ atomic::{self, AtomicU32}, Arc, }; use parking_lot::Mutex; use rand::Rng as _; #[cfg(not(test))] use std::time::{Duration, Instant}; #[cfg(test)] use tokio::time::{Duration, Instant}; pub(crate) struct NameServerStats { /// The smoothed round-trip time (SRTT). /// /// This value represents an exponentially weighted moving average (EWMA) of /// recorded latencies. The algorithm for computing this value is based on /// the following: /// /// https://en.wikipedia.org/wiki/Moving_average#Application_to_measuring_computer_performance /// /// It is also partially inspired by the BIND and PowerDNS implementations: /// /// - https://github.com/isc-projects/bind9/blob/7bf8a7ab1b280c1021bf1e762a239b07aac3c591/lib/dns/adb.c#L3487 /// - https://github.com/PowerDNS/pdns/blob/7c5f9ae6ae4fb17302d933eaeebc8d6f0249aab2/pdns/syncres.cc#L123 /// /// The algorithm for computing and using this value can be summarized as /// follows: /// /// 1. The value is initialized to a random value that represents a very low /// latency. /// 2. If the round-trip time (RTT) was successfully measured for a query, /// then it is incorporated into the EWMA using the formula linked above. /// 3. If the RTT could not be measured (i.e. due to a connection failure), /// then a constant penalty factor is applied to the EWMA. /// 4. 
When comparing EWMA values, a time-based decay is applied to each /// value. Note that this decay is only applied at read time. /// /// For the original discussion regarding this algorithm, see /// https://github.com/hickory-dns/hickory-dns/issues/1702. srtt_microseconds: AtomicU32, /// The last time the `srtt_microseconds` value was updated. last_update: Arc>>, } impl Default for NameServerStats { fn default() -> Self { // Initialize the SRTT to a randomly generated value that represents a // very low RTT. Such a value helps ensure that each server is attempted // early. Self::new(Duration::from_micros(rand::thread_rng().gen_range(1..32))) } } /// Returns an exponentially weighted value in the range of 0.0 < x < 1.0 /// /// Computes the value using the following formula: /// /// e(-tnow - tlast) / weight /// /// As the duration since the `last_update` approaches the provided `weight`, /// the returned value decreases. fn compute_srtt_factor(last_update: Instant, weight: u32) -> f64 { let exponent = (-last_update.elapsed().as_secs_f64().max(1.0)) / f64::from(weight); exponent.exp() } impl NameServerStats { const CONNECTION_FAILURE_PENALTY: u32 = Duration::from_millis(150).as_micros() as u32; const MAX_SRTT_MICROS: u32 = Duration::from_secs(5).as_micros() as u32; pub(crate) fn new(initial_srtt: Duration) -> Self { Self { srtt_microseconds: AtomicU32::new(initial_srtt.as_micros() as u32), last_update: Arc::new(Mutex::new(None)), } } /// Records the measured `rtt` for a particular query. pub(crate) fn record_rtt(&self, rtt: Duration) { // If the cast on the result does overflow (it shouldn't), then the // value is saturated to u32::MAX, which is above the `MAX_SRTT_MICROS` // limit (meaning that any potential overflow is inconsequential). // See https://github.com/rust-lang/rust/issues/10184. self.update_srtt( rtt.as_micros() as u32, |cur_srtt_microseconds, last_update| { // An arbitrarily low weight is used when computing the factor // to ensure that recent RTT measurements are weighted more // heavily. let factor = compute_srtt_factor(last_update, 3); let new_srtt = (1.0 - factor) * (rtt.as_micros() as f64) + factor * f64::from(cur_srtt_microseconds); new_srtt.round() as u32 }, ); } /// Records a connection failure for a particular query. pub(crate) fn record_connection_failure(&self) { self.update_srtt( Self::CONNECTION_FAILURE_PENALTY, |cur_srtt_microseconds, _last_update| { cur_srtt_microseconds.saturating_add(Self::CONNECTION_FAILURE_PENALTY) }, ); } /// Returns the raw SRTT value. /// /// Prefer to use `decayed_srtt` when ordering name servers. fn srtt(&self) -> Duration { Duration::from_micros(u64::from( self.srtt_microseconds.load(atomic::Ordering::Acquire), )) } /// Returns the SRTT value after applying a time based decay. /// /// The decay exponentially decreases the SRTT value. The primary reasons /// for applying a downwards decay are twofold: /// /// 1. It helps distribute query load. /// 2. It helps detect positive network changes. For example, decreases in /// latency or a server that has recovered from a failure. fn decayed_srtt(&self) -> f64 { let srtt = f64::from(self.srtt_microseconds.load(atomic::Ordering::Acquire)); self.last_update.lock().map_or(srtt, |last_update| { // In general, if the time between queries is relatively short, then // the server ordering algorithm will approximate a spike // distribution where the servers with the lowest latencies are // chosen much more frequently. 
Conversely, if the time between // queries is relatively long, then the query distribution will be // more uniform. A larger weight widens the window in which servers // with historically lower latencies will be heavily preferred. On // the other hand, a larger weight may also increase the time it // takes to recover from a failure or to observe positive changes in // latency. srtt * compute_srtt_factor(last_update, 180) }) } /// Updates the SRTT value. /// /// If the `last_update` value has not been set, then uses the `default` /// value to update the SRTT. Otherwise, invokes the `update_fn` with the /// current SRTT value and the `last_update` timestamp. fn update_srtt(&self, default: u32, update_fn: impl Fn(u32, Instant) -> u32) { let last_update = self.last_update.lock().replace(Instant::now()); let _ = self.srtt_microseconds.fetch_update( atomic::Ordering::SeqCst, atomic::Ordering::SeqCst, move |cur_srtt_microseconds| { Some( last_update .map_or(default, |last_update| { update_fn(cur_srtt_microseconds, last_update) }) .min(Self::MAX_SRTT_MICROS), ) }, ); } } impl PartialEq for NameServerStats { fn eq(&self, other: &Self) -> bool { self.srtt() == other.srtt() } } impl Eq for NameServerStats {} // TODO: Replace this with `f64::total_cmp` once the Rust version is bumped to // 1.62.0 (the method is stable beyond that version). In the meantime, the // implementation is copied from here: // https://github.com/rust-lang/rust/blob/master/library/core/src/num/f64.rs#L1336 fn total_cmp(x: f64, y: f64) -> Ordering { let mut left = x.to_bits() as i64; let mut right = y.to_bits() as i64; left ^= (((left >> 63) as u64) >> 1) as i64; right ^= (((right >> 63) as u64) >> 1) as i64; left.cmp(&right) } impl Ord for NameServerStats { /// Custom implementation of Ord for NameServer which incorporates the /// performance of the connection into it's ranking. fn cmp(&self, other: &Self) -> Ordering { total_cmp(self.decayed_srtt(), other.decayed_srtt()) } } impl PartialOrd for NameServerStats { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } #[cfg(test)] #[allow(clippy::extra_unused_type_parameters)] mod tests { use super::*; fn is_send_sync() -> bool { true } #[test] fn stats_are_sync() { assert!(is_send_sync::()); } #[tokio::test(start_paused = true)] async fn test_stats_cmp() { let server_a = NameServerStats::new(Duration::from_micros(10)); let server_b = NameServerStats::new(Duration::from_micros(20)); // No RTTs or failures have been recorded. The initial SRTTs should be // compared. assert_eq!(server_a.cmp(&server_b), Ordering::Less); // Server A was used. Unused server B should now be preferred. server_a.record_rtt(Duration::from_millis(30)); tokio::time::advance(Duration::from_secs(5)).await; assert_eq!(server_a.cmp(&server_b), Ordering::Greater); // Both servers have been used. Server A has a lower SRTT and should be // preferred. server_b.record_rtt(Duration::from_millis(50)); tokio::time::advance(Duration::from_secs(5)).await; assert_eq!(server_a.cmp(&server_b), Ordering::Less); // Server A experiences a connection failure, which results in Server B // being preferred. server_a.record_connection_failure(); tokio::time::advance(Duration::from_secs(5)).await; assert_eq!(server_a.cmp(&server_b), Ordering::Greater); // Server A should eventually recover and once again be preferred. 
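// The recovery below relies on the read-time decay: server A's penalized SRTT
// shrinks as the paused clock is advanced, while server B keeps re-recording
// ~50ms RTTs on every iteration.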
while server_a.cmp(&server_b) != Ordering::Less { server_b.record_rtt(Duration::from_millis(50)); tokio::time::advance(Duration::from_secs(5)).await; } server_a.record_rtt(Duration::from_millis(30)); tokio::time::advance(Duration::from_secs(3)).await; assert_eq!(server_a.cmp(&server_b), Ordering::Less); } #[tokio::test(start_paused = true)] async fn test_record_rtt() { let server = NameServerStats::new(Duration::from_micros(10)); let first_rtt = Duration::from_millis(50); server.record_rtt(first_rtt); // The first recorded RTT should replace the initial value. assert_eq!(server.srtt(), first_rtt); tokio::time::advance(Duration::from_secs(3)).await; // Subsequent RTTs should factor in previously recorded values. server.record_rtt(Duration::from_millis(100)); assert_eq!(server.srtt(), Duration::from_micros(81606)); } #[test] fn test_record_rtt_maximum_value() { let server = NameServerStats::new(Duration::from_micros(10)); server.record_rtt(Duration::MAX); // Updates to the SRTT are capped at a maximum value. assert_eq!( server.srtt(), Duration::from_micros(NameServerStats::MAX_SRTT_MICROS.into()) ); } #[tokio::test(start_paused = true)] async fn test_record_connection_failure() { let server = NameServerStats::new(Duration::from_micros(10)); // Verify that the SRTT value is initially replaced with the penalty and // subsequent failures result in the penalty being added. for failure_count in 1..4 { server.record_connection_failure(); assert_eq!( server.srtt(), Duration::from_micros( NameServerStats::CONNECTION_FAILURE_PENALTY .checked_mul(failure_count) .expect("checked_mul overflow") .into() ) ); tokio::time::advance(Duration::from_secs(3)).await; } // Verify that the `last_update` timestamp was updated for a connection // failure and is used in subsequent calculations. server.record_rtt(Duration::from_millis(50)); assert_eq!(server.srtt(), Duration::from_micros(197152)); } #[test] fn test_record_connection_failure_maximum_value() { let server = NameServerStats::new(Duration::from_micros(10)); let num_failures = (NameServerStats::MAX_SRTT_MICROS / NameServerStats::CONNECTION_FAILURE_PENALTY) + 1; for _ in 0..num_failures { server.record_connection_failure(); } // Updates to the SRTT are capped at a maximum value. assert_eq!( server.srtt(), Duration::from_micros(NameServerStats::MAX_SRTT_MICROS.into()) ); } #[tokio::test(start_paused = true)] async fn test_decayed_srtt() { let initial_srtt = 10; let server = NameServerStats::new(Duration::from_micros(initial_srtt)); // No decay should be applied to the initial value. assert_eq!(server.decayed_srtt() as u32, initial_srtt as u32); tokio::time::advance(Duration::from_secs(5)).await; server.record_rtt(Duration::from_millis(100)); // The decay function should assume a minimum of one second has elapsed // since the last update. tokio::time::advance(Duration::from_millis(500)).await; assert_eq!(server.decayed_srtt() as u32, 99445); tokio::time::advance(Duration::from_secs(5)).await; assert_eq!(server.decayed_srtt() as u32, 96990); } } hickory-resolver-0.24.0/src/quic.rs000064400000000000000000000125651046102023000153250ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
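// A minimal illustrative sketch (not taken from the crate sources): the SRTT decay
// documented in name_server_stats.rs above, written out with concrete numbers. Only
// `std` is assumed; the weight of 180 and the 1s minimum elapsed time mirror
// `decayed_srtt`/`compute_srtt_factor` as shown in that file.
#[allow(dead_code)]
fn srtt_decay_sketch() {
    // compute_srtt_factor: e^(-(max(elapsed, 1s)) / weight)
    fn factor(elapsed_secs: f64, weight: f64) -> f64 {
        (-(elapsed_secs.max(1.0)) / weight).exp()
    }

    // A stored SRTT of 100_000us, read 60s after its last update with weight 180:
    // e^(-60/180) ~= 0.7165, so the decayed value used for ordering is ~= 71_653us.
    let decayed = 100_000.0 * factor(60.0, 180.0);
    assert!((decayed - 71_653.0).abs() < 1.0);
}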
use hickory_proto::udp::QuicLocalAddr; use rustls::ClientConfig as CryptoConfig; use std::future::Future; use std::net::SocketAddr; use hickory_proto::quic::{QuicClientConnect, QuicClientStream}; use proto::udp::DnsUdpSocket; use proto::xfer::{DnsExchange, DnsExchangeConnect}; use proto::TokioTime; use crate::config::TlsClientConfig; use crate::tls::CLIENT_CONFIG; #[allow(clippy::type_complexity)] #[allow(unused)] pub(crate) fn new_quic_stream( socket_addr: SocketAddr, bind_addr: Option, dns_name: String, client_config: Option, ) -> DnsExchangeConnect { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; let mut quic_builder = QuicClientStream::builder(); // TODO: normalize the crypto config settings, can we just use common ALPN settings? let crypto_config: CryptoConfig = (*client_config).clone(); quic_builder.crypto_config(crypto_config); if let Some(bind_addr) = bind_addr { quic_builder.bind_addr(bind_addr); } DnsExchange::connect(quic_builder.build(socket_addr, dns_name)) } #[allow(clippy::type_complexity)] pub(crate) fn new_quic_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, client_config: Option, ) -> DnsExchangeConnect where S: DnsUdpSocket + QuicLocalAddr + 'static, F: Future> + Send + 'static, { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(error) => return DnsExchange::error(error), } }; let mut quic_builder = QuicClientStream::builder(); // TODO: normalize the crypto config settings, can we just use common ALPN settings? let crypto_config: CryptoConfig = (*client_config).clone(); quic_builder.crypto_config(crypto_config); DnsExchange::connect(quic_builder.build_with_future(future, socket_addr, dns_name)) } #[cfg(all(test, any(feature = "native-certs", feature = "webpki-roots")))] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; use tokio::runtime::Runtime; use crate::config::{NameServerConfigGroup, ResolverConfig, ResolverOpts}; use crate::name_server::TokioConnectionProvider; use crate::TokioAsyncResolver; fn quic_test(config: ResolverConfig) { let io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new( config, ResolverOpts { try_tcp_on_error: true, ..ResolverOpts::default() }, TokioConnectionProvider::default(), ); let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } // check if there is another connection created let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } #[test] fn test_adguard_quic() { // AdGuard requires SNI. 
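// The shared CLIENT_CONFIG built in tls::dns_over_rustls disables SNI by default
// (DoT on port 853 does not need it), so this test re-enables it on a cloned
// config before building the QUIC name servers.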
let mut config = (**super::CLIENT_CONFIG.as_ref().unwrap()).clone(); config.enable_sni = true; let name_servers = NameServerConfigGroup::from_ips_quic( &[ IpAddr::from([94, 140, 14, 140]), IpAddr::from([94, 140, 15, 141]), IpAddr::from([0x2a10, 0x50c0, 0, 0, 0, 0, 0x1, 0xff]), IpAddr::from([0x2a10, 0x50c0, 0, 0, 0, 0, 0x2, 0xff]), ], 853, String::from("unfiltered.adguard-dns.com"), true, ) .with_client_config(Arc::new(config)); quic_test(ResolverConfig::from_parts(None, Vec::new(), name_servers)) } } hickory-resolver-0.24.0/src/resolver.rs000064400000000000000000000212041046102023000162130ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Structs for creating and using a Resolver use std::io; use std::net::IpAddr; use std::sync::Mutex; use proto::rr::domain::TryParseIp; use proto::rr::IntoName; use proto::rr::RecordType; use tokio::runtime::{self, Runtime}; use crate::config::{ResolverConfig, ResolverOpts}; use crate::error::*; use crate::lookup; use crate::lookup::Lookup; use crate::lookup_ip::LookupIp; use crate::name_server::TokioConnectionProvider; use crate::AsyncResolver; /// The Resolver is used for performing DNS queries. /// /// For forward (A) lookups, hostname -> IP address, see: `Resolver::lookup_ip` /// /// Special note about resource consumption. The Resolver and all Hickory DNS software is built around the Tokio async-io library. This synchronous Resolver is intended to be a simpler wrapper for of the [`AsyncResolver`]. To allow the `Resolver` to be [`Send`] + [`Sync`], the construction of the `AsyncResolver` is lazy, this means some of the features of the `AsyncResolver`, like performance based resolution via the most efficient `NameServer` will be lost (the lookup cache is shared across invocations of the `Resolver`). If these other features of the Hickory DNS Resolver are desired, please use the tokio based [`AsyncResolver`]. /// /// *Note: Threaded/Sync usage*: In multithreaded scenarios, the internal Tokio Runtime will block on an internal Mutex for the tokio Runtime in use. For higher performance, it's recommended to use the [`AsyncResolver`]. pub struct Resolver { // TODO: Mutex allows this to be Sync, another option would be to instantiate a thread_local, but that has other // drawbacks. One major issues, is if this Resolver is shared across threads, it will cause all to block on any // query. A TLS on the other hand would not, at the cost of only allowing a Resolver to be configured once per Thread runtime: Mutex, async_resolver: AsyncResolver, } macro_rules! lookup_fn { ($p:ident, $l:ty) => { /// Performs a lookup for the associated type. /// /// *hint* queries that end with a '.' are fully qualified names and are cheaper lookups /// /// # Arguments /// /// * `query` - a `&str` which parses to a domain name, failure to parse will return an error pub fn $p(&self, query: N) -> ResolveResult<$l> { let lookup = self.async_resolver.$p(query); self.runtime.lock()?.block_on(lookup) } }; ($p:ident, $l:ty, $t:ty) => { /// Performs a lookup for the associated type. /// /// # Arguments /// /// * `query` - a type which can be converted to `Name` via `From`. pub fn $p(&self, query: $t) -> ResolveResult<$l> { let lookup = self.async_resolver.$p(query); self.runtime.lock()?.block_on(lookup) } }; } impl Resolver { /// Constructs a new Resolver with the specified configuration. 
/// /// # Arguments /// * `config` - configuration for the resolver /// * `options` - resolver options for performing lookups /// /// # Returns /// /// A new `Resolver` or an error if there was an error with the configuration. pub fn new(config: ResolverConfig, options: ResolverOpts) -> io::Result { let mut builder = runtime::Builder::new_current_thread(); builder.enable_all(); let runtime = builder.build()?; let async_resolver = AsyncResolver::new(config, options, TokioConnectionProvider::default()); Ok(Self { runtime: Mutex::new(runtime), async_resolver, }) } /// Constructs a new Resolver with default config and default options. /// /// See [`ResolverConfig::default`] and [`ResolverOpts::default`] for more information. /// /// # Returns /// /// A new `Resolver` or an error if there was an error with the configuration. #[allow(clippy::should_implement_trait)] pub fn default() -> io::Result { Self::new(ResolverConfig::default(), ResolverOpts::default()) } /// Constructs a new Resolver with the system configuration. /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] #[cfg_attr( docsrs, doc(cfg(all(feature = "system-config", any(unix, target_os = "windows")))) )] pub fn from_system_conf() -> io::Result { let (config, options) = super::system_conf::read_system_conf()?; Self::new(config, options) } /// Flushes/Removes all entries from the cache pub fn clear_cache(&self) { self.async_resolver.clear_cache(); } /// Generic lookup for any RecordType /// /// *WARNING* This interface may change in the future, please use [`Self::lookup_ip`] or another variant for more stable interfaces. /// /// # Arguments /// /// * `name` - name of the record to lookup, if name is not a valid domain name, an error will be returned /// * `record_type` - type of record to lookup pub fn lookup(&self, name: N, record_type: RecordType) -> ResolveResult { let lookup = self.async_resolver.lookup(name, record_type); self.runtime.lock()?.block_on(lookup) } /// Performs a dual-stack DNS lookup for the IP for the given hostname. /// /// See the configuration and options parameters for controlling the way in which A(Ipv4) and AAAA(Ipv6) lookups will be performed. For the least expensive query a fully-qualified-domain-name, FQDN, which ends in a final `.`, e.g. `www.example.com.`, will only issue one query. Anything else will always incur the cost of querying the `ResolverConfig::domain` and `ResolverConfig::search`. /// /// # Arguments /// /// * `host` - string hostname, if this is an invalid hostname, an error will be returned. 
pub fn lookup_ip(&self, host: N) -> ResolveResult { let lookup = self.async_resolver.lookup_ip(host); self.runtime.lock()?.block_on(lookup) } lookup_fn!(reverse_lookup, lookup::ReverseLookup, IpAddr); lookup_fn!(ipv4_lookup, lookup::Ipv4Lookup); lookup_fn!(ipv6_lookup, lookup::Ipv6Lookup); lookup_fn!(mx_lookup, lookup::MxLookup); lookup_fn!(ns_lookup, lookup::NsLookup); lookup_fn!(soa_lookup, lookup::SoaLookup); lookup_fn!(srv_lookup, lookup::SrvLookup); lookup_fn!(tlsa_lookup, lookup::TlsaLookup); lookup_fn!(txt_lookup, lookup::TxtLookup); } #[cfg(test)] mod tests { #![allow(clippy::dbg_macro, clippy::print_stdout)] use std::net::*; use super::*; fn require_send_sync() {} #[test] fn test_resolver_sendable() { require_send_sync::(); } #[test] fn test_lookup() { let resolver = Resolver::new(ResolverConfig::default(), ResolverOpts::default()).unwrap(); let response = resolver.lookup_ip("www.example.com.").unwrap(); println!("response records: {response:?}"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } #[test] #[ignore] #[cfg(any(unix, target_os = "windows"))] #[cfg(feature = "system-config")] fn test_system_lookup() { let resolver = Resolver::from_system_conf().unwrap(); let response = resolver.lookup_ip("www.example.com.").unwrap(); println!("response records: {response:?}"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } } hickory-resolver-0.24.0/src/system_conf/mod.rs000064400000000000000000000021671046102023000174710ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! System configuration loading //! //! This module is responsible for parsing and returning the configuration from //! the host system. It will read from the default location on each operating //! system, e.g. most Unixes have this written to `/etc/resolv.conf` #![allow(missing_docs, unused_extern_crates)] #[cfg(unix)] #[cfg(feature = "system-config")] mod unix; #[cfg(unix)] #[cfg(feature = "system-config")] #[cfg_attr(docsrs, doc(cfg(all(feature = "system-config", unix))))] pub use self::unix::{parse_resolv_conf, read_system_conf}; #[cfg(windows)] #[cfg(feature = "system-config")] mod windows; #[cfg(target_os = "windows")] #[cfg(feature = "system-config")] #[cfg_attr(docsrs, doc(cfg(all(feature = "system-config", windows))))] pub use self::windows::read_system_conf; hickory-resolver-0.24.0/src/system_conf/unix.rs000064400000000000000000000175011046102023000176730ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! System configuration loading //! //! This module is responsible for parsing and returning the configuration from //! the host system. It will read from the default location on each operating //! system, e.g. 
most Unixes have this written to `/etc/resolv.conf` use std::fs::File; use std::io; use std::io::Read; use std::net::SocketAddr; use std::path::Path; use std::str::FromStr; use std::time::Duration; use resolv_conf; use crate::config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts}; use crate::error::ResolveResult; use crate::proto::rr::Name; const DEFAULT_PORT: u16 = 53; pub fn read_system_conf() -> ResolveResult<(ResolverConfig, ResolverOpts)> { read_resolv_conf("/etc/resolv.conf") } fn read_resolv_conf>(path: P) -> ResolveResult<(ResolverConfig, ResolverOpts)> { let mut data = String::new(); let mut file = File::open(path)?; file.read_to_string(&mut data)?; parse_resolv_conf(&data) } pub fn parse_resolv_conf>(data: T) -> ResolveResult<(ResolverConfig, ResolverOpts)> { let parsed_conf = resolv_conf::Config::parse(&data).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("Error parsing resolv.conf: {e}"), ) })?; into_resolver_config(parsed_conf) } // TODO: use a custom parsing error type maybe? fn into_resolver_config( parsed_config: resolv_conf::Config, ) -> ResolveResult<(ResolverConfig, ResolverOpts)> { let domain = if let Some(domain) = parsed_config.get_system_domain() { // The system domain name maybe appear to be valid to the resolv_conf // crate but actually be invalid. For example, if the hostname is "matt.schulte's computer" // In order to prevent a hostname which macOS or Windows would consider // valid from returning an error here we turn parse errors to options Name::from_str(domain.as_str()).ok() } else { None }; // nameservers let mut nameservers = Vec::::with_capacity(parsed_config.nameservers.len()); for ip in &parsed_config.nameservers { nameservers.push(NameServerConfig { socket_addr: SocketAddr::new(ip.into(), DEFAULT_PORT), protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }); nameservers.push(NameServerConfig { socket_addr: SocketAddr::new(ip.into(), DEFAULT_PORT), protocol: Protocol::Tcp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }); } if nameservers.is_empty() { tracing::warn!("no nameservers found in config"); } // search let mut search = vec![]; for search_domain in parsed_config.get_last_search_or_domain() { // Ignore invalid search domains if search_domain == "--" { continue; } search.push(Name::from_str_relaxed(search_domain).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("Error parsing resolv.conf: {e}"), ) })?); } let config = ResolverConfig::from_parts(domain, search, nameservers); let options = ResolverOpts { ndots: parsed_config.ndots as usize, timeout: Duration::from_secs(u64::from(parsed_config.timeout)), attempts: parsed_config.attempts as usize, ..ResolverOpts::default() }; Ok((config, options)) } #[cfg(test)] mod tests { use super::*; use proto::rr::Name; use std::env; use std::net::*; use std::str::FromStr; fn empty_config() -> ResolverConfig { ResolverConfig::from_parts(None, vec![], vec![]) } fn nameserver_config(ip: &str) -> [NameServerConfig; 2] { let addr = SocketAddr::new(IpAddr::from_str(ip).unwrap(), 53); [ NameServerConfig { socket_addr: addr, protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }, NameServerConfig { socket_addr: addr, protocol: Protocol::Tcp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = 
"dns-over-rustls")] tls_config: None, bind_addr: None, }, ] } fn tests_dir() -> String { let server_path = env::var("TDNS_WORKSPACE_ROOT").unwrap_or_else(|_| "../..".to_owned()); format!("{server_path}/crates/resolver/tests") } #[test] #[allow(clippy::redundant_clone)] fn test_name_server() { let parsed = parse_resolv_conf("nameserver 127.0.0.1").expect("failed"); let mut cfg = empty_config(); let nameservers = nameserver_config("127.0.0.1"); cfg.add_name_server(nameservers[0].clone()); cfg.add_name_server(nameservers[1].clone()); assert_eq!(cfg.name_servers(), parsed.0.name_servers()); assert_eq!(ResolverOpts::default(), parsed.1); } #[test] fn test_search() { let parsed = parse_resolv_conf("search localnet.").expect("failed"); let mut cfg = empty_config(); cfg.add_search(Name::from_str("localnet.").unwrap()); assert_eq!(cfg.search(), parsed.0.search()); assert_eq!(ResolverOpts::default(), parsed.1); } #[test] fn test_skips_invalid_search() { let parsed = parse_resolv_conf("\n\nnameserver 127.0.0.53\noptions edns0 trust-ad\nsearch -- lan\n") .expect("failed"); let mut cfg = empty_config(); { let nameservers = nameserver_config("127.0.0.53"); cfg.add_name_server(nameservers[0].clone()); cfg.add_name_server(nameservers[1].clone()); assert_eq!(cfg.name_servers(), parsed.0.name_servers()); assert_eq!(ResolverOpts::default(), parsed.1); } // This is the important part, that the invalid `--` is skipped during parsing { cfg.add_search(Name::from_str("lan").unwrap()); assert_eq!(cfg.search(), parsed.0.search()); assert_eq!(ResolverOpts::default(), parsed.1); } } #[test] fn test_underscore_in_search() { let parsed = parse_resolv_conf("search Speedport_000").expect("failed"); let mut cfg = empty_config(); cfg.add_search(Name::from_str_relaxed("Speedport_000.").unwrap()); assert_eq!(cfg.search(), parsed.0.search()); assert_eq!(ResolverOpts::default(), parsed.1); } #[test] fn test_domain() { let parsed = parse_resolv_conf("domain example.com").expect("failed"); let mut cfg = empty_config(); cfg.set_domain(Name::from_str("example.com").unwrap()); assert_eq!(cfg, parsed.0); assert_eq!(ResolverOpts::default(), parsed.1); } #[test] fn test_read_resolv_conf() { read_resolv_conf(format!("{}/resolv.conf-simple", tests_dir())).expect("simple failed"); read_resolv_conf(format!("{}/resolv.conf-macos", tests_dir())).expect("macos failed"); read_resolv_conf(format!("{}/resolv.conf-linux", tests_dir())).expect("linux failed"); } } hickory-resolver-0.24.0/src/system_conf/windows.rs000064400000000000000000000044411046102023000204010ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! 
System configuration loading for windows use std::net::SocketAddr; use std::str::FromStr; use ipconfig::computer::{get_domain, get_search_list, is_round_robin_enabled}; use ipconfig::get_adapters; use proto::rr::Name; use crate::config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts}; use crate::error::ResolveResult; /// Returns the name servers of the computer (of all adapters) fn get_name_servers() -> ResolveResult> { let adapters = get_adapters()?; let mut name_servers = vec![]; for dns_server in adapters .iter() .flat_map(|adapter| adapter.dns_servers().iter()) { let socket_addr = SocketAddr::new(*dns_server, 53); name_servers.push(NameServerConfig { socket_addr, protocol: Protocol::Udp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }); name_servers.push(NameServerConfig { socket_addr, protocol: Protocol::Tcp, tls_dns_name: None, trust_negative_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }); } Ok(name_servers) } pub fn read_system_conf() -> ResolveResult<(ResolverConfig, ResolverOpts)> { let name_servers = get_name_servers()?; let search_list: Vec = get_search_list()? .iter() .map(|x| Name::from_str(x)) .collect::, _>>()?; let domain = match get_domain()? { Some(domain) => Name::from_str(&domain)?, None => Name::root(), }; let config = ResolverConfig::from_parts(Some(domain), search_list, name_servers); let rotate = is_round_robin_enabled()?; let opts = ResolverOpts { rotate, ..Default::default() }; Ok((config, opts)) } hickory-resolver-0.24.0/src/tls/dns_over_native_tls.rs000064400000000000000000000021211046102023000212200ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #![cfg(feature = "dns-over-native-tls")] #![allow(dead_code)] use std::net::SocketAddr; use std::pin::Pin; use futures_util::future::Future; use proto::error::ProtoError; use proto::native_tls::{TlsClientStream, TlsClientStreamBuilder}; use proto::tcp::DnsTcpStream; use proto::BufDnsStreamHandle; #[allow(clippy::type_complexity)] pub(crate) fn new_tls_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, ) -> ( Pin, ProtoError>> + Send>>, BufDnsStreamHandle, ) where S: DnsTcpStream, F: Future> + Send + Unpin + 'static, { TlsClientStreamBuilder::new().build_with_future(future, socket_addr, dns_name) } hickory-resolver-0.24.0/src/tls/dns_over_openssl.rs000064400000000000000000000021131046102023000205340ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
#![cfg(feature = "dns-over-openssl")] #![allow(dead_code)] use std::net::SocketAddr; use std::pin::Pin; use futures_util::future::Future; use proto::error::ProtoError; use proto::openssl::{TlsClientStream, TlsClientStreamBuilder}; use proto::tcp::DnsTcpStream; use proto::BufDnsStreamHandle; #[allow(clippy::type_complexity)] pub(crate) fn new_tls_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, ) -> ( Pin, ProtoError>> + Send>>, BufDnsStreamHandle, ) where S: DnsTcpStream, F: Future> + Send + Unpin + 'static, { TlsClientStreamBuilder::new().build_with_future(future, socket_addr, dns_name) } hickory-resolver-0.24.0/src/tls/dns_over_rustls.rs000064400000000000000000000064301046102023000204130ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #![cfg(feature = "dns-over-rustls")] #![allow(dead_code)] use std::future; use std::io; use std::net::SocketAddr; use std::pin::Pin; use std::sync::Arc; use futures_util::future::Future; use once_cell::sync::Lazy; use rustls::{ClientConfig, RootCertStore}; use proto::error::ProtoError; use proto::rustls::tls_client_stream::tls_client_connect_with_future; use proto::rustls::TlsClientStream; use proto::tcp::DnsTcpStream; use proto::BufDnsStreamHandle; use crate::config::TlsClientConfig; pub(crate) static CLIENT_CONFIG: Lazy, ProtoError>> = Lazy::new(|| { #[cfg_attr( not(any(feature = "native-certs", feature = "webpki-roots")), allow(unused_mut) )] let mut root_store = RootCertStore::empty(); #[cfg(all(feature = "native-certs", not(feature = "webpki-roots")))] { use proto::error::ProtoErrorKind; let (added, ignored) = root_store.add_parsable_certificates(&rustls_native_certs::load_native_certs()?); if ignored > 0 { tracing::warn!( "failed to parse {} certificate(s) from the native root store", ignored, ); } if added == 0 { return Err(ProtoErrorKind::NativeCerts.into()); } } #[cfg(feature = "webpki-roots")] root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( ta.subject, ta.spki, ta.name_constraints, ) })); let mut client_config = ClientConfig::builder() .with_safe_default_cipher_suites() .with_safe_default_kx_groups() .with_safe_default_protocol_versions() .unwrap() .with_root_certificates(root_store) .with_no_client_auth(); // The port (853) of DOT is for dns dedicated, SNI is unnecessary. 
(ISP block by the SNI name) client_config.enable_sni = false; Ok(Arc::new(client_config)) }); #[allow(clippy::type_complexity)] pub(crate) fn new_tls_stream_with_future( future: F, socket_addr: SocketAddr, dns_name: String, client_config: Option, ) -> ( Pin, ProtoError>> + Send>>, BufDnsStreamHandle, ) where S: DnsTcpStream, F: Future> + Send + Unpin + 'static, { let client_config = if let Some(TlsClientConfig(client_config)) = client_config { client_config } else { match CLIENT_CONFIG.clone() { Ok(client_config) => client_config, Err(err) => { return ( Box::pin(future::ready(Err(err))), BufDnsStreamHandle::new(socket_addr).0, ) } } }; let (stream, handle) = tls_client_connect_with_future(future, socket_addr, dns_name, client_config); (Box::pin(stream), handle) } hickory-resolver-0.24.0/src/tls/mod.rs000064400000000000000000000056351046102023000157450ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #![cfg(feature = "dns-over-tls")] mod dns_over_native_tls; mod dns_over_openssl; mod dns_over_rustls; cfg_if! { if #[cfg(feature = "dns-over-rustls")] { pub(crate) use self::dns_over_rustls::new_tls_stream_with_future; #[cfg(any(feature = "dns-over-https-rustls", feature = "dns-over-quic", feature = "dns-over-h3"))] pub(crate) use self::dns_over_rustls::CLIENT_CONFIG; } else if #[cfg(feature = "dns-over-native-tls")] { pub(crate) use self::dns_over_native_tls::new_tls_stream_with_future; } else if #[cfg(feature = "dns-over-openssl")] { pub(crate) use self::dns_over_openssl::new_tls_stream_with_future; } else { compile_error!("One of the dns-over-rustls, dns-over-native-tls, or dns-over-openssl must be enabled for dns-over-tls features"); } } #[cfg(any(feature = "dns-over-native-tls", feature = "dns-over-rustls"))] #[cfg(any(feature = "webpki-roots", feature = "native-certs"))] #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tokio::runtime::Runtime; use crate::config::{ResolverConfig, ResolverOpts}; use crate::name_server::TokioConnectionProvider; use crate::TokioAsyncResolver; fn tls_test(config: ResolverConfig) { let io_loop = Runtime::new().unwrap(); let resolver = TokioAsyncResolver::new( config, ResolverOpts { try_tcp_on_error: true, ..ResolverOpts::default() }, TokioConnectionProvider::default(), ); let response = io_loop .block_on(resolver.lookup_ip("www.example.com.")) .expect("failed to run lookup"); assert_eq!(response.iter().count(), 1); for address in response.iter() { if address.is_ipv4() { assert_eq!(address, IpAddr::V4(Ipv4Addr::new(93, 184, 216, 34))); } else { assert_eq!( address, IpAddr::V6(Ipv6Addr::new( 0x2606, 0x2800, 0x220, 0x1, 0x248, 0x1893, 0x25c8, 0x1946, )) ); } } } #[test] #[cfg(not(windows))] // flakes on AppVeyor... fn test_google_tls() { tls_test(ResolverConfig::google_tls()) } #[test] #[cfg(not(windows))] // flakes on AppVeyor... fn test_cloudflare_tls() { tls_test(ResolverConfig::cloudflare_tls()) } #[test] #[cfg(not(windows))] // flakes on AppVeyor... fn test_quad9_tls() { tls_test(ResolverConfig::quad9_tls()) } } hickory-resolver-0.24.0/tests/hosts000064400000000000000000000005071046102023000154450ustar 00000000000000## # Host Database # # localhost is used to configure the loopback interface # when the system is booting. Do not change this entry. 
## 127.0.0.1 localhost 255.255.255.255 broadcasthost ::1 localhost fe80::1%lo0 localhost 10.0.1.102 example.com 10.0.1.111 a.example.com b.example.com 10.1.0.104hickory-resolver-0.24.0/tests/resolv.conf-linux000064400000000000000000000007151046102023000177010ustar 00000000000000# Not all of these are supported by Hickory DNS # They are testing that they don't break parsing options ndots:8 timeout:8 attempts:8 domain example.com search example.com sub.example.com nameserver 2001:4860:4860::8888 nameserver 2001:4860:4860::8844 nameserver 8.8.8.8 nameserver 8.8.4.4 # some options not supported by Hickory DNS options rotate options inet6 no-tld-query # A basic option not supported sortlist 130.155.160.0/255.255.240.0 130.155.0.0 hickory-resolver-0.24.0/tests/resolv.conf-macos000064400000000000000000000006351046102023000176450ustar 00000000000000# # Mac OS X Notice # # This file is not used by the host name and address resolution # or the DNS query routing mechanisms used by most processes on # this Mac OS X system. # # This file is automatically generated. # options ndots:8 timeout:8 attempts:8 domain example.com. search example.com. sub.example.com. nameserver 2001:4860:4860::8888 nameserver 2001:4860:4860::8844 nameserver 8.8.8.8 nameserver 8.8.4.4hickory-resolver-0.24.0/tests/resolv.conf-simple000064400000000000000000000000451046102023000200270ustar 00000000000000nameserver 8.8.8.8 nameserver 8.8.4.4
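// A minimal illustrative sketch (not taken from the crate sources): feeding a config
// like the resolv.conf-simple fixture above through the public parser shown in
// src/system_conf/unix.rs. It assumes a dependent crate with hickory-resolver's
// "system-config" feature enabled on a unix target.
#[cfg(all(unix, feature = "system-config"))]
fn parse_simple_conf_sketch() -> Result<(), Box<dyn std::error::Error>> {
    use hickory_resolver::system_conf::parse_resolv_conf;

    let (config, _opts) = parse_resolv_conf("nameserver 8.8.8.8\nnameserver 8.8.4.4\n")?;
    // Each nameserver line yields one UDP and one TCP NameServerConfig on port 53.
    assert_eq!(config.name_servers().len(), 4);
    Ok(())
}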