trust-dns-server-0.22.0/.cargo_vcs_info.json0000644000000001530000000000100143630ustar { "git": { "sha1": "19b4dc40c046b8d49991bd7b5969333771774f1b" }, "path_in_vcs": "crates/server" }trust-dns-server-0.22.0/Cargo.toml0000644000000113610000000000100123640ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "trust-dns-server" version = "0.22.0" authors = ["Benjamin Fry "] description = """ Trust-DNS is a safe and secure DNS server with DNSSec support. Eventually this could be a replacement for BIND9. The DNSSec support allows for live signing of all records; it does not currently support records signed offline. The server supports dynamic DNS with SIG0 authenticated requests. Trust-DNS is based on the Tokio and Futures libraries, which means it should be easily integrated into other software that also uses those libraries. """ homepage = "http://www.trust-dns.org/index.html" documentation = "https://docs.rs/trust-dns-server" readme = "README.md" keywords = [ "DNS", "BIND", "dig", "named", "dnssec", ] categories = ["network-programming"] license = "MIT/Apache-2.0" repository = "https://github.com/bluejekyll/trust-dns" [package.metadata.docs.rs] all-features = true default-target = "x86_64-unknown-linux-gnu" targets = [ "x86_64-apple-darwin", "x86_64-pc-windows-msvc", ] rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "trust_dns_server" path = "src/lib.rs" [dependencies.async-trait] version = "0.1.43" [dependencies.bytes] version = "1" [dependencies.cfg-if] version = "1" [dependencies.enum-as-inner] version = "0.5" [dependencies.futures-executor] version = "0.3.5" features = ["std"] default-features = false [dependencies.futures-util] version = "0.3.5" features = ["std"] default-features = false [dependencies.h2] version = "0.3.0" features = ["stream"] optional = true [dependencies.http] version = "0.2" optional = true [dependencies.openssl] version = "0.10" features = [ "v102", "v110", ] optional = true [dependencies.rusqlite] version = "0.28.0" features = [ "bundled", "time", ] optional = true [dependencies.rustls] version = "0.20" optional = true [dependencies.serde] version = "1.0.114" features = ["derive"] [dependencies.thiserror] version = "1.0.20" [dependencies.time] version = "0.3" [dependencies.tokio] version = "1.0" features = [ "net", "sync", ] [dependencies.tokio-openssl] version = "0.6.0" optional = true [dependencies.tokio-rustls] version = "0.23.0" optional = true [dependencies.toml] version = "0.5" [dependencies.tracing] version = "0.1.30" [dependencies.trust-dns-client] version = "0.22.0" [dependencies.trust-dns-proto] version = "0.22.0" [dependencies.trust-dns-recursor] version = "0.22.0" features = ["serde-config"] optional = true [dependencies.trust-dns-resolver] version = "0.22.0" features = ["serde-config"] optional = true [dev-dependencies.tokio] version = "1.0" features = [ "macros", "rt", ] [dev-dependencies.tracing-subscriber] version = "0.3" features = [ "std", "fmt", "env-filter", ] [features] backtrace = ["trust-dns-proto/backtrace"] dns-over-https = [ "h2", "http", "trust-dns-proto/dns-over-https", ]
dns-over-https-rustls = [ "dns-over-https", "trust-dns-proto/dns-over-https-rustls", "trust-dns-client/dns-over-https-rustls", "trust-dns-resolver/dns-over-https-rustls", "dns-over-rustls", "tokio-rustls", ] dns-over-openssl = [ "dns-over-tls", "dnssec-openssl", "trust-dns-proto/dns-over-openssl", "tokio-openssl", "trust-dns-client/dns-over-openssl", "trust-dns-resolver/dns-over-openssl", ] dns-over-quic = [ "dns-over-rustls", "trust-dns-proto/dns-over-quic", "trust-dns-resolver/dns-over-quic", ] dns-over-rustls = [ "dns-over-tls", "dnssec-ring", "rustls", "trust-dns-proto/dns-over-rustls", "trust-dns-client/dns-over-rustls", "trust-dns-resolver/dns-over-rustls", "tokio-rustls", ] dns-over-tls = [] dnssec = [] dnssec-openssl = [ "dnssec", "openssl", "trust-dns-client/dnssec-openssl", "trust-dns-proto/dnssec-openssl", "trust-dns-resolver/dnssec-openssl", ] dnssec-ring = [ "dnssec", "trust-dns-client/dnssec-ring", "trust-dns-proto/dnssec-ring", "trust-dns-resolver/dnssec-ring", ] recursor = ["trust-dns-recursor"] resolver = ["trust-dns-resolver"] sqlite = ["rusqlite"] testing = [] tls = ["dns-over-openssl"] tls-openssl = ["dns-over-openssl"] [badges.codecov] branch = "main" repository = "bluejekyll/trust-dns" service = "github" [badges.maintenance] status = "actively-developed" trust-dns-server-0.22.0/Cargo.toml.orig000064400000000000000000000117661046102023000160520ustar 00000000000000[package] name = "trust-dns-server" version = "0.22.0" authors = ["Benjamin Fry "] edition = "2018" # A short blurb about the package. This is not rendered in any format when # uploaded to crates.io (aka this is not markdown) description = """ Trust-DNS is a safe and secure DNS server with DNSSec support. Eventually this could be a replacement for BIND9. The DNSSec support allows for live signing of all records; it does not currently support records signed offline. The server supports dynamic DNS with SIG0 authenticated requests. Trust-DNS is based on the Tokio and Futures libraries, which means it should be easily integrated into other software that also uses those libraries. """ # These URLs point to more information about the repository documentation = "https://docs.rs/trust-dns-server" homepage = "http://www.trust-dns.org/index.html" repository = "https://github.com/bluejekyll/trust-dns" # This points to a file in the repository (relative to this Cargo.toml). The # contents of this file are stored and indexed in the registry. readme = "README.md" # This is a small list of keywords used to categorize and search for this # package. keywords = ["DNS", "BIND", "dig", "named", "dnssec"] categories = ["network-programming"] # This is a string description of the license for this package. Currently # crates.io will validate the license provided against a whitelist of known # license identifiers from http://spdx.org/licenses/. Multiple licenses can # be separated with a `/` license = "MIT/Apache-2.0" [badges] #github-actions = { repository = "bluejekyll/trust-dns", branch = "main", workflow = "test" } codecov = { repository = "bluejekyll/trust-dns", branch = "main", service = "github" } maintenance = { status = "actively-developed" } [features] backtrace = ["trust-dns-proto/backtrace"] dnssec-openssl = ["dnssec", "openssl", "trust-dns-client/dnssec-openssl", "trust-dns-proto/dnssec-openssl", "trust-dns-resolver/dnssec-openssl"] dnssec-ring = ["dnssec", "trust-dns-client/dnssec-ring", "trust-dns-proto/dnssec-ring", "trust-dns-resolver/dnssec-ring"] dnssec = [] # Recursive Resolution is Experimental!
recursor = ["trust-dns-recursor"] resolver = ["trust-dns-resolver"] sqlite = ["rusqlite"] # TODO: Need to figure out how to be consistent with ring/openssl usage... # dns-over-https-openssl = ["dns-over-openssl", "trust-dns-client/dns-over-https-openssl", "dns-over-https"] dns-over-https-rustls = ["dns-over-https", "trust-dns-proto/dns-over-https-rustls", "trust-dns-client/dns-over-https-rustls", "trust-dns-resolver/dns-over-https-rustls", "dns-over-rustls", "tokio-rustls"] dns-over-https = ["h2", "http", "trust-dns-proto/dns-over-https"] # TODO: migrate all tls and tls-openssl features to dns-over-tls, et al dns-over-openssl = ["dns-over-tls", "dnssec-openssl", "trust-dns-proto/dns-over-openssl", "tokio-openssl", "trust-dns-client/dns-over-openssl", "trust-dns-resolver/dns-over-openssl"] dns-over-rustls = ["dns-over-tls", "dnssec-ring", "rustls", "trust-dns-proto/dns-over-rustls", "trust-dns-client/dns-over-rustls", "trust-dns-resolver/dns-over-rustls", "tokio-rustls"] dns-over-tls = [] dns-over-quic = ["dns-over-rustls", "trust-dns-proto/dns-over-quic", "trust-dns-resolver/dns-over-quic"] # This is a deprecated feature... tls-openssl = ["dns-over-openssl"] tls = ["dns-over-openssl"] # WARNING: there is a bug in the mutual tls auth code at the moment see issue #100 # mtls = ["trust-dns-client/mtls"] testing = [] [lib] name = "trust_dns_server" path = "src/lib.rs" [dependencies] async-trait = "0.1.43" bytes = "1" cfg-if = "1" enum-as-inner = "0.5" futures-executor = { version = "0.3.5", default-features = false, features = ["std"] } futures-util = { version = "0.3.5", default-features = false, features = ["std"] } h2 = { version = "0.3.0", features = ["stream"], optional = true } http = { version = "0.2", optional = true } openssl = { version = "0.10", features = ["v102", "v110"], optional = true } rusqlite = { version = "0.28.0", features = ["bundled", "time"], optional = true } rustls = { version = "0.20", optional = true } serde = { version = "1.0.114", features = ["derive"] } thiserror = "1.0.20" time = "0.3" tracing = "0.1.30" tokio = { version = "1.0", features = ["net", "sync"] } tokio-openssl = { version = "0.6.0", optional = true } tokio-rustls = { version = "0.23.0", optional = true } toml = "0.5" trust-dns-client= { version = "0.22.0", path = "../client" } trust-dns-proto = { version = "0.22.0", path = "../proto" } trust-dns-recursor = { version = "0.22.0", path = "../recursor", features = ["serde-config"], optional = true } trust-dns-resolver = { version = "0.22.0", path = "../resolver", features = ["serde-config"], optional = true } [dev-dependencies] tokio = { version="1.0", features = ["macros", "rt"] } tracing-subscriber = { version = "0.3", features = ["std", "fmt", "env-filter"] } [package.metadata.docs.rs] all-features = true default-target = "x86_64-unknown-linux-gnu" targets = ["x86_64-apple-darwin", "x86_64-pc-windows-msvc"] rustdoc-args = ["--cfg", "docsrs"] trust-dns-server-0.22.0/LICENSE-APACHE000064400000000000000000000261361046102023000151100ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. trust-dns-server-0.22.0/LICENSE-MIT000064400000000000000000000021131046102023000146050ustar 00000000000000Copyright (c) 2015 The trust-dns Developers Copyright (c) 2017 Google LLC. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. trust-dns-server-0.22.0/README.md000064400000000000000000000034051046102023000144350ustar 00000000000000# Overview Trust-DNS Server is a library which implements the zone authority functionality. This library contains basic implementations for DNS zone hosting. It is capable of performing live signing of all records in the zone, serving the DNSSec RRSIG records associated with all records in a zone. There is also a `named` binary that can be generated from the library with `cargo install trust-dns`. Dynamic updates are supported via `SIG0` (an mTLS authentication method is under development). ## Features - Dynamic Update with sqlite journaling backend (SIG0) - DNSSEC online signing (NSEC not NSEC3) - DNS over TLS (DoT) - DNS over HTTPS (DoH) - Forwarding stub resolver - ANAME resolution, for mapping zone aliases to A and AAAA records - Additionals section generation for aliasing record types ## Future goals - Distributed dynamic DNS updates, with consensus - mTLS based authorization for Dynamic Updates - Online NSEC creation for queries - Full hint based resolving - Maybe NSEC3 and/or NSEC5 support ## Minimum Rust Version The current minimum rustc version for this project is `1.59` ## Versioning Trust-DNS does its best to follow semver. Trust-DNS will be promoted to 1.0 upon stabilization of the publicly exposed APIs. This does not mean that Trust-DNS will necessarily break on upgrades between 0.x updates. Whenever possible, old APIs will be deprecated with notes on what replaced those deprecations. Trust-DNS will make a best effort to never break software which depends on it due to API changes, though this cannot be guaranteed. Deprecated interfaces will be maintained for at minimum one major release after the one in which they were deprecated (where possible), with the exception of the upgrade to 1.0 where all deprecated interfaces will be planned to be removed. trust-dns-server-0.22.0/src/authority/auth_lookup.rs000064400000000000000000000306341046102023000207010ustar 00000000000000// Copyright 2015-2017 Benjamin Fry // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms.
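//! Lookup result types returned by an `Authority`: `AuthLookup`, `AnyRecords`, and `LookupRecords`, plus their borrowing iterators.
//!
//! A minimal sketch of how a caller typically consumes an `AuthLookup` (the `lookup` binding is hypothetical and stands in for a value produced elsewhere, e.g. by `Authority::lookup`):
//!
//! ```ignore
//! if !lookup.was_empty() {
//!     // iterate the answer records without consuming the lookup
//!     for record in lookup.iter() {
//!         println!("{}", record);
//!     }
//! }
//! ```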
use std::iter::Chain; use std::slice::Iter; use std::sync::Arc; use cfg_if::cfg_if; use crate::authority::{LookupObject, LookupOptions}; use crate::client::rr::LowerName; use crate::proto::rr::{Record, RecordSet, RecordType, RrsetRecords}; /// The result of a lookup on an Authority /// /// # Lifetimes /// /// * `'c` - the catalogue lifetime /// * `'r` - the recordset lifetime, subset of 'c /// * `'q` - the queries lifetime #[derive(Debug)] #[allow(clippy::large_enum_variant)] pub enum AuthLookup { /// No records Empty, // TODO: change the result of a lookup to a set of chained iterators... /// Records Records { /// Authoritative answers answers: LookupRecords, /// Optional set of LookupRecords additionals: Option, }, /// Soa only differs from Records in that the lifetime on the name is from the authority, and not the query SOA(LookupRecords), /// An axfr starts with soa, chained to all the records, then another soa... AXFR { /// The first SOA record in an AXFR response start_soa: LookupRecords, /// The records to return records: LookupRecords, /// The last SOA record of an AXFR (matches the first) end_soa: LookupRecords, }, } impl AuthLookup { /// Construct an answer with additional section pub fn answers(answers: LookupRecords, additionals: Option) -> Self { Self::Records { answers, additionals, } } /// Returns true if either the associated Records are empty, or this is a NameExists or NxDomain pub fn is_empty(&self) -> bool { // TODO: this needs to be cheap self.was_empty() } /// This is an NxDomain or NameExists, and has no associated records /// /// this consumes the iterator, and verifies it is empty pub fn was_empty(&self) -> bool { self.iter().count() == 0 } /// Conversion to an iterator pub fn iter(&self) -> AuthLookupIter<'_> { self.into_iter() } /// Does not panic, but will return no records if it is not of that type pub fn unwrap_records(self) -> LookupRecords { match self { // TODO: this is ugly, what about the additionals? Self::Records { answers, .. } => answers, _ => LookupRecords::default(), } } /// Takes the additional records, leaving behind None pub fn take_additionals(&mut self) -> Option { match self { Self::Records { ref mut additionals, .. } => additionals.take(), _ => None, } } } impl LookupObject for AuthLookup { fn is_empty(&self) -> bool { Self::is_empty(self) } fn iter<'a>(&'a self) -> Box + Send + 'a> { let boxed_iter = Self::iter(self); Box::new(boxed_iter) } fn take_additionals(&mut self) -> Option> { let additionals = Self::take_additionals(self); additionals.map(|a| Box::new(a) as Box) } } impl Default for AuthLookup { fn default() -> Self { Self::Empty } } impl<'a> IntoIterator for &'a AuthLookup { type Item = &'a Record; type IntoIter = AuthLookupIter<'a>; fn into_iter(self) -> Self::IntoIter { match self { AuthLookup::Empty => AuthLookupIter::Empty, // TODO: what about the additionals? is IntoIterator a bad idea? AuthLookup::Records { answers: r, .. 
} | AuthLookup::SOA(r) => { AuthLookupIter::Records(r.into_iter()) } AuthLookup::AXFR { start_soa, records, end_soa, } => AuthLookupIter::AXFR(start_soa.into_iter().chain(records).chain(end_soa)), } } } /// An iterator over an Authority Lookup #[allow(clippy::large_enum_variant)] pub enum AuthLookupIter<'r> { /// The empty set Empty, /// An iteration over a set of Records Records(LookupRecordsIter<'r>), /// An iteration over an AXFR AXFR(Chain, LookupRecordsIter<'r>>, LookupRecordsIter<'r>>), } impl<'r> Iterator for AuthLookupIter<'r> { type Item = &'r Record; fn next(&mut self) -> Option { match self { AuthLookupIter::Empty => None, AuthLookupIter::Records(i) => i.next(), AuthLookupIter::AXFR(i) => i.next(), } } } impl<'a> Default for AuthLookupIter<'a> { fn default() -> Self { AuthLookupIter::Empty } } impl From for AuthLookup { fn from(lookup: LookupRecords) -> Self { Self::Records { answers: lookup, additionals: None, } } } /// An iterator over an ANY query for Records. /// /// The length of this result cannot be known without consuming the iterator. /// /// # Lifetimes /// /// * `'r` - the record_set's lifetime, from the catalog /// * `'q` - the lifetime of the query/request #[derive(Debug)] pub struct AnyRecords { lookup_options: LookupOptions, rrsets: Vec>, query_type: RecordType, query_name: LowerName, } impl AnyRecords { /// construct a new lookup of any set of records pub fn new( lookup_options: LookupOptions, // TODO: potentially very expensive rrsets: Vec>, query_type: RecordType, query_name: LowerName, ) -> Self { Self { lookup_options, rrsets, query_type, query_name, } } fn iter(&self) -> AnyRecordsIter<'_> { self.into_iter() } } impl<'r> IntoIterator for &'r AnyRecords { type Item = &'r Record; type IntoIter = AnyRecordsIter<'r>; fn into_iter(self) -> Self::IntoIter { AnyRecordsIter { lookup_options: self.lookup_options, // TODO: potentially very expensive rrsets: self.rrsets.iter(), rrset: None, records: None, query_type: self.query_type, query_name: &self.query_name, } } } /// An iteration over a lookup for any Records #[allow(unused)] pub struct AnyRecordsIter<'r> { lookup_options: LookupOptions, rrsets: Iter<'r, Arc>, rrset: Option<&'r RecordSet>, records: Option>, query_type: RecordType, query_name: &'r LowerName, } impl<'r> Iterator for AnyRecordsIter<'r> { type Item = &'r Record; fn next(&mut self) -> Option { use std::borrow::Borrow; let query_type = self.query_type; let query_name = self.query_name; loop { if let Some(ref mut records) = self.records { let record = records .by_ref() .filter(|rr_set| { query_type == RecordType::ANY || rr_set.record_type() != RecordType::SOA }) .find(|rr_set| { query_type == RecordType::AXFR || &LowerName::from(rr_set.name()) == query_name }); if record.is_some() { return record; } } self.rrset = self.rrsets.next().map(Borrow::borrow); // if there are no more RecordSets, then return self.rrset?; // getting here, we must have exhausted our records from the rrset cfg_if! { if #[cfg(feature = "dnssec")] { self.records = Some( self.rrset .expect("rrset should not be None at this point") .records(self.lookup_options.is_dnssec(), self.lookup_options.supported_algorithms()), ); } else { self.records = Some(self.rrset.expect("rrset should not be None at this point").records_without_rrsigs()); } } } } } /// The result of a lookup #[derive(Debug)] pub enum LookupRecords { /// The empty set of records Empty, /// The associate records Records { /// LookupOptions for the request, e.g. 
dnssec and supported algorithms lookup_options: LookupOptions, /// the records found based on the query records: Arc<RecordSet>, }, /// Vec of disjoint record sets ManyRecords(LookupOptions, Vec<Arc<RecordSet>>), // TODO: need a better option for very large zone xfrs... /// A generic lookup response where anything is desired AnyRecords(AnyRecords), } impl LookupRecords { /// Construct a new LookupRecords pub fn new(lookup_options: LookupOptions, records: Arc<RecordSet>) -> Self { Self::Records { lookup_options, records, } } /// Construct a new LookupRecords over a set of RecordSets pub fn many(lookup_options: LookupOptions, mut records: Vec<Arc<RecordSet>>) -> Self { // we're reversing the records because they are output in reverse order, via pop() records.reverse(); Self::ManyRecords(lookup_options, records) } /// This is an NxDomain or NameExists, and has no associated records /// /// this consumes the iterator, and verifies it is empty pub fn was_empty(&self) -> bool { self.iter().count() == 0 } /// Conversion to an iterator pub fn iter(&self) -> LookupRecordsIter<'_> { self.into_iter() } } impl Default for LookupRecords { fn default() -> Self { Self::Empty } } impl<'a> IntoIterator for &'a LookupRecords { type Item = &'a Record; type IntoIter = LookupRecordsIter<'a>; #[allow(unused_variables)] fn into_iter(self) -> Self::IntoIter { match self { LookupRecords::Empty => LookupRecordsIter::Empty, LookupRecords::Records { lookup_options, records, } => LookupRecordsIter::RecordsIter( lookup_options.rrset_with_supported_algorithms(records), ), LookupRecords::ManyRecords(lookup_options, r) => LookupRecordsIter::ManyRecordsIter( r.iter() .map(|r| lookup_options.rrset_with_supported_algorithms(r)) .collect(), None, ), LookupRecords::AnyRecords(r) => LookupRecordsIter::AnyRecordsIter(r.iter()), } } } /// Iterator over lookup records pub enum LookupRecordsIter<'r> { /// An iteration over batch record type results AnyRecordsIter(AnyRecordsIter<'r>), /// An iteration over a single RecordSet RecordsIter(RrsetRecords<'r>), /// An iteration over many rrsets ManyRecordsIter(Vec<RrsetRecords<'r>>, Option<RrsetRecords<'r>>), /// An empty set Empty, } impl<'r> Default for LookupRecordsIter<'r> { fn default() -> Self { LookupRecordsIter::Empty } } impl<'r> Iterator for LookupRecordsIter<'r> { type Item = &'r Record; fn next(&mut self) -> Option<Self::Item> { match self { LookupRecordsIter::Empty => None, LookupRecordsIter::AnyRecordsIter(current) => current.next(), LookupRecordsIter::RecordsIter(current) => current.next(), LookupRecordsIter::ManyRecordsIter(set, ref mut current) => loop { if let Some(o) = current.as_mut().and_then(Iterator::next) { return Some(o); } *current = set.pop(); if current.is_none() { return None; } }, } } } impl From<AnyRecords> for LookupRecords { fn from(rrset_records: AnyRecords) -> Self { Self::AnyRecords(rrset_records) } } impl LookupObject for LookupRecords { fn is_empty(&self) -> bool { Self::was_empty(self) } fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Record> + Send + 'a> { Box::new(self.iter()) } fn take_additionals(&mut self) -> Option<Box<dyn LookupObject>> { None } } trust-dns-server-0.22.0/src/authority/authority.rs000064400000000000000000000160611046102023000203750ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //!
All authority related types use cfg_if::cfg_if; #[cfg(feature = "dnssec")] use crate::client::{ proto::rr::dnssec::rdata::key::KEY, rr::dnssec::{DnsSecResult, SigSigner, SupportedAlgorithms}, rr::Name, }; use crate::{ authority::{LookupError, MessageRequest, UpdateResult, ZoneType}, client::rr::{LowerName, RecordSet, RecordType}, proto::rr::RrsetRecords, server::RequestInfo, }; /// LookupOptions that specify different options from the client to include or exclude various records in the response. /// /// For example, `is_dnssec` will include `RRSIG` in the response, `supported_algorithms` will only include a subset of /// `RRSIG` based on the algorithms supported by the request. #[derive(Clone, Copy, Debug, Default)] pub struct LookupOptions { is_dnssec: bool, #[cfg(feature = "dnssec")] supported_algorithms: SupportedAlgorithms, } /// Lookup Options for the request to the authority impl LookupOptions { /// Return a new LookupOptions #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn for_dnssec(is_dnssec: bool, supported_algorithms: SupportedAlgorithms) -> Self { Self { is_dnssec, supported_algorithms, } } /// Specify that this lookup should return DNSSEC related records as well, e.g. RRSIG #[allow(clippy::needless_update)] pub fn set_is_dnssec(self, val: bool) -> Self { Self { is_dnssec: val, ..self } } /// If true this lookup should return DNSSEC related records as well, e.g. RRSIG pub fn is_dnssec(&self) -> bool { self.is_dnssec } /// Specify the algorithms for which DNSSEC records should be returned #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn set_supported_algorithms(self, val: SupportedAlgorithms) -> Self { Self { supported_algorithms: val, ..self } } /// The algorithms for which DNSSEC records should be returned #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn supported_algorithms(&self) -> SupportedAlgorithms { self.supported_algorithms } /// Returns the subset of the rrset limited to the supported_algorithms pub fn rrset_with_supported_algorithms<'r>( &self, record_set: &'r RecordSet, ) -> RrsetRecords<'r> { cfg_if! { if #[cfg(feature = "dnssec")] { record_set.records( self.is_dnssec(), self.supported_algorithms(), ) } else { record_set.records_without_rrsigs() } } } } /// Authority implementations can be used with a `Catalog` #[async_trait::async_trait] pub trait Authority: Send + Sync { /// Result of a lookup type Lookup: Send + Sync + Sized + 'static; /// What type is this zone fn zone_type(&self) -> ZoneType; /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool; /// Perform a dynamic update of a zone async fn update(&self, update: &MessageRequest) -> UpdateResult<bool>; /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName; /// Looks up all Resource Records matching the given `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup. `RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well. /// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records.
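    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch (not a compiled doctest) of calling `lookup` from an
    /// async context; `authority` stands in for some concrete `Authority` implementation and
    /// `name` for a `Name` being queried:
    ///
    /// ```ignore
    /// // query for A records without requesting DNSSEC records (RRSIGs)
    /// let answers = authority
    ///     .lookup(&LowerName::from(&name), RecordType::A, LookupOptions::default())
    ///     .await?;
    /// ```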
async fn lookup( &self, name: &LowerName, rtype: RecordType, lookup_options: LookupOptions, ) -> Result<Self::Lookup, LookupError>; /// Using the specified query, perform a lookup against this zone. /// /// # Arguments /// /// * `query` - the query to perform the lookup with. /// * `is_secure` - if true, then RRSIG records (if this is a secure zone) will be returned. /// /// # Return value /// /// Returns a vector containing the results of the query; it will be empty if not found. If /// `is_secure` is true, in the case of no records found then NSEC records will be returned. async fn search( &self, request: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result<Self::Lookup, LookupError>; /// Get the NS, NameServer, record for the zone async fn ns(&self, lookup_options: LookupOptions) -> Result<Self::Lookup, LookupError> { self.lookup(self.origin(), RecordType::NS, lookup_options) .await } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result<Self::Lookup, LookupError>; /// Returns the SOA of the authority. /// /// *Note*: This will only return the SOA. If this is fulfilling a request, a standard lookup /// should be used; see `soa_secure()`, which will optionally return RRSIGs. async fn soa(&self) -> Result<Self::Lookup, LookupError> { // SOA should be origin|SOA self.lookup(self.origin(), RecordType::SOA, LookupOptions::default()) .await } /// Returns the SOA record for the zone async fn soa_secure(&self, lookup_options: LookupOptions) -> Result<Self::Lookup, LookupError> { self.lookup(self.origin(), RecordType::SOA, lookup_options) .await } } /// Extension to Authority to allow for DNSSEC features #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[async_trait::async_trait] pub trait DnssecAuthority: Authority { /// Add a (Sig0) key that is authorized to perform updates against this authority async fn add_update_auth_key(&self, name: Name, key: KEY) -> DnsSecResult<()>; /// Add Signer async fn add_zone_signing_key(&self, signer: SigSigner) -> DnsSecResult<()>; /// Sign the zone for DNSSEC async fn secure_zone(&self) -> DnsSecResult<()>; } trust-dns-server-0.22.0/src/authority/authority_object.rs000064400000000000000000000210021046102023000217140ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! All authority related types use std::sync::Arc; use tracing::debug; use crate::{ authority::{Authority, LookupError, LookupOptions, MessageRequest, UpdateResult, ZoneType}, client::rr::{LowerName, Record, RecordType}, server::RequestInfo, }; /// An Object safe Authority #[async_trait::async_trait] pub trait AuthorityObject: Send + Sync { /// Clone the object fn box_clone(&self) -> Box<dyn AuthorityObject>; /// What type is this zone fn zone_type(&self) -> ZoneType; /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool; /// Perform a dynamic update of a zone async fn update(&self, update: &MessageRequest) -> UpdateResult<bool>; /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName; /// Looks up all Resource Records matching the given `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup.
`RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well. /// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records. async fn lookup( &self, name: &LowerName, rtype: RecordType, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError>; /// Using the specified query, perform a lookup against this zone. /// /// # Arguments /// /// * `query` - the query to perform the lookup with. /// * `is_secure` - if true, then RRSIG records (if this is a secure zone) will be returned. /// /// # Return value /// /// Returns a vector containing the results of the query; it will be empty if not found. If /// `is_secure` is true, in the case of no records found then NSEC records will be returned. async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError>; /// Get the NS, NameServer, record for the zone async fn ns( &self, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError> { self.lookup(self.origin(), RecordType::NS, lookup_options) .await } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError>; /// Returns the SOA of the authority. /// /// *Note*: This will only return the SOA. If this is fulfilling a request, a standard lookup /// should be used; see `soa_secure()`, which will optionally return RRSIGs. async fn soa(&self) -> Result<Box<dyn LookupObject>, LookupError> { // SOA should be origin|SOA self.lookup(self.origin(), RecordType::SOA, LookupOptions::default()) .await } /// Returns the SOA record for the zone async fn soa_secure( &self, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError> { self.lookup(self.origin(), RecordType::SOA, lookup_options) .await } } #[async_trait::async_trait] impl<A, L> AuthorityObject for Arc<A> where A: Authority<Lookup = L> + Send + Sync + 'static, L: LookupObject + Send + Sync + 'static, { fn box_clone(&self) -> Box<dyn AuthorityObject> { Box::new(self.clone()) } /// What type is this zone fn zone_type(&self) -> ZoneType { Authority::zone_type(self.as_ref()) } /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool { Authority::is_axfr_allowed(self.as_ref()) } /// Perform a dynamic update of a zone async fn update(&self, update: &MessageRequest) -> UpdateResult<bool> { Authority::update(self.as_ref(), update).await } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName { Authority::origin(self.as_ref()) } /// Looks up all Resource Records matching the given `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup. `RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well.
/// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records. async fn lookup( &self, name: &LowerName, rtype: RecordType, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError> { let this = self.as_ref(); let lookup = Authority::lookup(this, name, rtype, lookup_options).await; lookup.map(|l| Box::new(l) as Box<dyn LookupObject>) } /// Using the specified query, perform a lookup against this zone. /// /// # Arguments /// /// * `query` - the query to perform the lookup with. /// * `is_secure` - if true, then RRSIG records (if this is a secure zone) will be returned. /// /// # Return value /// /// Returns a vector containing the results of the query; it will be empty if not found. If /// `is_secure` is true, in the case of no records found then NSEC records will be returned. async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError> { let this = self.as_ref(); debug!("performing {} on {}", request_info.query, this.origin()); let lookup = Authority::search(this, request_info, lookup_options).await; lookup.map(|l| Box::new(l) as Box<dyn LookupObject>) } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result<Box<dyn LookupObject>, LookupError> { let lookup = Authority::get_nsec_records(self.as_ref(), name, lookup_options).await; lookup.map(|l| Box::new(l) as Box<dyn LookupObject>) } } /// An Object Safe Lookup for Authority pub trait LookupObject: Send { /// Returns true if either the associated Records are empty, or this is a NameExists or NxDomain fn is_empty(&self) -> bool; /// Conversion to an iterator fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Record> + Send + 'a>; /// For CNAME and similar records, this is an additional set of lookup records /// /// it is acceptable for this to return None after the first call. fn take_additionals(&mut self) -> Option<Box<dyn LookupObject>>; } /// A lookup that returns no records #[derive(Clone, Copy, Debug)] pub struct EmptyLookup; impl LookupObject for EmptyLookup { fn is_empty(&self) -> bool { true } fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Record> + Send + 'a> { Box::new([].iter()) } fn take_additionals(&mut self) -> Option<Box<dyn LookupObject>> { None } } trust-dns-server-0.22.0/src/authority/catalog.rs000064400000000000000000000557511046102023000177660ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. // TODO, I've implemented this as a separate entity from the cache, but I wonder if the cache // should be the only "front-end" for lookups, where if that misses, then we go to the catalog // then, if requested, do a recursive lookup... i.e. the catalog would only point to files.
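//! The `Catalog` is the top-level request handler of the server: it maps zone origins
//! (`LowerName`s) to `AuthorityObject` implementations and routes each query or update to
//! the most specific matching zone.
//!
//! A minimal sketch of wiring a catalog up (the `authority` value is a placeholder for any
//! `Box<dyn AuthorityObject>`, for example a boxed `Arc` of an in-memory authority):
//!
//! ```ignore
//! let mut catalog = Catalog::new();
//! // register the zone; lookups for any name at or under example.com. will match it
//! catalog.upsert(LowerName::from(Name::from_ascii("example.com.")?), authority);
//! // the catalog itself implements `RequestHandler`, so it can back a server front-end
//! ```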
use std::{borrow::Borrow, collections::HashMap, future::Future, io}; use cfg_if::cfg_if; use tracing::{debug, error, info, trace, warn}; use trust_dns_proto::rr::Record; #[cfg(feature = "dnssec")] use crate::client::rr::{ dnssec::{Algorithm, SupportedAlgorithms}, rdata::opt::{EdnsCode, EdnsOption}, }; use crate::{ authority::{ AuthLookup, AuthorityObject, EmptyLookup, LookupError, LookupObject, LookupOptions, MessageResponse, MessageResponseBuilder, ZoneType, }, client::{ op::{Edns, Header, LowerQuery, MessageType, OpCode, ResponseCode}, rr::{LowerName, RecordType}, }, server::{Request, RequestHandler, RequestInfo, ResponseHandler, ResponseInfo}, }; /// Set of authorities, zones, available to this server. #[derive(Default)] pub struct Catalog { authorities: HashMap>, } #[allow(unused_mut, unused_variables)] async fn send_response<'a, R: ResponseHandler>( response_edns: Option, mut response: MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, mut response_handle: R, ) -> io::Result { #[cfg(feature = "dnssec")] if let Some(mut resp_edns) = response_edns { // set edns DAU and DHU // send along the algorithms which are supported by this authority let mut algorithms = SupportedAlgorithms::default(); algorithms.set(Algorithm::RSASHA256); algorithms.set(Algorithm::ECDSAP256SHA256); algorithms.set(Algorithm::ECDSAP384SHA384); algorithms.set(Algorithm::ED25519); let dau = EdnsOption::DAU(algorithms); let dhu = EdnsOption::DHU(algorithms); resp_edns.options_mut().insert(dau); resp_edns.options_mut().insert(dhu); response.set_edns(resp_edns); } response_handle.send_response(response).await } #[async_trait::async_trait] impl RequestHandler for Catalog { /// Determines what needs to happen given the type of request, i.e. Query or Update. /// /// # Arguments /// /// * `request` - the requested action to perform. /// * `response_handle` - sink for the response message to be sent async fn handle_request( &self, request: &Request, mut response_handle: R, ) -> ResponseInfo { trace!("request: {:?}", request); let response_edns: Option; // check if it's edns if let Some(req_edns) = request.edns() { let mut response = MessageResponseBuilder::new(Some(request.raw_query())); let mut response_header = Header::response_from_request(request.header()); let mut resp_edns: Edns = Edns::new(); // check our version against the request // TODO: what version are we? let our_version = 0; resp_edns.set_dnssec_ok(true); resp_edns.set_max_payload(req_edns.max_payload().max(512)); resp_edns.set_version(our_version); if req_edns.version() > our_version { warn!( "request edns version greater than {}: {}", our_version, req_edns.version() ); response_header.set_response_code(ResponseCode::BADVERS); resp_edns.set_rcode_high(ResponseCode::BADVERS.high()); response.edns(resp_edns); // TODO: should ResponseHandle consume self? 
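// EDNS version negotiation (RFC 6891, section 6.1.3): the request asked for a newer EDNS
// version than this server implements, so reply with BADVERS, echo an OPT record advertising
// the version we do support, and include no answer records.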
let result = response_handle .send_response(response.build_no_records(response_header)) .await; // couldn't handle the request return match result { Err(e) => { error!("request error: {}", e); ResponseInfo::serve_failed() } Ok(info) => info, }; } response_edns = Some(resp_edns); } else { response_edns = None; } let result = match request.message_type() { // TODO think about threading query lookups for multiple lookups, this could be a huge improvement // especially for recursive lookups MessageType::Query => match request.op_code() { OpCode::Query => { debug!("query received: {}", request.id()); let info = self.lookup(request, response_edns, response_handle).await; Ok(info) } OpCode::Update => { debug!("update received: {}", request.id()); self.update(request, response_edns, response_handle).await } c => { warn!("unimplemented op_code: {:?}", c); let response = MessageResponseBuilder::new(Some(request.raw_query())); response_handle .send_response(response.error_msg(request.header(), ResponseCode::NotImp)) .await } }, MessageType::Response => { warn!("got a response as a request from id: {}", request.id()); let response = MessageResponseBuilder::new(Some(request.raw_query())); response_handle .send_response(response.error_msg(request.header(), ResponseCode::FormErr)) .await } }; match result { Err(e) => { error!("request failed: {}", e); ResponseInfo::serve_failed() } Ok(info) => info, } } } impl Catalog { /// Constructs a new Catalog pub fn new() -> Self { Self { authorities: HashMap::new(), } } /// Insert or update a zone authority /// /// # Arguments /// /// * `name` - zone name, e.g. example.com. /// * `authority` - the zone data pub fn upsert(&mut self, name: LowerName, authority: Box) { self.authorities.insert(name, authority); } /// Remove a zone from the catalog pub fn remove(&mut self, name: &LowerName) -> Option> { self.authorities.remove(name) } /// Update the zone given the Update request. /// /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// 3.1 - Process Zone Section /// /// 3.1.1. The Zone Section is checked to see that there is exactly one /// RR therein and that the RR's ZTYPE is SOA, else signal FORMERR to the /// requestor. Next, the ZNAME and ZCLASS are checked to see if the zone /// so named is one of this server's authority zones, else signal NOTAUTH /// to the requestor. If the server is a zone Secondary, the request will be /// forwarded toward the Primary Zone Server. /// /// 3.1.2 - Pseudocode For Zone Section Processing /// /// if (zcount != 1 || ztype != SOA) /// return (FORMERR) /// if (zone_type(zname, zclass) == SECONDARY) /// return forward() /// if (zone_type(zname, zclass) == PRIMARY) /// return update() /// return (NOTAUTH) /// /// Sections 3.2 through 3.8 describe the primary's behaviour, /// whereas Section 6 describes a forwarder's behaviour. /// /// 3.8 - Response /// /// At the end of UPDATE processing, a response code will be known. A /// response message is generated by copying the ID and Opcode fields /// from the request, and either copying the ZOCOUNT, PRCOUNT, UPCOUNT, /// and ADCOUNT fields and associated sections, or placing zeros (0) in /// the these "count" fields and not including any part of the original /// update. The QR bit is set to one (1), and the response is sent back /// to the requestor. If the requestor used UDP, then the response will /// be sent to the requestor's source UDP port. 
If the requestor used /// TCP, then the response will be sent back on the requestor's open TCP /// connection. /// ``` /// /// The "request" should be an update formatted message. /// The response will be in the alternate, all 0's format described in RFC 2136 section 3.8 /// as this is more efficient. /// /// # Arguments /// /// * `request` - an update message /// * `response_handle` - sink for the response message to be sent pub async fn update( &self, update: &Request, response_edns: Option, response_handle: R, ) -> io::Result { let request_info = update.request_info(); let verify_request = move || -> Result, ResponseCode> { // 2.3 - Zone Section // // All records to be updated must be in the same zone, and // therefore the Zone Section is allowed to contain exactly one record. // The ZNAME is the zone name, the ZTYPE must be SOA, and the ZCLASS is // the zone's class. let ztype = request_info.query.query_type(); if ztype != RecordType::SOA { warn!( "invalid update request zone type must be SOA, ztype: {}", ztype ); return Err(ResponseCode::FormErr); } Ok(request_info) }; // verify the zone type and number of zones in request, then find the zone to update let request_info = verify_request(); let authority = request_info.as_ref().map_err(|e| *e).and_then(|info| { self.find(info.query.name()) .map(|a| a.box_clone()) .ok_or(ResponseCode::Refused) }); let response_code = match authority { Ok(authority) => { #[allow(deprecated)] match authority.zone_type() { ZoneType::Secondary | ZoneType::Slave => { error!("secondary forwarding for update not yet implemented"); ResponseCode::NotImp } ZoneType::Primary | ZoneType::Master => { let update_result = authority.update(update).await; match update_result { // successful update Ok(..) => ResponseCode::NoError, Err(response_code) => response_code, } } _ => ResponseCode::NotAuth, } } Err(response_code) => response_code, }; let response = MessageResponseBuilder::new(Some(update.raw_query())); let mut response_header = Header::default(); response_header.set_id(update.id()); response_header.set_op_code(OpCode::Update); response_header.set_message_type(MessageType::Response); response_header.set_response_code(response_code); send_response( response_edns, response.build_no_records(response_header), response_handle, ) .await } /// Checks whether the `Catalog` contains DNS records for `name` /// /// Use this when you know the exact `LowerName` that was used when /// adding an authority and you don't care about the authority it /// contains. For public domain names, `LowerName` is usually the /// top level domain name like `example.com.`. /// /// If you do not know the exact domain name to use or you actually /// want to use the authority it contains, use `find` instead. pub fn contains(&self, name: &LowerName) -> bool { self.authorities.contains_key(name) } /// Given the requested query, lookup and return any matching results. /// /// # Arguments /// /// * `request` - the query message. 
/// * `response_handle` - sink for the response message to be sent pub async fn lookup( &self, request: &Request, response_edns: Option, response_handle: R, ) -> ResponseInfo { let request_info = request.request_info(); let authority = self.find(request_info.query.name()); if let Some(authority) = authority { lookup( request_info, authority, request, response_edns .as_ref() .map(|arc| Borrow::::borrow(arc).clone()), response_handle.clone(), ) .await } else { // if this is empty then the there are no authorities registered that can handle the request let response = MessageResponseBuilder::new(Some(request.raw_query())); let result = send_response( response_edns, response.error_msg(request.header(), ResponseCode::Refused), response_handle, ) .await; match result { Err(e) => { error!("failed to send response: {}", e); ResponseInfo::serve_failed() } Ok(r) => r, } } } /// Recursively searches the catalog for a matching authority pub fn find(&self, name: &LowerName) -> Option<&(dyn AuthorityObject + 'static)> { debug!("searching authorities for: {}", name); self.authorities .get(name) .map(|authority| &**authority) .or_else(|| { if !name.is_root() { let name = name.base_name(); self.find(&name) } else { None } }) } } async fn lookup<'a, R: ResponseHandler + Unpin>( request_info: RequestInfo<'_>, authority: &dyn AuthorityObject, request: &Request, response_edns: Option, response_handle: R, ) -> ResponseInfo { let query = request_info.query; debug!( "request: {} found authority: {}", request.id(), authority.origin() ); let (response_header, sections) = build_response( authority, request_info, request.id(), request.header(), query, request.edns(), ) .await; let response = MessageResponseBuilder::new(Some(request.raw_query())).build( response_header, sections.answers.iter(), sections.ns.iter(), sections.soa.iter(), sections.additionals.iter(), ); let result = send_response(response_edns.clone(), response, response_handle.clone()).await; match result { Err(e) => { error!("error sending response: {}", e); ResponseInfo::serve_failed() } Ok(i) => i, } } #[allow(unused_variables)] fn lookup_options_for_edns(edns: Option<&Edns>) -> LookupOptions { let edns = match edns { Some(edns) => edns, None => return LookupOptions::default(), }; cfg_if! 
{ if #[cfg(feature = "dnssec")] { let supported_algorithms = if let Some(&EdnsOption::DAU(algs)) = edns.option(EdnsCode::DAU) { algs } else { debug!("no DAU in request, used default SupportAlgorithms"); SupportedAlgorithms::default() }; LookupOptions::for_dnssec(edns.dnssec_ok(), supported_algorithms) } else { LookupOptions::default() } } } async fn build_response( authority: &dyn AuthorityObject, request_info: RequestInfo<'_>, request_id: u16, request_header: &Header, query: &LowerQuery, edns: Option<&Edns>, ) -> (Header, LookupSections) { let lookup_options = lookup_options_for_edns(edns); // log algorithms being requested if lookup_options.is_dnssec() { info!( "request: {} lookup_options: {:?}", request_id, lookup_options ); } let mut response_header = Header::response_from_request(request_header); response_header.set_authoritative(authority.zone_type().is_authoritative()); debug!("performing {} on {}", query, authority.origin()); let future = authority.search(request_info, lookup_options); #[allow(deprecated)] let sections = match authority.zone_type() { ZoneType::Primary | ZoneType::Secondary | ZoneType::Master | ZoneType::Slave => { send_authoritative_response( future, authority, &mut response_header, lookup_options, request_id, query, ) .await } ZoneType::Forward | ZoneType::Hint => { send_forwarded_response(future, request_header, &mut response_header).await } }; (response_header, sections) } async fn send_authoritative_response( future: impl Future, LookupError>>, authority: &dyn AuthorityObject, response_header: &mut Header, lookup_options: LookupOptions, request_id: u16, query: &LowerQuery, ) -> LookupSections { // In this state we await the records, on success we transition to getting // NS records, which indicate an authoritative response. // // On Errors, the transition depends on the type of error. let answers = match future.await { Ok(records) => { response_header.set_response_code(ResponseCode::NoError); response_header.set_authoritative(true); Some(records) } // This request was refused // TODO: there are probably other error cases that should just drop through (FormErr, ServFail) Err(LookupError::ResponseCode(ResponseCode::Refused)) => { response_header.set_response_code(ResponseCode::Refused); return LookupSections { answers: Box::new(AuthLookup::default()) as Box, ns: Box::new(AuthLookup::default()) as Box, soa: Box::new(AuthLookup::default()) as Box, additionals: Box::new(AuthLookup::default()) as Box, }; } Err(e) => { if e.is_nx_domain() { response_header.set_response_code(ResponseCode::NXDomain); } else if e.is_name_exists() { response_header.set_response_code(ResponseCode::NoError); }; None } }; let (ns, soa) = if answers.is_some() { // SOA queries should return the NS records as well. if query.query_type().is_soa() { // This was a successful authoritative lookup for SOA: // get the NS records as well. match authority.ns(lookup_options).await { Ok(ns) => (Some(ns), None), Err(e) => { warn!("ns_lookup errored: {}", e); (None, None) } } } else { (None, None) } } else { let nsecs = if lookup_options.is_dnssec() { // in the dnssec case, nsec records should exist, we return NoError + NoData + NSec... 
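// (RFC 4035: a signed zone proves a negative answer with NSEC records covering the
// queried name/type; the SOA lookup that follows supplies the negative-caching SOA
// from RFC 2308 so resolvers know how long to cache the non-existence.)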
debug!("request: {} non-existent adding nsecs", request_id); // run the nsec lookup future, and then transition to get soa let future = authority.get_nsec_records(query.name(), lookup_options); match future.await { // run the soa lookup Ok(nsecs) => Some(nsecs), Err(e) => { warn!("failed to lookup nsecs: {}", e); None } } } else { None }; match authority.soa_secure(lookup_options).await { Ok(soa) => (nsecs, Some(soa)), Err(e) => { warn!("failed to lookup soa: {}", e); (nsecs, None) } } }; // everything is done, return results. let (answers, additionals) = match answers { Some(mut answers) => match answers.take_additionals() { Some(additionals) => (answers, additionals), None => ( answers, Box::new(AuthLookup::default()) as Box, ), }, None => ( Box::new(AuthLookup::default()) as Box, Box::new(AuthLookup::default()) as Box, ), }; LookupSections { answers, ns: ns.unwrap_or_else(|| Box::new(AuthLookup::default()) as Box), soa: soa.unwrap_or_else(|| Box::new(AuthLookup::default()) as Box), additionals, } } async fn send_forwarded_response( future: impl Future, LookupError>>, request_header: &Header, response_header: &mut Header, ) -> LookupSections { response_header.set_recursion_available(true); response_header.set_authoritative(false); // Don't perform the recursive query if this is disabled... let answers = if !request_header.recursion_desired() { // cancel the future?? // future.cancel(); drop(future); info!( "request disabled recursion, returning no records: {}", request_header.id() ); Box::new(EmptyLookup) } else { match future.await { Err(e) => { if e.is_nx_domain() { response_header.set_response_code(ResponseCode::NXDomain); } debug!("error resolving: {}", e); Box::new(EmptyLookup) } Ok(rsp) => rsp, } }; LookupSections { answers, ns: Box::new(AuthLookup::default()) as Box, soa: Box::new(AuthLookup::default()) as Box, additionals: Box::new(AuthLookup::default()) as Box, } } struct LookupSections { answers: Box, ns: Box, soa: Box, additionals: Box, } trust-dns-server-0.22.0/src/authority/error.rs000064400000000000000000000051241046102023000174740ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::io; use enum_as_inner::EnumAsInner; use thiserror::Error; use crate::client::op::ResponseCode; #[cfg(feature = "trust-dns-resolver")] use crate::resolver::error::ResolveError; // TODO: should this implement Failure? 
#[allow(clippy::large_enum_variant)]
/// A query could not be fulfilled
#[derive(Debug, EnumAsInner, Error)]
#[non_exhaustive]
pub enum LookupError {
    /// A record at the same Name as the query exists, but not of the queried RecordType
    #[error("The name exists, but not for the record requested")]
    NameExists,
    /// There was an error performing the lookup
    #[error("Error performing lookup: {0}")]
    ResponseCode(ResponseCode),
    /// Resolve Error
    #[cfg(feature = "trust-dns-resolver")]
    #[cfg_attr(docsrs, doc(cfg(feature = "resolver")))]
    #[error("Forward resolution error: {0}")]
    ResolveError(#[from] ResolveError),
    /// Recursive Resolver Error
    #[cfg(feature = "trust-dns-recursor")]
    #[cfg_attr(docsrs, doc(cfg(feature = "recursor")))]
    #[error("Recursive resolution error: {0}")]
    RecursiveError(#[from] trust_dns_recursor::Error),
    /// An underlying IO error occurred
    #[error("io error: {0}")]
    Io(io::Error),
}

impl LookupError {
    /// Create a lookup error, specifying that a name exists at the location, but no matching RecordType
    pub fn for_name_exists() -> Self {
        Self::NameExists
    }

    /// This is a non-existent domain name
    pub fn is_nx_domain(&self) -> bool {
        matches!(*self, Self::ResponseCode(ResponseCode::NXDomain))
    }

    /// The lookup was refused
    pub fn is_refused(&self) -> bool {
        matches!(*self, Self::ResponseCode(ResponseCode::Refused))
    }
}

impl From<ResponseCode> for LookupError {
    fn from(code: ResponseCode) -> Self {
        // this should never be a NoError
        debug_assert!(code != ResponseCode::NoError);
        Self::ResponseCode(code)
    }
}

impl From<io::Error> for LookupError {
    fn from(e: io::Error) -> Self {
        Self::Io(e)
    }
}

impl From<LookupError> for io::Error {
    fn from(e: LookupError) -> Self {
        Self::new(io::ErrorKind::Other, Box::new(e))
    }
}

/// Result of a Lookup in the Catalog and Authority
pub type LookupResult<T> = Result<T, LookupError>;
trust-dns-server-0.22.0/src/authority/message_request.rs000064400000000000000000000302001046102023000215300ustar 00000000000000// Copyright 2015-2021 Benjamin Fry
//
// Licensed under the Apache License, Version 2.0, or the MIT license, at your option.
// This file may not be copied, modified, or distributed except according to those terms.
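// Illustrative sketch (not part of the upstream file): decoding a `MessageRequest`
// (defined below) from raw wire bytes, as the server does for every inbound packet.
// `wire_bytes` is a hypothetical buffer, e.g. one datagram read from a UDP socket.
#[cfg(test)]
#[allow(dead_code)]
fn sketch_decode_request(
    wire_bytes: &[u8],
) -> crate::proto::error::ProtoResult<MessageRequest> {
    use crate::proto::serialize::binary::{BinDecodable, BinDecoder};

    let mut decoder = BinDecoder::new(wire_bytes);
    // `MessageRequest` keeps the original query bytes around for cheap re-serialization
    MessageRequest::read(&mut decoder)
}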
use std::iter::once; use crate::{ client::op::LowerQuery, proto::{ error::*, op::{ message::{self, EmitAndCount}, Edns, Header, Message, MessageType, OpCode, ResponseCode, }, rr::Record, serialize::binary::{BinDecodable, BinDecoder, BinEncodable, BinEncoder}, }, }; /// A Message which captures the data from an inbound request #[derive(Debug, PartialEq)] pub struct MessageRequest { header: Header, query: WireQuery, answers: Vec, name_servers: Vec, additionals: Vec, sig0: Vec, edns: Option, } impl MessageRequest { /// Return the request header pub fn header(&self) -> &Header { &self.header } /// see `Header::id()` pub fn id(&self) -> u16 { self.header.id() } /// see `Header::message_type()` pub fn message_type(&self) -> MessageType { self.header.message_type() } /// see `Header::op_code()` pub fn op_code(&self) -> OpCode { self.header.op_code() } /// see `Header::authoritative()` pub fn authoritative(&self) -> bool { self.header.authoritative() } /// see `Header::truncated()` pub fn truncated(&self) -> bool { self.header.truncated() } /// see `Header::recursion_desired()` pub fn recursion_desired(&self) -> bool { self.header.recursion_desired() } /// see `Header::recursion_available()` pub fn recursion_available(&self) -> bool { self.header.recursion_available() } /// see `Header::authentic_data()` pub fn authentic_data(&self) -> bool { self.header.authentic_data() } /// see `Header::checking_disabled()` pub fn checking_disabled(&self) -> bool { self.header.checking_disabled() } /// # Return value /// /// The `ResponseCode`, if this is an EDNS message then this will join the section from the OPT /// record to create the EDNS `ResponseCode` pub fn response_code(&self) -> ResponseCode { self.header.response_code() } /// ```text /// Question Carries the query name and other query parameters. /// ``` pub fn query(&self) -> &LowerQuery { &self.query.query } /// ```text /// Answer Carries RRs which directly answer the query. /// ``` pub fn answers(&self) -> &[Record] { &self.answers } /// ```text /// Authority Carries RRs which describe other authoritative servers. /// May optionally carry the SOA RR for the authoritative /// data in the answer section. /// ``` pub fn name_servers(&self) -> &[Record] { &self.name_servers } /// ```text /// Additional Carries RRs which may be helpful in using the RRs in the /// other sections. /// ``` pub fn additionals(&self) -> &[Record] { &self.additionals } /// [RFC 6891, EDNS(0) Extensions, April 2013](https://tools.ietf.org/html/rfc6891#section-6.1.1) /// /// ```text /// 6.1.1. Basic Elements /// /// An OPT pseudo-RR (sometimes called a meta-RR) MAY be added to the /// additional data section of a request. /// /// The OPT RR has RR type 41. /// /// If an OPT record is present in a received request, compliant /// responders MUST include an OPT record in their respective responses. /// /// An OPT record does not carry any DNS data. It is used only to /// contain control information pertaining to the question-and-answer /// sequence of a specific transaction. OPT RRs MUST NOT be cached, /// forwarded, or stored in or loaded from zone files. /// /// The OPT RR MAY be placed anywhere within the additional data section. /// When an OPT RR is included within any DNS message, it MUST be the /// only OPT RR in that message. If a query message with more than one /// OPT RR is received, a FORMERR (RCODE=1) MUST be returned. 
The /// placement flexibility for the OPT RR does not override the need for /// the TSIG or SIG(0) RRs to be the last in the additional section /// whenever they are present. /// ``` /// # Return value /// /// Returns the EDNS record if it was found in the additional section. pub fn edns(&self) -> Option<&Edns> { self.edns.as_ref() } /// Any SIG0 records for signed messages pub fn sig0(&self) -> &[Record] { &self.sig0 } /// # Return value /// /// the max payload value as it's defined in the EDNS section. pub fn max_payload(&self) -> u16 { let max_size = self.edns.as_ref().map_or(512, Edns::max_payload); if max_size < 512 { 512 } else { max_size } } /// # Return value /// /// the version as defined in the EDNS record pub fn version(&self) -> u8 { self.edns.as_ref().map_or(0, Edns::version) } /// Returns the original query received from the client pub(crate) fn raw_query(&self) -> &WireQuery { &self.query } } impl<'q> BinDecodable<'q> for MessageRequest { // TODO: generify this with Message? /// Reads a MessageRequest from the decoder fn read(decoder: &mut BinDecoder<'q>) -> ProtoResult { let mut header = Header::read(decoder)?; let mut try_parse_rest = move || { // get all counts before header moves let query_count = header.query_count() as usize; let answer_count = header.answer_count() as usize; let name_server_count = header.name_server_count() as usize; let additional_count = header.additional_count() as usize; let queries = Queries::read(decoder, query_count)?; let query = queries.try_into_query()?; let (answers, _, _) = Message::read_records(decoder, answer_count, false)?; let (name_servers, _, _) = Message::read_records(decoder, name_server_count, false)?; let (additionals, edns, sig0) = Message::read_records(decoder, additional_count, true)?; // need to grab error code from EDNS (which might have a higher value) if let Some(edns) = &edns { let high_response_code = edns.rcode_high(); header.merge_response_code(high_response_code); } Ok(Self { header, query, answers, name_servers, additionals, sig0, edns, }) }; match try_parse_rest() { Ok(message) => Ok(message), Err(e) => Err(ProtoErrorKind::FormError { header, error: Box::new(e), } .into()), } } } /// A set of Queries with the associated serialized data #[derive(Debug, PartialEq, Eq)] pub struct Queries { queries: Vec, original: Box<[u8]>, } impl Queries { fn read_queries(decoder: &mut BinDecoder<'_>, count: usize) -> ProtoResult> { let mut queries = Vec::with_capacity(count); for _ in 0..count { queries.push(LowerQuery::read(decoder)?); } Ok(queries) } /// Read queries from a decoder pub fn read(decoder: &mut BinDecoder<'_>, num_queries: usize) -> ProtoResult { let queries_start = decoder.index(); let queries = Self::read_queries(decoder, num_queries)?; let original = decoder .slice_from(queries_start)? .to_vec() .into_boxed_slice(); Ok(Self { queries, original }) } /// return the number of queries in the request pub fn len(&self) -> usize { self.queries.len() } /// Returns true if there are no queries pub fn is_empty(&self) -> bool { self.queries.is_empty() } /// returns the bytes as they were seen from the Client pub fn as_bytes(&self) -> &[u8] { self.original.as_ref() } pub(crate) fn as_emit_and_count(&self) -> QueriesEmitAndCount<'_> { QueriesEmitAndCount { length: self.queries.len(), // We don't generally support more than one query, but this will at least give us one // cache entry. 
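// (That first query is what `QueriesEmitAndCount::emit` below uses to seed the
// encoder's label-pointer cache, so names in later sections can compress against
// the already-written query name.)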
first_query: self.queries.get(0), cached_serialized: self.original.as_ref(), } } /// Performs a validation that this set of Queries is one and only one Query pub(crate) fn try_into_query(mut self) -> Result { let count = self.queries.len(); if count == 1 { let query = self.queries.pop().expect("should have been at least one"); Ok(WireQuery { query, original: self.original, }) } else { Err(ProtoErrorKind::BadQueryCount(count).into()) } } } /// A query with the original bytes stored from the query #[derive(Debug, PartialEq)] pub(crate) struct WireQuery { query: LowerQuery, original: Box<[u8]>, } impl WireQuery { pub(crate) fn as_emit_and_count(&self) -> QueriesEmitAndCount<'_> { QueriesEmitAndCount { length: 1, first_query: Some(&self.query), cached_serialized: self.original.as_ref(), } } } pub(crate) struct QueriesEmitAndCount<'q> { /// Number of queries in this segment length: usize, /// Use the first query, if it exists, to pre-populate the string compression cache first_query: Option<&'q LowerQuery>, /// The cached rendering of the original (wire-format) queries cached_serialized: &'q [u8], } impl<'q> EmitAndCount for QueriesEmitAndCount<'q> { fn emit(&mut self, encoder: &mut BinEncoder<'_>) -> ProtoResult { let original_offset = encoder.offset(); encoder.emit_vec(self.cached_serialized)?; if !encoder.is_canonical_names() { if let Some(query) = self.first_query { encoder.store_label_pointer( original_offset, original_offset + query.original().name().len(), ) } } Ok(self.length) } } impl BinEncodable for MessageRequest { fn emit(&self, encoder: &mut BinEncoder<'_>) -> ProtoResult<()> { message::emit_message_parts( &self.header, // we emit the queries, not the raw bytes, in order to guarantee canonical form // in cases where that's necessary, like SIG0 validation &mut once(&self.query.query), &mut self.answers.iter(), &mut self.name_servers.iter(), &mut self.additionals.iter(), self.edns.as_ref(), &self.sig0, encoder, )?; Ok(()) } } /// A type which represents an MessageRequest for dynamic Update. pub trait UpdateRequest { /// Id of the Message fn id(&self) -> u16; /// Zone being updated, this should be the query of a Message fn zone(&self) -> &LowerQuery; /// Prerequisites map to the answers of a Message fn prerequisites(&self) -> &[Record]; /// Records to update map to the name_servers of a Message fn updates(&self) -> &[Record]; /// Additional records fn additionals(&self) -> &[Record]; /// SIG0 records for verifying the Message fn sig0(&self) -> &[Record]; } impl UpdateRequest for MessageRequest { fn id(&self) -> u16 { Self::id(self) } fn zone(&self) -> &LowerQuery { self.query() } fn prerequisites(&self) -> &[Record] { self.answers() } fn updates(&self) -> &[Record] { self.name_servers() } fn additionals(&self) -> &[Record] { self.additionals() } fn sig0(&self) -> &[Record] { self.sig0() } } trust-dns-server-0.22.0/src/authority/message_response.rs000064400000000000000000000237371046102023000217170ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
use crate::{ authority::{ message_request::{MessageRequest, QueriesEmitAndCount}, Queries, }, proto::{ error::*, op::{ message::{self, EmitAndCount}, Edns, Header, ResponseCode, }, rr::Record, serialize::binary::BinEncoder, }, server::ResponseInfo, }; use super::message_request::WireQuery; /// A EncodableMessage with borrowed data for Responses in the Server #[derive(Debug)] pub struct MessageResponse<'q, 'a, Answers, NameServers, Soa, Additionals> where Answers: Iterator + Send + 'a, NameServers: Iterator + Send + 'a, Soa: Iterator + Send + 'a, Additionals: Iterator + Send + 'a, { header: Header, query: Option<&'q WireQuery>, answers: Answers, name_servers: NameServers, soa: Soa, additionals: Additionals, sig0: Vec, edns: Option, } enum EmptyOrQueries<'q> { Empty, Queries(QueriesEmitAndCount<'q>), } impl<'q> From> for EmptyOrQueries<'q> { fn from(option: Option<&'q Queries>) -> Self { option.map_or(EmptyOrQueries::Empty, |q| { EmptyOrQueries::Queries(q.as_emit_and_count()) }) } } impl<'q> From> for EmptyOrQueries<'q> { fn from(option: Option<&'q WireQuery>) -> Self { option.map_or(EmptyOrQueries::Empty, |q| { EmptyOrQueries::Queries(q.as_emit_and_count()) }) } } impl<'q> EmitAndCount for EmptyOrQueries<'q> { fn emit(&mut self, encoder: &mut BinEncoder<'_>) -> ProtoResult { match self { EmptyOrQueries::Empty => Ok(0), EmptyOrQueries::Queries(q) => q.emit(encoder), } } } impl<'q, 'a, A, N, S, D> MessageResponse<'q, 'a, A, N, S, D> where A: Iterator + Send + 'a, N: Iterator + Send + 'a, S: Iterator + Send + 'a, D: Iterator + Send + 'a, { /// Returns the header of the message pub fn header(&self) -> &Header { &self.header } /// Get a mutable reference to the header pub fn header_mut(&mut self) -> &mut Header { &mut self.header } /// Set the EDNS options for the Response pub fn set_edns(&mut self, edns: Edns) -> &mut Self { self.edns = Some(edns); self } /// Consumes self, and emits to the encoder. 
pub fn destructive_emit(mut self, encoder: &mut BinEncoder<'_>) -> ProtoResult { // soa records are part of the nameserver section let mut name_servers = self.name_servers.chain(self.soa); message::emit_message_parts( &self.header, &mut EmptyOrQueries::from(self.query), &mut self.answers, &mut name_servers, &mut self.additionals, self.edns.as_ref(), &self.sig0, encoder, ) .map(Into::into) } } /// A builder for MessageResponses pub struct MessageResponseBuilder<'q> { query: Option<&'q WireQuery>, sig0: Option>, edns: Option, } impl<'q> MessageResponseBuilder<'q> { /// Constructs a new response builder /// /// # Arguments /// /// * `query` - any optional query (from the Request) to associate with the Response pub(crate) fn new(query: Option<&'q WireQuery>) -> MessageResponseBuilder<'q> { MessageResponseBuilder { query, sig0: None, edns: None, } } /// Constructs a new response builder /// /// # Arguments /// /// * `message` - original request message to associate with the response pub fn from_message_request(message: &'q MessageRequest) -> Self { Self::new(Some(message.raw_query())) } /// Associate EDNS with the Response pub fn edns(&mut self, edns: Edns) -> &mut Self { self.edns = Some(edns); self } /// Constructs the new MessageResponse with associated Header /// /// # Arguments /// /// * `header` - set of [Header]s for the Message pub fn build<'a, A, N, S, D>( self, header: Header, answers: A, name_servers: N, soa: S, additionals: D, ) -> MessageResponse<'q, 'a, A::IntoIter, N::IntoIter, S::IntoIter, D::IntoIter> where A: IntoIterator + Send + 'a, A::IntoIter: Send, N: IntoIterator + Send + 'a, N::IntoIter: Send, S: IntoIterator + Send + 'a, S::IntoIter: Send, D: IntoIterator + Send + 'a, D::IntoIter: Send, { MessageResponse { header, query: self.query, answers: answers.into_iter(), name_servers: name_servers.into_iter(), soa: soa.into_iter(), additionals: additionals.into_iter(), sig0: self.sig0.unwrap_or_default(), edns: self.edns, } } /// Construct a Response with no associated records pub fn build_no_records<'a>( self, header: Header, ) -> MessageResponse< 'q, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, > { MessageResponse { header, query: self.query, answers: Box::new(None.into_iter()), name_servers: Box::new(None.into_iter()), soa: Box::new(None.into_iter()), additionals: Box::new(None.into_iter()), sig0: self.sig0.unwrap_or_default(), edns: self.edns, } } /// Constructs a new error MessageResponse with associated settings /// /// # Arguments /// /// * `id` - request id to which this is a response /// * `op_code` - operation for which this is a response /// * `response_code` - the type of error pub fn error_msg<'a>( self, request_header: &Header, response_code: ResponseCode, ) -> MessageResponse< 'q, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, > { let mut header = Header::response_from_request(request_header); header.set_response_code(response_code); MessageResponse { header, query: self.query, answers: Box::new(None.into_iter()), name_servers: Box::new(None.into_iter()), soa: Box::new(None.into_iter()), additionals: Box::new(None.into_iter()), sig0: self.sig0.unwrap_or_default(), edns: self.edns, } } } #[cfg(test)] mod tests { use std::iter; use std::net::Ipv4Addr; use std::str::FromStr; use crate::proto::op::{Header, Message}; use crate::proto::rr::{DNSClass, Name, RData, Record}; use crate::proto::serialize::binary::BinEncoder; use super::*; 
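// Illustrative sketch (not an upstream test): the builder's `error_msg` path, showing
// that an error response with no records round-trips through `destructive_emit`,
// alongside the truncation tests below.
#[test]
fn sketch_error_msg_roundtrip() {
    use crate::proto::op::ResponseCode;

    let mut buf = Vec::with_capacity(512);
    {
        let mut encoder = BinEncoder::new(&mut buf);
        let request_header = Header::new();
        let response = MessageResponseBuilder::new(None)
            .error_msg(&request_header, ResponseCode::NXDomain);
        response
            .destructive_emit(&mut encoder)
            .expect("failed to encode");
    }

    let parsed = Message::from_vec(&buf).expect("failed to decode");
    assert_eq!(parsed.header().response_code(), ResponseCode::NXDomain);
    assert_eq!(parsed.answer_count(), 0);
}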
#[test] fn test_truncation_ridiculous_number_answers() { let mut buf = Vec::with_capacity(512); { let mut encoder = BinEncoder::new(&mut buf); encoder.set_max_size(512); let answer = Record::new() .set_name(Name::from_str("www.example.com.").unwrap()) .set_data(Some(RData::A(Ipv4Addr::new(93, 184, 216, 34)))) .set_dns_class(DNSClass::NONE) .clone(); let message = MessageResponse { header: Header::new(), query: None, answers: iter::repeat(&answer), name_servers: iter::once(&answer), soa: iter::once(&answer), additionals: iter::once(&answer), sig0: vec![], edns: None, }; message .destructive_emit(&mut encoder) .expect("failed to encode"); } let response = Message::from_vec(&buf).expect("failed to decode"); assert!(response.header().truncated()); assert!(response.answer_count() > 1); // should never have written the name server field... assert_eq!(response.name_server_count(), 0); } #[test] fn test_truncation_ridiculous_number_nameservers() { let mut buf = Vec::with_capacity(512); { let mut encoder = BinEncoder::new(&mut buf); encoder.set_max_size(512); let answer = Record::new() .set_name(Name::from_str("www.example.com.").unwrap()) .set_data(Some(RData::A(Ipv4Addr::new(93, 184, 216, 34)))) .set_dns_class(DNSClass::NONE) .clone(); let message = MessageResponse { header: Header::new(), query: None, answers: iter::empty(), name_servers: iter::repeat(&answer), soa: iter::repeat(&answer), additionals: iter::repeat(&answer), sig0: vec![], edns: None, }; message .destructive_emit(&mut encoder) .expect("failed to encode"); } let response = Message::from_vec(&buf).expect("failed to decode"); assert!(response.header().truncated()); assert_eq!(response.answer_count(), 0); assert!(response.name_server_count() > 1); } } trust-dns-server-0.22.0/src/authority/mod.rs000064400000000000000000000025661046102023000171310ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Module for `Catalog` of `Authority` zones which are responsible for storing `RRSet` records. use crate::client::op::ResponseCode; /// Result of an Update operation pub type UpdateResult = Result; mod auth_lookup; #[allow(clippy::module_inception)] mod authority; pub(crate) mod authority_object; mod catalog; mod error; pub(crate) mod message_request; mod message_response; mod zone_type; pub use self::auth_lookup::{ AnyRecords, AuthLookup, AuthLookupIter, LookupRecords, LookupRecordsIter, }; pub use self::authority::{Authority, LookupOptions}; pub use self::authority_object::{AuthorityObject, EmptyLookup, LookupObject}; pub use self::catalog::Catalog; pub use self::error::{LookupError, LookupResult}; pub use self::message_request::{MessageRequest, Queries, UpdateRequest}; pub use self::message_response::{MessageResponse, MessageResponseBuilder}; pub use self::zone_type::ZoneType; #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub use self::authority::DnssecAuthority; trust-dns-server-0.22.0/src/authority/zone_type.rs000064400000000000000000000024711046102023000203610ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
#![allow(deprecated, clippy::use_self)] use serde::{Deserialize, Serialize}; /// The type of zone stored in a Catalog #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)] pub enum ZoneType { /// This authority for a zone Primary, /// This authority for a zone, i.e. the Primary #[deprecated = "please read about Juneteenth"] Master, /// A secondary, i.e. replicated from the Primary Secondary, /// A secondary, i.e. replicated from the Primary #[deprecated = "please read about Juneteenth"] Slave, /// A cached zone with recursive resolver abilities Hint, /// A cached zone where all requests are forwarded to another Resolver Forward, } impl ZoneType { /// Is this an authoritative Authority, i.e. it owns the records of the zone. pub fn is_authoritative(self) -> bool { matches!( self, Self::Primary | Self::Secondary | Self::Master | Self::Slave ) } } trust-dns-server-0.22.0/src/config/dnssec.rs000064400000000000000000000352221046102023000170410ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Configuration types for all security options in trust-dns use std::path::Path; #[cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))] use openssl::{pkey::PKey, stack::Stack, x509::X509}; #[cfg(feature = "dns-over-rustls")] use rustls::{Certificate, PrivateKey}; use serde::Deserialize; use crate::client::error::ParseResult; use crate::client::rr::domain::Name; #[cfg(feature = "dnssec")] use crate::client::rr::{ dnssec::{Algorithm, KeyFormat, KeyPair, Private, SigSigner}, domain::IntoName, }; /// Key pair configuration for DNSSec keys for signing a zone #[derive(Deserialize, PartialEq, Eq, Debug)] pub struct KeyConfig { /// file path to the key pub key_path: String, /// password to use to read the key pub password: Option, /// the type of key stored, see `Algorithm` pub algorithm: String, /// the name to use when signing records, e.g. ns.example.com pub signer_name: Option, /// specify that this key should be used for signing a zone pub is_zone_signing_key: Option, /// specifies that this key can be used for dynamic updates in the zone pub is_zone_update_auth: Option, } impl KeyConfig { /// Return a new KeyConfig /// /// # Arguments /// /// * `key_path` - file path to the key /// * `password` - password to use to read the key /// * `algorithm` - the type of key stored, see `Algorithm` /// * `signer_name` - the name to use when signing records, e.g. ns.example.com /// * `is_zone_signing_key` - specify that this key should be used for signing a zone /// * `is_zone_update_auth` - specifies that this key can be used for dynamic updates in the zone #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn new( key_path: String, password: Option, algorithm: Algorithm, signer_name: String, is_zone_signing_key: bool, is_zone_update_auth: bool, ) -> Self { Self { key_path, password, algorithm: algorithm.as_str().to_string(), signer_name: Some(signer_name), is_zone_signing_key: Some(is_zone_signing_key), is_zone_update_auth: Some(is_zone_update_auth), } } /// path to the key file, either relative to the zone file, or a explicit from the root. 
pub fn key_path(&self) -> &Path { Path::new(&self.key_path) } /// Converts key into #[cfg(any(feature = "dns-over-tls", feature = "dnssec"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "dns-over-tls", feature = "dnssec"))))] pub fn format(&self) -> ParseResult { use crate::client::error::ParseErrorKind; let extension = self.key_path().extension().ok_or_else(|| { ParseErrorKind::Msg(format!( "file lacks extension, e.g. '.pk8': {:?}", self.key_path() )) })?; match extension.to_str() { Some("der") => Ok(KeyFormat::Der), Some("key") => Ok(KeyFormat::Pem), // TODO: deprecate this... Some("pem") => Ok(KeyFormat::Pem), Some("pk8") => Ok(KeyFormat::Pkcs8), e => Err(ParseErrorKind::Msg(format!( "extension not understood, '{:?}': {:?}", e, self.key_path() )) .into()), } } /// Returns the password used to read the key pub fn password(&self) -> Option<&str> { self.password.as_deref() } /// algorithm for for the key, see `Algorithm` for supported algorithms. #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[allow(deprecated)] pub fn algorithm(&self) -> ParseResult { match self.algorithm.as_str() { "RSASHA1" => Ok(Algorithm::RSASHA1), "RSASHA256" => Ok(Algorithm::RSASHA256), "RSASHA1-NSEC3-SHA1" => Ok(Algorithm::RSASHA1NSEC3SHA1), "RSASHA512" => Ok(Algorithm::RSASHA512), "ECDSAP256SHA256" => Ok(Algorithm::ECDSAP256SHA256), "ECDSAP384SHA384" => Ok(Algorithm::ECDSAP384SHA384), "ED25519" => Ok(Algorithm::ED25519), s => Err(format!("unrecognized string {}", s).into()), } } /// the signer name for the key, this defaults to the $ORIGIN aka zone name. pub fn signer_name(&self) -> ParseResult> { if let Some(signer_name) = self.signer_name.as_ref() { let name = Name::parse(signer_name, None)?; return Ok(Some(name)); } Ok(None) } /// specifies that this key should be used to sign the zone /// /// The public key for this must be trusted by a resolver to work. The key must have a private /// portion associated with it. It will be registered as a DNSKEY in the zone. pub fn is_zone_signing_key(&self) -> bool { self.is_zone_signing_key.unwrap_or(false) } /// this is at least a public_key, and can be used for SIG0 dynamic updates. /// /// it will be registered as a KEY record in the zone. 
pub fn is_zone_update_auth(&self) -> bool { self.is_zone_update_auth.unwrap_or(false) } /// Tries to read the defined key into a Signer #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn try_into_signer(&self, signer_name: N) -> Result { let signer_name = signer_name .into_name() .map_err(|e| format!("error loading signer name: {}", e))?; let key = load_key(signer_name, self) .map_err(|e| format!("failed to load key: {:?} msg: {}", self.key_path(), e))?; key.test_key() .map_err(|e| format!("key failed test: {}", e))?; Ok(key) } } /// Certificate format of the file being read #[derive(Deserialize, PartialEq, Eq, Debug, Clone, Copy)] #[serde(rename_all = "snake_case")] #[non_exhaustive] pub enum CertType { /// Pkcs12 formatted certificates and private key (requires OpenSSL) Pkcs12, /// PEM formatted Certificate chain Pem, } impl Default for CertType { fn default() -> Self { Self::Pkcs12 } } /// Format of the private key file to read #[derive(Deserialize, PartialEq, Eq, Debug, Clone, Copy)] #[serde(rename_all = "snake_case")] #[non_exhaustive] pub enum PrivateKeyType { /// PKCS8 formatted key file, allows for a password (requires Rustls) Pkcs8, /// DER formatted key, raw and unencrypted Der, } impl Default for PrivateKeyType { fn default() -> Self { Self::Der } } /// Configuration for a TLS certificate #[derive(Deserialize, PartialEq, Eq, Debug)] pub struct TlsCertConfig { path: String, endpoint_name: String, cert_type: Option, password: Option, private_key: Option, private_key_type: Option, } impl TlsCertConfig { /// path to the pkcs12 der formatted certificate file pub fn get_path(&self) -> &Path { Path::new(&self.path) } /// return the DNS name of the certificate hosted at the TLS endpoint pub fn get_endpoint_name(&self) -> &str { &self.endpoint_name } /// Returns the format type of the certificate file pub fn get_cert_type(&self) -> CertType { self.cert_type.unwrap_or_default() } /// optional password for open the pkcs12, none assumes no password pub fn get_password(&self) -> Option<&str> { self.password.as_deref() } /// returns the path to the private key, as associated with the certificate pub fn get_private_key(&self) -> Option<&Path> { self.private_key.as_deref().map(Path::new) } /// returns the path to the private key pub fn get_private_key_type(&self) -> PrivateKeyType { self.private_key_type.unwrap_or_default() } } /// set of DNSSEC algorithms to use to sign the zone. enable_dnssec must be true. /// these will be lookedup by $file.{key_name}.pem, for backward compatibility /// with previous versions of Trust-DNS, if enable_dnssec is enabled but /// supported_algorithms is not specified, it will default to "RSASHA256" and /// look for the $file.pem for the key. To control key length, or other options /// keys of the specified formats can be generated in PEM format. Instructions /// for custom keys can be found elsewhere. 
/// /// the currently supported set of supported_algorithms are /// ["RSASHA256", "RSASHA512", "ECDSAP256SHA256", "ECDSAP384SHA384", "ED25519"] /// /// keys are listed in pairs of key_name and algorithm, the search path is the /// same directory has the zone $file: /// keys = [ "my_rsa_2048|RSASHA256", "/path/to/my_ed25519|ED25519" ] #[cfg(feature = "dnssec")] fn load_key(zone_name: Name, key_config: &KeyConfig) -> Result { use tracing::info; use std::convert::TryInto; use std::fs::File; use std::io::Read; use time::Duration; let key_path = key_config.key_path(); let algorithm = key_config .algorithm() .map_err(|e| format!("bad algorithm: {}", e))?; let format = key_config .format() .map_err(|e| format!("bad key format: {}", e))?; // read the key in let key: KeyPair = { info!("reading key: {:?}", key_path); let mut file = File::open(&key_path) .map_err(|e| format!("error opening private key file: {:?}: {}", key_path, e))?; let mut key_bytes = Vec::with_capacity(256); file.read_to_end(&mut key_bytes) .map_err(|e| format!("could not read key from: {:?}: {}", key_path, e))?; format .decode_key(&key_bytes, key_config.password(), algorithm) .map_err(|e| format!("could not decode key: {}", e))? }; let name = key_config .signer_name() .map_err(|e| format!("error reading name: {}", e))? .unwrap_or(zone_name); // add the key to the zone // TODO: allow the duration of signatures to be customized let dnskey = key .to_dnskey(algorithm) .map_err(|e| format!("error converting to dnskey: {}", e))?; Ok(SigSigner::dnssec( dnskey, key, name, Duration::weeks(52) .try_into() .map_err(|e| format!("error converting time to std::Duration: {}", e))?, )) } /// Load a Certificate from the path (with openssl) #[cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))] pub fn load_cert( zone_dir: &Path, tls_cert_config: &TlsCertConfig, ) -> Result<((X509, Option>), PKey), String> { use tracing::{info, warn}; use crate::proto::openssl::tls_server::{ read_cert_pem, read_cert_pkcs12, read_key_from_der, read_key_from_pkcs8, }; let path = zone_dir.to_owned().join(tls_cert_config.get_path()); let cert_type = tls_cert_config.get_cert_type(); let password = tls_cert_config.get_password(); let private_key_path = tls_cert_config .get_private_key() .map(|p| zone_dir.to_owned().join(p)); let private_key_type = tls_cert_config.get_private_key_type(); // if it's pkcs12, we'll be collecting the key and certs from that, otherwise continue processing let (cert, cert_chain) = match cert_type { CertType::Pem => { info!("loading TLS PEM certificate from: {:?}", path); read_cert_pem(&path)? } CertType::Pkcs12 => { if private_key_path.is_some() { warn!( "ignoring specified key, using the one in the PKCS12 file: {}", path.display() ); } info!("loading TLS PKCS12 certificate from: {:?}", path); return read_cert_pkcs12(&path, password).map_err(Into::into); } }; // it wasn't plcs12, we need to load the key separately let key = match (private_key_path, private_key_type) { (Some(private_key_path), PrivateKeyType::Pkcs8) => { info!("loading TLS PKCS8 key from: {}", private_key_path.display()); read_key_from_pkcs8(&private_key_path, password)? } (Some(private_key_path), PrivateKeyType::Der) => { info!("loading TLS DER key from: {}", private_key_path.display()); read_key_from_der(&private_key_path)? 
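// (unlike the PKCS#8 arm above, a DER key here is raw and unencrypted, see
// `PrivateKeyType::Der`, so no password is applied)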
} (None, _) => { return Err(format!( "No private key associated with specified certificate" )); } }; Ok(((cert, cert_chain), key)) } /// Load a Certificate from the path (with rustls) #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] pub fn load_cert( zone_dir: &Path, tls_cert_config: &TlsCertConfig, ) -> Result<(Vec, PrivateKey), String> { use tracing::{info, warn}; use crate::proto::rustls::tls_server::{read_cert, read_key, read_key_from_der}; let path = zone_dir.to_owned().join(tls_cert_config.get_path()); let cert_type = tls_cert_config.get_cert_type(); let password = tls_cert_config.get_password(); let private_key_path = tls_cert_config .get_private_key() .map(|p| zone_dir.to_owned().join(p)); let private_key_type = tls_cert_config.get_private_key_type(); let cert = match cert_type { CertType::Pem => { info!("loading TLS PEM certificate chain from: {}", path.display()); read_cert(&path).map_err(|e| format!("error reading cert: {}", e))? } CertType::Pkcs12 => { return Err( "PKCS12 is not supported with Rustls for certificate, use PEM encoding".to_string(), ); } }; let key = match (private_key_path, private_key_type) { (Some(private_key_path), PrivateKeyType::Pkcs8) => { info!("loading TLS PKCS8 key from: {}", private_key_path.display()); if password.is_some() { warn!("Password for key supplied, but Rustls does not support encrypted PKCS8"); } read_key(&private_key_path)? } (Some(private_key_path), PrivateKeyType::Der) => { info!("loading TLS DER key from: {}", private_key_path.display()); read_key_from_der(&private_key_path)? } (None, _) => return Err("No private key associated with specified certificate".to_string()), }; Ok((cert, key)) } trust-dns-server-0.22.0/src/config/mod.rs000064400000000000000000000207061046102023000163420ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Configuration module for the server binary, `named`. pub mod dnssec; use std::fs::File; use std::io::Read; use std::net::{AddrParseError, Ipv4Addr, Ipv6Addr}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; use cfg_if::cfg_if; use serde::{self, Deserialize}; use toml; use crate::client::rr::Name; use crate::proto::error::ProtoResult; use crate::authority::ZoneType; use crate::error::{ConfigError, ConfigResult}; use crate::store::StoreConfig; static DEFAULT_PATH: &str = "/var/named"; // TODO what about windows (do I care? ;) static DEFAULT_PORT: u16 = 53; static DEFAULT_TLS_PORT: u16 = 853; static DEFAULT_HTTPS_PORT: u16 = 443; static DEFAULT_QUIC_PORT: u16 = 853; // https://www.ietf.org/archive/id/draft-ietf-dprive-dnsoquic-11.html#name-reservation-of-dedicated-po static DEFAULT_TCP_REQUEST_TIMEOUT: u64 = 5; /// Server configuration #[derive(Deserialize, Debug)] pub struct Config { /// The list of IPv4 addresses to listen on #[serde(default)] listen_addrs_ipv4: Vec, /// This list of IPv6 addresses to listen on #[serde(default)] listen_addrs_ipv6: Vec, /// Port on which to listen (associated to all IPs) listen_port: Option, /// Secure port to listen on tls_listen_port: Option, /// HTTPS port to listen on https_listen_port: Option, /// QUIC port to listen on quic_listen_port: Option, /// Timeout associated to a request before it is closed. 
tcp_request_timeout: Option, /// Level at which to log, default is INFO log_level: Option, /// Base configuration directory, i.e. root path for zones directory: Option, /// List of configurations for zones #[serde(default)] zones: Vec, /// Certificate to associate to TLS connections (currently the same is used for HTTPS and TLS) #[cfg(feature = "dnssec")] tls_cert: Option, } impl Config { /// read a Config file from the file specified at path. pub fn read_config(path: &Path) -> ConfigResult { let mut file: File = File::open(path)?; let mut toml: String = String::new(); file.read_to_string(&mut toml)?; toml.parse().map_err(Into::into) } /// set of listening ipv4 addresses (for TCP and UDP) pub fn get_listen_addrs_ipv4(&self) -> Result, AddrParseError> { self.listen_addrs_ipv4.iter().map(|s| s.parse()).collect() } /// set of listening ipv6 addresses (for TCP and UDP) pub fn get_listen_addrs_ipv6(&self) -> Result, AddrParseError> { self.listen_addrs_ipv6.iter().map(|s| s.parse()).collect() } /// port on which to listen for connections on specified addresses pub fn get_listen_port(&self) -> u16 { self.listen_port.unwrap_or(DEFAULT_PORT) } /// port on which to listen for TLS connections pub fn get_tls_listen_port(&self) -> u16 { self.tls_listen_port.unwrap_or(DEFAULT_TLS_PORT) } /// port on which to listen for HTTPS connections pub fn get_https_listen_port(&self) -> u16 { self.https_listen_port.unwrap_or(DEFAULT_HTTPS_PORT) } /// port on which to listen for QUIC connections pub fn get_quic_listen_port(&self) -> u16 { self.quic_listen_port.unwrap_or(DEFAULT_QUIC_PORT) } /// default timeout for all TCP connections before forceably shutdown pub fn get_tcp_request_timeout(&self) -> Duration { Duration::from_secs( self.tcp_request_timeout .unwrap_or(DEFAULT_TCP_REQUEST_TIMEOUT), ) } /// specify the log level which should be used, ["Trace", "Debug", "Info", "Warn", "Error"] pub fn get_log_level(&self) -> tracing::Level { if let Some(ref level_str) = self.log_level { tracing::Level::from_str(level_str).unwrap_or(tracing::Level::INFO) } else { tracing::Level::INFO } } /// the path for all zone configurations, defaults to `/var/named` pub fn get_directory(&self) -> &Path { self.directory .as_ref() .map_or(Path::new(DEFAULT_PATH), Path::new) } /// the set of zones which should be loaded pub fn get_zones(&self) -> &[ZoneConfig] { &self.zones } /// the tls certificate to use for accepting tls connections pub fn get_tls_cert(&self) -> Option<&dnssec::TlsCertConfig> { cfg_if! { if #[cfg(feature = "dnssec")] { self.tls_cert.as_ref() } else { None } } } } impl FromStr for Config { type Err = ConfigError; fn from_str(toml: &str) -> ConfigResult { toml::de::from_str(toml).map_err(Into::into) } } /// Configuration for a zone #[derive(Deserialize, PartialEq, Eq, Debug)] pub struct ZoneConfig { /// name of the zone pub zone: String, // TODO: make Domain::Name decodable /// type of the zone pub zone_type: ZoneType, /// location of the file (short for StoreConfig::FileConfig{zone_file_path}) pub file: Option, /// Deprecated allow_update, this is a Store option pub allow_update: Option, /// Allow AXFR (TODO: need auth) pub allow_axfr: Option, /// Enable DnsSec TODO: should this move to StoreConfig? pub enable_dnssec: Option, /// Keys for use by the zone #[serde(default)] pub keys: Vec, /// Store configurations, TODO: allow chained Stores #[serde(default)] pub stores: Option, } impl ZoneConfig { /// Return a new zone configuration /// /// # Arguments /// /// * `zone` - name of a zone, e.g. 
example.com /// * `zone_type` - Type of zone, e.g. Primary, Secondary, etc. /// * `file` - relative to Config base path, to the zone file /// * `allow_update` - enable dynamic updates /// * `allow_axfr` - enable AXFR transfers /// * `enable_dnssec` - enable signing of the zone for DNSSec /// * `keys` - list of private and public keys used to sign a zone pub fn new( zone: String, zone_type: ZoneType, file: String, allow_update: Option, allow_axfr: Option, enable_dnssec: Option, keys: Vec, ) -> Self { Self { zone, zone_type, file: Some(file), allow_update, allow_axfr, enable_dnssec, keys, stores: None, } } // TODO this is a little ugly for the parse, b/c there is no terminal char /// returns the name of the Zone, i.e. the `example.com` of `www.example.com.` pub fn get_zone(&self) -> ProtoResult { Name::parse(&self.zone, Some(&Name::new())) } /// the type of the zone pub fn get_zone_type(&self) -> ZoneType { self.zone_type } /// path to the zone file, i.e. the base set of original records in the zone /// /// this is ony used on first load, if dynamic update is enabled for the zone, then the journal /// file is the actual source of truth for the zone. pub fn get_file(&self) -> PathBuf { // TODO: Option on PathBuf PathBuf::from(self.file.as_ref().expect("file was none")) } /// enable dynamic updates for the zone (see SIG0 and the registered keys) pub fn is_update_allowed(&self) -> bool { self.allow_update.unwrap_or(false) } /// enable AXFR transfers pub fn is_axfr_allowed(&self) -> bool { self.allow_axfr.unwrap_or(false) } /// declare that this zone should be signed, see keys for configuration of the keys for signing pub fn is_dnssec_enabled(&self) -> bool { cfg_if! { if #[cfg(feature = "dnssec")] { self.enable_dnssec.unwrap_or(false) } else { false } } } /// the configuration for the keys used for auth and/or dnssec zone signing. #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn get_keys(&self) -> &[dnssec::KeyConfig] { &self.keys } } trust-dns-server-0.22.0/src/error/config_error.rs000064400000000000000000000043001046102023000201150ustar 00000000000000// Copyright 2015-2020 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{fmt, io}; use thiserror::Error; #[cfg(feature = "backtrace")] use crate::proto::{trace, ExtBacktrace}; /// An alias for results returned by functions of this crate pub type Result = ::std::result::Result; /// The error kind for errors that get returned in the crate #[derive(Debug, Error)] #[non_exhaustive] pub enum ErrorKind { // foreign /// An error got returned from IO #[error("io error: {0}")] Io(#[from] io::Error), /// An error occurred while decoding toml data #[error("toml decode error: {0}")] TomlDecode(#[from] toml::de::Error), /// An error occurred while parsing a zone file #[error("failed to parse the zone file: {0}")] ZoneParse(#[from] trust_dns_client::error::ParseError), } /// The error type for errors that get returned in the crate #[derive(Debug)] pub struct Error { kind: Box, #[cfg(feature = "backtrace")] backtrack: Option, } impl Error { /// Get the kind of the error pub fn kind(&self) -> &ErrorKind { &self.kind } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { cfg_if::cfg_if! 
{ if #[cfg(feature = "backtrace")] { if let Some(ref backtrace) = self.backtrack { fmt::Display::fmt(&self.kind, f)?; fmt::Debug::fmt(backtrace, f) } else { fmt::Display::fmt(&self.kind, f) } } else { fmt::Display::fmt(&self.kind, f) } } } } impl From for Error where E: Into, { fn from(error: E) -> Self { let kind: ErrorKind = error.into(); Self { kind: Box::new(kind), #[cfg(feature = "backtrace")] backtrack: trace!(), } } } trust-dns-server-0.22.0/src/error/mod.rs000064400000000000000000000020361046102023000162220ustar 00000000000000/* * Copyright (C) 2015 Benjamin Fry * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! All defined errors for Trust-DNS mod config_error; mod persistence_error; pub use self::config_error::Error as ConfigError; pub use self::persistence_error::Error as PersistenceError; pub use self::config_error::ErrorKind as ConfigErrorKind; pub use self::persistence_error::ErrorKind as PersistenceErrorKind; pub use self::config_error::Result as ConfigResult; pub use self::persistence_error::Result as PersistenceResult; trust-dns-server-0.22.0/src/error/persistence_error.rs000064400000000000000000000060141046102023000212000ustar 00000000000000// Copyright 2015-2020 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::fmt; use crate::proto::error::*; use thiserror::Error; #[cfg(feature = "backtrace")] use crate::proto::{trace, ExtBacktrace}; /// An alias for results returned by functions of this crate pub type Result = ::std::result::Result; /// The error kind for errors that get returned in the crate #[derive(Debug, Error)] #[non_exhaustive] pub enum ErrorKind { /// An error that occurred when recovering from journal #[error("error recovering from journal: {}", _0)] Recovery(&'static str), /// The number of inserted records didn't match the expected amount #[error("wrong insert count: {} expect: {}", got, expect)] WrongInsertCount { /// The number of inserted records got: usize, /// The number of records expected to be inserted expect: usize, }, // foreign /// An error got returned by the trust-dns-proto crate #[error("proto error: {0}")] Proto(#[from] ProtoError), /// An error got returned from the rusqlite crate #[cfg(feature = "sqlite")] #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))] #[error("sqlite error: {0}")] Sqlite(#[from] rusqlite::Error), /// A request timed out #[error("request timed out")] Timeout, } /// The error type for errors that get returned in the crate #[derive(Debug, Error)] pub struct Error { kind: ErrorKind, #[cfg(feature = "backtrace")] backtrack: Option, } impl Error { /// Get the kind of the error pub fn kind(&self) -> &ErrorKind { &self.kind } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { cfg_if::cfg_if! 
{ if #[cfg(feature = "backtrace")] { if let Some(ref backtrace) = self.backtrack { fmt::Display::fmt(&self.kind, f)?; fmt::Debug::fmt(backtrace, f) } else { fmt::Display::fmt(&self.kind, f) } } else { fmt::Display::fmt(&self.kind, f) } } } } impl From for Error { fn from(kind: ErrorKind) -> Self { Self { kind, #[cfg(feature = "backtrace")] backtrack: trace!(), } } } impl From for Error { fn from(e: ProtoError) -> Self { match *e.kind() { ProtoErrorKind::Timeout => ErrorKind::Timeout.into(), _ => ErrorKind::from(e).into(), } } } #[cfg(feature = "sqlite")] #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))] impl From for Error { fn from(e: rusqlite::Error) -> Self { ErrorKind::from(e).into() } } trust-dns-server-0.22.0/src/lib.rs000064400000000000000000000037231046102023000150640ustar 00000000000000/* * Copyright (C) 2015 Benjamin Fry * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // LIBRARY WARNINGS #![warn( clippy::default_trait_access, clippy::dbg_macro, clippy::print_stdout, clippy::unimplemented, clippy::use_self, missing_copy_implementations, missing_docs, non_snake_case, non_upper_case_globals, rust_2018_idioms, unreachable_pub )] #![allow( clippy::single_component_path_imports, clippy::upper_case_acronyms, // can be removed on a major release boundary )] #![recursion_limit = "2048"] #![cfg_attr(docsrs, feature(doc_cfg))] //! Trust-DNS is intended to be a fully compliant domain name server and client library. //! //! # Goals //! //! * Only safe Rust //! * All errors handled //! * Simple to manage servers //! * High level abstraction for clients //! * Secure dynamic update //! * New features for securing public information pub use trust_dns_client as client; pub use trust_dns_proto as proto; #[cfg(feature = "trust-dns-recursor")] #[cfg_attr(docsrs, doc(cfg(feature = "recursor")))] pub use trust_dns_recursor as recursor; #[cfg(feature = "trust-dns-resolver")] #[cfg_attr(docsrs, doc(cfg(feature = "resolver")))] pub use trust_dns_resolver as resolver; pub mod authority; pub mod config; pub mod error; pub mod server; pub mod store; pub use self::server::ServerFuture; /// Returns the current version of Trust-DNS pub fn version() -> &'static str { env!("CARGO_PKG_VERSION") } trust-dns-server-0.22.0/src/server/https_handler.rs000064400000000000000000000073351046102023000204660ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
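// Illustrative sketch (not part of the upstream file): parsing a minimal `named`-style
// configuration with the `Config`/`ZoneConfig` types from src/config/mod.rs shown
// earlier. The zone file name "example.com.zone" and the port are placeholders.
#[cfg(test)]
mod config_sketch {
    use std::str::FromStr;

    use crate::config::Config;

    #[test]
    fn parse_minimal_config() {
        let toml = r#"
            listen_addrs_ipv4 = ["0.0.0.0"]
            listen_port = 1053

            [[zones]]
            zone = "example.com"
            zone_type = "Primary"
            file = "example.com.zone"
        "#;

        let config = Config::from_str(toml).expect("failed to parse config");
        assert_eq!(config.get_listen_port(), 1053);
        assert_eq!(config.get_zones().len(), 1);
    }
}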
use std::{io, net::SocketAddr, sync::Arc}; use bytes::{Bytes, BytesMut}; use futures_util::lock::Mutex; use h2::server; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{debug, warn}; use trust_dns_proto::rr::Record; use crate::{ authority::MessageResponse, proto::https::https_server, server::{ request_handler::RequestHandler, response_handler::ResponseHandler, server_future, Protocol, ResponseInfo, }, }; pub(crate) async fn h2_handler( handler: Arc, io: I, src_addr: SocketAddr, dns_hostname: Arc, ) where T: RequestHandler, I: AsyncRead + AsyncWrite + Unpin, { let dns_hostname = dns_hostname.clone(); // Start the HTTP/2.0 connection handshake let mut h2 = match server::handshake(io).await { Ok(h2) => h2, Err(err) => { warn!("handshake error from {}: {}", src_addr, err); return; } }; // Accept all inbound HTTP/2.0 streams sent over the // connection. while let Some(next_request) = h2.accept().await { let (request, respond) = match next_request { Ok(next_request) => next_request, Err(err) => { warn!("error accepting request {}: {}", src_addr, err); return; } }; debug!("Received request: {:#?}", request); let dns_hostname = dns_hostname.clone(); let handler = handler.clone(); let responder = HttpsResponseHandle(Arc::new(Mutex::new(respond))); match https_server::message_from(dns_hostname, request).await { Ok(bytes) => handle_request(bytes, src_addr, handler, responder).await, Err(err) => warn!("error while handling request from {}: {}", src_addr, err), }; // we'll continue handling requests from here. } } async fn handle_request( bytes: BytesMut, src_addr: SocketAddr, handler: Arc, responder: HttpsResponseHandle, ) where T: RequestHandler, { server_future::handle_request(&bytes, src_addr, Protocol::Https, handler, responder).await } #[derive(Clone)] struct HttpsResponseHandle(Arc>>); #[async_trait::async_trait] impl ResponseHandler for HttpsResponseHandle { async fn send_response<'a>( &mut self, response: MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, ) -> io::Result { use crate::proto::https::response; use crate::proto::https::HttpsError; use crate::proto::serialize::binary::BinEncoder; let mut bytes = Vec::with_capacity(512); // mut block let info = { let mut encoder = BinEncoder::new(&mut bytes); response.destructive_emit(&mut encoder)? }; let bytes = Bytes::from(bytes); let response = response::new(bytes.len())?; debug!("sending response: {:#?}", response); let mut stream = self .0 .lock() .await .send_response(response, false) .map_err(HttpsError::from)?; stream.send_data(bytes, true).map_err(HttpsError::from)?; Ok(info) } } trust-dns-server-0.22.0/src/server/mod.rs000064400000000000000000000016061046102023000164010ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! `Server` component for hosting a domain name servers operations. 
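// Illustrative sketch (not part of the upstream file): serving a `Catalog` over UDP.
// Assumes `Catalog` implements `RequestHandler` and that `ServerFuture` exposes
// `new`/`register_socket`/`block_until_done` as in the crate's examples; the bind
// address is a placeholder.
#[cfg(test)]
#[allow(dead_code)]
async fn sketch_serve_udp(catalog: crate::authority::Catalog) -> std::io::Result<()> {
    // bind a UDP socket and hand it to the server future
    let socket = tokio::net::UdpSocket::bind("127.0.0.1:1053").await?;
    let mut server = ServerFuture::new(catalog);
    server.register_socket(socket);

    // drive all registered sockets/listeners to completion
    server
        .block_until_done()
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
}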
#[cfg(feature = "dns-over-https")] mod https_handler; mod protocol; #[cfg(feature = "dns-over-quic")] mod quic_handler; mod request_handler; mod response_handler; mod server_future; mod timeout_stream; pub use self::protocol::Protocol; pub use self::request_handler::{Request, RequestHandler, RequestInfo, ResponseInfo}; pub use self::response_handler::{ResponseHandle, ResponseHandler}; pub use self::server_future::ServerFuture; pub use self::timeout_stream::TimeoutStream; trust-dns-server-0.22.0/src/server/protocol.rs000064400000000000000000000027741046102023000174720ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::fmt; /// For tracking purposes of inbound requests, which protocol was used #[non_exhaustive] #[derive(Clone, Copy)] pub enum Protocol { /// User Datagram Protocol, the default for all DNS requests Udp, /// Transmission Control Protocol, used in DNS primarily for large responses (avoids truncation) and AXFR/IXFR Tcp, /// Transport Layer Security over TCP, for establishing a privacy, DoT (similar to DoH) Tls, /// Datagram Transport Layer Security over UDP Dtls, /// HTTP over TLS, DNS over HTTPS, aka DoH (similar to DoT) Https, /// Quic, DNS over Quic, aka DoQ (similar to DoH) Quic, } impl fmt::Display for Protocol { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { let s = match self { Self::Udp => "UDP", Self::Tcp => "TCP", Self::Tls => "TLS", Self::Dtls => "DTLS", Self::Https => "HTTPS", Self::Quic => "QUIC", }; f.write_str(s) } } impl fmt::Debug for Protocol { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fmt::Display::fmt(self, f) } } trust-dns-server-0.22.0/src/server/quic_handler.rs000064400000000000000000000072651046102023000202670ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{io, net::SocketAddr, sync::Arc}; use bytes::{Bytes, BytesMut}; use futures_util::lock::Mutex; use tracing::{debug, warn}; use trust_dns_proto::{ error::ProtoError, quic::{DoqErrorCode, QuicStream}, rr::Record, }; use crate::{ authority::MessageResponse, proto::quic::QuicStreams, server::{ request_handler::RequestHandler, response_handler::ResponseHandler, server_future, Protocol, ResponseInfo, }, }; pub(crate) async fn quic_handler( handler: Arc, mut quic_streams: QuicStreams, src_addr: SocketAddr, _dns_hostname: Arc, ) -> Result<(), ProtoError> where T: RequestHandler, { // TODO: we should make this configurable let mut max_requests = 100u32; // Accept all inbound quic streams sent over the connection. 
while let Some(next_request) = quic_streams.next().await { let mut request_stream = match next_request { Ok(next_request) => next_request, Err(err) => { warn!("error accepting request {}: {}", src_addr, err); return Err(err); } }; let request = request_stream.receive_bytes().await?; debug!( "Received bytes {} from {src_addr} {request:?}", request.len() ); let handler = handler.clone(); let stream = Arc::new(Mutex::new(request_stream)); let responder = QuicResponseHandle(stream.clone()); handle_request(request, src_addr, handler, responder).await; max_requests -= 1; if max_requests == 0 { warn!("exceeded request count, shutting down quic conn: {src_addr}"); // DOQ_NO_ERROR (0x0): No error. This is used when the connection or stream needs to be closed, but there is no error to signal. stream.lock().await.stop(DoqErrorCode::NoError)?; break; } // we'll continue handling requests from here. } Ok(()) } async fn handle_request( bytes: BytesMut, src_addr: SocketAddr, handler: Arc, responder: QuicResponseHandle, ) where T: RequestHandler, { server_future::handle_request(&bytes, src_addr, Protocol::Quic, handler, responder).await } #[derive(Clone)] struct QuicResponseHandle(Arc>); #[async_trait::async_trait] impl ResponseHandler for QuicResponseHandle { // TODO: rethink this entire interface async fn send_response<'a>( &mut self, mut response: MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, ) -> io::Result { use crate::proto::serialize::binary::BinEncoder; // The id should always be 0 in DoQ response.header_mut().set_id(0); let mut bytes = Vec::with_capacity(512); let info = { let mut encoder = BinEncoder::new(&mut bytes); response.destructive_emit(&mut encoder)? }; let bytes = Bytes::from(bytes); debug!("sending quic response: {}", bytes.len()); let mut lock = self.0.lock().await; lock.send_bytes(bytes).await?; lock.finish().await?; Ok(info) } } trust-dns-server-0.22.0/src/server/request_handler.rs000064400000000000000000000112501046102023000210030ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Request Handler for incoming requests use std::net::SocketAddr; use crate::{ authority::MessageRequest, client::op::LowerQuery, proto::op::{Header, ResponseCode}, server::{Protocol, ResponseHandler}, }; /// An incoming request to the DNS catalog #[derive(Debug)] pub struct Request { /// Message with the associated query or update data message: MessageRequest, /// Source address of the Client src: SocketAddr, /// Protocol of the request protocol: Protocol, } impl Request { /// Build a new requests with the inbound message, source address, and protocol. /// /// This will return an error on bad verification. pub fn new(message: MessageRequest, src: SocketAddr, protocol: Protocol) -> Self { Self { message, src, protocol, } } /// Return just the header and request information from the Request Message pub fn request_info(&self) -> RequestInfo<'_> { RequestInfo { src: self.src, protocol: self.protocol, header: self.message.header(), query: self.message.query(), } } /// The IP address from which the request originated. 
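///
/// For UDP this is the source address of the received datagram; for the
/// stream-based transports (TCP, TLS, HTTPS, QUIC) it is the peer address of the
/// accepted connection.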
pub fn src(&self) -> SocketAddr { self.src } /// The protocol that was used for the request pub fn protocol(&self) -> Protocol { self.protocol } } impl std::ops::Deref for Request { type Target = MessageRequest; fn deref(&self) -> &Self::Target { &self.message } } // TODO: add ProtocolInfo that would have TLS details or other additional things... /// A narrow view of the Request, specifically a verified single query for the request #[non_exhaustive] #[derive(Clone)] pub struct RequestInfo<'a> { /// The source address from which the request came pub src: SocketAddr, /// The protocol used for the request pub protocol: Protocol, /// The header from the original request pub header: &'a Header, /// The query from the request pub query: &'a LowerQuery, } impl<'a> RequestInfo<'a> { /// Construct a new RequestInfo /// /// # Arguments /// /// * `src` - The source address from which the request came /// * `protocol` - The protocol used for the request /// * `header` - The header from the original request /// * `query` - The query from the request, LowerQuery is intended to reduce complexity for lookups in authorities pub fn new( src: SocketAddr, protocol: Protocol, header: &'a Header, query: &'a LowerQuery, ) -> Self { Self { src, protocol, header, query, } } } /// Information about the response sent for a request #[derive(Clone, Copy)] #[repr(transparent)] pub struct ResponseInfo(Header); impl ResponseInfo { pub(crate) fn serve_failed() -> Self { let mut header = Header::new(); header.set_response_code(ResponseCode::ServFail); header.into() } } impl From
for ResponseInfo { fn from(header: Header) -> Self { Self(header) } } impl std::ops::Deref for ResponseInfo { type Target = Header; fn deref(&self) -> &Self::Target { &self.0 } } /// Trait for handling incoming requests, and providing a message response. #[async_trait::async_trait] pub trait RequestHandler: Send + Sync + Unpin + 'static { /// Determines what needs to happen given the type of request, i.e. Query or Update. /// /// # Arguments /// /// * `request` - the requested action to perform. /// * `response_handle` - handle to which a return message should be sent async fn handle_request( &self, request: &Request, response_handle: R, ) -> ResponseInfo; } #[cfg(test)] mod tests { use trust_dns_client::op::{Header, Query}; use crate::server::Protocol; use super::RequestInfo; #[test] fn request_info_clone() { let query: Query = Query::new(); let header = Header::new(); let lower_query = query.into(); let origin = RequestInfo::new( "127.0.0.1:3000".parse().unwrap(), Protocol::Udp, &header, &lower_query, ); let cloned = origin.clone(); assert_eq!(origin.header, cloned.header); } } trust-dns-server-0.22.0/src/server/response_handler.rs000064400000000000000000000062171046102023000211600ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{io, net::SocketAddr}; use tracing::debug; use trust_dns_proto::rr::Record; use crate::{ authority::MessageResponse, client::serialize::binary::BinEncoder, proto::{xfer::SerialMessage, BufDnsStreamHandle, DnsStreamHandle}, server::ResponseInfo, }; /// A handler for send a response to a client #[async_trait::async_trait] pub trait ResponseHandler: Clone + Send + Sync + Unpin + 'static { // TODO: add associated error type //type Error; /// Serializes and sends a message to to the wrapped handle /// /// self is consumed as only one message should ever be sent in response to a Request async fn send_response<'a>( &mut self, response: MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, ) -> io::Result; } /// A handler for wrapping a BufStreamHandle, which will properly serialize the message and add the /// associated destination. 
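///
/// One `ResponseHandle` is created per inbound `SerialMessage` (see
/// `handle_raw_request` in `server_future`), pairing the client's source address
/// with the stream handle used to send the reply over the same transport.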
#[derive(Clone)] pub struct ResponseHandle { dst: SocketAddr, stream_handle: BufDnsStreamHandle, } impl ResponseHandle { /// Returns a new `ResponseHandle` for sending a response message pub fn new(dst: SocketAddr, stream_handle: BufDnsStreamHandle) -> Self { Self { dst, stream_handle } } } #[async_trait::async_trait] impl ResponseHandler for ResponseHandle { /// Serializes and sends a message to to the wrapped handle /// /// self is consumed as only one message should ever be sent in response to a Request async fn send_response<'a>( &mut self, response: MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, ) -> io::Result { debug!( "response: {} response_code: {}", response.header().id(), response.header().response_code(), ); let mut buffer = Vec::with_capacity(512); let encode_result = { let mut encoder = BinEncoder::new(&mut buffer); response.destructive_emit(&mut encoder) }; let info = encode_result.map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("error encoding message: {}", e), ) })?; self.stream_handle .send(SerialMessage::new(buffer, self.dst)) .map_err(|_| io::Error::new(io::ErrorKind::Other, "unknown"))?; Ok(info) } } trust-dns-server-0.22.0/src/server/server_future.rs000064400000000000000000001163701046102023000205270ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{ io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, sync::Arc, time::Duration, }; use futures_util::{future, StreamExt}; #[cfg(feature = "dns-over-rustls")] use rustls::{Certificate, PrivateKey}; use tokio::{net, task::JoinHandle}; use tracing::{debug, info, warn}; use trust_dns_proto::rr::Record; #[cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))] use crate::proto::openssl::tls_server::*; use crate::{ authority::{MessageRequest, MessageResponseBuilder}, client::op::LowerQuery, proto::{ error::ProtoError, iocompat::AsyncIoTokioAsStd, op::{Edns, Header, Query, ResponseCode}, serialize::binary::{BinDecodable, BinDecoder}, tcp::TcpStream, udp::UdpStream, xfer::SerialMessage, BufDnsStreamHandle, }, server::{Protocol, Request, RequestHandler, ResponseHandle, ResponseHandler, TimeoutStream}, }; // TODO, would be nice to have a Slab for buffers here... /// A Futures based implementation of a DNS server pub struct ServerFuture { handler: Arc, tasks: Vec, } impl ServerFuture { /// Creates a new ServerFuture with the specified Handler. pub fn new(handler: T) -> Self { Self { handler: Arc::new(handler), tasks: vec![], } } /// Register a UDP socket. Should be bound before calling this function. pub fn register_socket(&mut self, socket: net::UdpSocket) { debug!("registering udp: {:?}", socket); // create the new UdpStream, the IP address isn't relevant, and ideally goes essentially no where. // the address used is acquired from the inbound queries let (mut buf_stream, stream_handle) = UdpStream::with_bound(socket, ([127, 255, 255, 254], 0).into()); //let request_stream = RequestStream::new(buf_stream, stream_handle); let handler = self.handler.clone(); // this spawns a ForEach future which handles all the requests into a Handler. 
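// Each datagram is handled on its own spawned task once the source address has
// been sanitized, so a slow request cannot stall the UDP receive loop itself.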
let task = tokio::spawn({ async move { while let Some(message) = buf_stream.next().await { let message = match message { Err(e) => { warn!("error receiving message on udp_socket: {}", e); break; } Ok(message) => message, }; let src_addr = message.addr(); debug!("received udp request from: {}", src_addr); // verify that the src address is safe for responses if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); let stream_handle = stream_handle.with_remote_addr(src_addr); tokio::spawn(async move { self::handle_raw_request(message, Protocol::Udp, handler, stream_handle) .await; }); } // TODO: let's consider capturing all the initial configuration details so that the socket could be recreated... Err(ProtoError::from("unexpected close of UDP socket")) } }); self.tasks.push(ServerTask(task)); } /// Register a UDP socket. Should be bound before calling this function. pub fn register_socket_std(&mut self, socket: std::net::UdpSocket) -> io::Result<()> { self.register_socket(net::UdpSocket::from_std(socket)?); Ok(()) } /// Register a TcpListener to the Server. This should already be bound to either an IPv6 or an /// IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. pub fn register_listener(&mut self, listener: net::TcpListener, timeout: Duration) { debug!("register tcp: {:?}", listener); let handler = self.handler.clone(); // for each incoming request... let task = tokio::spawn({ async move { loop { let tcp_stream = listener.accept().await; let (tcp_stream, src_addr) = match tcp_stream { Ok((t, s)) => (t, s), Err(e) => { debug!("error receiving TCP tcp_stream error: {}", e); continue; } }; // verify that the src address is safe for responses if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); // and spawn to the io_loop tokio::spawn(async move { debug!("accepted request from: {}", src_addr); // take the created stream... let (buf_stream, stream_handle) = TcpStream::from_stream(AsyncIoTokioAsStd(tcp_stream), src_addr); let mut timeout_stream = TimeoutStream::new(buf_stream, timeout); //let request_stream = RequestStream::new(timeout_stream, stream_handle); while let Some(message) = timeout_stream.next().await { let message = match message { Ok(message) => message, Err(e) => { debug!( "error in TCP request_stream src: {} error: {}", src_addr, e ); // we're going to bail on this connection... return; } }; // we don't spawn here to limit clients from getting too many resources self::handle_raw_request( message, Protocol::Tcp, handler.clone(), stream_handle.clone(), ) .await; } }); } } }); self.tasks.push(ServerTask(task)); } /// Register a TcpListener to the Server. This should already be bound to either an IPv6 or an /// IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. 
Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. pub fn register_listener_std( &mut self, listener: std::net::TcpListener, timeout: Duration, ) -> io::Result<()> { self.register_listener(net::TcpListener::from_std(listener)?, timeout); Ok(()) } /// Register a TlsListener to the Server. The TlsListener should already be bound to either an /// IPv6 or an IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. /// * `pkcs12` - certificate used to announce to clients #[cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))] #[cfg_attr( docsrs, doc(cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))) )] pub fn register_tls_listener( &mut self, listener: net::TcpListener, timeout: Duration, certificate_and_key: ((X509, Option>), PKey), ) -> io::Result<()> { use crate::proto::openssl::{tls_server, TlsStream}; use openssl::ssl::Ssl; use std::pin::Pin; use tokio_openssl::SslStream as TokioSslStream; let ((cert, chain), key) = certificate_and_key; let handler = self.handler.clone(); debug!("registered tcp: {:?}", listener); let tls_acceptor = Box::pin(tls_server::new_acceptor(cert, chain, key)?); // for each incoming request... 
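// The accept loop below never performs the TLS handshake inline: every accepted
// TCP stream is moved onto its own task where the openssl handshake and the
// subsequent DNS exchange take place, mirroring the plain TCP listener above.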
let task = tokio::spawn({ async move { loop { let tcp_stream = listener.accept().await; let (tcp_stream, src_addr) = match tcp_stream { Ok((t, s)) => (t, s), Err(e) => { debug!("error receiving TLS tcp_stream error: {}", e); continue; } }; // verify that the src address is safe for responses if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); let tls_acceptor = tls_acceptor.clone(); // kick out to a different task immediately, let them do the TLS handshake tokio::spawn(async move { debug!("starting TLS request from: {}", src_addr); // perform the TLS let mut tls_stream = match Ssl::new(tls_acceptor.context()) .and_then(|ssl| TokioSslStream::new(ssl, tcp_stream)) { Ok(tls_stream) => tls_stream, Err(e) => { debug!("tls handshake src: {} error: {}", src_addr, e); return (); } }; match Pin::new(&mut tls_stream).accept().await { Ok(()) => {} Err(e) => { debug!("tls handshake src: {} error: {}", src_addr, e); return (); } }; debug!("accepted TLS request from: {}", src_addr); let (buf_stream, stream_handle) = TlsStream::from_stream(AsyncIoTokioAsStd(tls_stream), src_addr); let mut timeout_stream = TimeoutStream::new(buf_stream, timeout); while let Some(message) = timeout_stream.next().await { let message = match message { Ok(message) => message, Err(e) => { debug!( "error in TLS request_stream src: {:?} error: {}", src_addr, e ); // kill this connection return (); } }; self::handle_raw_request( message, Protocol::Tls, handler.clone(), stream_handle.clone(), ) .await; } }); } } }); self.tasks.push(ServerTask(task)); Ok(()) } /// Register a TlsListener to the Server. The TlsListener should already be bound to either an /// IPv6 or an IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. /// * `pkcs12` - certificate used to announce to clients #[cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))] #[cfg_attr( docsrs, doc(cfg(all(feature = "dns-over-openssl", not(feature = "dns-over-rustls")))) )] pub fn register_tls_listener_std( &mut self, listener: std::net::TcpListener, timeout: Duration, certificate_and_key: ((X509, Option>), PKey), ) -> io::Result<()> { self.register_tls_listener( net::TcpListener::from_std(listener)?, timeout, certificate_and_key, ) } /// Register a TlsListener to the Server. The TlsListener should already be bound to either an /// IPv6 or an IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. 
In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. /// * `pkcs12` - certificate used to announce to clients #[cfg(feature = "dns-over-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-rustls")))] pub fn register_tls_listener( &mut self, listener: net::TcpListener, timeout: Duration, certificate_and_key: (Vec, PrivateKey), ) -> io::Result<()> { use crate::proto::rustls::{tls_from_stream, tls_server}; use tokio_rustls::TlsAcceptor; let handler = self.handler.clone(); debug!("registered tcp: {:?}", listener); let tls_acceptor = tls_server::new_acceptor(certificate_and_key.0, certificate_and_key.1) .map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("error creating TLS acceptor: {}", e), ) })?; let tls_acceptor = TlsAcceptor::from(Arc::new(tls_acceptor)); // for each incoming request... let task = tokio::spawn({ async move { loop { let tcp_stream = listener.accept().await; let (tcp_stream, src_addr) = match tcp_stream { Ok((t, s)) => (t, s), Err(e) => { debug!("error receiving TLS tcp_stream error: {}", e); continue; } }; // verify that the src address is safe for responses if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); let tls_acceptor = tls_acceptor.clone(); // kick out to a different task immediately, let them do the TLS handshake tokio::spawn(async move { debug!("starting TLS request from: {}", src_addr); // perform the TLS let tls_stream = tls_acceptor.accept(tcp_stream).await; let tls_stream = match tls_stream { Ok(tls_stream) => AsyncIoTokioAsStd(tls_stream), Err(e) => { debug!("tls handshake src: {} error: {}", src_addr, e); return; } }; debug!("accepted TLS request from: {}", src_addr); let (buf_stream, stream_handle) = tls_from_stream(tls_stream, src_addr); let mut timeout_stream = TimeoutStream::new(buf_stream, timeout); while let Some(message) = timeout_stream.next().await { let message = match message { Ok(message) => message, Err(e) => { debug!( "error in TLS request_stream src: {:?} error: {}", src_addr, e ); // kill this connection return; } }; self::handle_raw_request( message, Protocol::Tls, handler.clone(), stream_handle.clone(), ) .await; } }); } } }); self.tasks.push(ServerTask(task)); Ok(()) } /// Register a TlsListener to the Server. The TlsListener should already be bound to either an /// IPv6 or an IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. 
/// * `pkcs12` - certificate used to announce to clients #[cfg(all( feature = "dns-over-https-openssl", not(feature = "dns-over-https-rustls") ))] #[cfg_attr( docsrs, doc(cfg(all( feature = "dns-over-https-openssl", not(feature = "dns-over-https-rustls") ))) )] pub fn register_https_listener( &self, listener: tcp::TcpListener, timeout: Duration, pkcs12: ParsedPkcs12, ) -> io::Result<()> { unimplemented!("openssl based `dns-over-https` not yet supported. see the `dns-over-https-rustls` feature") } /// Register a TcpListener for HTTPS (h2) to the Server for supporting DoH (dns-over-https). The TcpListener should already be bound to either an /// IPv6 or an IPv4 address. /// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. /// * `certificate_and_key` - certificate and key used to announce to clients #[cfg(feature = "dns-over-https-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-https-rustls")))] pub fn register_https_listener( &mut self, listener: net::TcpListener, // TODO: need to set a timeout between requests. _timeout: Duration, certificate_and_key: (Vec, PrivateKey), dns_hostname: String, ) -> io::Result<()> { use tokio_rustls::TlsAcceptor; use crate::proto::rustls::tls_server; use crate::server::https_handler::h2_handler; let dns_hostname: Arc = Arc::from(dns_hostname); let handler = self.handler.clone(); debug!("registered https: {:?}", listener); let tls_acceptor = tls_server::new_acceptor(certificate_and_key.0, certificate_and_key.1) .map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("error creating TLS acceptor: {}", e), ) })?; let tls_acceptor = TlsAcceptor::from(Arc::new(tls_acceptor)); // for each incoming request... let dns_hostname = dns_hostname; let task = tokio::spawn({ async move { let dns_hostname = dns_hostname; loop { let tcp_stream = listener.accept().await; let (tcp_stream, src_addr) = match tcp_stream { Ok((t, s)) => (t, s), Err(e) => { debug!("error receiving HTTPS tcp_stream error: {}", e); continue; } }; // verify that the src address is safe for responses if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); let tls_acceptor = tls_acceptor.clone(); let dns_hostname = dns_hostname.clone(); tokio::spawn(async move { debug!("starting HTTPS request from: {}", src_addr); // TODO: need to consider timeout of total connect... // take the created stream... let tls_stream = tls_acceptor.accept(tcp_stream).await; let tls_stream = match tls_stream { Ok(tls_stream) => tls_stream, Err(e) => { debug!("https handshake src: {} error: {}", src_addr, e); return; } }; debug!("accepted HTTPS request from: {}", src_addr); h2_handler(handler, tls_stream, src_addr, dns_hostname).await; }); } } }); self.tasks.push(ServerTask(task)); Ok(()) } /// Register a UdpSocket to the Server for supporting DoQ (dns-over-quic). The UdpSocket should already be bound to either an /// IPv6 or an IPv4 address. 
/// /// To make the server more resilient to DOS issues, there is a timeout. Care should be taken /// to not make this too low depending on use cases. /// /// # Arguments /// * `listener` - a bound TCP (needs to be on a different port from standard TCP connections) socket /// * `timeout` - timeout duration of incoming requests, any connection that does not send /// requests within this time period will be closed. In the future it should be /// possible to create long-lived queries, but these should be from trusted sources /// only, this would require some type of whitelisting. /// * `pkcs12` - certificate used to announce to clients #[cfg(feature = "dns-over-quic")] #[cfg_attr(docsrs, doc(cfg(feature = "dns-over-quic")))] pub fn register_quic_listener( &mut self, socket: net::UdpSocket, // TODO: need to set a timeout between requests. _timeout: Duration, certificate_and_key: (Vec, PrivateKey), dns_hostname: String, ) -> io::Result<()> { use crate::proto::quic::QuicServer; use crate::server::quic_handler::quic_handler; let dns_hostname: Arc = Arc::from(dns_hostname); let handler = self.handler.clone(); debug!("registered quic: {:?}", socket); let mut server = QuicServer::with_socket(socket, certificate_and_key.0, certificate_and_key.1)?; // for each incoming request... let dns_hostname = dns_hostname; let task = tokio::spawn({ async move { let dns_hostname = dns_hostname; loop { let (streams, src_addr) = match server.next().await { Ok(Some(c)) => c, Ok(None) => continue, Err(e) => { debug!("error receiving quic connection: {e}"); continue; } }; // verify that the src address is safe for responses // TODO: we're relying the quinn library to actually validate responses before we get here, but this check is still worth doing if let Err(e) = sanitize_src_address(src_addr) { warn!( "address can not be responded to {src_addr}: {e}", src_addr = src_addr, e = e ); continue; } let handler = handler.clone(); let dns_hostname = dns_hostname.clone(); tokio::spawn(async move { debug!("starting quic stream request from: {src_addr}"); // TODO: need to consider timeout of total connect... let result = quic_handler(handler, streams, src_addr, dns_hostname).await; if let Err(e) = result { warn!("quic stream processing failed from {src_addr}: {e}") } }); } } }); self.tasks.push(ServerTask(task)); Ok(()) } /// This will run until all background tasks of the trust_dns_server end. pub async fn block_until_done(self) -> Result<(), ProtoError> { let (result, _, _) = future::select_all(self.tasks).await; result.map_err(|e| ProtoError::from(format!("Internal error in spawn: {}", e)))? } } /// Wrapping the join handle ensures that the tasks can be aborted if the handle is dropped. 
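/// (The `cleanup_after_shutdown` test below relies on this behaviour: once the
/// `ServerFuture` is dropped, the UDP port it was listening on can be re-bound.)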
struct ServerTask(JoinHandle>); impl std::future::Future for ServerTask { type Output = Result, tokio::task::JoinError>; fn poll( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll { std::pin::Pin::new(&mut self.0).poll(cx) } } impl Drop for ServerTask { fn drop(&mut self) { self.0.abort(); } } pub(crate) async fn handle_raw_request( message: SerialMessage, protocol: Protocol, request_handler: Arc, response_handler: BufDnsStreamHandle, ) { let src_addr = message.addr(); let response_handler = ResponseHandle::new(message.addr(), response_handler); self::handle_request( message.bytes(), src_addr, protocol, request_handler, response_handler, ) .await; } #[derive(Clone)] struct ReportingResponseHandler { request_header: Header, query: LowerQuery, protocol: Protocol, src_addr: SocketAddr, handler: R, } #[async_trait::async_trait] impl ResponseHandler for ReportingResponseHandler { async fn send_response<'a>( &mut self, response: crate::authority::MessageResponse< '_, 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, impl Iterator + Send + 'a, >, ) -> io::Result { let response_info = self.handler.send_response(response).await?; let id = self.request_header.id(); let rid = response_info.id(); if id != rid { warn!("request id:{} does not match response id:{}", id, rid); debug_assert_eq!(id, rid, "request id and response id should match"); } let rflags = response_info.flags(); let answer_count = response_info.answer_count(); let authority_count = response_info.name_server_count(); let additional_count = response_info.additional_count(); let response_code = response_info.response_code(); info!("request:{id} src:{proto}://{addr}#{port} {op}:{query}:{qtype}:{class} qflags:{qflags} response:{code:?} rr:{answers}/{authorities}/{additionals} rflags:{rflags}", id = rid, proto = self.protocol, addr = self.src_addr.ip(), port = self.src_addr.port(), op = self.request_header.op_code(), query = self.query.name(), qtype = self.query.query_type(), class = self.query.query_class(), qflags = self.request_header.flags(), code = response_code, answers = answer_count, authorities = authority_count, additionals = additional_count, rflags = rflags ); Ok(response_info) } } pub(crate) async fn handle_request( // TODO: allow Message here... 
message_bytes: &[u8], src_addr: SocketAddr, protocol: Protocol, request_handler: Arc, response_handler: R, ) { let mut decoder = BinDecoder::new(message_bytes); // method to handle the request let inner_handle_request = |message: MessageRequest, response_handler: R| async move { let id = message.id(); let qflags = message.header().flags(); let qop_code = message.op_code(); let message_type = message.message_type(); let is_dnssec = message.edns().map_or(false, Edns::dnssec_ok); let request = Request::new(message, src_addr, protocol); let info = request.request_info(); let query = info.query.clone(); let query_name = info.query.name(); let query_type = info.query.query_type(); let query_class = info.query.query_class(); debug!( "request:{id} src:{proto}://{addr}#{port} type:{message_type} dnssec:{is_dnssec} {op}:{query}:{qtype}:{class} qflags:{qflags}", id = id, proto = protocol, addr = src_addr.ip(), port = src_addr.port(), message_type= message_type, is_dnssec = is_dnssec, op = qop_code, query = query_name, qtype = query_type, class = query_class, qflags = qflags, ); // The reporter will handle making sure to log the result of the request let reporter = ReportingResponseHandler { request_header: *request.header(), query, protocol, src_addr, handler: response_handler, }; request_handler.handle_request(&request, reporter).await; }; // Attempt to decode the message match MessageRequest::read(&mut decoder) { Ok(message) => { inner_handle_request(message, response_handler).await; } Err(ProtoError { kind, .. }) if kind.as_form_error().is_some() => { // We failed to parse the request due to some issue in the message, but the header is available, so we can respond let (header, error) = kind .into_form_error() .expect("as form_error already confirmed this is a FormError"); let query = LowerQuery::query(Query::default()); // debug for more info on why the message parsing failed debug!( "request:{id} src:{proto}://{addr}#{port} type:{message_type} {op}:FormError:{error}", id = header.id(), proto = protocol, addr = src_addr.ip(), port = src_addr.port(), message_type= header.message_type(), op = header.op_code(), error = error, ); // The reporter will handle making sure to log the result of the request let mut reporter = ReportingResponseHandler { request_header: header, query, protocol, src_addr, handler: response_handler, }; let response = MessageResponseBuilder::new(None); let result = reporter .send_response(response.error_msg(&header, ResponseCode::FormErr)) .await; if let Err(e) = result { warn!("failed to return FormError to client: {}", e); } } Err(e) => warn!("failed to read message: {}", e), } } /// Checks if the IP address is safe for returning messages /// /// Examples of unsafe addresses are any with a port of `0` /// /// # Returns /// /// Error if the address should not be used for returned requests fn sanitize_src_address(src: SocketAddr) -> Result<(), String> { // currently checks that the src address aren't either the undefined IPv4 or IPv6 address, and not port 0. 
if src.port() == 0 { return Err(format!("cannot respond to src on port 0: {}", src)); } fn verify_v4(src: Ipv4Addr) -> Result<(), String> { if src.is_unspecified() { return Err(format!("cannot respond to unspecified v4 addr: {}", src)); } if src.is_broadcast() { return Err(format!("cannot respond to broadcast v4 addr: {}", src)); } // TODO: add check for is_reserved when that stabilizes Ok(()) } fn verify_v6(src: Ipv6Addr) -> Result<(), String> { if src.is_unspecified() { return Err(format!("cannot respond to unspecified v6 addr: {}", src)); } Ok(()) } // currently checks that the src address aren't either the undefined IPv4 or IPv6 address, and not port 0. match src.ip() { IpAddr::V4(v4) => verify_v4(v4), IpAddr::V6(v6) => verify_v6(v6), } } #[cfg(test)] mod tests { use super::*; use crate::authority::Catalog; use futures_util::future; use std::net::{Ipv4Addr, SocketAddr, UdpSocket}; #[test] fn cleanup_after_shutdown() { let runtime = tokio::runtime::Runtime::new().unwrap(); let random_port = UdpSocket::bind((Ipv4Addr::LOCALHOST, 0)) .unwrap() .local_addr() .unwrap() .port(); let bind_addr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), random_port); let (server_future, abort_handle) = future::abortable(async move { let mut server_future = ServerFuture::new(Catalog::new()); let udp_socket = tokio::net::UdpSocket::bind(bind_addr).await.unwrap(); server_future.register_socket(udp_socket); server_future.block_until_done().await }); abort_handle.abort(); runtime.block_on(async move { let _ = server_future.await; }); UdpSocket::bind(bind_addr).unwrap(); } #[test] fn test_sanitize_src_addr() { // ipv4 tests assert!(sanitize_src_address(SocketAddr::from(([192, 168, 1, 1], 4096))).is_ok()); assert!(sanitize_src_address(SocketAddr::from(([127, 0, 0, 1], 53))).is_ok()); assert!(sanitize_src_address(SocketAddr::from(([0, 0, 0, 0], 0))).is_err()); assert!(sanitize_src_address(SocketAddr::from(([192, 168, 1, 1], 0))).is_err()); assert!(sanitize_src_address(SocketAddr::from(([0, 0, 0, 0], 4096))).is_err()); assert!(sanitize_src_address(SocketAddr::from(([255, 255, 255, 255], 4096))).is_err()); // ipv6 tests assert!( sanitize_src_address(SocketAddr::from(([0x20, 0, 0, 0, 0, 0, 0, 0x1], 4096))).is_ok() ); assert!(sanitize_src_address(SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 1], 4096))).is_ok()); assert!(sanitize_src_address(SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], 4096))).is_err()); assert!(sanitize_src_address(SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], 0))).is_err()); assert!( sanitize_src_address(SocketAddr::from(([0x20, 0, 0, 0, 0, 0, 0, 0x1], 0))).is_err() ); } } trust-dns-server-0.22.0/src/server/timeout_stream.rs000064400000000000000000000065401046102023000206650ustar 00000000000000use std::io; use std::mem; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; use futures_util::stream::{Stream, StreamExt}; use futures_util::FutureExt; use tokio::time::Sleep; use tracing::{debug, warn}; /// This wraps the underlying Stream in a timeout. /// /// Any `Ok(Poll::Ready(_))` from the underlying Stream will reset the timeout. 
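///
/// A `timeout_duration` of zero disables the timeout entirely: `TimeoutStream::timeout`
/// returns `None` in that case and the underlying stream is polled without a deadline.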
pub struct TimeoutStream { stream: S, timeout_duration: Duration, timeout: Option>>, } impl TimeoutStream { /// Returns a new TimeoutStream /// /// # Arguments /// /// * `stream` - stream to wrap /// * `timeout_duration` - timeout between each request, once exceed the connection is killed /// * `reactor_handle` - reactor used for registering new timeouts pub fn new(stream: S, timeout_duration: Duration) -> Self { Self { stream, timeout_duration, timeout: None, } } fn timeout(timeout_duration: Duration) -> Option>> { if timeout_duration > Duration::from_millis(0) { Some(Box::pin(tokio::time::sleep(timeout_duration))) } else { None } } } impl Stream for TimeoutStream where S: Stream> + Unpin, { type Item = Result; // somehow insert a timeout here... fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // if the timer isn't set, set one now if self.timeout.is_none() { let timeout = Self::timeout(self.timeout_duration); self.as_mut().timeout = timeout; } match self.stream.poll_next_unpin(cx) { r @ Poll::Ready(_) => { // reset the timeout to wait for the next request... let timeout = if let Some(mut timeout) = Self::timeout(self.timeout_duration) { // ensure that interest in the Timeout is registered match timeout.poll_unpin(cx) { Poll::Ready(_) => { warn!("timeout fired immediately!"); return Poll::Ready(Some(Err(io::Error::new( io::ErrorKind::TimedOut, "timeout fired immediately!", )))); } Poll::Pending => (), // this is the expected state... } Some(timeout) } else { None }; drop(mem::replace(&mut self.timeout, timeout)); r } Poll::Pending => { if let Some(ref mut timeout) = self.timeout { match timeout.poll_unpin(cx) { Poll::Pending => Poll::Pending, Poll::Ready(()) => { debug!("timeout on stream"); Poll::Ready(Some(Err(io::Error::new( io::ErrorKind::TimedOut, format!("nothing ready in {:?}", self.timeout_duration), )))) } } } else { Poll::Pending } } } } } trust-dns-server-0.22.0/src/store/config.rs000064400000000000000000000025631046102023000167200ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Configuration for the stores use serde::Deserialize; use crate::store::file::FileConfig; #[cfg(feature = "trust-dns-resolver")] use crate::store::forwarder::ForwardConfig; #[cfg(feature = "trust-dns-recursor")] use crate::store::recursor::RecursiveConfig; #[cfg(feature = "sqlite")] use crate::store::sqlite::SqliteConfig; /// Enumeration over all Store configurations #[derive(Deserialize, PartialEq, Eq, Debug)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] #[non_exhaustive] pub enum StoreConfig { /// File based configuration File(FileConfig), /// Sqlite based configuration file #[cfg(feature = "sqlite")] #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))] Sqlite(SqliteConfig), /// Forwarding Resolver #[cfg(feature = "trust-dns-resolver")] #[cfg_attr(docsrs, doc(cfg(feature = "resolver")))] Forward(ForwardConfig), /// Recursive Resolver #[cfg(feature = "trust-dns-recursor")] #[cfg_attr(docsrs, doc(cfg(feature = "recursor")))] Recursor(RecursiveConfig), } trust-dns-server-0.22.0/src/store/file/authority.rs000064400000000000000000000351031046102023000204160ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be // copied, modified, or distributed except according to those terms. //! All authority related types use std::{ collections::BTreeMap, fs::File, io::{BufRead, BufReader}, ops::{Deref, DerefMut}, path::{Path, PathBuf}, }; use tracing::{debug, info}; #[cfg(feature = "dnssec")] use crate::{ authority::DnssecAuthority, client::{ proto::rr::dnssec::rdata::key::KEY, rr::dnssec::{DnsSecResult, SigSigner}, }, }; use crate::{ authority::{Authority, LookupError, LookupOptions, MessageRequest, UpdateResult, ZoneType}, client::{ rr::{LowerName, Name, RecordSet, RecordType, RrKey}, serialize::txt::{Lexer, Parser, Token}, }, server::RequestInfo, store::{file::FileConfig, in_memory::InMemoryAuthority}, }; /// FileAuthority is responsible for storing the resource records for a particular zone. /// /// Authorities default to DNSClass IN. The ZoneType specifies if this should be treated as the /// start of authority for the zone, is a Secondary, or a cached zone. pub struct FileAuthority(InMemoryAuthority); /// Max traversal depth for $INCLUDE files const MAX_INCLUDE_LEVEL: u16 = 256; /// Inner state of zone file loader, tracks depth of $INCLUDE /// loads as well as visited previously files, so the loader /// is able to abort e.g. when cycle is detected /// /// Note, that tracking max depth level explicitly covers also /// cycles in $INCLUDEs. The error description in this case would /// not be very helpful to detect the root cause of the problem /// though. The way to improve diagnose experience would be to /// traverse $INCLUDE files in topologically sorted order which /// requires quite some re-arrangements in the code and in the /// way loader is currently implemented. struct FileReaderState { level: u16, } impl FileReaderState { fn new() -> Self { Self { level: 0 } } fn next_level(&self) -> Self { Self { level: self.level + 1, } } } impl FileAuthority { /// Creates a new Authority. /// /// # Arguments /// /// * `origin` - The zone `Name` being created, this should match that of the `RecordType::SOA` /// record. /// * `records` - The map of the initial set of records in the zone. /// * `zone_type` - The type of zone, i.e. is this authoritative? /// * `allow_update` - If true, then this zone accepts dynamic updates. /// * `is_dnssec_enabled` - If true, then the zone will sign the zone with all registered keys, /// (see `add_zone_signing_key()`) /// /// # Return value /// /// The new `Authority`. pub fn new( origin: Name, records: BTreeMap, zone_type: ZoneType, allow_axfr: bool, ) -> Result { InMemoryAuthority::new(origin, records, zone_type, allow_axfr).map(Self) } /// Read given file line by line and recursively invokes reader for /// $INCLUDE directives /// /// TODO: it looks hacky as far we effectively duplicate parser's functionallity /// (at least partially) and performing lexing twice. /// Better solution requires us to change lexer to deal /// with Lines-like iterator instead of String buf (or capability to combine a few /// lexer instances into a single lexer). 
/// /// TODO: $INCLUDE could specify domain name -- to support on-flight swap for Origin /// value we definitely need to rethink and rework loader/parser/lexer fn read_file( zone_path: PathBuf, buf: &mut String, state: FileReaderState, ) -> Result<(), String> { let file = File::open(&zone_path) .map_err(|e| format!("failed to read {}: {:?}", zone_path.display(), e))?; let reader = BufReader::new(file); for line in reader.lines() { let content = line.map_err(|err| format!("failed to read line: {:?}", err))?; let mut lexer = Lexer::new(&content); match (lexer.next_token(), lexer.next_token(), lexer.next_token()) { ( Ok(Some(Token::Include)), Ok(Some(Token::CharData(include_path))), Ok(Some(Token::CharData(_domain))), ) => { return Err(format!( "Domain name for $INCLUDE is not supported at {}, trying to include {}", zone_path.display(), include_path )); } (Ok(Some(Token::Include)), Ok(Some(Token::CharData(include_path))), _) => { // RFC1035 (section 5) does not specify how filename for $INCLUDE // should be resolved into file path. The underlying code implements the // following: // * if the path is absolute (relies on Path::is_absolute), it uses normalized path // * otherwise, it joins the path with parent root of the current file // // TODO: Inlining files specified using non-relative path might potentially introduce // security issue in some cases (e.g. when working with zone files from untrusted sources) // and should probably be configurable by user. let include_path = Path::new(&include_path); let include_zone_path = if include_path.is_absolute() { include_path.to_path_buf() } else { let parent_dir = zone_path.parent().expect("file has to have parent folder"); parent_dir.join(include_path) }; if state.level >= MAX_INCLUDE_LEVEL { return Err(format!("Max depth level for nested $INCLUDE is reached at {}, trying to include {}", zone_path.display(), include_zone_path.display())); } let mut include_buf = String::new(); info!( "including file {} into {}", include_zone_path.display(), zone_path.display() ); Self::read_file(include_zone_path, &mut include_buf, state.next_level())?; buf.push_str(&include_buf); } _ => { buf.push_str(&content); } } buf.push('\n'); } Ok(()) } /// Read the Authority for the origin from the specified configuration pub fn try_from_config( origin: Name, zone_type: ZoneType, allow_axfr: bool, root_dir: Option<&Path>, config: &FileConfig, ) -> Result { let root_dir_path = root_dir.map(PathBuf::from).unwrap_or_else(PathBuf::new); let zone_path = root_dir_path.join(&config.zone_file_path); info!("loading zone file: {:?}", zone_path); let mut buf = String::new(); // TODO: this should really use something to read line by line or some other method to // keep the usage down. and be a custom lexer... 
Self::read_file(zone_path, &mut buf, FileReaderState::new()) .map_err(|e| format!("failed to read {}: {:?}", &config.zone_file_path, e))?; let lexer = Lexer::new(&buf); let (origin, records) = Parser::new() .parse(lexer, Some(origin), None) .map_err(|e| format!("failed to parse {}: {:?}", config.zone_file_path, e))?; info!( "zone file loaded: {} with {} records", origin, records.len() ); debug!("zone: {:#?}", records); Self::new(origin, records, zone_type, allow_axfr) } /// Unwrap the InMemoryAuthority pub fn unwrap(self) -> InMemoryAuthority { self.0 } } impl Deref for FileAuthority { type Target = InMemoryAuthority; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for FileAuthority { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[async_trait::async_trait] impl Authority for FileAuthority { type Lookup = ::Lookup; /// What type is this zone fn zone_type(&self) -> ZoneType { self.0.zone_type() } /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool { self.0.is_axfr_allowed() } /// Perform a dynamic update of a zone async fn update(&self, _update: &MessageRequest) -> UpdateResult { use crate::proto::op::ResponseCode; Err(ResponseCode::NotImp) } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName { self.0.origin() } /// Looks up all Resource Records matching the giving `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup. `RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well. /// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records. async fn lookup( &self, name: &LowerName, rtype: RecordType, lookup_options: LookupOptions, ) -> Result { self.0.lookup(name, rtype, lookup_options).await } /// Using the specified query, perform a lookup against this zone. /// /// # Arguments /// /// * `query` - the query to perform the lookup with. /// * `is_secure` - if true, then RRSIG records (if this is a secure zone) will be returned. /// /// # Return value /// /// Returns a vectory containing the results of the query, it will be empty if not found. If /// `is_secure` is true, in the case of no records found then NSEC records will be returned. async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result { self.0.search(request_info, lookup_options).await } /// Get the NS, NameServer, record for the zone async fn ns(&self, lookup_options: LookupOptions) -> Result { self.0.ns(lookup_options).await } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result { self.0.get_nsec_records(name, lookup_options).await } /// Returns the SOA of the authority. /// /// *Note*: This will only return the SOA, if this is fulfilling a request, a standard lookup /// should be used, see `soa_secure()`, which will optionally return RRSIGs. 
async fn soa(&self) -> Result { self.0.soa().await } /// Returns the SOA record for the zone async fn soa_secure(&self, lookup_options: LookupOptions) -> Result { self.0.soa_secure(lookup_options).await } } #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[async_trait::async_trait] impl DnssecAuthority for FileAuthority { /// Add a (Sig0) key that is authorized to perform updates against this authority async fn add_update_auth_key(&self, name: Name, key: KEY) -> DnsSecResult<()> { self.0.add_update_auth_key(name, key).await } /// Add Signer async fn add_zone_signing_key(&self, signer: SigSigner) -> DnsSecResult<()> { self.0.add_zone_signing_key(signer).await } /// Sign the zone for DNSSEC async fn secure_zone(&self) -> DnsSecResult<()> { DnssecAuthority::secure_zone(&self.0).await } } #[cfg(test)] mod tests { use std::net::Ipv4Addr; use std::str::FromStr; use crate::client::rr::RData; use futures_executor::block_on; use super::*; use crate::authority::ZoneType; #[test] fn test_load_zone() { #[cfg(feature = "dnssec")] let config = FileConfig { zone_file_path: "../../tests/test-data/named_test_configs/dnssec/example.com.zone" .to_string(), }; #[cfg(not(feature = "dnssec"))] let config = FileConfig { zone_file_path: "../../tests/test-data/named_test_configs/example.com.zone".to_string(), }; let authority = FileAuthority::try_from_config( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, None, &config, ) .expect("failed to load file"); let lookup = block_on(Authority::lookup( &authority, &LowerName::from_str("www.example.com.").unwrap(), RecordType::A, LookupOptions::default(), )) .expect("lookup failed"); match lookup .into_iter() .next() .expect("A record not found in authity") .data() { Some(RData::A(ip)) => assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *ip), _ => panic!("wrong rdata type returned"), } let include_lookup = block_on(Authority::lookup( &authority, &LowerName::from_str("include.alias.example.com.").unwrap(), RecordType::A, LookupOptions::default(), )) .expect("INCLUDE lookup failed"); match include_lookup .into_iter() .next() .expect("A record not found in authity") .data() { Some(RData::A(ip)) => assert_eq!(Ipv4Addr::new(127, 0, 0, 5), *ip), _ => panic!("wrong rdata type returned"), } } } trust-dns-server-0.22.0/src/store/file/config.rs000064400000000000000000000010501046102023000176250ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use serde::Deserialize; /// Configuration for file based zones #[derive(Deserialize, PartialEq, Eq, Debug)] pub struct FileConfig { /// path to the zone file pub zone_file_path: String, } trust-dns-server-0.22.0/src/store/file/mod.rs000064400000000000000000000010161046102023000171410ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! 
Zone file based serving with Dynamic DNS and journaling support mod authority; mod config; pub use self::authority::FileAuthority; pub use self::config::FileConfig; trust-dns-server-0.22.0/src/store/forwarder/authority.rs000064400000000000000000000132421046102023000214720ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::io; use tracing::{debug, info}; use crate::{ authority::{ Authority, LookupError, LookupObject, LookupOptions, MessageRequest, UpdateResult, ZoneType, }, client::{ op::ResponseCode, rr::{LowerName, Name, Record, RecordType}, }, resolver::{ config::ResolverConfig, lookup::Lookup as ResolverLookup, TokioAsyncResolver, TokioHandle, }, server::RequestInfo, store::forwarder::ForwardConfig, }; /// An authority that will forward resolutions to upstream resolvers. /// /// This uses the trust-dns-resolver for resolving requests. pub struct ForwardAuthority { origin: LowerName, resolver: TokioAsyncResolver, } impl ForwardAuthority { /// TODO: change this name to create or something #[allow(clippy::new_without_default)] #[doc(hidden)] pub fn new(runtime: TokioHandle) -> Result { let resolver = TokioAsyncResolver::from_system_conf(runtime) .map_err(|e| format!("error constructing new Resolver: {}", e))?; Ok(Self { origin: Name::root().into(), resolver, }) } /// Read the Authority for the origin from the specified configuration pub fn try_from_config( origin: Name, _zone_type: ZoneType, config: &ForwardConfig, ) -> Result { info!("loading forwarder config: {}", origin); let name_servers = config.name_servers.clone(); let mut options = config.options.unwrap_or_default(); // See RFC 1034, Section 4.3.2: // "If the data at the node is a CNAME, and QTYPE doesn't match // CNAME, copy the CNAME RR into the answer section of the response, // change QNAME to the canonical name in the CNAME RR, and go // back to step 1." // // Essentially, it's saying that servers (including forwarders) // should emit any found CNAMEs in a response ("copy the CNAME // RR into the answer section"). This is the behavior that // preserve_intemediates enables when set to true, and disables // when set to false. So we set it to true. if !options.preserve_intermediates { tracing::warn!( "preserve_intermediates set to false, which is invalid \ for a forwarder; switching to true" ); options.preserve_intermediates = true; } let config = ResolverConfig::from_parts(None, vec![], name_servers); let resolver = TokioAsyncResolver::new(config, options, TokioHandle) .map_err(|e| format!("error constructing new Resolver: {}", e))?; info!("forward resolver configured: {}: ", origin); // TODO: this might be infallible? Ok(Self { origin: origin.into(), resolver, }) } } #[async_trait::async_trait] impl Authority for ForwardAuthority { type Lookup = ForwardLookup; /// Always Forward fn zone_type(&self) -> ZoneType { ZoneType::Forward } /// Always false for Forward zones fn is_axfr_allowed(&self) -> bool { false } async fn update(&self, _update: &MessageRequest) -> UpdateResult { Err(ResponseCode::NotImp) } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com /// /// In the context of a forwarder, this is either a zone which this forwarder is associated, /// or `.`, the root zone for all zones. If this is not the root zone, then it will only forward /// for lookups which match the given zone name. 
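/// (A forwarder created with `ForwardAuthority::new` uses the root zone `.` as its
/// origin; `try_from_config` uses the zone name supplied by the configuration instead.)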
fn origin(&self) -> &LowerName { &self.origin } /// Forwards a lookup given the resolver configuration for this Forwarded zone async fn lookup( &self, name: &LowerName, rtype: RecordType, _lookup_options: LookupOptions, ) -> Result { // TODO: make this an error? debug_assert!(self.origin.zone_of(name)); debug!("forwarding lookup: {} {}", name, rtype); let name: LowerName = name.clone(); let resolve = self.resolver.lookup(name, rtype).await; resolve.map(ForwardLookup).map_err(LookupError::from) } async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result { self.lookup( request_info.query.name(), request_info.query.query_type(), lookup_options, ) .await } async fn get_nsec_records( &self, _name: &LowerName, _lookup_options: LookupOptions, ) -> Result { Err(LookupError::from(io::Error::new( io::ErrorKind::Other, "Getting NSEC records is unimplemented for the forwarder", ))) } } /// A structure that holds the results of a forwarding lookup. /// /// This exposes an interator interface for consumption downstream. pub struct ForwardLookup(pub ResolverLookup); impl LookupObject for ForwardLookup { fn is_empty(&self) -> bool { self.0.is_empty() } fn iter<'a>(&'a self) -> Box + Send + 'a> { Box::new(self.0.record_iter()) } fn take_additionals(&mut self) -> Option> { None } } trust-dns-server-0.22.0/src/store/forwarder/config.rs000064400000000000000000000013221046102023000207030ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use serde::Deserialize; use crate::resolver::config::{NameServerConfigGroup, ResolverOpts}; /// Configuration for file based zones #[derive(Clone, Deserialize, PartialEq, Eq, Debug)] pub struct ForwardConfig { /// upstream name_server configurations pub name_servers: NameServerConfigGroup, /// Resolver options pub options: Option, } trust-dns-server-0.22.0/src/store/forwarder/mod.rs000064400000000000000000000011071046102023000202160ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #![cfg(feature = "trust-dns-resolver")] //! Forwarding resolver related types mod authority; mod config; pub use self::authority::ForwardAuthority; pub use self::authority::ForwardLookup; pub use self::config::ForwardConfig; trust-dns-server-0.22.0/src/store/in_memory/authority.rs000064400000000000000000001460751046102023000215100ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! 
All authority related types use std::{ borrow::Borrow, collections::{BTreeMap, HashSet}, ops::DerefMut, sync::Arc, }; use cfg_if::cfg_if; use futures_util::future::{self, TryFutureExt}; use tracing::{debug, error, warn}; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; #[cfg(feature = "dnssec")] use crate::{ authority::DnssecAuthority, client::rr::{ dnssec::{DnsSecResult, SigSigner, SupportedAlgorithms}, rdata::{key::KEY, DNSSECRData}, }, }; use crate::{ authority::{ AnyRecords, AuthLookup, Authority, LookupError, LookupOptions, LookupRecords, LookupResult, MessageRequest, UpdateResult, ZoneType, }, client::{ op::ResponseCode, rr::{ rdata::SOA, {DNSClass, LowerName, Name, RData, Record, RecordSet, RecordType, RrKey}, }, }, server::RequestInfo, }; #[cfg(all(feature = "dnssec", feature = "testing"))] use std::ops::Deref; /// InMemoryAuthority is responsible for storing the resource records for a particular zone. /// /// Authorities default to DNSClass IN. The ZoneType specifies if this should be treated as the /// start of authority for the zone, is a Secondary, or a cached zone. pub struct InMemoryAuthority { origin: LowerName, class: DNSClass, zone_type: ZoneType, allow_axfr: bool, inner: RwLock, } impl InMemoryAuthority { /// Creates a new Authority. /// /// # Arguments /// /// * `origin` - The zone `Name` being created, this should match that of the `RecordType::SOA` /// record. /// * `records` - The map of the initial set of records in the zone. /// * `zone_type` - The type of zone, i.e. is this authoritative? /// * `allow_update` - If true, then this zone accepts dynamic updates. /// * `is_dnssec_enabled` - If true, then the zone will sign the zone with all registered keys, /// (see `add_zone_signing_key()`) /// /// # Return value /// /// The new `Authority`. 
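    /// A minimal construction sketch (crate paths and the `SOA::new` argument order are
    /// assumptions based on this module's own imports, not copied from the crate docs):
    ///
    /// ```rust,ignore
    /// use std::collections::BTreeMap;
    /// use std::str::FromStr;
    ///
    /// use trust_dns_client::rr::rdata::SOA;
    /// use trust_dns_client::rr::{LowerName, Name, RData, Record, RecordSet, RecordType, RrKey};
    /// use trust_dns_server::authority::ZoneType;
    /// use trust_dns_server::store::in_memory::InMemoryAuthority;
    ///
    /// let origin = Name::from_str("example.com.").unwrap();
    /// let serial = 20220101;
    ///
    /// // The initial record map must contain an SOA, otherwise `new` returns an Err.
    /// let soa = SOA::new(
    ///     Name::from_str("ns1.example.com.").unwrap(),
    ///     Name::from_str("hostmaster.example.com.").unwrap(),
    ///     serial, 7200, 3600, 1209600, 3600,
    /// );
    /// let soa_record = Record::from_rdata(origin.clone(), 3600, RData::SOA(soa));
    /// let mut soa_rrset = RecordSet::new(&origin, RecordType::SOA, serial);
    /// soa_rrset.insert(soa_record, serial);
    ///
    /// let mut records = BTreeMap::new();
    /// records.insert(RrKey::new(LowerName::new(&origin), RecordType::SOA), soa_rrset);
    ///
    /// let authority = InMemoryAuthority::new(origin, records, ZoneType::Primary, false)
    ///     .expect("zone is missing its SOA record");
    /// ```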
pub fn new( origin: Name, records: BTreeMap, zone_type: ZoneType, allow_axfr: bool, ) -> Result { let mut this = Self::empty(origin.clone(), zone_type, allow_axfr); let inner = this.inner.get_mut(); // SOA must be present let serial = records .iter() .find(|(key, _)| key.record_type == RecordType::SOA) .and_then(|(_, rrset)| rrset.records_without_rrsigs().next()) .and_then(Record::data) .and_then(RData::as_soa) .map(SOA::serial) .ok_or_else(|| format!("SOA record must be present: {}", origin))?; let iter = records.into_iter().map(|(_key, record)| record); // add soa to the records for rrset in iter { let name = rrset.name().clone(); let rr_type = rrset.record_type(); for record in rrset.records_without_rrsigs() { if !inner.upsert(record.clone(), serial, this.class) { return Err(format!( "Failed to insert {} {} to zone: {}", name, rr_type, origin )); }; } } Ok(this) } /// Creates an empty Authority /// /// # Warning /// /// This is an invalid zone, SOA must be added pub fn empty(origin: Name, zone_type: ZoneType, allow_axfr: bool) -> Self { Self { origin: LowerName::new(&origin), class: DNSClass::IN, zone_type, allow_axfr, inner: RwLock::new(InnerInMemory::default()), } } /// The DNSClass of this zone pub fn class(&self) -> DNSClass { self.class } /// Allow AXFR's (zone transfers) #[cfg(any(test, feature = "testing"))] #[cfg_attr(docsrs, doc(cfg(feature = "testing")))] pub fn set_allow_axfr(&mut self, allow_axfr: bool) { self.allow_axfr = allow_axfr; } /// Clears all records (including SOA, etc) pub fn clear(&mut self) { self.inner.get_mut().records.clear() } /// Retrieve the Signer, which contains the private keys, for this zone #[cfg(all(feature = "dnssec", feature = "testing"))] pub async fn secure_keys(&self) -> impl Deref + '_ { RwLockWriteGuard::map(self.inner.write().await, |i| i.secure_keys.as_mut_slice()) } /// Get all the records pub async fn records(&self) -> BTreeMap> { let records = RwLockReadGuard::map(self.inner.read().await, |i| &i.records); records.clone() } /// Get a mutable reference to the records pub async fn records_mut( &self, ) -> impl DerefMut>> + '_ { RwLockWriteGuard::map(self.inner.write().await, |i| &mut i.records) } /// Get a mutable reference to the records pub fn records_get_mut(&mut self) -> &mut BTreeMap> { &mut self.inner.get_mut().records } /// Returns the minimum ttl (as used in the SOA record) pub async fn minimum_ttl(&self) -> u32 { self.inner.read().await.minimum_ttl(self.origin()) } /// get the current serial number for the zone. pub async fn serial(&self) -> u32 { self.inner.read().await.serial(self.origin()) } #[cfg(any(feature = "dnssec", feature = "sqlite"))] #[allow(unused)] pub(crate) async fn increment_soa_serial(&self) -> u32 { self.inner .write() .await .increment_soa_serial(self.origin(), self.class) } /// Inserts or updates a `Record` depending on it's existence in the authority. /// /// Guarantees that SOA, CNAME only has one record, will implicitly update if they already exist. /// /// # Arguments /// /// * `record` - The `Record` to be inserted or updated. /// * `serial` - Current serial number to be recorded against updates. /// /// # Return value /// /// true if the value was inserted, false otherwise pub async fn upsert(&self, record: Record, serial: u32) -> bool { self.inner.write().await.upsert(record, serial, self.class) } /// Non-async version of upsert when behind a mutable reference. 
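    /// A short usage sketch (`authority` is assumed to be a mutable `InMemoryAuthority`
    /// that already holds its SOA record, e.g. as built in the example on [`Self::new`]):
    ///
    /// ```rust,ignore
    /// use std::net::Ipv4Addr;
    /// use std::str::FromStr;
    ///
    /// use trust_dns_client::rr::{Name, RData, Record};
    ///
    /// let www = Name::from_str("www.example.com.").unwrap();
    /// let a = Record::from_rdata(www, 3600, RData::A(Ipv4Addr::new(127, 0, 0, 1)));
    ///
    /// // Returns true if the record was inserted (or replaced an existing one).
    /// let serial = 20220102;
    /// assert!(authority.upsert_mut(a, serial));
    /// ```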
pub fn upsert_mut(&mut self, record: Record, serial: u32) -> bool { self.inner.get_mut().upsert(record, serial, self.class) } /// Add a (Sig0) key that is authorized to perform updates against this authority #[cfg(feature = "dnssec")] fn inner_add_update_auth_key( inner: &mut InnerInMemory, name: Name, key: KEY, origin: &LowerName, dns_class: DNSClass, ) -> DnsSecResult<()> { let rdata = RData::DNSSEC(DNSSECRData::KEY(key)); // TODO: what TTL? let record = Record::from_rdata(name, 86400, rdata); let serial = inner.serial(origin); if inner.upsert(record, serial, dns_class) { Ok(()) } else { Err("failed to add auth key".into()) } } /// Non-async method of add_update_auth_key when behind a mutable reference #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn add_update_auth_key_mut(&mut self, name: Name, key: KEY) -> DnsSecResult<()> { let Self { ref origin, ref mut inner, class, .. } = self; Self::inner_add_update_auth_key(inner.get_mut(), name, key, origin, *class) } /// By adding a secure key, this will implicitly enable dnssec for the zone. /// /// # Arguments /// /// * `signer` - Signer with associated private key #[cfg(feature = "dnssec")] fn inner_add_zone_signing_key( inner: &mut InnerInMemory, signer: SigSigner, origin: &LowerName, dns_class: DNSClass, ) -> DnsSecResult<()> { // also add the key to the zone let zone_ttl = inner.minimum_ttl(origin); let dnskey = signer.key().to_dnskey(signer.algorithm())?; let dnskey = Record::from_rdata( origin.clone().into(), zone_ttl, RData::DNSSEC(DNSSECRData::DNSKEY(dnskey)), ); // TODO: also generate the CDS and CDNSKEY let serial = inner.serial(origin); inner.upsert(dnskey, serial, dns_class); inner.secure_keys.push(signer); Ok(()) } /// Non-async method of add_zone_signing_key when behind a mutable reference #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn add_zone_signing_key_mut(&mut self, signer: SigSigner) -> DnsSecResult<()> { let Self { ref origin, ref mut inner, class, .. } = self; Self::inner_add_zone_signing_key(inner.get_mut(), signer, origin, *class) } /// (Re)generates the nsec records, increments the serial number and signs the zone #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn secure_zone_mut(&mut self) -> DnsSecResult<()> { let Self { ref origin, ref mut inner, .. } = self; inner.get_mut().secure_zone_mut(origin, self.class) } /// (Re)generates the nsec records, increments the serial number and signs the zone #[cfg(not(feature = "dnssec"))] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] pub fn secure_zone_mut(&mut self) -> Result<(), &str> { Err("DNSSEC was not enabled during compilation.") } } #[derive(Default)] struct InnerInMemory { records: BTreeMap>, // Private key mapped to the Record of the DNSKey // TODO: these private_keys should be stored securely. Ideally, we have keys only stored per // server instance, but that requires requesting updates from the parent zone, which may or // may not support dynamic updates to register the new key... Trust-DNS will provide support // for this, in some form, perhaps alternate root zones... 
#[cfg(feature = "dnssec")] secure_keys: Vec, } impl InnerInMemory { /// Retrieve the Signer, which contains the private keys, for this zone #[cfg(feature = "dnssec")] fn secure_keys(&self) -> &[SigSigner] { &self.secure_keys } // /// Get all the records // fn records(&self) -> &BTreeMap> { // &self.records // } // /// Get a mutable reference to the records // fn records_mut(&mut self) -> &mut BTreeMap> { // &mut self.records // } fn inner_soa(&self, origin: &LowerName) -> Option<&SOA> { // TODO: can't there be an RrKeyRef? let rr_key = RrKey::new(origin.clone(), RecordType::SOA); self.records .get(&rr_key) .and_then(|rrset| rrset.records_without_rrsigs().next()) .and_then(Record::data) .and_then(RData::as_soa) } /// Returns the minimum ttl (as used in the SOA record) fn minimum_ttl(&self, origin: &LowerName) -> u32 { let soa = self.inner_soa(origin); let soa = match soa { Some(soa) => soa, None => { error!("could not lookup SOA for authority: {}", origin); return 0; } }; soa.minimum() } /// get the current serial number for the zone. fn serial(&self, origin: &LowerName) -> u32 { let soa = self.inner_soa(origin); let soa = match soa { Some(soa) => soa, None => { error!("could not lookup SOA for authority: {}", origin); return 0; } }; soa.serial() } fn inner_lookup( &self, name: &LowerName, record_type: RecordType, lookup_options: LookupOptions, ) -> Option> { // this range covers all the records for any of the RecordTypes at a given label. let start_range_key = RrKey::new(name.clone(), RecordType::Unknown(u16::min_value())); let end_range_key = RrKey::new(name.clone(), RecordType::Unknown(u16::max_value())); fn aname_covers_type(key_type: RecordType, query_type: RecordType) -> bool { (query_type == RecordType::A || query_type == RecordType::AAAA) && key_type == RecordType::ANAME } let lookup = self .records .range(&start_range_key..&end_range_key) // remember CNAME can be the only record at a particular label .find(|(key, _)| { key.record_type == record_type || key.record_type == RecordType::CNAME || aname_covers_type(key.record_type, record_type) }) .map(|(_key, rr_set)| rr_set); // TODO: maybe unwrap this recursion. match lookup { None => self.inner_lookup_wildcard(name, record_type, lookup_options), l => l.cloned(), } } fn inner_lookup_wildcard( &self, name: &LowerName, record_type: RecordType, lookup_options: LookupOptions, ) -> Option> { // if this is a wildcard or a root, both should break continued lookups let wildcard = if name.is_wildcard() || name.is_root() { return None; } else { name.clone().into_wildcard() }; #[allow(clippy::needless_late_init)] self.inner_lookup(&wildcard, record_type, lookup_options) // we need to change the name to the query name in the result set since this was a wildcard .map(|rrset| { let mut new_answer = RecordSet::new(name.borrow(), rrset.record_type(), rrset.ttl()); let records; let _rrsigs: Vec<&Record>; cfg_if! 
{ if #[cfg(feature = "dnssec")] { let (records_tmp, rrsigs_tmp) = rrset .records(lookup_options.is_dnssec(), lookup_options.supported_algorithms()) .partition(|r| r.record_type() != RecordType::RRSIG); records = records_tmp; _rrsigs = rrsigs_tmp; } else { let (records_tmp, rrsigs_tmp) = (rrset.records_without_rrsigs(), Vec::with_capacity(0)); records = records_tmp; _rrsigs = rrsigs_tmp; } }; for record in records { if let Some(rdata) = record.data() { new_answer.add_rdata(rdata.clone()); } } #[cfg(feature = "dnssec")] for rrsig in _rrsigs { new_answer.insert_rrsig(rrsig.clone()) } Arc::new(new_answer) }) } /// Search for additional records to include in the response /// /// # Arguments /// /// * original_name - the original name that was being looked up /// * query_type - original type in the request query /// * next_name - the name from the CNAME, ANAME, MX, etc. record that is being searched /// * search_type - the root search type, ANAME, CNAME, MX, i.e. the beginning of the chain fn additional_search( &self, original_name: &LowerName, original_query_type: RecordType, next_name: LowerName, _search_type: RecordType, lookup_options: LookupOptions, ) -> Option>> { let mut additionals: Vec> = vec![]; // if it's a CNAME or other forwarding record, we'll be adding additional records based on the query_type let mut query_types_arr = [original_query_type; 2]; let query_types: &[RecordType] = match original_query_type { RecordType::ANAME | RecordType::NS | RecordType::MX | RecordType::SRV => { query_types_arr = [RecordType::A, RecordType::AAAA]; &query_types_arr[..] } _ => &query_types_arr[..1], }; for query_type in query_types { // loop and collect any additional records to send // Track the names we've looked up for this query type. let mut names = HashSet::new(); // If we're just going to repeat the same query then bail out. if query_type == &original_query_type { names.insert(original_name.clone()); } let mut next_name = Some(next_name.clone()); while let Some(search) = next_name.take() { // If we've already looked up this name then bail out. if names.contains(&search) { break; } let additional = self.inner_lookup(&search, *query_type, lookup_options); names.insert(search); if let Some(additional) = additional { // assuming no crazy long chains... if !additionals.contains(&additional) { additionals.push(additional.clone()); } next_name = maybe_next_name(&additional, *query_type).map(|(name, _search_type)| name); } } } if !additionals.is_empty() { Some(additionals) } else { None } } #[cfg(any(feature = "dnssec", feature = "sqlite"))] fn increment_soa_serial(&mut self, origin: &LowerName, dns_class: DNSClass) -> u32 { // we'll remove the SOA and then replace it let rr_key = RrKey::new(origin.clone(), RecordType::SOA); let record = self .records .remove(&rr_key) // TODO: there should be an unwrap on rrset, but it's behind Arc .and_then(|rrset| rrset.records_without_rrsigs().next().cloned()); let mut record = if let Some(record) = record { record } else { error!("could not lookup SOA for authority: {}", origin); return 0; }; let serial = if let Some(RData::SOA(ref mut soa_rdata)) = record.data_mut() { soa_rdata.increment_serial(); soa_rdata.serial() } else { panic!("This was not an SOA record"); // valid panic, never should happen }; self.upsert(record, serial, dns_class); serial } /// Inserts or updates a `Record` depending on it's existence in the authority. /// /// Guarantees that SOA, CNAME only has one record, will implicitly update if they already exist. 
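    /// A CNAME cannot be inserted at a label that already holds records of another type
    /// (NSEC records excepted), and vice versa; such an upsert, like one whose `DNSClass`
    /// does not match the zone's class, is rejected and `false` is returned.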
/// /// # Arguments /// /// * `record` - The `Record` to be inserted or updated. /// * `serial` - Current serial number to be recorded against updates. /// /// # Return value /// /// true if the value was inserted, false otherwise fn upsert(&mut self, record: Record, serial: u32, dns_class: DNSClass) -> bool { if dns_class != record.dns_class() { warn!( "mismatched dns_class on record insert, zone: {} record: {}", dns_class, record.dns_class() ); return false; } #[cfg(feature = "dnssec")] fn is_nsec(upsert_type: RecordType, occupied_type: RecordType) -> bool { // NSEC is always allowed upsert_type == RecordType::NSEC || upsert_type == RecordType::NSEC3 || occupied_type == RecordType::NSEC || occupied_type == RecordType::NSEC3 } #[cfg(not(feature = "dnssec"))] fn is_nsec(_upsert_type: RecordType, _occupied_type: RecordType) -> bool { // TODO: we should make the DNSSec RecordTypes always visible false } /// returns true if an only if the label can not cooccupy space with the checked type #[allow(clippy::nonminimal_bool)] fn label_does_not_allow_multiple( upsert_type: RecordType, occupied_type: RecordType, check_type: RecordType, ) -> bool { // it's a CNAME/ANAME but there's a record that's not a CNAME/ANAME at this location (upsert_type == check_type && occupied_type != check_type) || // it's a different record, but there is already a CNAME/ANAME here (upsert_type != check_type && occupied_type == check_type) } // check that CNAME and ANAME is either not already present, or no other records are if it's a CNAME let start_range_key = RrKey::new(record.name().into(), RecordType::Unknown(u16::min_value())); let end_range_key = RrKey::new(record.name().into(), RecordType::Unknown(u16::max_value())); let multiple_records_at_label_disallowed = self .records .range(&start_range_key..&end_range_key) // remember CNAME can be the only record at a particular label .any(|(key, _)| { !is_nsec(record.record_type(), key.record_type) && label_does_not_allow_multiple( record.record_type(), key.record_type, RecordType::CNAME, ) }); if multiple_records_at_label_disallowed { // consider making this an error? return false; } let rr_key = RrKey::new(record.name().into(), record.rr_type()); let records: &mut Arc = self .records .entry(rr_key) .or_insert_with(|| Arc::new(RecordSet::new(record.name(), record.rr_type(), serial))); // because this is and Arc, we need to clone and then replace the entry let mut records_clone = RecordSet::clone(&*records); if records_clone.insert(record, serial) { *records = Arc::new(records_clone); true } else { false } } /// (Re)generates the nsec records, increments the serial number and signs the zone #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] fn secure_zone_mut(&mut self, origin: &LowerName, dns_class: DNSClass) -> DnsSecResult<()> { // TODO: only call nsec_zone after adds/deletes // needs to be called before incrementing the soa serial, to make sure IXFR works properly self.nsec_zone(origin, dns_class); // need to resign any records at the current serial number and bump the number. // first bump the serial number on the SOA, so that it is resigned with the new serial. self.increment_soa_serial(origin, dns_class); // TODO: should we auto sign here? or maybe up a level... self.sign_zone(origin, dns_class) } /// Dummy implementation for when DNSSEC is disabled. 
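    /// (With the `dnssec` feature enabled, this rebuilds the NSEC chain: existing NSEC
    /// records are removed and a fresh covering NSEC record is generated for every name in
    /// the zone, provided at least one zone signing key has been registered.)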
#[cfg(feature = "dnssec")] fn nsec_zone(&mut self, origin: &LowerName, dns_class: DNSClass) { use crate::client::rr::rdata::NSEC; // only create nsec records for secure zones if self.secure_keys.is_empty() { return; } debug!("generating nsec records: {}", origin); // first remove all existing nsec records let delete_keys: Vec = self .records .keys() .filter(|k| k.record_type == RecordType::NSEC) .cloned() .collect(); for key in delete_keys { self.records.remove(&key); } // now go through and generate the nsec records let ttl = self.minimum_ttl(origin); let serial = self.serial(origin); let mut records: Vec = vec![]; { let mut nsec_info: Option<(&Name, Vec)> = None; for key in self.records.keys() { match nsec_info { None => nsec_info = Some((key.name.borrow(), vec![key.record_type])), Some((name, ref mut vec)) if LowerName::new(name) == key.name => { vec.push(key.record_type) } Some((name, vec)) => { // names aren't equal, create the NSEC record let mut record = Record::with(name.clone(), RecordType::NSEC, ttl); let rdata = NSEC::new_cover_self(key.name.clone().into(), vec); record.set_data(Some(RData::DNSSEC(DNSSECRData::NSEC(rdata)))); records.push(record); // new record... nsec_info = Some((key.name.borrow(), vec![key.record_type])) } } } // the last record if let Some((name, vec)) = nsec_info { // names aren't equal, create the NSEC record let mut record = Record::with(name.clone(), RecordType::NSEC, ttl); let rdata = NSEC::new_cover_self(origin.clone().into(), vec); record.set_data(Some(RData::DNSSEC(DNSSECRData::NSEC(rdata)))); records.push(record); } } // insert all the nsec records for record in records { let upserted = self.upsert(record, serial, dns_class); debug_assert!(upserted); } } /// Signs an RecordSet, and stores the RRSIGs in the RecordSet /// /// This will sign the RecordSet with all the registered keys in the zone /// /// # Arguments /// /// * `rr_set` - RecordSet to sign /// * `secure_keys` - Set of keys to use to sign the RecordSet, see `self.signers()` /// * `zone_ttl` - the zone TTL, see `self.minimum_ttl()` /// * `zone_class` - DNSClass of the zone, see `self.zone_class()` #[cfg(feature = "dnssec")] fn sign_rrset( rr_set: &mut RecordSet, secure_keys: &[SigSigner], zone_ttl: u32, zone_class: DNSClass, ) -> DnsSecResult<()> { use crate::client::rr::dnssec::tbs; use crate::client::rr::rdata::SIG; use time::OffsetDateTime; let inception = OffsetDateTime::now_utc(); rr_set.clear_rrsigs(); let rrsig_temp = Record::with(rr_set.name().clone(), RecordType::RRSIG, zone_ttl); for signer in secure_keys { debug!( "signing rr_set: {}, {} with: {}", rr_set.name(), rr_set.record_type(), signer.algorithm(), ); let expiration = inception + signer.sig_duration(); let tbs = tbs::rrset_tbs( rr_set.name(), zone_class, rr_set.name().num_labels(), rr_set.record_type(), signer.algorithm(), rr_set.ttl(), expiration.unix_timestamp() as u32, inception.unix_timestamp() as u32, signer.calculate_key_tag()?, signer.signer_name(), // TODO: this is a nasty clone... the issue is that the vec // from records is of Vec<&R>, but we really want &[R] &rr_set .records_without_rrsigs() .cloned() .collect::>(), ); // TODO, maybe chain these with some ETL operations instead? 
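            // `tbs` is the canonical "to be signed" serialization of this RRset (owner name,
            // class, TTL and record data), i.e. the byte sequence over which the RRSIG
            // signature below is computed.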
let tbs = match tbs { Ok(tbs) => tbs, Err(err) => { error!("could not serialize rrset to sign: {}", err); continue; } }; let signature = signer.sign(&tbs); let signature = match signature { Ok(signature) => signature, Err(err) => { error!("could not sign rrset: {}", err); continue; } }; let mut rrsig = rrsig_temp.clone(); rrsig.set_data(Some(RData::DNSSEC(DNSSECRData::SIG(SIG::new( // type_covered: RecordType, rr_set.record_type(), // algorithm: Algorithm, signer.algorithm(), // num_labels: u8, rr_set.name().num_labels(), // original_ttl: u32, rr_set.ttl(), // sig_expiration: u32, expiration.unix_timestamp() as u32, // sig_inception: u32, inception.unix_timestamp() as u32, // key_tag: u16, signer.calculate_key_tag()?, // signer_name: Name, signer.signer_name().clone(), // sig: Vec signature, ))))); rr_set.insert_rrsig(rrsig); } Ok(()) } /// Signs any records in the zone that have serial numbers greater than or equal to `serial` #[cfg(feature = "dnssec")] fn sign_zone(&mut self, origin: &LowerName, dns_class: DNSClass) -> DnsSecResult<()> { debug!("signing zone: {}", origin); let minimum_ttl = self.minimum_ttl(origin); let secure_keys = &self.secure_keys; let records = &mut self.records; // TODO: should this be an error? if secure_keys.is_empty() { warn!( "attempt to sign_zone {} for dnssec, but no keys available!", origin ) } // sign all record_sets, as of 0.12.1 this includes DNSKEY for rr_set_orig in records.values_mut() { // because the rrset is an Arc, it must be cloned before mutated let rr_set = Arc::make_mut(rr_set_orig); Self::sign_rrset(rr_set, secure_keys, minimum_ttl, dns_class)?; } Ok(()) } } /// Gets the next search name, and returns the RecordType that it originated from fn maybe_next_name( record_set: &RecordSet, query_type: RecordType, ) -> Option<(LowerName, RecordType)> { match (record_set.record_type(), query_type) { // ANAME is similar to CNAME, // unlike CNAME, it is only something that continue to additional processing if the // the query was for address (A, AAAA, or ANAME itself) record types. 
(t @ RecordType::ANAME, RecordType::A) | (t @ RecordType::ANAME, RecordType::AAAA) | (t @ RecordType::ANAME, RecordType::ANAME) => record_set .records_without_rrsigs() .next() .and_then(Record::data) .and_then(RData::as_aname) .map(LowerName::from) .map(|name| (name, t)), (t @ RecordType::NS, RecordType::NS) => record_set .records_without_rrsigs() .next() .and_then(Record::data) .and_then(RData::as_ns) .map(LowerName::from) .map(|name| (name, t)), // CNAME will continue to additional processing for any query type (t @ RecordType::CNAME, _) => record_set .records_without_rrsigs() .next() .and_then(Record::data) .and_then(RData::as_cname) .map(LowerName::from) .map(|name| (name, t)), (t @ RecordType::MX, RecordType::MX) => record_set .records_without_rrsigs() .next() .and_then(Record::data) .and_then(RData::as_mx) .map(|mx| mx.exchange().clone()) .map(LowerName::from) .map(|name| (name, t)), (t @ RecordType::SRV, RecordType::SRV) => record_set .records_without_rrsigs() .next() .and_then(Record::data) .and_then(RData::as_srv) .map(|srv| srv.target().clone()) .map(LowerName::from) .map(|name| (name, t)), // other additional collectors can be added here can be added here _ => None, } } #[async_trait::async_trait] impl Authority for InMemoryAuthority { type Lookup = AuthLookup; /// What type is this zone fn zone_type(&self) -> ZoneType { self.zone_type } /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool { self.allow_axfr } /// Takes the UpdateMessage, extracts the Records, and applies the changes to the record set. /// /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.4 - Process Update Section /// /// Next, the Update Section is processed as follows. /// /// 3.4.2 - Update /// /// The Update Section is parsed into RRs and these RRs are processed in /// order. /// /// 3.4.2.1. If any system failure (such as an out of memory condition, /// or a hardware error in persistent storage) occurs during the /// processing of this section, signal SERVFAIL to the requestor and undo /// all updates applied to the zone during this transaction. /// /// 3.4.2.2. Any Update RR whose CLASS is the same as ZCLASS is added to /// the zone. In case of duplicate RDATAs (which for SOA RRs is always /// the case, and for WKS RRs is the case if the ADDRESS and PROTOCOL /// fields both match), the Zone RR is replaced by Update RR. If the /// TYPE is SOA and there is no Zone SOA RR, or the new SOA.SERIAL is /// lower (according to [RFC1982]) than or equal to the current Zone SOA /// RR's SOA.SERIAL, the Update RR is ignored. In the case of a CNAME /// Update RR and a non-CNAME Zone RRset or vice versa, ignore the CNAME /// Update RR, otherwise replace the CNAME Zone RR with the CNAME Update /// RR. /// /// 3.4.2.3. For any Update RR whose CLASS is ANY and whose TYPE is ANY, /// all Zone RRs with the same NAME are deleted, unless the NAME is the /// same as ZNAME in which case only those RRs whose TYPE is other than /// SOA or NS are deleted. For any Update RR whose CLASS is ANY and /// whose TYPE is not ANY all Zone RRs with the same NAME and TYPE are /// deleted, unless the NAME is the same as ZNAME in which case neither /// SOA or NS RRs will be deleted. /// /// 3.4.2.4. 
For any Update RR whose class is NONE, any Zone RR whose /// NAME, TYPE, RDATA and RDLENGTH are equal to the Update RR is deleted, /// unless the NAME is the same as ZNAME and either the TYPE is SOA or /// the TYPE is NS and the matching Zone RR is the only NS remaining in /// the RRset, in which case this Update RR is ignored. /// /// 3.4.2.5. Signal NOERROR to the requestor. /// ``` /// /// # Arguments /// /// * `update` - The `UpdateMessage` records will be extracted and used to perform the update /// actions as specified in the above RFC. /// /// # Return value /// /// true if any of additions, updates or deletes were made to the zone, false otherwise. Err is /// returned in the case of bad data, etc. async fn update(&self, _update: &MessageRequest) -> UpdateResult { Err(ResponseCode::NotImp) } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName { &self.origin } /// Looks up all Resource Records matching the giving `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup. `RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well. /// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records. async fn lookup( &self, name: &LowerName, query_type: RecordType, lookup_options: LookupOptions, ) -> Result { let inner = self.inner.read().await; // Collect the records from each rr_set let (result, additionals): (LookupResult, Option) = match query_type { RecordType::AXFR | RecordType::ANY => { let result = AnyRecords::new( lookup_options, inner.records.values().cloned().collect(), query_type, name.clone(), ); (Ok(LookupRecords::AnyRecords(result)), None) } _ => { // perform the lookup let answer = inner.inner_lookup(name, query_type, lookup_options); // evaluate any cnames for additional inclusion let additionals_root_chain_type: Option<(_, _)> = answer .as_ref() .and_then(|a| maybe_next_name(a, query_type)) .and_then(|(search_name, search_type)| { inner .additional_search( name, query_type, search_name, search_type, lookup_options, ) .map(|adds| (adds, search_type)) }); // if the chain started with an ANAME, take the A or AAAA record from the list let (additionals, answer) = match (additionals_root_chain_type, answer, query_type) { ( Some((additionals, RecordType::ANAME)), Some(answer), RecordType::A, ) | ( Some((additionals, RecordType::ANAME)), Some(answer), RecordType::AAAA, ) => { // This should always be true... 
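                        // (maybe_next_name only reports ANAME as the chain's root type when
                        // the answer RRset itself was the ANAME, so this invariant holds by
                        // construction.)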
debug_assert_eq!(answer.record_type(), RecordType::ANAME); // in the case of ANAME the final record should be the A or AAAA record let (rdatas, a_aaaa_ttl) = { let last_record = additionals.last(); let a_aaaa_ttl = last_record.map_or(u32::max_value(), |r| r.ttl()); // grap the rdatas let rdatas: Option> = last_record .and_then(|record| match record.record_type() { RecordType::A | RecordType::AAAA => { // the RRSIGS will be useless since we're changing the record type Some(record.records_without_rrsigs()) } _ => None, }) .map(|records| { records .filter_map(Record::data) .cloned() .collect::>() }); (rdatas, a_aaaa_ttl) }; // now build up a new RecordSet // the name comes from the ANAME record // according to the rfc the ttl is from the ANAME // TODO: technically we should take the min of the potential CNAME chain let ttl = answer.ttl().min(a_aaaa_ttl); let mut new_answer = RecordSet::new(answer.name(), query_type, ttl); for rdata in rdatas.into_iter().flatten() { new_answer.add_rdata(rdata); } // if DNSSEC is enabled, and the request had the DO set, sign the recordset #[cfg(feature = "dnssec")] { use tracing::warn; // ANAME's are constructed on demand, so need to be signed before return if lookup_options.is_dnssec() { InnerInMemory::sign_rrset( &mut new_answer, inner.secure_keys(), inner.minimum_ttl(self.origin()), self.class(), ) // rather than failing the request, we'll just warn .map_err(|e| warn!("failed to sign ANAME record: {}", e)) .ok(); } } // prepend answer to additionals here (answer is the ANAME record) let additionals = std::iter::once(answer) .chain(additionals.into_iter()) .collect(); // return the new answer // because the searched set was an Arc, we need to arc too (Some(additionals), Some(Arc::new(new_answer))) } (Some((additionals, _)), answer, _) => (Some(additionals), answer), (None, answer, _) => (None, answer), }; // map the answer to a result let answer = answer .map_or(Err(LookupError::from(ResponseCode::NXDomain)), |rr_set| { Ok(LookupRecords::new(lookup_options, rr_set)) }); let additionals = additionals.map(|a| LookupRecords::many(lookup_options, a)); (answer, additionals) } }; // This is annoying. The 1035 spec literally specifies that most DNS authorities would want to store // records in a list except when there are a lot of records. But this makes indexed lookups by name+type // always return empty sets. This is only important in the negative case, where other DNS authorities // generally return NoError and no results when other types exist at the same name. bah. // TODO: can we get rid of this? 
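        // For the NXDomain case, distinguish three outcomes: if any RRset exists at this name
        // or beneath it, report NameExists so the responder can answer NoError with an empty
        // answer section; otherwise answer NXDomain for names inside this zone and Refused
        // for names outside of it.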
let result = match result { Err(LookupError::ResponseCode(ResponseCode::NXDomain)) => { if inner .records .keys() .any(|key| key.name() == name || name.zone_of(key.name())) { return Err(LookupError::NameExists); } else { let code = if self.origin().zone_of(name) { ResponseCode::NXDomain } else { ResponseCode::Refused }; return Err(LookupError::from(code)); } } Err(e) => return Err(e), o => o, }; result.map(|answers| AuthLookup::answers(answers, additionals)) } async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result { debug!("searching InMemoryAuthority for: {}", request_info.query); let lookup_name = request_info.query.name(); let record_type: RecordType = request_info.query.query_type(); // if this is an AXFR zone transfer, verify that this is either the Secondary or Primary // for AXFR the first and last record must be the SOA if RecordType::AXFR == record_type { // TODO: support more advanced AXFR options if !self.is_axfr_allowed() { return Err(LookupError::from(ResponseCode::Refused)); } #[allow(deprecated)] match self.zone_type() { ZoneType::Primary | ZoneType::Secondary | ZoneType::Master | ZoneType::Slave => (), // TODO: Forward? _ => return Err(LookupError::from(ResponseCode::NXDomain)), } } // perform the actual lookup match record_type { RecordType::SOA => { self.lookup(self.origin(), record_type, lookup_options) .await } RecordType::AXFR => { // TODO: shouldn't these SOA's be secure? at least the first, perhaps not the last? let lookup = future::try_join3( // TODO: maybe switch this to be an soa_inner type call? self.soa_secure(lookup_options), self.soa(), self.lookup(lookup_name, record_type, lookup_options), ) .map_ok(|(start_soa, end_soa, records)| match start_soa { l @ AuthLookup::Empty => l, start_soa => AuthLookup::AXFR { start_soa: start_soa.unwrap_records(), records: records.unwrap_records(), end_soa: end_soa.unwrap_records(), }, }); lookup.await } // A standard Lookup path _ => self.lookup(lookup_name, record_type, lookup_options).await, } } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. 
the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well #[cfg(feature = "dnssec")] async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result { let inner = self.inner.read().await; fn is_nsec_rrset(rr_set: &RecordSet) -> bool { rr_set.record_type() == RecordType::NSEC } // TODO: need a BorrowdRrKey let rr_key = RrKey::new(name.clone(), RecordType::NSEC); let no_data = inner .records .get(&rr_key) .map(|rr_set| LookupRecords::new(lookup_options, rr_set.clone())); if let Some(no_data) = no_data { return Ok(no_data.into()); } let get_closest_nsec = |name: &LowerName| -> Option> { inner .records .values() .rev() .filter(|rr_set| is_nsec_rrset(rr_set)) // the name must be greater than the name in the nsec .filter(|rr_set| *name >= rr_set.name().into()) // now find the next record where the covered name is greater .find(|rr_set| { // there should only be one record rr_set .records(false, SupportedAlgorithms::default()) .next() .and_then(Record::data) .and_then(RData::as_dnssec) .and_then(DNSSECRData::as_nsec) .map_or(false, |r| { // the search name is less than the next NSEC record *name < r.next_domain_name().into() || // this is the last record, and wraps to the beginning of the zone r.next_domain_name() < rr_set.name() }) }) .cloned() }; let closest_proof = get_closest_nsec(name); // we need the wildcard proof, but make sure that it's still part of the zone. let wildcard = name.base_name(); let origin = self.origin(); let wildcard = if origin.zone_of(&wildcard) { wildcard } else { origin.clone() }; // don't duplicate the record... let wildcard_proof = if wildcard != *name { get_closest_nsec(&wildcard) } else { None }; let proofs = match (closest_proof, wildcard_proof) { (Some(closest_proof), Some(wildcard_proof)) => { // dedup with the wildcard proof if wildcard_proof != closest_proof { vec![wildcard_proof, closest_proof] } else { vec![closest_proof] } } (None, Some(proof)) | (Some(proof), None) => vec![proof], (None, None) => vec![], }; Ok(LookupRecords::many(lookup_options, proofs).into()) } #[cfg(not(feature = "dnssec"))] async fn get_nsec_records( &self, _name: &LowerName, _lookup_options: LookupOptions, ) -> Result { Ok(AuthLookup::default()) } } #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[async_trait::async_trait] impl DnssecAuthority for InMemoryAuthority { /// Add a (Sig0) key that is authorized to perform updates against this authority async fn add_update_auth_key(&self, name: Name, key: KEY) -> DnsSecResult<()> { let mut inner = self.inner.write().await; Self::inner_add_update_auth_key(&mut inner, name, key, self.origin(), self.class) } /// By adding a secure key, this will implicitly enable dnssec for the zone. /// /// # Arguments /// /// * `signer` - Signer with associated private key async fn add_zone_signing_key(&self, signer: SigSigner) -> DnsSecResult<()> { let mut inner = self.inner.write().await; Self::inner_add_zone_signing_key(&mut inner, signer, self.origin(), self.class) } /// Sign the zone for DNSSEC async fn secure_zone(&self) -> DnsSecResult<()> { let mut inner = self.inner.write().await; inner.secure_zone_mut(self.origin(), self.class) } } trust-dns-server-0.22.0/src/store/in_memory/mod.rs000064400000000000000000000007441046102023000202270ustar 00000000000000// Copyright 2015-2019 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be // copied, modified, or distributed except according to those terms. //! Zone file based serving with Dynamic DNS and journaling support mod authority; pub use self::authority::InMemoryAuthority; trust-dns-server-0.22.0/src/store/mod.rs000064400000000000000000000012141046102023000162220ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! All persistent store implementations mod config; pub mod file; pub mod forwarder; pub mod in_memory; pub mod recursor; #[cfg(feature = "sqlite")] #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))] pub mod sqlite; // TODO: add a dynamic library option? pub use self::config::StoreConfig; trust-dns-server-0.22.0/src/store/recursor/authority.rs000064400000000000000000000117651046102023000213530ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{io, path::Path, time::Instant}; use tracing::{debug, info}; use trust_dns_client::op::Query; pub(crate) use trust_dns_resolver::lookup::Lookup; use crate::{ authority::{ Authority, LookupError, LookupObject, LookupOptions, MessageRequest, UpdateResult, ZoneType, }, client::{ op::ResponseCode, rr::{LowerName, Name, Record, RecordType}, }, recursor::Recursor, resolver::config::{NameServerConfig, NameServerConfigGroup, Protocol}, server::RequestInfo, store::recursor::RecursiveConfig, }; /// An authority that will forward resolutions to upstream resolvers. /// /// This uses the trust-dns-resolver for resolving requests. pub struct RecursiveAuthority { origin: LowerName, recursor: Recursor, } impl RecursiveAuthority { /// Read the Authority for the origin from the specified configuration pub async fn try_from_config( origin: Name, _zone_type: ZoneType, config: &RecursiveConfig, root_dir: Option<&Path>, ) -> Result { info!("loading recursor config: {}", origin); // read the roots let root_addrs = config .read_roots(root_dir) .map_err(|e| format!("failed to read roots {}: {}", config.roots.display(), e))?; // Configure all the name servers let mut roots = NameServerConfigGroup::new(); for socket_addr in root_addrs { roots.push(NameServerConfig { socket_addr, protocol: Protocol::Tcp, tls_dns_name: None, trust_nx_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, // TODO: need to support bind addresses }); roots.push(NameServerConfig { socket_addr, protocol: Protocol::Udp, tls_dns_name: None, trust_nx_responses: false, #[cfg(feature = "dns-over-rustls")] tls_config: None, bind_addr: None, }); } let recursor = Recursor::new(roots).map_err(|e| format!("failed to initialize recursor: {}", e))?; Ok(Self { origin: origin.into(), recursor, }) } } #[async_trait::async_trait] impl Authority for RecursiveAuthority { type Lookup = RecursiveLookup; /// Always Recursive fn zone_type(&self) -> ZoneType { ZoneType::Hint } /// Always false for Forward zones fn is_axfr_allowed(&self) -> bool { false } async fn update(&self, _update: &MessageRequest) -> UpdateResult { Err(ResponseCode::NotImp) } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com /// /// In the context of a forwarder, this is either a zone which this forwarder is associated, /// or `.`, the root zone for all zones. 
If this is not the root zone, then it will only forward /// for lookups which match the given zone name. fn origin(&self) -> &LowerName { &self.origin } /// Forwards a lookup given the resolver configuration for this Forwarded zone async fn lookup( &self, name: &LowerName, rtype: RecordType, _lookup_options: LookupOptions, ) -> Result { debug!("recursive lookup: {} {}", name, rtype); let query = Query::query(name.into(), rtype); let now = Instant::now(); self.recursor .resolve(query, now) .await .map(RecursiveLookup) .map_err(Into::into) } async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result { self.lookup( request_info.query.name(), request_info.query.query_type(), lookup_options, ) .await } async fn get_nsec_records( &self, _name: &LowerName, _lookup_options: LookupOptions, ) -> Result { Err(LookupError::from(io::Error::new( io::ErrorKind::Other, "Getting NSEC records is unimplemented for the recursor", ))) } } pub struct RecursiveLookup(Lookup); impl LookupObject for RecursiveLookup { fn is_empty(&self) -> bool { self.0.is_empty() } fn iter<'a>(&'a self) -> Box + Send + 'a> { Box::new(self.0.record_iter()) } fn take_additionals(&mut self) -> Option> { None } } trust-dns-server-0.22.0/src/store/recursor/config.rs000064400000000000000000000035561046102023000205670ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{ borrow::Cow, fs::File, io::Read, net::SocketAddr, path::{Path, PathBuf}, }; use serde::Deserialize; use trust_dns_client::{ rr::{DNSClass, RData, Record, RecordSet}, serialize::txt::{Lexer, Parser}, }; use trust_dns_resolver::Name; use crate::error::ConfigError; /// Configuration for file based zones #[derive(Clone, Deserialize, Eq, PartialEq, Debug)] pub struct RecursiveConfig { /// File with roots, aka hints pub roots: PathBuf, } impl RecursiveConfig { pub(crate) fn read_roots( &self, root_dir: Option<&Path>, ) -> Result, ConfigError> { let path = if let Some(root_dir) = root_dir { Cow::Owned(root_dir.join(&self.roots)) } else { Cow::Borrowed(&self.roots) }; let mut roots = File::open(path.as_ref())?; let mut roots_str = String::new(); roots.read_to_string(&mut roots_str)?; let lexer = Lexer::new(&roots_str); let mut parser = Parser::new(); let (_zone, roots_zone) = parser.parse(lexer, Some(Name::root()), Some(DNSClass::IN))?; // TODO: we may want to deny some of the root nameservers, for reasons... Ok(roots_zone .values() .flat_map(RecordSet::records_without_rrsigs) .filter_map(Record::data) .filter_map(RData::to_ip_addr) // we only want IPs .map(|ip| SocketAddr::from((ip, 53))) // all the roots only have tradition DNS ports .collect()) } } trust-dns-server-0.22.0/src/store/recursor/mod.rs000064400000000000000000000010421046102023000200650ustar 00000000000000// Copyright 2015-2022 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #![cfg(feature = "trust-dns-recursor")] //! 
Recursive resolver related types mod authority; mod config; pub use self::authority::RecursiveAuthority; pub use self::config::RecursiveConfig; trust-dns-server-0.22.0/src/store/sqlite/authority.rs000064400000000000000000001246101046102023000210020ustar 00000000000000// Copyright 2015-2021 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! All authority related types use std::{ ops::{Deref, DerefMut}, path::{Path, PathBuf}, sync::Arc, }; use futures_util::lock::Mutex; use tracing::{error, info, warn}; use crate::{ authority::{Authority, LookupError, LookupOptions, MessageRequest, UpdateResult, ZoneType}, client::rr::{LowerName, RrKey}, error::{PersistenceErrorKind, PersistenceResult}, proto::{ op::ResponseCode, rr::{DNSClass, Name, RData, Record, RecordSet, RecordType}, }, server::RequestInfo, store::{ in_memory::InMemoryAuthority, sqlite::{Journal, SqliteConfig}, }, }; #[cfg(feature = "dnssec")] use crate::{ authority::{DnssecAuthority, UpdateRequest}, client::rr::dnssec::{DnsSecResult, SigSigner}, proto::rr::dnssec::rdata::key::KEY, }; /// SqliteAuthority is responsible for storing the resource records for a particular zone. /// /// Authorities default to DNSClass IN. The ZoneType specifies if this should be treated as the /// start of authority for the zone, is a Secondary, or a cached zone. #[allow(dead_code)] pub struct SqliteAuthority { in_memory: InMemoryAuthority, journal: Mutex>, allow_update: bool, is_dnssec_enabled: bool, } impl SqliteAuthority { /// Creates a new Authority. /// /// # Arguments /// /// * `in_memory` - InMemoryAuthority for all records. /// * `allow_update` - If true, then this zone accepts dynamic updates. /// * `is_dnssec_enabled` - If true, then the zone will sign the zone with all registered keys, /// (see `add_zone_signing_key()`) /// /// # Return value /// /// The new `Authority`. 
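    /// A minimal construction sketch (`in_memory` is assumed to be an `InMemoryAuthority`
    /// that already contains the zone's SOA record; the module path is assumed to be the
    /// one re-exported by this crate's `store::sqlite` module):
    ///
    /// ```rust,ignore
    /// use trust_dns_server::store::sqlite::SqliteAuthority;
    ///
    /// // Wrap the in-memory zone: allow dynamic updates, leave DNSSEC signing disabled.
    /// let mut authority = SqliteAuthority::new(in_memory, true, false);
    ///
    /// // Note: updates are only persisted once a `Journal` has been attached, either via
    /// // `set_journal` or by loading through `try_from_config`.
    /// ```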
pub fn new(in_memory: InMemoryAuthority, allow_update: bool, is_dnssec_enabled: bool) -> Self { Self { in_memory, journal: Mutex::new(None), allow_update, is_dnssec_enabled, } } /// load the authority from the configuration pub async fn try_from_config( origin: Name, zone_type: ZoneType, allow_axfr: bool, enable_dnssec: bool, root_dir: Option<&Path>, config: &SqliteConfig, ) -> Result { use crate::store::file::{FileAuthority, FileConfig}; let zone_name: Name = origin; let root_zone_dir = root_dir.map(PathBuf::from).unwrap_or_else(PathBuf::new); // to be compatible with previous versions, the extension might be zone, not jrnl let journal_path: PathBuf = root_zone_dir.join(&config.journal_file_path); let zone_path: PathBuf = root_zone_dir.join(&config.zone_file_path); // load the zone if journal_path.exists() { info!("recovering zone from journal: {:?}", journal_path); let journal = Journal::from_file(&journal_path) .map_err(|e| format!("error opening journal: {:?}: {}", journal_path, e))?; let in_memory = InMemoryAuthority::empty(zone_name.clone(), zone_type, allow_axfr); let mut authority = Self::new(in_memory, config.allow_update, enable_dnssec); authority .recover_with_journal(&journal) .await .map_err(|e| format!("error recovering from journal: {}", e))?; authority.set_journal(journal).await; info!("recovered zone: {}", zone_name); Ok(authority) } else if zone_path.exists() { // TODO: deprecate this portion of loading, instantiate the journal through a separate tool info!("loading zone file: {:?}", zone_path); let file_config = FileConfig { zone_file_path: config.zone_file_path.clone(), }; let in_memory = FileAuthority::try_from_config( zone_name.clone(), zone_type, allow_axfr, root_dir, &file_config, )? .unwrap(); let mut authority = Self::new(in_memory, config.allow_update, enable_dnssec); // if dynamic update is enabled, enable the journal info!("creating new journal: {:?}", journal_path); let journal = Journal::from_file(&journal_path) .map_err(|e| format!("error creating journal {:?}: {}", journal_path, e))?; authority.set_journal(journal).await; // preserve to the new journal, i.e. we just loaded the zone from disk, start the journal authority .persist_to_journal() .await .map_err(|e| format!("error persisting to journal {:?}: {}", journal_path, e))?; info!("zone file loaded: {}", zone_name); Ok(authority) } else { Err(format!( "no zone file or journal defined at: {:?}", zone_path )) } } /// Recovers the zone from a Journal, returns an error on failure to recover the zone. /// /// # Arguments /// /// * `journal` - the journal from which to load the persisted zone. pub async fn recover_with_journal(&mut self, journal: &Journal) -> PersistenceResult<()> { assert!( self.in_memory.records_get_mut().is_empty(), "records should be empty during a recovery" ); info!("recovering from journal"); for record in journal.iter() { // AXFR is special, it is used to mark the dump of a full zone. // when recovering, if an AXFR is encountered, we should remove all the records in the // authority. if record.rr_type() == RecordType::AXFR { self.in_memory.clear(); } else if let Err(error) = self.update_records(&[record], false).await { return Err(PersistenceErrorKind::Recovery(error.to_str()).into()); } } Ok(()) } /// Persist the state of the current zone to the journal, does nothing if there is no associated /// Journal. /// /// Returns an error if there was an issue writing to the persistence layer. 
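    /// A short usage sketch (`authority` is a mutable `SqliteAuthority`; the journal path
    /// shown here is purely illustrative):
    ///
    /// ```rust,ignore
    /// use std::path::Path;
    /// use trust_dns_server::store::sqlite::Journal;
    ///
    /// let journal = Journal::from_file(Path::new("/var/named/example.com.jrnl"))?;
    /// authority.set_journal(journal).await;
    ///
    /// // Writes an AXFR marker followed by every record in the zone at the current serial.
    /// authority.persist_to_journal().await?;
    /// ```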
pub async fn persist_to_journal(&self) -> PersistenceResult<()> { if let Some(journal) = self.journal.lock().await.as_ref() { let serial = self.in_memory.serial().await; info!("persisting zone to journal at SOA.serial: {}", serial); // TODO: THIS NEEDS TO BE IN A TRANSACTION!!! journal.insert_record(serial, Record::new().set_rr_type(RecordType::AXFR))?; for rr_set in self.in_memory.records().await.values() { // TODO: should we preserve rr_sets or not? for record in rr_set.records_without_rrsigs() { journal.insert_record(serial, record)?; } } // TODO: COMMIT THE TRANSACTION!!! } Ok(()) } /// Associate a backing Journal with this Authority for Updatable zones pub async fn set_journal(&mut self, journal: Journal) { *self.journal.lock().await = Some(journal); } /// Returns the associated Journal #[cfg(any(test, feature = "testing"))] #[cfg_attr(docsrs, doc(cfg(feature = "testing")))] pub async fn journal(&self) -> impl Deref> + '_ { self.journal.lock().await } /// Enables the zone for dynamic DNS updates pub fn set_allow_update(&mut self, allow_update: bool) { self.allow_update = allow_update; } /// Get serial #[cfg(any(test, feature = "testing"))] #[cfg_attr(docsrs, doc(cfg(feature = "testing")))] pub async fn serial(&self) -> u32 { self.in_memory.serial().await } /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.2 - Process Prerequisite Section /// /// Next, the Prerequisite Section is checked to see that all /// prerequisites are satisfied by the current state of the zone. Using /// the definitions expressed in Section 1.2, if any RR's NAME is not /// within the zone specified in the Zone Section, signal NOTZONE to the /// requestor. /// /// 3.2.1. For RRs in this section whose CLASS is ANY, test to see that /// TTL and RDLENGTH are both zero (0), else signal FORMERR to the /// requestor. If TYPE is ANY, test to see that there is at least one RR /// in the zone whose NAME is the same as that of the Prerequisite RR, /// else signal NXDOMAIN to the requestor. If TYPE is not ANY, test to /// see that there is at least one RR in the zone whose NAME and TYPE are /// the same as that of the Prerequisite RR, else signal NXRRSET to the /// requestor. /// /// 3.2.2. For RRs in this section whose CLASS is NONE, test to see that /// the TTL and RDLENGTH are both zero (0), else signal FORMERR to the /// requestor. If the TYPE is ANY, test to see that there are no RRs in /// the zone whose NAME is the same as that of the Prerequisite RR, else /// signal YXDOMAIN to the requestor. If the TYPE is not ANY, test to /// see that there are no RRs in the zone whose NAME and TYPE are the /// same as that of the Prerequisite RR, else signal YXRRSET to the /// requestor. /// /// 3.2.3. For RRs in this section whose CLASS is the same as the ZCLASS, /// test to see that the TTL is zero (0), else signal FORMERR to the /// requestor. Then, build an RRset for each unique and /// compare each resulting RRset for set equality (same members, no more, /// no less) with RRsets in the zone. If any Prerequisite RRset is not /// entirely and exactly matched by a zone RRset, signal NXRRSET to the /// requestor. If any RR in this section has a CLASS other than ZCLASS /// or NONE or ANY, signal FORMERR to the requestor. 
/// /// 3.2.4 - Table Of Metavalues Used In Prerequisite Section /// /// CLASS TYPE RDATA Meaning /// ------------------------------------------------------------ /// ANY ANY empty Name is in use /// ANY rrset empty RRset exists (value independent) /// NONE ANY empty Name is not in use /// NONE rrset empty RRset does not exist /// zone rrset rr RRset exists (value dependent) /// ``` pub async fn verify_prerequisites(&self, pre_requisites: &[Record]) -> UpdateResult<()> { // 3.2.5 - Pseudocode for Prerequisite Section Processing // // for rr in prerequisites // if (rr.ttl != 0) // return (FORMERR) // if (zone_of(rr.name) != ZNAME) // return (NOTZONE); // if (rr.class == ANY) // if (rr.rdlength != 0) // return (FORMERR) // if (rr.type == ANY) // if (!zone_name) // return (NXDOMAIN) // else // if (!zone_rrset) // return (NXRRSET) // if (rr.class == NONE) // if (rr.rdlength != 0) // return (FORMERR) // if (rr.type == ANY) // if (zone_name) // return (YXDOMAIN) // else // if (zone_rrset) // return (YXRRSET) // if (rr.class == zclass) // temp += rr // else // return (FORMERR) // // for rrset in temp // if (zone_rrset != rrset) // return (NXRRSET) for require in pre_requisites { let required_name = LowerName::from(require.name()); if require.ttl() != 0 { warn!("ttl must be 0 for: {:?}", require); return Err(ResponseCode::FormErr); } let origin = self.origin(); if !origin.zone_of(&require.name().into()) { warn!("{} is not a zone_of {}", require.name(), origin); return Err(ResponseCode::NotZone); } match require.dns_class() { DNSClass::ANY => { if let None | Some(RData::NULL(..)) = require.data() { match require.rr_type() { // ANY ANY empty Name is in use RecordType::ANY => { if self .lookup( &required_name, RecordType::ANY, LookupOptions::default(), ) .await .unwrap_or_default() .was_empty() { return Err(ResponseCode::NXDomain); } else { continue; } } // ANY rrset empty RRset exists (value independent) rrset => { if self .lookup(&required_name, rrset, LookupOptions::default()) .await .unwrap_or_default() .was_empty() { return Err(ResponseCode::NXRRSet); } else { continue; } } } } else { return Err(ResponseCode::FormErr); } } DNSClass::NONE => { if let None | Some(RData::NULL(..)) = require.data() { match require.rr_type() { // NONE ANY empty Name is not in use RecordType::ANY => { if !self .lookup( &required_name, RecordType::ANY, LookupOptions::default(), ) .await .unwrap_or_default() .was_empty() { return Err(ResponseCode::YXDomain); } else { continue; } } // NONE rrset empty RRset does not exist rrset => { if !self .lookup(&required_name, rrset, LookupOptions::default()) .await .unwrap_or_default() .was_empty() { return Err(ResponseCode::YXRRSet); } else { continue; } } } } else { return Err(ResponseCode::FormErr); } } class if class == self.in_memory.class() => // zone rrset rr RRset exists (value dependent) { if !self .lookup(&required_name, require.rr_type(), LookupOptions::default()) .await .unwrap_or_default() .iter() .any(|rr| rr == require) { return Err(ResponseCode::NXRRSet); } else { continue; } } _ => return Err(ResponseCode::FormErr), } } // if we didn't bail everything checked out... Ok(()) } /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.3 - Check Requestor's Permissions /// /// 3.3.1. Next, the requestor's permission to update the RRs named in /// the Update Section may be tested in an implementation dependent /// fashion or using mechanisms specified in a subsequent Secure DNS /// Update protocol. 
If the requestor does not have permission to /// perform these updates, the server may write a warning message in its /// operations log, and may either signal REFUSED to the requestor, or /// ignore the permission problem and proceed with the update. /// /// 3.3.2. While the exact processing is implementation defined, if these /// verification activities are to be performed, this is the point in the /// server's processing where such performance should take place, since /// if a REFUSED condition is encountered after an update has been /// partially applied, it will be necessary to undo the partial update /// and restore the zone to its original state before answering the /// requestor. /// ``` /// #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[allow(clippy::blocks_in_if_conditions)] pub async fn authorize(&self, update_message: &MessageRequest) -> UpdateResult<()> { use tracing::debug; use crate::client::rr::rdata::DNSSECRData; use crate::proto::rr::dnssec::Verifier; // 3.3.3 - Pseudocode for Permission Checking // // if (security policy exists) // if (this update is not permitted) // if (local option) // log a message about permission problem // if (local option) // return (REFUSED) // does this authority allow_updates? if !self.allow_update { warn!( "update attempted on non-updatable Authority: {}", self.origin() ); return Err(ResponseCode::Refused); } // verify sig0, currently the only authorization that is accepted. let sig0s: &[Record] = update_message.sig0(); debug!("authorizing with: {:?}", sig0s); if !sig0s.is_empty() { let mut found_key = false; for sig in sig0s.iter().filter_map(|sig0| { sig0.data() .and_then(RData::as_dnssec) .and_then(DNSSECRData::as_sig) }) { let name = LowerName::from(sig.signer_name()); let keys = self .lookup(&name, RecordType::KEY, LookupOptions::default()) .await; let keys = match keys { Ok(keys) => keys, Err(_) => continue, // error trying to lookup a key by that name, try the next one. }; debug!("found keys {:?}", keys); // TODO: check key usage flags and restrictions found_key = keys .iter() .filter_map(|rr_set| { rr_set .data() .and_then(RData::as_dnssec) .and_then(DNSSECRData::as_key) }) .any(|key| { key.verify_message(update_message, sig.sig(), sig) .map(|_| { info!("verified sig: {:?} with key: {:?}", sig, key); true }) .unwrap_or_else(|_| { debug!("did not verify sig: {:?} with key: {:?}", sig, key); false }) }); if found_key { break; // stop searching for matching keys, we found one } } if found_key { return Ok(()); } } else { warn!( "no sig0 matched registered records: id {}", update_message.id() ); } // getting here, we will always default to rejecting the request // the code will only ever explicitly return authorized actions. Err(ResponseCode::Refused) } /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.4 - Process Update Section /// /// Next, the Update Section is processed as follows. /// /// 3.4.1 - Prescan /// /// The Update Section is parsed into RRs and each RR's CLASS is checked /// to see if it is ANY, NONE, or the same as the Zone Class, else signal /// a FORMERR to the requestor. Using the definitions in Section 1.2, /// each RR's NAME must be in the zone specified by the Zone Section, /// else signal NOTZONE to the requestor. /// /// 3.4.1.2. For RRs whose CLASS is not ANY, check the TYPE and if it is /// ANY, AXFR, MAILA, MAILB, or any other QUERY metatype, or any /// unrecognized type, then signal FORMERR to the requestor. 
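A short sketch of the refusal path described above: with no SIG(0) record attached, `authorize` rejects the request outright, since it only ever returns `Ok` for an explicitly verified signature. The message construction mirrors `test_update_errors` further down in this package; the `dnssec` feature must be enabled for `authorize` to exist.

```rust
// Sketch (requires the `dnssec` feature): an update with no SIG(0) is refused.
use trust_dns_client::op::{Message, Query, ResponseCode};
use trust_dns_client::serialize::binary::BinDecodable;
use trust_dns_server::authority::MessageRequest;
use trust_dns_server::store::sqlite::SqliteAuthority;

async fn unsigned_updates_are_refused(authority: &SqliteAuthority) {
    let mut message = Message::default();
    message.add_query(Query::default());

    let bytes = message.to_vec().expect("failed to encode message");
    let request = MessageRequest::from_bytes(&bytes).expect("failed to decode request");

    assert_eq!(
        authority.authorize(&request).await.unwrap_err(),
        ResponseCode::Refused
    );
}
```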
For RRs /// whose CLASS is ANY or NONE, check the TTL to see that it is zero (0), /// else signal a FORMERR to the requestor. For any RR whose CLASS is /// ANY, check the RDLENGTH to make sure that it is zero (0) (that is, /// the RDATA field is empty), and that the TYPE is not AXFR, MAILA, /// MAILB, or any other QUERY metatype besides ANY, or any unrecognized /// type, else signal FORMERR to the requestor. /// ``` #[allow(clippy::unused_unit)] pub async fn pre_scan(&self, records: &[Record]) -> UpdateResult<()> { // 3.4.1.3 - Pseudocode For Update Section Prescan // // [rr] for rr in updates // if (zone_of(rr.name) != ZNAME) // return (NOTZONE); // if (rr.class == zclass) // if (rr.type & ANY|AXFR|MAILA|MAILB) // return (FORMERR) // elsif (rr.class == ANY) // if (rr.ttl != 0 || rr.rdlength != 0 // || rr.type & AXFR|MAILA|MAILB) // return (FORMERR) // elsif (rr.class == NONE) // if (rr.ttl != 0 || rr.type & ANY|AXFR|MAILA|MAILB) // return (FORMERR) // else // return (FORMERR) for rr in records { if !self.origin().zone_of(&rr.name().into()) { return Err(ResponseCode::NotZone); } let class: DNSClass = rr.dns_class(); if class == self.in_memory.class() { match rr.rr_type() { RecordType::ANY | RecordType::AXFR | RecordType::IXFR => { return Err(ResponseCode::FormErr); } _ => (), } } else { match class { DNSClass::ANY => { if rr.ttl() != 0 { return Err(ResponseCode::FormErr); } if let None | Some(RData::NULL(..)) = rr.data() { () } else { return Err(ResponseCode::FormErr); } match rr.rr_type() { RecordType::AXFR | RecordType::IXFR => { return Err(ResponseCode::FormErr); } _ => (), } } DNSClass::NONE => { if rr.ttl() != 0 { return Err(ResponseCode::FormErr); } match rr.rr_type() { RecordType::ANY | RecordType::AXFR | RecordType::IXFR => { return Err(ResponseCode::FormErr); } _ => (), } } _ => return Err(ResponseCode::FormErr), } } } Ok(()) } /// Updates the specified records according to the update section. /// /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.4.2.6 - Table Of Metavalues Used In Update Section /// /// CLASS TYPE RDATA Meaning /// --------------------------------------------------------- /// ANY ANY empty Delete all RRsets from a name /// ANY rrset empty Delete an RRset /// NONE rrset rr Delete an RR from an RRset /// zone rrset rr Add to an RRset /// ``` /// /// # Arguments /// /// * `records` - set of record instructions for update following above rules /// * `auto_signing_and_increment` - if true, the zone will sign and increment the SOA, this /// should be disabled during recovery. pub async fn update_records( &self, records: &[Record], auto_signing_and_increment: bool, ) -> UpdateResult { let mut updated = false; let serial: u32 = self.in_memory.serial().await; // the persistence act as a write-ahead log. The WAL will also be used for recovery of a zone // subsequent to a failure of the server. 
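To make the §3.4.1 prescan rules concrete, here is a hedged sketch: an ordinary zone-class record passes, while a zone-class query metatype such as `ANY` is rejected with FORMERR. It assumes the zone class is `IN` and the origin is `example.com.`, as in the crate's test data.

```rust
// Sketch: what pre_scan accepts and what it rejects.
use std::net::Ipv4Addr;
use std::str::FromStr;

use trust_dns_client::op::ResponseCode;
use trust_dns_client::rr::{Name, RData, Record, RecordType};
use trust_dns_server::store::sqlite::SqliteAuthority;

async fn prescan_examples(authority: &SqliteAuthority) {
    let name = Name::from_str("new.example.com.").unwrap();

    // A zone-class A record is fine as far as the prescan is concerned.
    let add_a = Record::from_rdata(name.clone(), 3600, RData::A(Ipv4Addr::new(10, 0, 0, 1)));
    assert!(authority.pre_scan(&[add_a]).await.is_ok());

    // ANY is a query metatype: in the zone class it is a FORMERR.
    let bad = Record::with(name, RecordType::ANY, 0);
    assert_eq!(
        authority.pre_scan(&[bad]).await.unwrap_err(),
        ResponseCode::FormErr
    );
}
```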
if let Some(ref journal) = *self.journal.lock().await { if let Err(error) = journal.insert_records(serial, records) { error!("could not persist update records: {}", error); return Err(ResponseCode::ServFail); } } // 3.4.2.7 - Pseudocode For Update Section Processing // // [rr] for rr in updates // if (rr.class == zclass) // if (rr.type == CNAME) // if (zone_rrset) // next [rr] // elsif (zone_rrset) // next [rr] // if (rr.type == SOA) // if (!zone_rrset || // zone_rr.serial > rr.soa.serial) // next [rr] // for zrr in zone_rrset // if (rr.type == CNAME || rr.type == SOA || // (rr.type == WKS && rr.proto == zrr.proto && // rr.address == zrr.address) || // rr.rdata == zrr.rdata) // zrr = rr // next [rr] // zone_rrset += rr // elsif (rr.class == ANY) // if (rr.type == ANY) // if (rr.name == zname) // zone_rrset = Nil // else // zone_rrset = Nil // elsif (rr.name == zname && // (rr.type == SOA || rr.type == NS)) // next [rr] // else // zone_rrset = Nil // elsif (rr.class == NONE) // if (rr.type == SOA) // next [rr] // if (rr.type == NS && zone_rrset == rr) // next [rr] // zone_rr = Nil // return (NOERROR) for rr in records { let rr_name = LowerName::from(rr.name()); let rr_key = RrKey::new(rr_name.clone(), rr.rr_type()); match rr.dns_class() { class if class == self.in_memory.class() => { // RFC 2136 - 3.4.2.2. Any Update RR whose CLASS is the same as ZCLASS is added to // the zone. In case of duplicate RDATAs (which for SOA RRs is always // the case, and for WKS RRs is the case if the ADDRESS and PROTOCOL // fields both match), the Zone RR is replaced by Update RR. If the // TYPE is SOA and there is no Zone SOA RR, or the new SOA.SERIAL is // lower (according to [RFC1982]) than or equal to the current Zone SOA // RR's SOA.SERIAL, the Update RR is ignored. In the case of a CNAME // Update RR and a non-CNAME Zone RRset or vice versa, ignore the CNAME // Update RR, otherwise replace the CNAME Zone RR with the CNAME Update // RR. // zone rrset rr Add to an RRset info!("upserting record: {:?}", rr); updated = self.in_memory.upsert(rr.clone(), serial).await || updated; } DNSClass::ANY => { // This is a delete of entire RRSETs, either many or one. In either case, the spec is clear: match rr.rr_type() { t @ RecordType::SOA | t @ RecordType::NS if rr_name == *self.origin() => { // SOA and NS records are not to be deleted if they are the origin records info!("skipping delete of {:?} see RFC 2136 - 3.4.2.3", t); continue; } RecordType::ANY => { // RFC 2136 - 3.4.2.3. For any Update RR whose CLASS is ANY and whose TYPE is ANY, // all Zone RRs with the same NAME are deleted, unless the NAME is the // same as ZNAME in which case only those RRs whose TYPE is other than // SOA or NS are deleted. // ANY ANY empty Delete all RRsets from a name info!( "deleting all records at name (not SOA or NS at origin): {:?}", rr_name ); let origin = self.origin(); let to_delete = self .in_memory .records() .await .keys() .filter(|k| { !((k.record_type == RecordType::SOA || k.record_type == RecordType::NS) && k.name != *origin) }) .filter(|k| k.name == rr_name) .cloned() .collect::>(); for delete in to_delete { self.in_memory.records_mut().await.remove(&delete); updated = true; } } _ => { // RFC 2136 - 3.4.2.3. For any Update RR whose CLASS is ANY and // whose TYPE is not ANY all Zone RRs with the same NAME and TYPE are // deleted, unless the NAME is the same as ZNAME in which case neither // SOA or NS RRs will be deleted. 
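The two most common metavalue combinations from the table above can be exercised directly through `update_records`; the sketch below (names and origin assumed, and skipping the `authorize`/`pre_scan` steps a real dynamic update would pass through first) adds one A record and then deletes that RRset again.

```rust
// Sketch: "zone rrset rr" adds to an RRset, "ANY rrset empty" deletes it.
use std::net::Ipv4Addr;
use std::str::FromStr;

use trust_dns_client::rr::{DNSClass, Name, RData, Record, RecordType};
use trust_dns_server::authority::UpdateResult;
use trust_dns_server::store::sqlite::SqliteAuthority;

async fn add_then_delete(authority: &SqliteAuthority) -> UpdateResult<()> {
    let name = Name::from_str("host.example.com.").unwrap();

    // zone rrset rr => add to an RRset (upserted into the in-memory zone).
    let add = Record::from_rdata(name.clone(), 3600, RData::A(Ipv4Addr::new(10, 0, 0, 2)));
    assert!(authority.update_records(&[add], true).await?);

    // ANY rrset empty => delete the whole A RRset at `name`.
    let mut delete = Record::with(name, RecordType::A, 0);
    delete.set_dns_class(DNSClass::ANY);
    assert!(authority.update_records(&[delete], true).await?);

    Ok(())
}
```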
// ANY rrset empty Delete an RRset if let None | Some(RData::NULL(..)) = rr.data() { let deleted = self.in_memory.records_mut().await.remove(&rr_key); info!("deleted rrset: {:?}", deleted); updated = updated || deleted.is_some(); } else { info!("expected empty rdata: {:?}", rr); return Err(ResponseCode::FormErr); } } } } DNSClass::NONE => { info!("deleting specific record: {:?}", rr); // NONE rrset rr Delete an RR from an RRset if let Some(rrset) = self.in_memory.records_mut().await.get_mut(&rr_key) { // b/c this is an Arc, we need to clone, then remove, and replace the node. let mut rrset_clone: RecordSet = RecordSet::clone(&*rrset); let deleted = rrset_clone.remove(rr, serial); info!("deleted ({}) specific record: {:?}", deleted, rr); updated = updated || deleted; if deleted { *rrset = Arc::new(rrset_clone); } } } class => { info!("unexpected DNS Class: {:?}", class); return Err(ResponseCode::FormErr); } } } // update the serial... if updated && auto_signing_and_increment { if self.is_dnssec_enabled { cfg_if::cfg_if! { if #[cfg(feature = "dnssec")] { self.secure_zone().await.map_err(|e| { error!("failure securing zone: {}", e); ResponseCode::ServFail })? } else { error!("failure securing zone, dnssec feature not enabled"); return Err(ResponseCode::ServFail) } } } else { // the secure_zone() function increments the SOA during it's operation, if we're not // dnssec, then we need to do it here... self.in_memory.increment_soa_serial().await; } } Ok(updated) } } impl Deref for SqliteAuthority { type Target = InMemoryAuthority; fn deref(&self) -> &Self::Target { &self.in_memory } } impl DerefMut for SqliteAuthority { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.in_memory } } #[async_trait::async_trait] impl Authority for SqliteAuthority { type Lookup = ::Lookup; /// What type is this zone fn zone_type(&self) -> ZoneType { self.in_memory.zone_type() } /// Return true if AXFR is allowed fn is_axfr_allowed(&self) -> bool { self.in_memory.is_axfr_allowed() } /// Takes the UpdateMessage, extracts the Records, and applies the changes to the record set. /// /// [RFC 2136](https://tools.ietf.org/html/rfc2136), DNS Update, April 1997 /// /// ```text /// /// 3.4 - Process Update Section /// /// Next, the Update Section is processed as follows. /// /// 3.4.2 - Update /// /// The Update Section is parsed into RRs and these RRs are processed in /// order. /// /// 3.4.2.1. If any system failure (such as an out of memory condition, /// or a hardware error in persistent storage) occurs during the /// processing of this section, signal SERVFAIL to the requestor and undo /// all updates applied to the zone during this transaction. /// /// 3.4.2.2. Any Update RR whose CLASS is the same as ZCLASS is added to /// the zone. In case of duplicate RDATAs (which for SOA RRs is always /// the case, and for WKS RRs is the case if the ADDRESS and PROTOCOL /// fields both match), the Zone RR is replaced by Update RR. If the /// TYPE is SOA and there is no Zone SOA RR, or the new SOA.SERIAL is /// lower (according to [RFC1982]) than or equal to the current Zone SOA /// RR's SOA.SERIAL, the Update RR is ignored. In the case of a CNAME /// Update RR and a non-CNAME Zone RRset or vice versa, ignore the CNAME /// Update RR, otherwise replace the CNAME Zone RR with the CNAME Update /// RR. /// /// 3.4.2.3. 
For any Update RR whose CLASS is ANY and whose TYPE is ANY, /// all Zone RRs with the same NAME are deleted, unless the NAME is the /// same as ZNAME in which case only those RRs whose TYPE is other than /// SOA or NS are deleted. For any Update RR whose CLASS is ANY and /// whose TYPE is not ANY all Zone RRs with the same NAME and TYPE are /// deleted, unless the NAME is the same as ZNAME in which case neither /// SOA or NS RRs will be deleted. /// /// 3.4.2.4. For any Update RR whose class is NONE, any Zone RR whose /// NAME, TYPE, RDATA and RDLENGTH are equal to the Update RR is deleted, /// unless the NAME is the same as ZNAME and either the TYPE is SOA or /// the TYPE is NS and the matching Zone RR is the only NS remaining in /// the RRset, in which case this Update RR is ignored. /// /// 3.4.2.5. Signal NOERROR to the requestor. /// ``` /// /// # Arguments /// /// * `update` - The `UpdateMessage` records will be extracted and used to perform the update /// actions as specified in the above RFC. /// /// # Return value /// /// true if any of additions, updates or deletes were made to the zone, false otherwise. Err is /// returned in the case of bad data, etc. #[cfg(feature = "dnssec")] async fn update(&self, update: &MessageRequest) -> UpdateResult { //let this = &mut self.in_memory.lock().await; // the spec says to authorize after prereqs, seems better to auth first. self.authorize(update).await?; self.verify_prerequisites(update.prerequisites()).await?; self.pre_scan(update.updates()).await?; self.update_records(update.updates(), true).await } /// Always fail when DNSSEC is disabled. #[cfg(not(feature = "dnssec"))] async fn update(&self, _update: &MessageRequest) -> UpdateResult { Err(ResponseCode::NotImp) } /// Get the origin of this zone, i.e. example.com is the origin for www.example.com fn origin(&self) -> &LowerName { self.in_memory.origin() } /// Looks up all Resource Records matching the giving `Name` and `RecordType`. /// /// # Arguments /// /// * `name` - The `Name`, label, to lookup. /// * `rtype` - The `RecordType`, to lookup. `RecordType::ANY` will return all records matching /// `name`. `RecordType::AXFR` will return all record types except `RecordType::SOA` /// due to the requirements that on zone transfers the `RecordType::SOA` must both /// precede and follow all other records. /// * `is_secure` - If the DO bit is set on the EDNS OPT record, then return RRSIGs as well. /// /// # Return value /// /// None if there are no matching records, otherwise a `Vec` containing the found records. async fn lookup( &self, name: &LowerName, rtype: RecordType, lookup_options: LookupOptions, ) -> Result { self.in_memory.lookup(name, rtype, lookup_options).await } async fn search( &self, request_info: RequestInfo<'_>, lookup_options: LookupOptions, ) -> Result { self.in_memory.search(request_info, lookup_options).await } /// Return the NSEC records based on the given name /// /// # Arguments /// /// * `name` - given this name (i.e. 
the lookup name), return the NSEC record that is less than /// this /// * `is_secure` - if true then it will return RRSIG records as well async fn get_nsec_records( &self, name: &LowerName, lookup_options: LookupOptions, ) -> Result { self.in_memory.get_nsec_records(name, lookup_options).await } } #[cfg(feature = "dnssec")] #[cfg_attr(docsrs, doc(cfg(feature = "dnssec")))] #[async_trait::async_trait] impl DnssecAuthority for SqliteAuthority { async fn add_update_auth_key(&self, name: Name, key: KEY) -> DnsSecResult<()> { self.in_memory.add_update_auth_key(name, key).await } /// By adding a secure key, this will implicitly enable dnssec for the zone. /// /// # Arguments /// /// * `signer` - Signer with associated private key async fn add_zone_signing_key(&self, signer: SigSigner) -> DnsSecResult<()> { self.in_memory.add_zone_signing_key(signer).await } /// (Re)generates the nsec records, increments the serial number and signs the zone async fn secure_zone(&self) -> DnsSecResult<()> { self.in_memory.secure_zone().await } } #[cfg(test)] mod tests { use crate::store::sqlite::SqliteAuthority; #[test] fn test_is_send_sync() { fn send_sync() -> bool { true } assert!(send_sync::()); } } trust-dns-server-0.22.0/src/store/sqlite/config.rs000064400000000000000000000013501046102023000202120ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use serde::Deserialize; /// Configuration for zone file for sqlite based zones #[derive(Deserialize, PartialEq, Eq, Debug)] pub struct SqliteConfig { /// path to initial zone file pub zone_file_path: String, /// path to the sqlite journal file pub journal_file_path: String, /// Are updates allowed to this zone #[serde(default)] pub allow_update: bool, } trust-dns-server-0.22.0/src/store/sqlite/mod.rs000064400000000000000000000011121046102023000175200ustar 00000000000000// Copyright 2015-2018 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! SQLite serving with Dynamic DNS and journaling support pub mod authority; mod config; pub mod persistence; pub use self::authority::SqliteAuthority; pub use self::config::SqliteConfig; pub use self::persistence::Journal; trust-dns-server-0.22.0/src/store/sqlite/persistence.rs000064400000000000000000000264111046102023000212760ustar 00000000000000// Copyright 2015-2016 Benjamin Fry // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! All zone persistence related types use std::iter::Iterator; use std::path::Path; use std::sync::{Mutex, MutexGuard}; use rusqlite::types::ToSql; use rusqlite::{self, Connection}; use time; use tracing::error; use crate::error::{PersistenceErrorKind, PersistenceResult}; use crate::proto::rr::Record; use crate::proto::serialize::binary::{BinDecodable, BinDecoder, BinEncodable, BinEncoder}; /// The current Journal version of the application pub const CURRENT_VERSION: i64 = 1; /// The Journal is the audit log of all changes to a zone after initial creation. 
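`SqliteConfig` is a plain `serde` struct, so it can be exercised on its own with the crate's `toml` dependency. The sketch below parses just this store section; the paths are hypothetical, and in a full server configuration these settings typically sit inside a zone's store entry rather than at the top level.

```rust
// Sketch: deserialize the sqlite store settings from TOML.
use trust_dns_server::store::sqlite::SqliteConfig;

fn parse_store_config() -> Result<SqliteConfig, toml::de::Error> {
    let config: SqliteConfig = toml::from_str(
        r#"
            zone_file_path = "/var/named/example.com.zone"
            journal_file_path = "/var/named/example.com.jrnl"
            allow_update = true
        "#,
    )?;
    assert!(config.allow_update); // defaults to false when the key is omitted
    Ok(config)
}
```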
pub struct Journal { conn: Mutex, version: i64, } impl Journal { /// Constructs a new Journal, attaching to the specified Sqlite Connection pub fn new(conn: Connection) -> PersistenceResult { let version = Self::select_schema_version(&conn)?; Ok(Self { conn: Mutex::new(conn), version, }) } /// Constructs a new Journal opening a Sqlite connection to the file at the specified path pub fn from_file(journal_file: &Path) -> PersistenceResult { let result = Self::new(Connection::open(journal_file)?); let mut journal = result?; journal.schema_up()?; Ok(journal) } /// Returns a reference to the Sqlite Connection pub fn conn(&self) -> MutexGuard<'_, Connection> { self.conn.lock().expect("conn poisoned") } /// Returns the current schema version of the journal pub fn schema_version(&self) -> i64 { self.version } /// this returns an iterator from the beginning of time, to be used to recreate an authority pub fn iter(&self) -> JournalIter<'_> { JournalIter::new(self) } /// Inserts a record, this is an append only operation. /// /// Records should never be posthumously modified. The message will be serialized into the. /// the first message serialized to the journal, should be a single AXFR of the entire zone, /// this will be used as a starting point to reconstruct the zone. /// /// # Argument /// /// * `record` - will be serialized into the journal pub fn insert_record(&self, soa_serial: u32, record: &Record) -> PersistenceResult<()> { assert!( self.version == CURRENT_VERSION, "schema version mismatch, schema_up() resolves this" ); let mut serial_record: Vec = Vec::with_capacity(512); { let mut encoder = BinEncoder::new(&mut serial_record); record.emit(&mut encoder)?; } let timestamp = time::OffsetDateTime::now_utc(); let client_id: i64 = 0; // TODO: we need better id information about the client, like pub_key let soa_serial: i64 = i64::from(soa_serial); let count = self.conn.lock().expect("conn poisoned").execute( "INSERT \ INTO records (client_id, soa_serial, timestamp, \ record) \ VALUES ($1, $2, $3, $4)", &[ &client_id as &dyn ToSql, &soa_serial, ×tamp, &serial_record, ], )?; // if count != 1 { return Err(PersistenceErrorKind::WrongInsertCount { got: count, expect: 1, } .into()); }; Ok(()) } /// Inserts a set of records into the Journal, a convenience method for insert_record pub fn insert_records(&self, soa_serial: u32, records: &[Record]) -> PersistenceResult<()> { // TODO: NEED TRANSACTION HERE for record in records { self.insert_record(soa_serial, record)?; } Ok(()) } /// Selects a record from the given row_id. /// /// This allows for the entire set of records to be iterated through, by starting at 0, and /// incrementing each subsequent row. /// /// # Arguments /// /// * `row_id` - the row_id can either be exact, or start at 0 to get the earliest row in the /// list. pub fn select_record(&self, row_id: i64) -> PersistenceResult> { assert!( self.version == CURRENT_VERSION, "schema version mismatch, schema_up() resolves this" ); let conn = self.conn.lock().expect("conn poisoned"); let mut stmt = conn.prepare( "SELECT _rowid_, record \ FROM records \ WHERE _rowid_ >= $1 \ LIMIT 1", )?; let record_opt: Option> = stmt .query_and_then( &[&row_id], |row| -> Result<(i64, Record), rusqlite::Error> { let row_id: i64 = row.get(0)?; let record_bytes: Vec = row.get(1)?; let mut decoder = BinDecoder::new(&record_bytes); // todo add location to this... 
match Record::read(&mut decoder) { Ok(record) => Ok((row_id, record)), Err(decode_error) => Err(rusqlite::Error::InvalidParameterName(format!( "could not decode: {}", decode_error ))), } }, )? .next(); // match record_opt { Some(Ok((row_id, record))) => Ok(Some((row_id, record))), Some(Err(err)) => Err(err.into()), None => Ok(None), } } /// selects the current schema version of the journal DB, returns -1 if there is no schema /// /// /// # Arguments /// /// * `conn` - db connection to use pub fn select_schema_version(conn: &Connection) -> PersistenceResult { // first see if our schema is there let mut stmt = conn.prepare( "SELECT name \ FROM sqlite_master \ WHERE type='table' \ AND name='tdns_schema'", )?; let tdns_schema_opt: Option> = stmt.query_map([], |row| row.get(0))?.next(); let tdns_schema = match tdns_schema_opt { Some(Ok(string)) => string, Some(Err(err)) => return Err(err.into()), None => return Ok(-1), }; assert_eq!(&tdns_schema, "tdns_schema"); let version: i64 = conn.query_row( "SELECT version \ FROM tdns_schema", [], |row| row.get(0), )?; Ok(version) } /// update the schema version fn update_schema_version(&self, new_version: i64) -> PersistenceResult<()> { // validate the versions of all the schemas... assert!(new_version <= CURRENT_VERSION); let count = self .conn .lock() .expect("conn poisoned") .execute("UPDATE tdns_schema SET version = $1", &[&new_version])?; // assert_eq!(count, 1); Ok(()) } /// initializes the schema for the Journal pub fn schema_up(&mut self) -> PersistenceResult { while self.version < CURRENT_VERSION { match self.version + 1 { 0 => self.version = self.init_up()?, 1 => self.version = self.records_up()?, _ => panic!("incorrect version somewhere"), // valid panic, non-recoverable state } self.update_schema_version(self.version)?; } Ok(self.version) } /// initial schema, include the tdns_schema table for tracking the Journal version fn init_up(&self) -> PersistenceResult { let count = self.conn.lock().expect("conn poisoned").execute( "CREATE TABLE tdns_schema ( \ version INTEGER NOT NULL \ )", [], )?; // assert_eq!(count, 0); let count = self .conn .lock() .expect("conn poisoned") .execute("INSERT INTO tdns_schema (version) VALUES (0)", [])?; // assert_eq!(count, 1); Ok(0) } /// adds the records table, this is the main and single table for the history of changes to an /// authority. 
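A hedged end-to-end sketch of the `Journal` API defined in this file: open (and migrate) a journal, append one record, then replay it from the beginning, which is how an authority is reconstructed from disk. The path is illustrative only.

```rust
// Sketch: open a journal, append a record, replay the whole log.
use std::net::Ipv4Addr;
use std::path::Path;
use std::str::FromStr;

use trust_dns_client::rr::{Name, RData, Record};
use trust_dns_server::store::sqlite::Journal;

fn append_and_replay() -> Result<(), Box<dyn std::error::Error>> {
    // `from_file` runs `schema_up`, so the records table exists afterwards.
    let journal = Journal::from_file(Path::new("/tmp/example.com.jrnl"))?;

    let record = Record::from_rdata(
        Name::from_str("www.example.com.").unwrap(),
        3600,
        RData::A(Ipv4Addr::new(127, 0, 0, 1)),
    );
    journal.insert_record(/* soa_serial */ 1, &record)?;

    // JournalIter walks the rows in insertion order via select_record().
    for entry in journal.iter() {
        println!("replayed: {:?}", entry);
    }
    Ok(())
}
```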
Each record is expected to be in the format of an update record fn records_up(&self) -> PersistenceResult { // we'll be using rowid for our primary key, basically: `rowid INTEGER PRIMARY KEY ASC` let count = self.conn.lock().expect("conn poisoned").execute( "CREATE TABLE records ( \ client_id INTEGER NOT NULL, \ soa_serial INTEGER NOT NULL, \ timestamp TEXT NOT NULL, \ record BLOB NOT NULL \ )", [], )?; // assert_eq!(count, 1); Ok(1) } } /// Returns an iterator over all items in a Journal /// /// Useful for replaying an entire journal into memory to reconstruct a zone from disk pub struct JournalIter<'j> { current_row_id: i64, journal: &'j Journal, } impl<'j> JournalIter<'j> { fn new(journal: &'j Journal) -> Self { JournalIter { current_row_id: 0, journal, } } } impl<'j> Iterator for JournalIter<'j> { type Item = Record; fn next(&mut self) -> Option { let next: PersistenceResult> = self.journal.select_record(self.current_row_id + 1); match next { Ok(Some((row_id, record))) => { self.current_row_id = row_id; Some(record) } Ok(None) => None, Err(err) => { error!("persistence error while iterating over journal: {}", err); None } } } } trust-dns-server-0.22.0/tests/authority_battery/basic.rs000064400000000000000000000552731046102023000215430ustar 00000000000000#![allow(clippy::dbg_macro)] use std::future::Future; use std::net::{Ipv4Addr, Ipv6Addr}; use std::str::FromStr; use futures_executor::block_on; use trust_dns_client::op::{Header, Message, Query, ResponseCode}; use trust_dns_client::rr::{Name, RData, Record, RecordType}; use trust_dns_server::authority::{ AuthLookup, Authority, LookupError, LookupOptions, MessageRequest, }; use trust_dns_server::server::{Protocol, RequestInfo}; const TEST_HEADER: &Header = &Header::new(); pub fn test_a_lookup>(authority: A) { let query = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); match lookup .into_iter() .next() .expect("A record not found in authority") .data() .and_then(RData::as_a) { Some(ip) => assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *ip), _ => panic!("wrong rdata type returned"), } } #[allow(clippy::unreadable_literal)] pub fn test_soa>(authority: A) { let lookup = block_on(authority.soa()).unwrap(); match lookup .into_iter() .next() .expect("SOA record not found in authity") .data() { Some(RData::SOA(soa)) => { assert_eq!(Name::from_str("trust-dns.org.").unwrap(), *soa.mname()); assert_eq!(Name::from_str("root.trust-dns.org.").unwrap(), *soa.rname()); assert_eq!(199609203, soa.serial()); assert_eq!(28800, soa.refresh()); assert_eq!(7200, soa.retry()); assert_eq!(604800, soa.expire()); assert_eq!(86400, soa.minimum()); } _ => panic!("wrong rdata type returned"), } } pub fn test_ns>(authority: A) { let lookup = block_on(authority.ns(LookupOptions::default())).unwrap(); match lookup .into_iter() .next() .expect("NS record not found in authity") .data() { Some(RData::NS(name)) => assert_eq!(Name::from_str("bbb.example.com.").unwrap(), *name), _ => panic!("wrong rdata type returned"), } } pub fn test_ns_lookup>(authority: A) { let query = Query::query(Name::from_str("example.com.").unwrap(), RecordType::NS).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let 
additionals = dbg!(lookup .take_additionals() .expect("no additionals in response")); let ns = lookup .into_iter() .next() .expect("NS record not found in authority") .data() .and_then(RData::as_ns) .expect("Not an NS record"); assert_eq!(Name::from_str("bbb.example.com.").unwrap(), *ns); let a = additionals .into_iter() .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 2), *a); } pub fn test_mx>(authority: A) { let query = Query::query(Name::from_str("example.com.").unwrap(), RecordType::MX).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = dbg!(lookup .take_additionals() .expect("no additionals in response")); let mx = lookup .into_iter() .next() .expect("MX record not found in authority") .data() .and_then(RData::as_mx) .expect("Not an MX record"); assert_eq!( Name::from_str("alias.example.com.").unwrap(), *mx.exchange() ); // assert the A record is in the additionals section let mut additionals = additionals.into_iter(); let cname = additionals .next() .expect("CNAME record not found") .data() .and_then(RData::as_cname) .expect("Not an CNAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); let a = additionals .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); let aaaa = additionals .next() .expect("AAAA record not found") .data() .and_then(RData::as_aaaa) .expect("Not an AAAA record"); assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), *aaaa); } pub fn test_mx_to_null>(authority: A) { let query = Query::query( Name::from_str("no-service.example.com.").unwrap(), RecordType::MX, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); // In this case there should be no additional records assert!(lookup.take_additionals().is_none()); let mx = lookup .into_iter() .next() .expect("MX record not found in authority") .data() .and_then(RData::as_mx) .expect("Not an MX record"); assert_eq!(Name::from_str(".").unwrap(), *mx.exchange()); } pub fn test_cname>(authority: A) { let query = Query::query( Name::from_str("alias.example.com.").unwrap(), RecordType::CNAME, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let cname = lookup .into_iter() .next() .expect("CNAME record not found in authority") .data() .and_then(RData::as_cname) .expect("Not an A record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); } pub fn test_cname_alias>(authority: A) { let query = Query::query(Name::from_str("alias.example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = lookup .take_additionals() .expect("no additionals in response"); // for cname lookups, we have a cname returned in the answer, the catalog will perform additional lookups let cname = lookup .into_iter() .next() .expect("CNAME record not found 
in authority") .data() .and_then(RData::as_cname) .expect("Not a CNAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); // assert the A record is in the additionals section let a = additionals .into_iter() .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); } pub fn test_cname_chain>(authority: A) { let query = Query::query( Name::from_str("alias-chain.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = lookup .take_additionals() .expect("no additionals in response"); // for cname lookups, we have a cname returned in the answer, the catalog will perform additional lookups let cname = lookup .into_iter() .next() .expect("CNAME record not found in authority") .data() .and_then(RData::as_cname) .expect("Not a CNAME record"); assert_eq!(Name::from_str("alias.example.com.").unwrap(), *cname); // assert the A record is in the additionals section let mut additionals = additionals.into_iter(); let cname = additionals .next() .expect("CNAME record not found") .data() .and_then(RData::as_cname) .expect("Not an CNAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); let a = additionals .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); } /// In this the ANAME , should, return A and AAAA records in additional section /// the answer should be the A record pub fn test_aname>(authority: A) { let query = Query::query(Name::from_str("example.com.").unwrap(), RecordType::ANAME).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = lookup .take_additionals() .expect("no additionals from ANAME"); let aname = lookup .into_iter() .next() .expect("ANAME record not found in authority") .data() .and_then(RData::as_aname) .expect("Not an ANAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *aname); // check that additionals contain the info let a = additionals .iter() .find(|r| r.record_type() == RecordType::A) .and_then(Record::data) .and_then(RData::as_a) .expect("A not found"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); let aaaa = additionals .iter() .find(|r| r.record_type() == RecordType::AAAA) .and_then(Record::data) .and_then(RData::as_aaaa) .expect("AAAA not found"); assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), *aaaa); } /// In this test the A record that the ANAME resolves to should be returned as the answer, /// /// The additionals should include the ANAME. 
pub fn test_aname_a_lookup>(authority: A) { let query = Query::query(Name::from_str("example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = lookup.take_additionals().expect("no additionals for aname"); // the name should match the lookup, not the A records let (name, a) = lookup .into_iter() .next() .map(|r| (r.name(), r.data())) .expect("No A answer"); let a = a.and_then(RData::as_a).expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); assert_eq!(Name::from_str("example.com.").unwrap(), *name); // check that additionals contain the info let aname = additionals .into_iter() .next() .expect("ANAME record not found in authority") .data() .and_then(RData::as_aname) .expect("Not an ANAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *aname); } /// In this test the A record that the ANAME resolves to should be returned as the answer, not at the apex /// /// The additionals should include the ANAME, this one should include the CNAME chain as well. pub fn test_aname_chain>(authority: A) { let query = Query::query( Name::from_str("aname-chain.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = lookup.take_additionals().expect("no additionals"); let (name, a) = lookup .into_iter() .next() .map(|r| (r.name(), r.data())) .expect("Not an A record"); let a = a.and_then(RData::as_a).expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); assert_eq!(Name::from_str("aname-chain.example.com.").unwrap(), *name); // the name should match the lookup, not the A records let mut additionals = additionals.into_iter(); let aname = additionals .next() .expect("ANAME record not found in authority") .data() .and_then(RData::as_aname) .expect("Not an ANAME record"); assert_eq!(Name::from_str("alias.example.com.").unwrap(), *aname); let cname = additionals .next() .expect("CNAME record not found") .data() .and_then(RData::as_cname) .expect("Not an CNAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); let a = additionals .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); } pub fn test_update_errors>(mut authority: A) { use trust_dns_client::serialize::binary::BinDecodable; let mut message = Message::default(); message.add_query(Query::default()); let bytes = message.to_vec().unwrap(); let update = MessageRequest::from_bytes(&bytes).unwrap(); // this is expected to fail, i.e. 
updates are not allowed assert!(block_on(authority.update(&update)).is_err()); } pub fn test_dots_in_name>(authority: A) { let query = Query::query( Name::from_str("this.has.dots.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!( *lookup .into_iter() .next() .expect("A record not found in authity") .data() .and_then(RData::as_a) .expect("wrong rdata type returned"), Ipv4Addr::new(127, 0, 0, 3) ); // the rest should all be NameExists let query = Query::query( Name::from_str("has.dots.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap_err(); assert!(lookup.is_name_exists(), "lookup: {}", lookup); // the rest should all be NameExists let query = Query::query(Name::from_str("dots.example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap_err(); assert!(lookup.is_name_exists()); // and this should be an NXDOMAIN let query = Query::query( Name::from_str("not.this.has.dots.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap_err(); assert!(lookup.is_nx_domain()); } pub fn test_wildcard>(authority: A) { // check direct lookup let query = Query::query( Name::from_str("*.wildcard.example.com.").unwrap(), RecordType::CNAME, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!( *lookup .into_iter() .next() .expect("CNAME record not found in authority") .data() .and_then(RData::as_cname) .expect("wrong rdata type returned"), Name::from_str("www.example.com.").unwrap() ); // check wildcard lookup let query = Query::query( Name::from_str("www.wildcard.example.com.").unwrap(), RecordType::CNAME, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())) .expect("lookup of www.wildcard.example.com. failed"); assert_eq!( *lookup .into_iter() .next() .map(|r| { assert_eq!( *r.name(), Name::from_str("www.wildcard.example.com.").unwrap() ); r }) .expect("CNAME record not found in authority") .data() .and_then(RData::as_cname) .expect("wrong rdata type returned"), Name::from_str("www.example.com.").unwrap() ); } pub fn test_wildcard_chain>(authority: A) { // check wildcard lookup let query = Query::query( Name::from_str("www.wildcard.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())) .expect("lookup of www.wildcard.example.com. 
failed"); // the name should match the lookup, not the A records let additionals = lookup.take_additionals().expect("no additionals"); assert_eq!( *lookup .into_iter() .next() .expect("CNAME record not found in authority") .data() .and_then(RData::as_cname) .expect("wrong rdata type returned"), Name::from_str("www.example.com.").unwrap() ); let mut additionals = additionals.into_iter(); let a = additionals .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); } pub fn test_srv>(authority: A) { let query = Query::query( Name::from_str("server.example.com.").unwrap(), RecordType::SRV, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let mut lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); let additionals = dbg!(lookup .take_additionals() .expect("no additionals in response")); let srv = lookup .into_iter() .next() .expect("SRV record not found in authority") .data() .and_then(RData::as_srv) .expect("Not an SRV record"); assert_eq!(Name::from_str("alias.example.com.").unwrap(), *srv.target()); // assert the A record is in the additionals section let mut additionals = additionals.into_iter(); let cname = additionals .next() .expect("CNAME record not found") .data() .and_then(RData::as_cname) .expect("Not an CNAME record"); assert_eq!(Name::from_str("www.example.com.").unwrap(), *cname); let a = additionals .next() .expect("A record not found") .data() .and_then(RData::as_a) .expect("Not an A record"); assert_eq!(Ipv4Addr::new(127, 0, 0, 1), *a); let aaaa = additionals .next() .expect("AAAA record not found") .data() .and_then(RData::as_aaaa) .expect("Not an AAAA record"); assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), *aaaa); } pub fn test_invalid_lookup>(authority: A) { let query = Query::query(Name::from_str("www.google.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())); let err = lookup.expect_err("Lookup for www.google.com succeeded"); match err { LookupError::ResponseCode(code) => assert_eq!(code, ResponseCode::Refused), _ => panic!("invalid error enum variant"), } } // test some additional record collections macro_rules! define_basic_test { ($new:ident; $( $f:ident, )*) => { $( #[test] fn $f () { // Useful for getting debug logs // env_logger::try_init().ok(); let authority = crate::$new("../../tests/test-data/named_test_configs/example.com.zone", module_path!(), stringify!($f)); crate::authority_battery::basic::$f(authority); } )* } } macro_rules! 
basic_battery { ($new:ident) => { #[cfg(test)] mod basic { mod $new { define_basic_test!($new; test_a_lookup, test_soa, test_ns, test_ns_lookup, test_mx, test_mx_to_null, test_cname, test_cname_alias, test_cname_chain, test_aname, test_aname_a_lookup, test_aname_chain, test_update_errors, test_dots_in_name, test_wildcard, test_wildcard_chain, test_srv, test_invalid_lookup, ); } } }; } trust-dns-server-0.22.0/tests/authority_battery/dnssec.rs000064400000000000000000000414401046102023000217300ustar 00000000000000#![cfg(feature = "dnssec")] use std::future::Future; use std::str::FromStr; use futures_executor::block_on; use trust_dns_client::op::{Header, Query}; use trust_dns_client::rr::dnssec::{Algorithm, SupportedAlgorithms, Verifier}; use trust_dns_client::rr::{DNSClass, Name, RData, Record, RecordType}; use trust_dns_proto::rr::dnssec::rdata::DNSKEY; use trust_dns_proto::xfer; use trust_dns_server::authority::{AuthLookup, Authority, DnssecAuthority, LookupOptions}; use trust_dns_server::server::{Protocol, RequestInfo}; const TEST_HEADER: &Header = &Header::new(); pub fn test_a_lookup>(authority: A, keys: &[DNSKEY]) { let query = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search( request_info, LookupOptions::for_dnssec(true, SupportedAlgorithms::new()), )) .unwrap(); let (a_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::A); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&a_records, &rrsig_records, keys); } #[allow(clippy::unreadable_literal)] pub fn test_soa>(authority: A, keys: &[DNSKEY]) { let lookup = block_on(authority.soa_secure(LookupOptions::for_dnssec(true, SupportedAlgorithms::new()))) .unwrap(); let (soa_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::SOA); assert_eq!(soa_records.len(), 1); let soa = soa_records .first() .unwrap() .data() .and_then(RData::as_soa) .unwrap(); assert_eq!(Name::from_str("trust-dns.org.").unwrap(), *soa.mname()); assert_eq!(Name::from_str("root.trust-dns.org.").unwrap(), *soa.rname()); assert!(199609203 < soa.serial()); // serial should be one or more b/c of the signing process assert_eq!(28800, soa.refresh()); assert_eq!(7200, soa.retry()); assert_eq!(604800, soa.expire()); assert_eq!(86400, soa.minimum()); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&soa_records, &rrsig_records, keys); } pub fn test_ns>(authority: A, keys: &[DNSKEY]) { let lookup = block_on(authority.ns(LookupOptions::for_dnssec(true, SupportedAlgorithms::new()))) .unwrap(); let (ns_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::NS); assert_eq!( *ns_records .first() .unwrap() .data() .and_then(RData::as_ns) .unwrap(), Name::from_str("bbb.example.com.").unwrap() ); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&ns_records, &rrsig_records, keys); } pub fn test_aname_lookup>(authority: A, keys: 
&[DNSKEY]) { let query = Query::query( Name::from_str("aname-chain.example.com.").unwrap(), RecordType::A, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search( request_info, LookupOptions::for_dnssec(true, SupportedAlgorithms::new()), )) .unwrap(); let (a_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::A); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&a_records, &rrsig_records, keys); } pub fn test_wildcard>(authority: A, keys: &[DNSKEY]) { // check wildcard lookup let query = Query::query( Name::from_str("www.wildcard.example.com.").unwrap(), RecordType::CNAME, ) .into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search( request_info, LookupOptions::for_dnssec(true, SupportedAlgorithms::new()), )) .expect("lookup of www.wildcard.example.com. failed"); let (cname_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::CNAME); assert!(cname_records .iter() .all(|r| *r.name() == Name::from_str("www.wildcard.example.com.").unwrap())); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&cname_records, &rrsig_records, keys); } pub fn test_nsec_nodata>(authority: A, keys: &[DNSKEY]) { // this should have a single nsec record that covers the type let name = Name::from_str("www.example.com.").unwrap(); let lookup = block_on(authority.get_nsec_records( &name.clone().into(), LookupOptions::for_dnssec(true, SupportedAlgorithms::all()), )) .unwrap(); let (nsec_records, _other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::NSEC); println!("nsec_records: {:?}", nsec_records); // there should only be one, and it should match the www.example.com name assert_eq!(nsec_records.len(), 1); assert_eq!(nsec_records.first().unwrap().name(), &name); let nsecs: Vec<&Record> = nsec_records.iter().collect(); let query = Query::query(name, RecordType::TXT); assert!(xfer::dnssec_dns_handle::verify_nsec( &query, &Name::from_str("example.com.").unwrap(), &nsecs )); } pub fn test_nsec_nxdomain_start>(authority: A, keys: &[DNSKEY]) { // tests between the SOA and first record in the zone, where bbb is the first zone record let name = Name::from_str("aaa.example.com.").unwrap(); let lookup = block_on(authority.get_nsec_records( &name.clone().into(), LookupOptions::for_dnssec(true, SupportedAlgorithms::all()), )) .unwrap(); let (nsec_records, _other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::NSEC); println!("nsec_records: {:?}", nsec_records); // there should only be one, and it should match the www.example.com name assert!(!nsec_records.is_empty()); // because the first record is from the SOA, the wildcard isn't necessary // that is `example.com.` -> `bbb.example.com.` proves there is no wildcard. 
assert_eq!(nsec_records.len(), 1); let nsecs: Vec<&Record> = nsec_records.iter().collect(); let query = Query::query(name, RecordType::A); assert!(xfer::dnssec_dns_handle::verify_nsec( &query, &Name::from_str("example.com.").unwrap(), &nsecs )); } pub fn test_nsec_nxdomain_middle>(authority: A, keys: &[DNSKEY]) { // follows the first record, nsec should cover between ccc and www, where bbb is the first zone record let name = Name::from_str("ccc.example.com.").unwrap(); let lookup = block_on(authority.get_nsec_records( &name.clone().into(), LookupOptions::for_dnssec(true, SupportedAlgorithms::all()), )) .unwrap(); let (nsec_records, _other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::NSEC); println!("nsec_records: {:?}", nsec_records); // there should only be one, and it should match the www.example.com name assert!(!nsec_records.is_empty()); // one record covers between the names, the other is for the wildcard proof. assert_eq!(nsec_records.len(), 2); let nsecs: Vec<&Record> = nsec_records.iter().collect(); let query = Query::query(name, RecordType::A); assert!(xfer::dnssec_dns_handle::verify_nsec( &query, &Name::from_str("example.com.").unwrap(), &nsecs )); } pub fn test_nsec_nxdomain_wraps_end>( authority: A, keys: &[DNSKEY], ) { // wraps back to the beginning of the zone, where www is the last zone record let name = Name::from_str("zzz.example.com.").unwrap(); let lookup = block_on(authority.get_nsec_records( &name.clone().into(), LookupOptions::for_dnssec(true, SupportedAlgorithms::all()), )) .unwrap(); let (nsec_records, _other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::NSEC); println!("nsec_records: {:?}", nsec_records); // there should only be one, and it should match the www.example.com name assert!(!nsec_records.is_empty()); // one record covers between the names, the other is for the wildcard proof. 
assert_eq!(nsec_records.len(), 2); let nsecs: Vec<&Record> = nsec_records.iter().collect(); let query = Query::query(name, RecordType::A); assert!(xfer::dnssec_dns_handle::verify_nsec( &query, &Name::from_str("example.com.").unwrap(), &nsecs )); } pub fn test_rfc_6975_supported_algorithms>( authority: A, keys: &[DNSKEY], ) { // for each key, see that supported algorithms are restricted to that individual key for key in keys { println!("key algorithm: {}", key.algorithm()); let query = Query::query(Name::from_str("www.example.com.").unwrap(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search( request_info, LookupOptions::for_dnssec(true, SupportedAlgorithms::from(key.algorithm())), )) .unwrap(); let (a_records, other_records): (Vec<_>, Vec<_>) = lookup .into_iter() .cloned() .partition(|r| r.record_type() == RecordType::A); let (rrsig_records, _other_records): (Vec<_>, Vec<_>) = other_records .into_iter() .partition(|r| r.record_type() == RecordType::RRSIG); assert!(!rrsig_records.is_empty()); verify(&a_records, &rrsig_records, &[key.clone()]); } } pub fn verify(records: &[Record], rrsig_records: &[Record], keys: &[DNSKEY]) { let record_name = records.first().unwrap().name(); let record_type = records.first().unwrap().record_type(); println!("record_name: {}, type: {}", record_name, record_type); // should be signed with all the keys assert!(keys.iter().all(|key| rrsig_records .iter() .filter_map(|rrsig| { let rrsig = rrsig .data() .and_then(RData::as_dnssec) .expect("not DNSSEC") .as_sig() .expect("not RRSIG"); if rrsig.algorithm() == key.algorithm() { Some(rrsig) } else { None } }) .filter(|rrsig| rrsig.key_tag() == key.calculate_key_tag().unwrap()) .filter(|rrsig| rrsig.type_covered() == record_type) .any(|rrsig| key .verify_rrsig(record_name, DNSClass::IN, rrsig, records) .map_err(|e| println!("failed to verify: {}", e)) .is_ok()))); } pub fn add_signers(authority: &mut A) -> Vec { use trust_dns_server::config::dnssec::*; let signer_name = Name::from(authority.origin().to_owned()); let mut keys = Vec::::new(); // TODO: support RSA signing with ring #[cfg(feature = "dnssec-openssl")] // rsa { let key_config = KeyConfig { key_path: "../../tests/test-data/named_test_configs/dnssec/rsa_2048.pem".to_string(), password: Some("123456".to_string()), algorithm: Algorithm::RSASHA512.to_string(), signer_name: Some(signer_name.to_string()), is_zone_signing_key: Some(true), is_zone_update_auth: Some(false), }; let signer = key_config .try_into_signer(signer_name.clone()) .expect("failed to read key_config"); keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); block_on(authority.add_zone_signing_key(signer)).expect("failed to add signer to zone"); block_on(authority.secure_zone()).expect("failed to sign zone"); } // // TODO: why are ecdsa tests failing in this context? 
// // ecdsa_p256 // { // let key_config = KeyConfig { // key_path: "../../tests/test-data/named_test_configs/dnssec/ecdsa_p256.pem".to_string(), // password: None, // algorithm: Algorithm::ECDSAP256SHA256.to_string(), // signer_name: Some(signer_name.clone().to_string()), // is_zone_signing_key: Some(true), // is_zone_update_auth: Some(false), // }; // let signer = key_config.try_into_signer(signer_name.clone()).expect("failed to read key_config"); // keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); // authority.add_zone_signing_key(signer).expect("failed to add signer to zone"); // authority.secure_zone().expect("failed to sign zone"); // } // // ecdsa_p384 // { // let key_config = KeyConfig { // key_path: "../../tests/test-data/named_test_configs/dnssec/ecdsa_p384.pem".to_string(), // password: None, // algorithm: Algorithm::ECDSAP384SHA384.to_string(), // signer_name: Some(signer_name.clone().to_string()), // is_zone_signing_key: Some(true), // is_zone_update_auth: Some(false), // }; // let signer = key_config.try_into_signer(signer_name.clone()).expect("failed to read key_config"); // keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); // authority.add_zone_signing_key(signer).expect("failed to add signer to zone"); // authority.secure_zone().expect("failed to sign zone"); // } // ed 25519 #[cfg(feature = "dnssec-ring")] { let key_config = KeyConfig { key_path: "../../tests/test-data/named_test_configs/dnssec/ed25519.pk8".to_string(), password: None, algorithm: Algorithm::ED25519.to_string(), signer_name: Some(signer_name.to_string()), is_zone_signing_key: Some(true), is_zone_update_auth: Some(false), }; let signer = key_config .try_into_signer(signer_name) .expect("failed to read key_config"); keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); block_on(authority.add_zone_signing_key(signer)).expect("failed to add signer to zone"); block_on(authority.secure_zone()).expect("failed to sign zone"); } keys } macro_rules! define_dnssec_test { ($new:ident; $( $f:ident, )*) => { $( #[test] fn $f () { let mut authority = crate::$new("../../tests/test-data/named_test_configs/example.com.zone", module_path!(), stringify!($f)); let keys = crate::authority_battery::dnssec::add_signers(&mut authority); crate::authority_battery::dnssec::$f(authority, &keys); } )* } } macro_rules! 
dnssec_battery { ($new:ident) => { #[cfg(test)] mod dnssec { mod $new { define_dnssec_test!($new; test_a_lookup, test_soa, test_ns, test_aname_lookup, test_wildcard, test_nsec_nodata, test_nsec_nxdomain_start, test_nsec_nxdomain_middle, test_nsec_nxdomain_wraps_end, test_rfc_6975_supported_algorithms, ); } } }; } trust-dns-server-0.22.0/tests/authority_battery/dynamic_update.rs000064400000000000000000000773311046102023000234470ustar 00000000000000#![cfg(feature = "dnssec")] use std::{ future::Future, net::{Ipv4Addr, Ipv6Addr}, str::FromStr, }; use futures_executor::block_on; use trust_dns_client::{ op::{update_message, Header, Message, Query, ResponseCode}, proto::rr::{DNSClass, Name, RData, Record, RecordSet, RecordType}, rr::dnssec::{Algorithm, SigSigner, SupportedAlgorithms, Verifier}, serialize::binary::{BinDecodable, BinEncodable, BinSerializable}, }; use trust_dns_server::{ authority::{ AuthLookup, Authority, DnssecAuthority, LookupError, LookupOptions, MessageRequest, UpdateResult, }, server::{Protocol, RequestInfo}, }; const TEST_HEADER: &Header = &Header::new(); fn update_authority>( mut message: Message, key: &SigSigner, authority: &mut A, ) -> UpdateResult { message.finalize(key, 1).expect("failed to sign message"); let message = message.to_bytes().unwrap(); let request = MessageRequest::from_bytes(&message).unwrap(); block_on(authority.update(&request)) } pub fn test_create>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("create.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); let record = Record::from_rdata(name.clone(), 8, RData::A(Ipv4Addr::new(127, 0, 0, 10))); let message = update_message::create( record.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); let query = Query::query(name, RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); match lookup .into_iter() .next() .expect("A record not found in authity") .data() { Some(RData::A(ip)) => assert_eq!(Ipv4Addr::new(127, 0, 0, 10), *ip), _ => panic!("wrong rdata type returned"), } // trying to create again should error let mut message = update_message::create(record.into(), Name::from_str("example.com.").unwrap(), true); assert_eq!( update_authority(message, key, &mut authority).unwrap_err(), ResponseCode::YXRRSet ); } } pub fn test_create_multi>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("create-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // create a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); let record = record; let mut record2 = record.clone(); record2.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 11)))); let record2 = record2; let mut rrset = RecordSet::from(record.clone()); rrset.insert(record2.clone(), 0); let rrset = rrset; let message = update_message::create(rrset.clone(), Name::from_str("example.com.").unwrap(), true); assert!(update_authority(message, key, &mut authority).expect("create failed")); let query = Query::query(name, RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, 
TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert!(lookup.iter().any(|rr| *rr == record)); assert!(lookup.iter().any(|rr| *rr == record2)); // trying to create again should error let message = update_message::create(rrset, Name::from_str("example.com.").unwrap(), true); assert_eq!( update_authority(message, key, &mut authority).unwrap_err(), ResponseCode::YXRRSet ); } } pub fn test_append>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("append.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); // first check the must_exist option let mut message = update_message::append( record.clone().into(), Name::from_str("example.com.").unwrap(), true, true, ); assert_eq!( update_authority(message, key, &mut authority).unwrap_err(), ResponseCode::NXRRSet ); // next append to a non-existent RRset let message = update_message::append( record.clone().into(), Name::from_str("example.com.").unwrap(), false, true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); // verify record contents let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 1); assert!(lookup.iter().any(|rr| *rr == record)); // will fail if already set and not the same value. let mut record2 = record.clone(); record2.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 11)))); let message = update_message::append( record2.clone().into(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(update_authority(message, key, &mut authority).expect("append failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 2); assert!(lookup.iter().any(|rr| *rr == record)); assert!(lookup.iter().any(|rr| *rr == record2)); // show that appending the same thing again is ok, but doesn't add any records let message = update_message::append( record2.clone().into(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(!update_authority(message, key, &mut authority).expect("append failed")); let query = Query::query(name, RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 2); assert!(lookup.iter().any(|rr| *rr == record)); assert!(lookup.iter().any(|rr| *rr == record2)); } } pub fn test_append_multi>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("append-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); // next append to a non-existent RRset let message = 
update_message::append( record.clone().into(), Name::from_str("example.com.").unwrap(), false, true, ); assert!(update_authority(message, key, &mut authority).expect("append failed")); // will fail if already set and not the same value. let mut record2 = record.clone(); record2.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 11)))); let mut record3 = record.clone(); record3.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 12)))); // build the append set let mut rrset = RecordSet::from(record2.clone()); rrset.insert(record3.clone(), 0); let message = update_message::append( rrset.clone(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(update_authority(message, key, &mut authority).expect("append failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 3); assert!(lookup.iter().any(|rr| *rr == record)); assert!(lookup.iter().any(|rr| *rr == record2)); assert!(lookup.iter().any(|rr| *rr == record3)); // show that appending the same thing again is ok, but doesn't add any records // TODO: technically this is a test for the Server, not client... let message = update_message::append( rrset.clone(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(!update_authority(message, key, &mut authority).expect("append failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 3); assert!(lookup.iter().any(|rr| *rr == record)); assert!(lookup.iter().any(|rr| *rr == record2)); assert!(lookup.iter().any(|rr| *rr == record3)); } } pub fn test_compare_and_swap>( mut authority: A, keys: &[SigSigner], ) { let name = Name::from_str("compare-and-swap.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // create a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); let record = record; let message = update_message::create( record.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); let current = record; let mut new = current.clone(); new.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 11)))); let new = new; let message = update_message::compare_and_swap( current.clone().into(), new.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("compare_and_swap failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 1); assert!(lookup.iter().any(|rr| *rr == new)); assert!(!lookup.iter().any(|rr| *rr == current)); // check the it fails if tried again. 
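// Descriptive note (added comment): a second compare_and_swap that still presents the
// already-replaced `current` record must be rejected with NXRRSet, and the lookup below
// confirms the zone still holds `new` rather than the attempted `not` value.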
let mut not = new.clone(); not.set_data(Some(RData::A(Ipv4Addr::new(102, 12, 102, 12)))); let not = not; let message = update_message::compare_and_swap( current.into(), not.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert_eq!( update_authority(message, key, &mut authority).unwrap_err(), ResponseCode::NXRRSet ); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 1); assert!(lookup.iter().any(|rr| *rr == new)); assert!(!lookup.iter().any(|rr| *rr == not)); } } pub fn test_compare_and_swap_multi>( mut authority: A, keys: &[SigSigner], ) { let name = Name::from_str("compare-and-swap-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // create a record let mut current = RecordSet::with_ttl(name.clone(), RecordType::A, 8); let current1 = current .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 10))) .clone(); let current2 = current .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 11))) .clone(); let current = current; let mut message = update_message::create( current.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); let mut new = RecordSet::with_ttl(current.name().clone(), current.record_type(), current.ttl()); let new1 = new .new_record(&RData::A(Ipv4Addr::new(100, 10, 101, 10))) .clone(); let new2 = new .new_record(&RData::A(Ipv4Addr::new(100, 10, 101, 11))) .clone(); let new = new; let mut message = update_message::compare_and_swap( current.clone(), new.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("compare_and_swap failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 2); assert!(lookup.iter().any(|rr| *rr == new1)); assert!(lookup.iter().any(|rr| *rr == new2)); assert!(!lookup.iter().any(|rr| *rr == current1)); assert!(!lookup.iter().any(|rr| *rr == current2)); // check the it fails if tried again. 
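// Descriptive note (added comment): same stale-swap check as above, but against the
// multi-record RRset. Swapping based on the superseded `current` set must fail with
// NXRRSet and leave the two previously installed records in place.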
let mut not = new1.clone(); not.set_data(Some(RData::A(Ipv4Addr::new(102, 12, 102, 12)))); let not = not; let message = update_message::compare_and_swap( current.clone(), not.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert_eq!( update_authority(message, key, &mut authority).unwrap_err(), ResponseCode::NXRRSet ); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 2); assert!(lookup.iter().any(|rr| *rr == new1)); assert!(!lookup.iter().any(|rr| *rr == not)); } } pub fn test_delete_by_rdata>( mut authority: A, keys: &[SigSigner], ) { let name = Name::from_str("test-delete-by-rdata.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut record1 = Record::with(name.clone(), RecordType::A, 8); record1.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); // first check the must_exist option let mut message = update_message::delete_by_rdata( record1.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(!update_authority(message, key, &mut authority).expect("delete_by_rdata failed")); // next create to a non-existent RRset let mut message = update_message::create( record1.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("delete_by_rdata failed")); let mut record2 = record1.clone(); record2.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 11)))); let message = update_message::append( record2.clone().into(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(update_authority(message, key, &mut authority).expect("append failed")); // verify record contents let message = update_message::delete_by_rdata( record2.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("delete_by_rdata failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 1); assert!(lookup.iter().any(|rr| *rr == record1)); } } pub fn test_delete_by_rdata_multi>( mut authority: A, keys: &[SigSigner], ) { let name = Name::from_str("test-delete-by-rdata-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut rrset = RecordSet::with_ttl(name.clone(), RecordType::A, 8); let record1 = rrset .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 10))) .clone(); let record2 = rrset .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 11))) .clone(); let record3 = rrset .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 12))) .clone(); let record4 = rrset .new_record(&RData::A(Ipv4Addr::new(100, 10, 100, 13))) .clone(); let rrset = rrset; // first check the must_exist option let message = update_message::delete_by_rdata( rrset.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(!update_authority(message, key, &mut authority).expect("delete_by_rdata failed")); // next create to a non-existent RRset let message = 
update_message::create(rrset.clone(), Name::from_str("example.com.").unwrap(), true); assert!(update_authority(message, key, &mut authority).expect("create failed")); // append a record let mut rrset = RecordSet::with_ttl(name.clone(), RecordType::A, 8); let record1 = rrset.new_record(record1.data().unwrap()).clone(); let record3 = rrset.new_record(record3.data().unwrap()).clone(); let rrset = rrset; let message = update_message::append( rrset.clone(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(!update_authority(message, key, &mut authority).expect("append failed")); // verify record contents let message = update_message::delete_by_rdata( rrset.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("delete_by_rdata failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())).unwrap(); assert_eq!(lookup.iter().count(), 2); assert!(!lookup.iter().any(|rr| *rr == record1)); assert!(lookup.iter().any(|rr| *rr == record2)); assert!(!lookup.iter().any(|rr| *rr == record3)); assert!(lookup.iter().any(|rr| *rr == record4)); } } pub fn test_delete_rrset>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("compare-and-swap-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); // first check the must_exist option let message = update_message::delete_rrset( record.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(!update_authority(message, key, &mut authority).expect("delete_rrset failed")); // next create to a non-existent RRset let message = update_message::create( record.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); let mut record = record.clone(); record.set_data(Some(RData::A(Ipv4Addr::new(101, 11, 101, 11)))); let message = update_message::append( record.clone().into(), Name::from_str("example.com.").unwrap(), true, true, ); assert!(update_authority(message, key, &mut authority).expect("append failed")); // verify record contents let message = update_message::delete_rrset( record.clone(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("delete_rrset failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())); assert_eq!( *lookup.unwrap_err().as_response_code().unwrap(), ResponseCode::NXDomain ); } } pub fn test_delete_all>(mut authority: A, keys: &[SigSigner]) { let name = Name::from_str("compare-and-swap-multi.example.com.").unwrap(); for key in keys { let name = Name::from_str(key.algorithm().as_str()) .unwrap() .append_name(&name) .unwrap(); // append a record let mut record = Record::with(name.clone(), RecordType::A, 8); record.set_data(Some(RData::A(Ipv4Addr::new(100, 10, 100, 10)))); // first check the must_exist option let message = update_message::delete_all( record.name().clone(), 
Name::from_str("example.com.").unwrap(), DNSClass::IN, true, ); assert!(!update_authority(message, key, &mut authority).expect("delete_all failed")); // next create to a non-existent RRset let message = update_message::create( record.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); let mut record = record.clone(); record.set_rr_type(RecordType::AAAA); record.set_data(Some(RData::AAAA(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)))); let message = update_message::create( record.clone().into(), Name::from_str("example.com.").unwrap(), true, ); assert!(update_authority(message, key, &mut authority).expect("create failed")); // verify record contents let message = update_message::delete_all( record.name().clone(), Name::from_str("example.com.").unwrap(), DNSClass::IN, true, ); assert!(update_authority(message, key, &mut authority).expect("delete_all failed")); let query = Query::query(name.clone(), RecordType::A).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())); assert_eq!( *lookup.unwrap_err().as_response_code().unwrap(), ResponseCode::NXDomain ); let query = Query::query(name.clone(), RecordType::AAAA).into(); let request_info = RequestInfo::new( "127.0.0.1:53".parse().unwrap(), Protocol::Udp, TEST_HEADER, &query, ); let lookup = block_on(authority.search(request_info, LookupOptions::default())); assert_eq!( *lookup.unwrap_err().as_response_code().unwrap(), ResponseCode::NXDomain ); } } pub fn add_auth(authority: &mut A) -> Vec { use trust_dns_client::rr::rdata::key::KeyUsage; use trust_dns_server::config::dnssec::*; let update_name = Name::from_str("update") .unwrap() .append_domain(&authority.origin().to_owned().into()) .unwrap(); let mut keys = Vec::::new(); // TODO: support RSA signing with ring // rsa #[cfg(feature = "dnssec-openssl")] { let key_config = KeyConfig { key_path: "../../tests/test-data/named_test_configs/dnssec/rsa_2048.pem".to_string(), password: Some("123456".to_string()), algorithm: Algorithm::RSASHA512.to_string(), signer_name: Some(update_name.to_string()), is_zone_signing_key: Some(true), is_zone_update_auth: Some(false), }; let signer = key_config .try_into_signer(update_name.clone()) .expect("failed to read key_config"); let public_key = signer .key() .to_sig0key_with_usage(Algorithm::RSASHA512, KeyUsage::Host) .expect("failed to get sig0 key"); block_on(authority.add_update_auth_key(update_name.clone(), public_key)) .expect("failed to add signer to zone"); keys.push(signer); } // // TODO: why are ecdsa tests failing in this context? 
// // ecdsa_p256 // { // let key_config = KeyConfig { // key_path: "tests/test-data/named_test_configs/dnssec/ecdsa_p256.pem".to_string(), // password: None, // algorithm: Algorithm::ECDSAP256SHA256.to_string(), // signer_name: Some(signer_name.clone().to_string()), // is_zone_signing_key: Some(true), // is_zone_update_auth: Some(false), // }; // let signer = key_config.try_into_signer(signer_name.clone()).expect("failed to read key_config"); // keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); // authority.add_zone_signing_key(signer).expect("failed to add signer to zone"); // authority.secure_zone().expect("failed to sign zone"); // } // // ecdsa_p384 // { // let key_config = KeyConfig { // key_path: "../../tests/test-data/named_test_configs/dnssec/ecdsa_p384.pem".to_string(), // password: None, // algorithm: Algorithm::ECDSAP384SHA384.to_string(), // signer_name: Some(signer_name.clone().to_string()), // is_zone_signing_key: Some(true), // is_zone_update_auth: Some(false), // }; // let signer = key_config.try_into_signer(signer_name.clone()).expect("failed to read key_config"); // keys.push(signer.to_dnskey().expect("failed to create DNSKEY")); // authority.add_zone_signing_key(signer).expect("failed to add signer to zone"); // authority.secure_zone().expect("failed to sign zone"); // } // ed 25519 #[cfg(feature = "dnssec-ring")] { let key_config = KeyConfig { key_path: "../../tests/test-data/named_test_configs/dnssec/ed25519.pk8".to_string(), password: None, algorithm: Algorithm::ED25519.to_string(), signer_name: Some(update_name.to_string()), is_zone_signing_key: Some(true), is_zone_update_auth: Some(false), }; let signer = key_config .try_into_signer(update_name.clone()) .expect("failed to read key_config"); let public_key = signer .key() .to_sig0key_with_usage(Algorithm::ED25519, KeyUsage::Host) .expect("failed to get sig0 key"); block_on(authority.add_update_auth_key(update_name, public_key)) .expect("failed to add signer to zone"); keys.push(signer); } keys } macro_rules! define_update_test { ($new:ident; $( $f:ident, )*) => { $( #[test] fn $f () { let mut authority = crate::$new("../../tests/test-data/named_test_configs/example.com.zone", module_path!(), stringify!($f)); let keys = crate::authority_battery::dynamic_update::add_auth(&mut authority); crate::authority_battery::dynamic_update::$f(authority, &keys); } )* } } macro_rules! dynamic_update { ($new:ident) => { #[cfg(test)] mod dynamic_update { mod $new { define_update_test!($new; test_create, test_create_multi, test_append, test_append_multi, test_compare_and_swap, test_compare_and_swap_multi, test_delete_by_rdata, test_delete_by_rdata_multi, test_delete_rrset, test_delete_all, ); } } }; } trust-dns-server-0.22.0/tests/authority_battery/mod.rs000064400000000000000000000001611046102023000212230ustar 00000000000000#![allow(unused)] #[macro_use] pub mod basic; #[macro_use] pub mod dnssec; #[macro_use] pub mod dynamic_update; trust-dns-server-0.22.0/tests/config_tests.rs000064400000000000000000000201531046102023000173540ustar 00000000000000/* * Copyright (C) 2015 Benjamin Fry * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ use std::env; use std::net::{Ipv4Addr, Ipv6Addr}; use std::path::{Path, PathBuf}; use std::time::Duration; use trust_dns_server::authority::ZoneType; use trust_dns_server::config::*; #[test] fn test_read_config() { let server_path = env::var("TDNS_WORKSPACE_ROOT").unwrap_or_else(|_| "../..".to_owned()); let path: PathBuf = PathBuf::from(server_path).join("tests/test-data/named_test_configs/example.toml"); if !path.exists() { panic!("can't locate example.toml and other configs: {:?}", path) } println!("reading config"); let config: Config = Config::read_config(&path).unwrap(); assert_eq!(config.get_listen_port(), 53); assert_eq!(config.get_listen_addrs_ipv4(), Ok(Vec::::new())); assert_eq!(config.get_listen_addrs_ipv6(), Ok(Vec::::new())); assert_eq!(config.get_tcp_request_timeout(), Duration::from_secs(5)); assert_eq!(config.get_log_level(), tracing::Level::INFO); assert_eq!(config.get_directory(), Path::new("/var/named")); assert_eq!( config.get_zones(), [ ZoneConfig::new( "localhost".into(), ZoneType::Primary, "default/localhost.zone".into(), None, None, None, vec![], ), ZoneConfig::new( "0.0.127.in-addr.arpa".into(), ZoneType::Primary, "default/127.0.0.1.zone".into(), None, None, None, vec![], ), ZoneConfig::new( "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.\ ip6.arpa" .into(), ZoneType::Primary, "default/ipv6_1.zone".into(), None, None, None, vec![], ), ZoneConfig::new( "255.in-addr.arpa".into(), ZoneType::Primary, "default/255.zone".into(), None, None, None, vec![], ), ZoneConfig::new( "0.in-addr.arpa".into(), ZoneType::Primary, "default/0.zone".into(), None, None, None, vec![], ), ZoneConfig::new( "example.com".into(), ZoneType::Primary, "example.com.zone".into(), None, None, None, vec![], ) ] ); } #[test] fn test_parse_toml() { let config: Config = "listen_port = 2053".parse().unwrap(); assert_eq!(config.get_listen_port(), 2053); let config: Config = "listen_addrs_ipv4 = [\"0.0.0.0\"]".parse().unwrap(); assert_eq!( config.get_listen_addrs_ipv4(), Ok(vec![Ipv4Addr::new(0, 0, 0, 0)]) ); let config: Config = "listen_addrs_ipv4 = [\"0.0.0.0\", \"127.0.0.1\"]" .parse() .unwrap(); assert_eq!( config.get_listen_addrs_ipv4(), Ok(vec![Ipv4Addr::new(0, 0, 0, 0), Ipv4Addr::new(127, 0, 0, 1)]) ); let config: Config = "listen_addrs_ipv6 = [\"::0\"]".parse().unwrap(); assert_eq!( config.get_listen_addrs_ipv6(), Ok(vec![Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)]) ); let config: Config = "listen_addrs_ipv6 = [\"::0\", \"::1\"]".parse().unwrap(); assert_eq!( config.get_listen_addrs_ipv6(), Ok(vec![ Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0), Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), ]) ); let config: Config = "tcp_request_timeout = 25".parse().unwrap(); assert_eq!(config.get_tcp_request_timeout(), Duration::from_secs(25)); let config: Config = "log_level = \"Debug\"".parse().unwrap(); assert_eq!(config.get_log_level(), tracing::Level::DEBUG); let config: Config = "directory = \"/dev/null\"".parse().unwrap(); assert_eq!(config.get_directory(), Path::new("/dev/null")); } #[cfg(feature = "dnssec")] #[test] fn test_parse_zone_keys() { use trust_dns_client::rr::dnssec::Algorithm; use trust_dns_client::rr::Name; let config: Config = " [[zones]] zone = \"example.com\" zone_type = \"Primary\" file = \"example.com.zone\" \ [[zones.keys]] key_path = \"/path/to/my_ed25519.pem\" algorithm = \"ED25519\" \ signer_name = \"ns.example.com.\" is_zone_signing_key = false is_zone_update_auth = true 
[[zones.keys]] key_path = \"/path/to/my_rsa.pem\" algorithm = \ \"RSASHA256\" signer_name = \"ns.example.com.\" " .parse() .unwrap(); assert_eq!( config.get_zones()[0].get_keys()[0].key_path(), Path::new("/path/to/my_ed25519.pem") ); assert_eq!( config.get_zones()[0].get_keys()[0].algorithm().unwrap(), Algorithm::ED25519 ); assert_eq!( config.get_zones()[0].get_keys()[0] .signer_name() .unwrap() .unwrap(), Name::parse("ns.example.com.", None).unwrap() ); assert!(!config.get_zones()[0].get_keys()[0].is_zone_signing_key(),); assert!(config.get_zones()[0].get_keys()[0].is_zone_update_auth(),); assert_eq!( config.get_zones()[0].get_keys()[1].key_path(), Path::new("/path/to/my_rsa.pem") ); assert_eq!( config.get_zones()[0].get_keys()[1].algorithm().unwrap(), Algorithm::RSASHA256 ); assert_eq!( config.get_zones()[0].get_keys()[1] .signer_name() .unwrap() .unwrap(), Name::parse("ns.example.com.", None).unwrap() ); assert!(!config.get_zones()[0].get_keys()[1].is_zone_signing_key(),); assert!(!config.get_zones()[0].get_keys()[1].is_zone_update_auth(),); } #[test] #[cfg(feature = "dnssec")] fn test_parse_tls() { // defaults let config: Config = "".parse().unwrap(); assert_eq!(config.get_tls_listen_port(), 853); assert_eq!(config.get_tls_cert(), None); let config: Config = " tls_cert = { path = \"path/to/some.pkcs12\", endpoint_name = \"ns.example.com\" } tls_listen_port = 8853 " .parse() .unwrap(); assert_eq!(config.get_tls_listen_port(), 8853); assert_eq!( config.get_tls_cert().unwrap().get_path(), Path::new("path/to/some.pkcs12") ); } fn test_config(path: &str) { let workspace = env::var("TDNS_WORKSPACE_ROOT").unwrap_or_else(|_| "../..".to_owned()); let path = PathBuf::from(workspace) .join("tests/test-data/named_test_configs") .join(path) .with_extension("toml"); assert!(path.exists(), "does not exist: {}", path.display()); println!("reading: {}", path.display()); Config::read_config(&path).expect("failed to read"); } macro_rules! 
define_test_config { ($name:ident) => { #[test] fn $name() { test_config(stringify!($name)); } }; } define_test_config!(all_supported_dnssec); define_test_config!(dns_over_https); define_test_config!(dns_over_tls_rustls_and_openssl); define_test_config!(dns_over_tls); #[cfg(feature = "sqlite")] define_test_config!(dnssec_with_update); define_test_config!(dnssec_with_update_deprecated); define_test_config!(example); define_test_config!(ipv4_and_ipv6); define_test_config!(ipv4_only); define_test_config!(ipv6_only); define_test_config!(openssl_dnssec); define_test_config!(ring_dnssec); #[cfg(feature = "trust-dns-resolver")] define_test_config!(example_forwarder); trust-dns-server-0.22.0/tests/forwarder.rs000064400000000000000000000017641046102023000166670ustar 00000000000000#![recursion_limit = "128"] #![cfg(feature = "trust-dns-resolver")] use std::net::Ipv4Addr; use std::str::FromStr; use tokio::runtime::Runtime; use trust_dns_client::rr::{Name, RData, RecordType}; use trust_dns_resolver::TokioHandle; use trust_dns_server::{ authority::{Authority, LookupObject}, store::forwarder::ForwardAuthority, }; #[ignore] #[test] fn test_lookup() { let runtime = Runtime::new().expect("failed to create Tokio Runtime"); let forwarder = ForwardAuthority::new(TokioHandle).expect("failed to create forwarder"); let lookup = runtime .block_on(forwarder.lookup( &Name::from_str("www.example.com.").unwrap().into(), RecordType::A, Default::default(), )) .unwrap(); let address = lookup.iter().next().expect("no addresses returned!"); let address = address .data() .and_then(RData::as_a) .expect("not an A record"); assert_eq!(*address, Ipv4Addr::new(93, 184, 216, 34)); } trust-dns-server-0.22.0/tests/in_memory.rs000064400000000000000000000104661046102023000166710ustar 00000000000000use std::str::FromStr; use tokio::runtime::Runtime; use trust_dns_client::rr::{Name, RData, Record, RecordType}; use trust_dns_server::{ authority::{Authority, ZoneType}, store::in_memory::InMemoryAuthority, }; #[test] fn test_cname_loop() { let runtime = Runtime::new().expect("failed to create Tokio Runtime"); let mut auth = InMemoryAuthority::empty( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, ); auth.upsert_mut( Record::from_rdata( Name::from_str("foo.example.com.").unwrap(), 300, RData::CNAME(Name::from_str("foo.example.com.").unwrap()), ), 0, ); auth.upsert_mut( Record::from_rdata( Name::from_str("bar.example.com.").unwrap(), 300, RData::CNAME(Name::from_str("foo.example.com.").unwrap()), ), 0, ); auth.upsert_mut( Record::from_rdata( Name::from_str("baz.example.com.").unwrap(), 300, RData::CNAME(Name::from_str("boz.example.com.").unwrap()), ), 0, ); auth.upsert_mut( Record::from_rdata( Name::from_str("boz.example.com.").unwrap(), 300, RData::CNAME(Name::from_str("biz.example.com.").unwrap()), ), 0, ); auth.upsert_mut( Record::from_rdata( Name::from_str("biz.example.com.").unwrap(), 300, RData::CNAME(Name::from_str("baz.example.com.").unwrap()), ), 0, ); let mut lookup = runtime .block_on(auth.lookup( &Name::from_str("foo.example.com.").unwrap().into(), RecordType::A, Default::default(), )) .unwrap(); let records: Vec<&Record> = lookup.iter().collect(); assert_eq!(records.len(), 1); let record = records[0]; assert_eq!(record.name(), &Name::from_str("foo.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("foo.example.com.").unwrap())) ); assert!( lookup.take_additionals().is_none(), "Should be no additional records." 
); let mut lookup = runtime .block_on(auth.lookup( &Name::from_str("bar.example.com.").unwrap().into(), RecordType::A, Default::default(), )) .unwrap(); let records: Vec<&Record> = lookup.iter().collect(); assert_eq!(records.len(), 1); let record = records[0]; assert_eq!(record.name(), &Name::from_str("bar.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("foo.example.com.").unwrap())) ); let additionals = lookup .take_additionals() .expect("Should be additional records"); let additionals: Vec<&Record> = additionals.iter().collect(); assert_eq!(additionals.len(), 1); let record = additionals[0]; assert_eq!(record.name(), &Name::from_str("foo.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("foo.example.com.").unwrap())) ); let mut lookup = runtime .block_on(auth.lookup( &Name::from_str("baz.example.com.").unwrap().into(), RecordType::A, Default::default(), )) .unwrap(); let records: Vec<&Record> = lookup.iter().collect(); assert_eq!(records.len(), 1); let record = records[0]; assert_eq!(record.name(), &Name::from_str("baz.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("boz.example.com.").unwrap())) ); let additionals = lookup .take_additionals() .expect("Should be additional records"); let additionals: Vec<&Record> = additionals.iter().collect(); assert_eq!(additionals.len(), 2); let record = additionals[0]; assert_eq!(record.name(), &Name::from_str("boz.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("biz.example.com.").unwrap())) ); let record = additionals[1]; assert_eq!(record.name(), &Name::from_str("biz.example.com.").unwrap()); assert_eq!( record.data(), Some(&RData::CNAME(Name::from_str("baz.example.com.").unwrap())) ); } trust-dns-server-0.22.0/tests/sqlite_tests.rs000064400000000000000000000054621046102023000174160ustar 00000000000000#![cfg(feature = "sqlite")] use std::net::*; use std::str::FromStr; use rusqlite::*; use trust_dns_client::rr::*; use trust_dns_server::store::sqlite::persistence::CURRENT_VERSION; use trust_dns_server::store::sqlite::Journal; #[test] fn test_new_journal() { let conn = Connection::open_in_memory().expect("could not create in memory DB"); assert_eq!( Journal::new(conn).expect("new Journal").schema_version(), -1 ); } #[test] fn test_init_journal() { let conn = Connection::open_in_memory().expect("could not create in memory DB"); let mut journal = Journal::new(conn).unwrap(); let version = journal.schema_up().unwrap(); assert_eq!(version, CURRENT_VERSION); assert_eq!( Journal::select_schema_version(&journal.conn()).unwrap(), CURRENT_VERSION ); } fn create_test_journal() -> (Record, Journal) { let www = Name::from_str("www.example.com").unwrap(); let mut record = Record::new(); record.set_name(www); record.set_rr_type(RecordType::A); record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.0.1").unwrap()))); // test that this message can be inserted let conn = Connection::open_in_memory().expect("could not create in memory DB"); let mut journal = Journal::new(conn).unwrap(); journal.schema_up().unwrap(); // insert the message journal.insert_record(0, &record).unwrap(); // insert another... 
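// Descriptive note (added comment): the second entry reuses the same insert_record call
// with different RDATA (127.0.1.1); test_insert_and_select_record and test_iterator below
// rely on these two rows coming back in insertion order.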
record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.1.1").unwrap()))); journal.insert_record(0, &record).unwrap(); (record, journal) } #[test] fn test_insert_and_select_record() { let (mut record, journal) = create_test_journal(); // select the record let (row_id, journal_record) = journal .select_record(0) .expect("persistence error") .expect("none"); record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.0.1").unwrap()))); assert_eq!(journal_record, record); // test another let (row_id, journal_record) = journal .select_record(row_id + 1) .expect("persistence error") .expect("none"); record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.1.1").unwrap()))); assert_eq!(journal_record, record); // check that we get nothing for id over row_id let option_none = journal .select_record(row_id + 1) .expect("persistence error"); assert!(option_none.is_none()); } #[test] fn test_iterator() { let (mut record, journal) = create_test_journal(); let mut iter = journal.iter(); assert_eq!( record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.0.1").unwrap()))), &iter.next().unwrap() ); assert_eq!( record.set_data(Some(RData::A(Ipv4Addr::from_str("127.0.1.1").unwrap()))), &iter.next().unwrap() ); assert_eq!(None, iter.next()); } trust-dns-server-0.22.0/tests/store_file_tests.rs000064400000000000000000000024651046102023000202500ustar 00000000000000use std::str::FromStr; use trust_dns_client::rr::{LowerName, RecordType}; use trust_dns_client::rr::{Name, RrKey}; use trust_dns_server::authority::ZoneType; use trust_dns_server::store::file::{FileAuthority, FileConfig}; #[macro_use] mod authority_battery; fn file(master_file_path: &str, _module: &str, _test_name: &str) -> FileAuthority { let config = FileConfig { zone_file_path: master_file_path.to_string(), }; FileAuthority::try_from_config( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, None, &config, ) .expect("failed to load file") } basic_battery!(file); #[cfg(feature = "dnssec")] dnssec_battery!(file); #[test] fn test_all_lines_are_loaded() { let config = FileConfig { zone_file_path: "../../tests/test-data/named_test_configs/default/nonewline.zone" .to_string(), }; let mut authority = FileAuthority::try_from_config( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, None, &config, ) .expect("failed to load"); let rrkey = RrKey { record_type: RecordType::A, name: LowerName::from(Name::from_ascii("ensure.nonewline.").unwrap()), }; assert!(authority.records_get_mut().get(&rrkey).is_some()) } trust-dns-server-0.22.0/tests/store_sqlite_tests.rs000064400000000000000000000040441046102023000206250ustar 00000000000000#![cfg(feature = "sqlite")] use std::fs; use std::path::PathBuf; use std::str::FromStr; use futures_executor::block_on; use trust_dns_client::rr::Name; use trust_dns_server::{ authority::ZoneType, store::sqlite::{SqliteAuthority, SqliteConfig}, }; #[macro_use] mod authority_battery; fn sqlite(master_file_path: &str, module: &str, test_name: &str) -> SqliteAuthority { let journal_path = PathBuf::from("target/tests") .join(module.replace("::", "_")) .join(test_name) .join("authority_battery.jrnl"); fs::create_dir_all(journal_path.parent().unwrap()).ok(); // cleanup anything from previous test fs::remove_file(&journal_path).ok(); let config = SqliteConfig { zone_file_path: master_file_path.to_string(), journal_file_path: journal_path.to_str().unwrap().to_string(), allow_update: true, }; block_on(SqliteAuthority::try_from_config( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, true, None, 
&config, )) .expect("failed to load file") } #[allow(unused)] fn sqlite_update(master_file_path: &str, module: &str, test_name: &str) -> SqliteAuthority { let journal_path = PathBuf::from("target/tests") .join(module.replace("::", "_")) .join(test_name) .join("authority_battery.jrnl"); fs::create_dir_all(journal_path.parent().unwrap()).ok(); // cleanup anything from previous test fs::remove_file(&journal_path).ok(); let config = SqliteConfig { zone_file_path: master_file_path.to_string(), journal_file_path: journal_path.to_str().unwrap().to_string(), allow_update: true, }; block_on(SqliteAuthority::try_from_config( Name::from_str("example.com.").unwrap(), ZoneType::Primary, false, true, None, &config, )) .expect("failed to load file") } basic_battery!(sqlite); #[cfg(feature = "dnssec")] dnssec_battery!(sqlite); #[cfg(feature = "dnssec")] dynamic_update!(sqlite_update); trust-dns-server-0.22.0/tests/timeout_stream_tests.rs000064400000000000000000000031341046102023000211500ustar 00000000000000use std::io; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; use futures_util::stream::{iter, Stream, StreamExt, TryStreamExt}; use tokio::runtime::Runtime; use trust_dns_server::server::TimeoutStream; #[test] fn test_no_timeout() { #[allow(deprecated)] let sequence = iter(vec![Ok(1), Err("error"), Ok(2)]).map_err(|e| io::Error::new(io::ErrorKind::Other, e)); let core = Runtime::new().expect("could not get core"); let timeout_stream = TimeoutStream::new(sequence, Duration::from_secs(360)); let (val, timeout_stream) = core.block_on(timeout_stream.into_future()); assert_eq!(val.expect("nothing in stream").ok(), Some(1)); let (error, timeout_stream) = core.block_on(timeout_stream.into_future()); assert!(error.expect("nothing in stream").is_err()); let (val, timeout_stream) = core.block_on(timeout_stream.into_future()); assert_eq!(val.expect("nothing in stream").ok(), Some(2)); let (val, _) = core.block_on(timeout_stream.into_future()); assert!(val.is_none()) } struct NeverStream {} impl Stream for NeverStream { type Item = Result<(), io::Error>; // somehow insert a timeout here... fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Pending } } #[test] fn test_timeout() { let core = Runtime::new().expect("could not get core"); let timeout_stream = TimeoutStream::new(NeverStream {}, Duration::from_millis(1)); assert!(core .block_on(timeout_stream.into_future()) .0 .expect("nothing in stream") .is_err()); } trust-dns-server-0.22.0/tests/txt_tests.rs000064400000000000000000000360641046102023000167360ustar 00000000000000use std::net::{Ipv4Addr, Ipv6Addr}; use std::str::FromStr; use futures_executor::block_on; use trust_dns_client::proto::rr::rdata::tlsa::*; use trust_dns_client::rr::*; use trust_dns_client::serialize::txt::*; use trust_dns_server::authority::{Authority, LookupOptions, ZoneType}; use trust_dns_server::store::in_memory::InMemoryAuthority; // TODO: split this test up to test each thing separately #[test] #[allow(clippy::cognitive_complexity)] fn test_zone() { let lexer = Lexer::new( r###" @ IN SOA venera action\.domains ( 20 ; SERIAL 7200 ; REFRESH 600 ; RETRY 3600000; EXPIRE 60) ; MINIMUM NS a.isi.edu. NS venera NS vaxa MX 10 venera MX 20 vaxa a A 26.3.0.103 TXT I am a txt record TXT I am another txt record TXT "I am a different" "txt record" TXT key=val aaaa AAAA 4321:0:1:2:3:4:567:89ab alias CNAME a 103.0.3.26.IN-ADDR.ARPA. PTR a b.a.9.8.7.6.5.0.4.0.0.0.3.0.0.0.2.0.0.0.1.0.0.0.0.0.0.0.1.2.3.4.IP6.ARPA. 
PTR aaaa _ldap._tcp.service SRV 1 2 3 short rust-❤️-🦀 A 192.0.2.1 short 70 A 26.3.0.104 venera A 10.1.0.52 A 128.9.0.32 nocerts CAA 0 issue ";" certs CAA 0 issuewild "example.net" _443._tcp.www.example.com. IN TLSA ( 0 0 1 d2abde240d7cd3ee6b4b28c54df034b9 7983a1d16e8a410e4561cb106618e971) tech. 3600 in soa ns0.centralnic.net. hostmaster.centralnic.net. 271851 900 1800 6048000 3600 "###, ); let records = Parser::new().parse(lexer, Some(Name::from_str("isi.edu").unwrap()), None); if records.is_err() { panic!("failed to parse: {:?}", records.err()) } let (origin, records) = records.unwrap(); let authority = InMemoryAuthority::new(origin, records, ZoneType::Primary, false).unwrap(); // not validating everything, just one of each... // SOA let soa_record = block_on(authority.soa()) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!(RecordType::SOA, soa_record.rr_type()); assert_eq!(&Name::from_str("isi.edu").unwrap(), soa_record.name()); // i.e. the origin or domain assert_eq!(3_600_000, soa_record.ttl()); assert_eq!(DNSClass::IN, soa_record.dns_class()); if let Some(RData::SOA(ref soa)) = soa_record.data() { // this should all be lowercased assert_eq!(&Name::from_str("venera.isi.edu").unwrap(), soa.mname()); assert_eq!( &Name::from_str("action\\.domains.isi.edu").unwrap(), soa.rname() ); assert_eq!(20, soa.serial()); assert_eq!(7200, soa.refresh()); assert_eq!(600, soa.retry()); assert_eq!(3_600_000, soa.expire()); assert_eq!(60, soa.minimum()); } else { panic!("Not an SOA record!!!") // valid panic, test code } let lowercase_record = block_on(authority.lookup( &Name::from_str("tech.").unwrap().into(), RecordType::SOA, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!(&Name::from_str("tech.").unwrap(), lowercase_record.name()); assert_eq!(DNSClass::IN, lowercase_record.dns_class()); if let Some(RData::SOA(ref lower_soa)) = lowercase_record.data() { assert_eq!( &Name::from_str("ns0.centralnic.net").unwrap(), lower_soa.mname() ); assert_eq!( &Name::from_str("hostmaster.centralnic.net").unwrap(), lower_soa.rname() ); assert_eq!(271851, lower_soa.serial()); assert_eq!(900, lower_soa.refresh()); assert_eq!(1800, lower_soa.retry()); assert_eq!(6_048_000, lower_soa.expire()); assert_eq!(3_600, lower_soa.minimum()); } else { panic!("Not an SOA record!!!") // valid panic, test code } // NS let mut ns_records: Vec = block_on(authority.lookup( &Name::from_str("isi.edu").unwrap().into(), RecordType::NS, LookupOptions::default(), )) .unwrap() .iter() .cloned() .collect(); let mut compare = vec![ // this is cool, zip up the expected results... works as long as the order is good. Name::from_str("a.isi.edu").unwrap(), Name::from_str("venera.isi.edu").unwrap(), Name::from_str("vaxa.isi.edu").unwrap(), ]; compare.sort(); ns_records.sort(); let compare = ns_records.iter().zip(compare); for (record, name) in compare { assert_eq!(&Name::from_str("isi.edu").unwrap(), record.name()); assert_eq!(60, record.ttl()); // TODO: should this be minimum or expire? 
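// Descriptive note (added comment): the NS entries in the zone text above carry no
// explicit TTL, so the observed 60 matches the SOA MINIMUM from the preamble, which is
// exactly what the TODO above is questioning.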
assert_eq!(DNSClass::IN, record.dns_class()); assert_eq!(RecordType::NS, record.rr_type()); if let Some(RData::NS(nsdname)) = record.data() { assert_eq!(name, *nsdname); } else { panic!("Not an NS record!!!") // valid panic, test code } } // MX let mut mx_records: Vec = block_on(authority.lookup( &Name::from_str("isi.edu").unwrap().into(), RecordType::MX, LookupOptions::default(), )) .unwrap() .iter() .cloned() .collect(); let mut compare = vec![ (10, Name::from_str("venera.isi.edu").unwrap()), (20, Name::from_str("vaxa.isi.edu").unwrap()), ]; compare.sort(); mx_records.sort(); let compare = mx_records.iter().zip(compare); for (record, (num, ref name)) in compare { assert_eq!(&Name::from_str("isi.edu").unwrap(), record.name()); assert_eq!(60, record.ttl()); // TODO: should this be minimum or expire? assert_eq!(DNSClass::IN, record.dns_class()); assert_eq!(RecordType::MX, record.rr_type()); if let Some(RData::MX(ref rdata)) = record.data() { assert_eq!(num, rdata.preference()); assert_eq!(name, rdata.exchange()); } else { panic!("Not an NS record!!!") // valid panic, test code } } // A let a_record: Record = block_on(authority.lookup( &Name::from_str("a.isi.edu").unwrap().into(), RecordType::A, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!(&Name::from_str("a.isi.edu").unwrap(), a_record.name()); assert_eq!(60, a_record.ttl()); // TODO: should this be minimum or expire? assert_eq!(DNSClass::IN, a_record.dns_class()); assert_eq!(RecordType::A, a_record.rr_type()); if let Some(RData::A(ref address)) = a_record.data() { assert_eq!(&Ipv4Addr::new(26u8, 3u8, 0u8, 103u8), address); } else { panic!("Not an A record!!!") // valid panic, test code } // AAAA let aaaa_record: Record = block_on(authority.lookup( &Name::from_str("aaaa.isi.edu").unwrap().into(), RecordType::AAAA, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!(&Name::from_str("aaaa.isi.edu").unwrap(), aaaa_record.name()); if let Some(RData::AAAA(ref address)) = aaaa_record.data() { assert_eq!( &Ipv6Addr::from_str("4321:0:1:2:3:4:567:89ab").unwrap(), address ); } else { panic!("Not a AAAA record!!!") // valid panic, test code } // SHORT let short_record: Record = block_on(authority.lookup( &Name::from_str("short.isi.edu").unwrap().into(), RecordType::A, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!( &Name::from_str("short.isi.edu").unwrap(), short_record.name() ); assert_eq!(70, short_record.ttl()); if let Some(RData::A(ref address)) = short_record.data() { assert_eq!(&Ipv4Addr::new(26u8, 3u8, 0u8, 104u8), address); } else { panic!("Not an A record!!!") // valid panic, test code } // TXT let mut txt_records: Vec = block_on(authority.lookup( &Name::from_str("a.isi.edu").unwrap().into(), RecordType::TXT, LookupOptions::default(), )) .unwrap() .iter() .cloned() .collect(); let compare: Vec>> = vec![ vec![b"I" as &[u8], b"am", b"a", b"txt", b"record"] .into_iter() .map(Box::from) .collect(), vec![b"I" as &[u8], b"am", b"another", b"txt", b"record"] .into_iter() .map(Box::from) .collect(), vec![b"key=val" as &[u8]] .into_iter() .map(Box::from) .collect(), vec![b"I am a different" as &[u8], b"txt record"] .into_iter() .map(Box::from) .collect(), ]; txt_records.sort(); println!("compare: {:#?}", compare); println!("txt_records: {:#?}", txt_records); let compare = txt_records.iter().zip(compare); for (record, ref vector) in compare { if let Some(RData::TXT(ref rdata)) = record.data() { assert_eq!(vector as &[Box<[u8]>], 
rdata.txt_data()); } else { panic!("Not a TXT record!!!") // valid panic, test code } } // PTR let ptr_record: Record = block_on(authority.lookup( &Name::from_str("103.0.3.26.in-addr.arpa").unwrap().into(), RecordType::PTR, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); if let Some(RData::PTR(ref ptrdname)) = ptr_record.data() { assert_eq!(&Name::from_str("a.isi.edu").unwrap(), ptrdname); } else { panic!("Not a PTR record!!!") // valid panic, test code } // SRV let srv_record: Record = block_on(authority.lookup( &Name::from_str("_ldap._tcp.service.isi.edu").unwrap().into(), RecordType::SRV, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); if let Some(RData::SRV(ref rdata)) = srv_record.data() { assert_eq!(rdata.priority(), 1); assert_eq!(rdata.weight(), 2); assert_eq!(rdata.port(), 3); assert_eq!(rdata.target(), &Name::from_str("short.isi.edu").unwrap()); } else { panic!("Not an SRV record!!!") // valid panic, test code } // IDNA name: rust-❤️-🦀 A 192.0.2.1 let idna_record: Record = block_on(authority.lookup( &Name::from_str("rust-❤️-🦀.isi.edu").unwrap().into(), RecordType::A, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .unwrap(); assert_eq!( &Name::from_str("rust-❤️-🦀.isi.edu").unwrap(), idna_record.name() ); if let Some(RData::A(ref address)) = idna_record.data() { assert_eq!(&Ipv4Addr::new(192u8, 0u8, 2u8, 1u8), address); } else { panic!("Not an A record!!!") // valid panic, test code } // CAA let caa_record: Record = block_on(authority.lookup( &Name::parse("nocerts.isi.edu.", None).unwrap().into(), RecordType::CAA, LookupOptions::default(), )) .unwrap() .iter() .next() .cloned() .expect("nocerts not found"); if let Some(RData::CAA(ref rdata)) = caa_record.data() { assert!(!rdata.issuer_critical()); assert!(rdata.tag().is_issue()); assert!(rdata.value().is_issuer()); } else { panic!(); } // TLSA let tlsa_record: Record = block_on( authority.lookup( &Name::parse("_443._tcp.www.example.com.", None) .unwrap() .into(), RecordType::TLSA, LookupOptions::default(), ), ) .unwrap() .iter() .next() .cloned() .expect("tlsa record not found"); if let Some(RData::TLSA(ref rdata)) = tlsa_record.data() { assert_eq!(rdata.cert_usage(), CertUsage::CA); assert_eq!(rdata.selector(), Selector::Full); assert_eq!(rdata.matching(), Matching::Sha256); assert_eq!( rdata.cert_data(), &[ 210, 171, 222, 36, 13, 124, 211, 238, 107, 75, 40, 197, 77, 240, 52, 185, 121, 131, 161, 209, 110, 138, 65, 14, 69, 97, 203, 16, 102, 24, 233, 113 ] ); } else { panic!(); } } #[test] #[allow(clippy::cognitive_complexity)] fn test_bad_cname_at_soa() { let lexer = Lexer::new( r###" @ IN SOA venera action\.domains ( 20 ; SERIAL 7200 ; REFRESH 600 ; RETRY 3600000; EXPIRE 60) ; MINIMUM CNAME a a A 127.0.0.1 "###, ); let records = Parser::new().parse(lexer, Some(Name::from_str("isi.edu").unwrap()), None); if records.is_err() { panic!("failed to parse: {:?}", records.err()) } let (origin, records) = records.unwrap(); assert!(InMemoryAuthority::new(origin, records, ZoneType::Primary, false).is_err()); } #[test] fn test_bad_cname_at_a() { let lexer = Lexer::new( r###" @ IN SOA venera action\.domains ( 20 ; SERIAL 7200 ; REFRESH 600 ; RETRY 3600000; EXPIRE 60) ; MINIMUM a CNAME b a A 127.0.0.1 b A 127.0.0.2 "###, ); let records = Parser::new().parse(lexer, Some(Name::from_str("isi.edu").unwrap()), None); if records.is_err() { panic!("failed to parse: {:?}", records.err()) } let (origin, records) = records.unwrap(); assert!(InMemoryAuthority::new(origin, records, 
ZoneType::Primary, false).is_err()); } #[test] fn test_aname_at_soa() { let lexer = Lexer::new( r###" @ IN SOA venera action\.domains ( 20 ; SERIAL 7200 ; REFRESH 600 ; RETRY 3600000; EXPIRE 60) ; MINIMUM ANAME a a A 127.0.0.1 "###, ); let records = Parser::new().parse(lexer, Some(Name::from_str("isi.edu").unwrap()), None); if records.is_err() { panic!("failed to parse: {:?}", records.err()) } let (origin, records) = records.unwrap(); assert!(InMemoryAuthority::new(origin, records, ZoneType::Primary, false).is_ok()); } #[test] fn test_named_root() { let lexer = Lexer::new( r###" . 3600000 NS A.ROOT-SERVERS.NET. "###, ); let records = Parser::new().parse(lexer, Some(Name::root()), Some(DNSClass::IN)); if records.is_err() { panic!("failed to parse: {:?}", records.err()) } let (_, records) = records.unwrap(); let key = RrKey::new(LowerName::from(Name::root()), RecordType::NS); assert!(records.contains_key(&key)); assert_eq!(records[&key].dns_class(), DNSClass::IN) }
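// Descriptive note (added comment): test_named_root exercises root-hints style data.
// The parser is handed Name::root() plus an explicit DNSClass::IN, and the resulting
// record map is expected to contain an NS RRset keyed by (root, NS) carrying that class.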