prost-types-0.12.6/.cargo_vcs_info.json0000644000000001510000000000100134300ustar { "git": { "sha1": "d42c85e790263f78f6c626ceb0dac5fda0edcb41" }, "path_in_vcs": "prost-types" }prost-types-0.12.6/Cargo.toml0000644000000022060000000000100114310ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.70" name = "prost-types" version = "0.12.6" authors = [ "Dan Burkert ", "Lucio Franco ", "Casper Meijn ", "Tokio Contributors ", ] description = "Prost definitions of Protocol Buffers well known types." documentation = "https://docs.rs/prost-types" readme = "README.md" license = "Apache-2.0" repository = "https://github.com/tokio-rs/prost" [lib] doctest = false [dependencies.prost] version = "0.12.6" features = ["prost-derive"] default-features = false [dev-dependencies.proptest] version = "1" [features] default = ["std"] std = ["prost/std"] prost-types-0.12.6/Cargo.toml.orig000064400000000000000000000012761046102023000151200ustar 00000000000000[package] name = "prost-types" version = "0.12.6" authors = [ "Dan Burkert ", "Lucio Franco ", "Casper Meijn ", "Tokio Contributors ", ] license = "Apache-2.0" repository = "https://github.com/tokio-rs/prost" documentation = "https://docs.rs/prost-types" readme = "README.md" description = "Prost definitions of Protocol Buffers well known types." edition = "2021" rust-version = "1.70" [lib] doctest = false [features] default = ["std"] std = ["prost/std"] [dependencies] prost = { version = "0.12.6", path = "../prost", default-features = false, features = ["prost-derive"] } [dev-dependencies] proptest = "1" prost-types-0.12.6/LICENSE000064400000000000000000000251371046102023000132400ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prost-types-0.12.6/README.md000064400000000000000000000014041046102023000135010ustar 00000000000000[![Documentation](https://docs.rs/prost-types/badge.svg)](https://docs.rs/prost-types/) [![Crate](https://img.shields.io/crates/v/prost-types.svg)](https://crates.io/crates/prost-types) # `prost-types` Prost definitions of Protocol Buffers well known types. See the [Protobuf reference][1] for more information about well known types. [1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf ## License `prost-types` is distributed under the terms of the Apache License (Version 2.0). `prost-types` includes code imported from the Protocol Buffers project, which is included under its original ([BSD][2]) license. [2]: https://github.com/google/protobuf/blob/master/LICENSE See [LICENSE](../LICENSE) for details. Copyright 2017 Dan Burkert prost-types-0.12.6/src/any.rs000064400000000000000000000033731046102023000141550ustar 00000000000000use super::*; impl Any { /// Serialize the given message type `M` as [`Any`].
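    /// A minimal usage sketch (illustrative, mirroring this crate's own test
    /// below; `Timestamp` stands in for any type implementing `Name` + `Message`):
    ///
    /// ```ignore
    /// let ts = Timestamp::date(2000, 1, 1).unwrap();
    /// let any = Any::from_msg(&ts).unwrap();
    /// assert_eq!(any.to_msg::<Timestamp>().unwrap(), ts);
    /// ```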
pub fn from_msg<M>(msg: &M) -> Result<Any, EncodeError> where M: Name, { let type_url = M::type_url(); let mut value = Vec::new(); Message::encode(msg, &mut value)?; Ok(Any { type_url, value }) } /// Decode the given message type `M` from [`Any`], validating that it has /// the expected type URL. pub fn to_msg<M>(&self) -> Result<M, DecodeError> where M: Default + Name + Sized, { let expected_type_url = M::type_url(); if let (Some(expected), Some(actual)) = ( TypeUrl::new(&expected_type_url), TypeUrl::new(&self.type_url), ) { if expected == actual { return M::decode(self.value.as_slice()); } } let mut err = DecodeError::new(format!( "expected type URL: \"{}\" (got: \"{}\")", expected_type_url, &self.type_url )); err.push("unexpected type URL", "type_url"); Err(err) } } impl Name for Any { const PACKAGE: &'static str = PACKAGE; const NAME: &'static str = "Any"; fn type_url() -> String { type_url_for::<Self>() } } #[cfg(test)] mod tests { use super::*; #[test] fn check_any_serialization() { let message = Timestamp::date(2000, 1, 1).unwrap(); let any = Any::from_msg(&message).unwrap(); assert_eq!( &any.type_url, "type.googleapis.com/google.protobuf.Timestamp" ); let message2 = any.to_msg::<Timestamp>().unwrap(); assert_eq!(message, message2); // Wrong type URL assert!(any.to_msg::<Duration>().is_err()); } } prost-types-0.12.6/src/compiler.rs000064400000000000000000000220251046102023000151730ustar 00000000000000// This file is @generated by prost-build. /// The version number of protocol compiler. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Version { #[prost(int32, optional, tag = "1")] pub major: ::core::option::Option<i32>, #[prost(int32, optional, tag = "2")] pub minor: ::core::option::Option<i32>, #[prost(int32, optional, tag = "3")] pub patch: ::core::option::Option<i32>, /// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should /// be empty for mainline stable releases. #[prost(string, optional, tag = "4")] pub suffix: ::core::option::Option<::prost::alloc::string::String>, } /// An encoded CodeGeneratorRequest is written to the plugin's stdin. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CodeGeneratorRequest { /// The .proto files that were explicitly listed on the command-line. The /// code generator should generate code only for these files. Each file's /// descriptor will be included in proto_file, below. #[prost(string, repeated, tag = "1")] pub file_to_generate: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// The generator parameter passed on the command-line. #[prost(string, optional, tag = "2")] pub parameter: ::core::option::Option<::prost::alloc::string::String>, /// FileDescriptorProtos for all files in files_to_generate and everything /// they import. The files will appear in topological order, so each file /// appears before any file that imports it. /// /// protoc guarantees that all proto_files will be written after /// the fields above, even though this is not technically guaranteed by the /// protobuf wire format. This theoretically could allow a plugin to stream /// in the FileDescriptorProtos and handle them one by one rather than read /// the entire set into memory at once. However, as of this writing, this /// is not similarly optimized on protoc's end -- it will store all fields in /// memory at once before sending them to the plugin. /// /// Type names of fields and extensions in the FileDescriptorProto are always /// fully qualified.
#[prost(message, repeated, tag = "15")] pub proto_file: ::prost::alloc::vec::Vec, /// The version number of protocol compiler. #[prost(message, optional, tag = "3")] pub compiler_version: ::core::option::Option, } /// The plugin writes an encoded CodeGeneratorResponse to stdout. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CodeGeneratorResponse { /// Error message. If non-empty, code generation failed. The plugin process /// should exit with status code zero even if it reports an error in this way. /// /// This should be used to indicate errors in .proto files which prevent the /// code generator from generating correct code. Errors which indicate a /// problem in protoc itself -- such as the input CodeGeneratorRequest being /// unparseable -- should be reported by writing a message to stderr and /// exiting with a non-zero status code. #[prost(string, optional, tag = "1")] pub error: ::core::option::Option<::prost::alloc::string::String>, /// A bitmask of supported features that the code generator supports. /// This is a bitwise "or" of values from the Feature enum. #[prost(uint64, optional, tag = "2")] pub supported_features: ::core::option::Option, #[prost(message, repeated, tag = "15")] pub file: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `CodeGeneratorResponse`. pub mod code_generator_response { /// Represents a single generated file. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct File { /// The file name, relative to the output directory. The name must not /// contain "." or ".." components and must be relative, not be absolute (so, /// the file cannot lie outside the output directory). "/" must be used as /// the path separator, not "". /// /// If the name is omitted, the content will be appended to the previous /// file. This allows the generator to break large files into small chunks, /// and allows the generated text to be streamed back to protoc so that large /// files need not reside completely in memory at one time. Note that as of /// this writing protoc does not optimize for this -- it will read the entire /// CodeGeneratorResponse before writing files to disk. #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, /// If non-empty, indicates that the named file should already exist, and the /// content here is to be inserted into that file at a defined insertion /// point. This feature allows a code generator to extend the output /// produced by another code generator. The original generator may provide /// insertion points by placing special annotations in the file that look /// like: /// @@protoc_insertion_point(NAME) /// The annotation can have arbitrary text before and after it on the line, /// which allows it to be placed in a comment. NAME should be replaced with /// an identifier naming the point -- this is what other generators will use /// as the insertion_point. Code inserted at this point will be placed /// immediately above the line containing the insertion point (thus multiple /// insertions to the same point will come out in the order they were added). /// The double-@ is intended to make it unlikely that the generated code /// could contain things that look like insertion points by accident. 
/// /// For example, the C++ code generator places the following line in the /// .pb.h files that it generates: /// // @@protoc_insertion_point(namespace_scope) /// This line appears within the scope of the file's package namespace, but /// outside of any particular class. Another plugin can then specify the /// insertion_point "namespace_scope" to generate additional classes or /// other declarations that should be placed in this scope. /// /// Note that if the line containing the insertion point begins with /// whitespace, the same whitespace will be added to every line of the /// inserted text. This is useful for languages like Python, where /// indentation matters. In these languages, the insertion point comment /// should be indented the same amount as any inserted code will need to be /// in order to work correctly in that context. /// /// The code generator that generates the initial file and the one which /// inserts into it must both run as part of a single invocation of protoc. /// Code generators are executed in the order in which they appear on the /// command line. /// /// If |insertion_point| is present, |name| must also be present. #[prost(string, optional, tag = "2")] pub insertion_point: ::core::option::Option<::prost::alloc::string::String>, /// The file contents. #[prost(string, optional, tag = "15")] pub content: ::core::option::Option<::prost::alloc::string::String>, /// Information describing the file content being inserted. If an insertion /// point is used, this information will be appropriately offset and inserted /// into the code generation metadata for the generated files. #[prost(message, optional, tag = "16")] pub generated_code_info: ::core::option::Option, } /// Sync with code_generator.h. #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum Feature { None = 0, Proto3Optional = 1, } impl Feature { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Feature::None => "FEATURE_NONE", Feature::Proto3Optional => "FEATURE_PROTO3_OPTIONAL", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "FEATURE_NONE" => Some(Self::None), "FEATURE_PROTO3_OPTIONAL" => Some(Self::Proto3Optional), _ => None, } } } } prost-types-0.12.6/src/datetime.rs000064400000000000000000000632251046102023000151640ustar 00000000000000//! A date/time type which exists primarily to convert [`Timestamp`]s into an RFC 3339 formatted //! string. use core::fmt; use crate::Duration; use crate::Timestamp; /// A point in time, represented as a date and time in the UTC timezone. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub(crate) struct DateTime { /// The year. pub(crate) year: i64, /// The month of the year, from 1 to 12, inclusive. pub(crate) month: u8, /// The day of the month, from 1 to 31, inclusive. pub(crate) day: u8, /// The hour of the day, from 0 to 23, inclusive. pub(crate) hour: u8, /// The minute of the hour, from 0 to 59, inclusive. pub(crate) minute: u8, /// The second of the minute, from 0 to 59, inclusive. pub(crate) second: u8, /// The nanoseconds, from 0 to 999_999_999, inclusive. 
pub(crate) nanos: u32, } impl DateTime { /// The minimum representable [`Timestamp`] as a `DateTime`. pub(crate) const MIN: DateTime = DateTime { year: -292_277_022_657, month: 1, day: 27, hour: 8, minute: 29, second: 52, nanos: 0, }; /// The maximum representable [`Timestamp`] as a `DateTime`. pub(crate) const MAX: DateTime = DateTime { year: 292_277_026_596, month: 12, day: 4, hour: 15, minute: 30, second: 7, nanos: 999_999_999, }; /// Returns `true` if the `DateTime` is a valid calendar date. pub(crate) fn is_valid(&self) -> bool { self >= &DateTime::MIN && self <= &DateTime::MAX && self.month > 0 && self.month <= 12 && self.day > 0 && self.day <= days_in_month(self.year, self.month) && self.hour < 24 && self.minute < 60 && self.second < 60 && self.nanos < 1_000_000_000 } } impl fmt::Display for DateTime { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Pad years to at least 4 digits. if self.year > 9999 { write!(f, "+{}", self.year)?; } else if self.year < 0 { write!(f, "{:05}", self.year)?; } else { write!(f, "{:04}", self.year)?; }; write!( f, "-{:02}-{:02}T{:02}:{:02}:{:02}", self.month, self.day, self.hour, self.minute, self.second, )?; // Format subseconds to either nothing, millis, micros, or nanos. let nanos = self.nanos; if nanos == 0 { write!(f, "Z") } else if nanos % 1_000_000 == 0 { write!(f, ".{:03}Z", nanos / 1_000_000) } else if nanos % 1_000 == 0 { write!(f, ".{:06}Z", nanos / 1_000) } else { write!(f, ".{:09}Z", nanos) } } } impl From<Timestamp> for DateTime { /// musl's [`__secs_to_tm`][1] converted to Rust via [c2rust][2] and then cleaned up by hand. /// /// All existing `strftime`-like APIs in Rust are unable to handle the full range of timestamps /// representable by `Timestamp`, including `strftime` itself, since tm.tm_year is an int. /// /// [1]: http://git.musl-libc.org/cgit/musl/tree/src/time/__secs_to_tm.c /// [2]: https://c2rust.com/ fn from(mut timestamp: Timestamp) -> DateTime { timestamp.normalize(); let t = timestamp.seconds; let nanos = timestamp.nanos; // 2000-03-01 (mod 400 year, immediately after feb29) const LEAPOCH: i64 = 946_684_800 + 86400 * (31 + 29); const DAYS_PER_400Y: i32 = 365 * 400 + 97; const DAYS_PER_100Y: i32 = 365 * 100 + 24; const DAYS_PER_4Y: i32 = 365 * 4 + 1; const DAYS_IN_MONTH: [u8; 12] = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 29]; // Note(dcb): this bit is rearranged slightly to avoid integer overflow.
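        // Worked example (illustrative): for t = 0 (1970-01-01T00:00:00Z),
        // LEAPOCH / 86_400 is 11_017, so days = -11_017 and remsecs = 0,
        // i.e. exactly 11_017 whole days before the 2000-03-01 epoch used by
        // the cycle arithmetic below.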
let mut days: i64 = (t / 86_400) - (LEAPOCH / 86_400); let mut remsecs: i32 = (t % 86_400) as i32; if remsecs < 0i32 { remsecs += 86_400; days -= 1 } let mut qc_cycles: i32 = (days / i64::from(DAYS_PER_400Y)) as i32; let mut remdays: i32 = (days % i64::from(DAYS_PER_400Y)) as i32; if remdays < 0 { remdays += DAYS_PER_400Y; qc_cycles -= 1; } let mut c_cycles: i32 = remdays / DAYS_PER_100Y; if c_cycles == 4 { c_cycles -= 1; } remdays -= c_cycles * DAYS_PER_100Y; let mut q_cycles: i32 = remdays / DAYS_PER_4Y; if q_cycles == 25 { q_cycles -= 1; } remdays -= q_cycles * DAYS_PER_4Y; let mut remyears: i32 = remdays / 365; if remyears == 4 { remyears -= 1; } remdays -= remyears * 365; let mut years: i64 = i64::from(remyears) + 4 * i64::from(q_cycles) + 100 * i64::from(c_cycles) + 400 * i64::from(qc_cycles); let mut months: i32 = 0; while i32::from(DAYS_IN_MONTH[months as usize]) <= remdays { remdays -= i32::from(DAYS_IN_MONTH[months as usize]); months += 1 } if months >= 10 { months -= 12; years += 1; } let date_time = DateTime { year: years + 2000, month: (months + 3) as u8, day: (remdays + 1) as u8, hour: (remsecs / 3600) as u8, minute: (remsecs / 60 % 60) as u8, second: (remsecs % 60) as u8, nanos: nanos as u32, }; debug_assert!(date_time.is_valid()); date_time } } /// Returns the number of days in the month. fn days_in_month(year: i64, month: u8) -> u8 { const DAYS_IN_MONTH: [u8; 12] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; let (_, is_leap) = year_to_seconds(year); DAYS_IN_MONTH[usize::from(month - 1)] + u8::from(is_leap && month == 2) } macro_rules! ensure { ($expr:expr) => {{ if !$expr { return None; } }}; } /// Parses a date in RFC 3339 format from ASCII string `b`, returning the year, month, day, and /// remaining input. /// /// The date is not validated according to a calendar. fn parse_date(s: &str) -> Option<(i64, u8, u8, &str)> { debug_assert!(s.is_ascii()); // Smallest valid date is YYYY-MM-DD. ensure!(s.len() >= 10); // Parse the year in one of three formats: // * +YYYY[Y]+ // * -[Y]+ // * YYYY let (year, s) = match s.as_bytes()[0] { b'+' => { let (digits, s) = parse_digits(&s[1..]); ensure!(digits.len() >= 5); let date: i64 = digits.parse().ok()?; (date, s) } b'-' => { let (digits, s) = parse_digits(&s[1..]); ensure!(digits.len() >= 4); let date: i64 = digits.parse().ok()?; (-date, s) } _ => { // Parse a 4 digit numeric. let (n1, s) = parse_two_digit_numeric(s)?; let (n2, s) = parse_two_digit_numeric(s)?; (i64::from(n1) * 100 + i64::from(n2), s) } }; let s = parse_char(s, b'-')?; let (month, s) = parse_two_digit_numeric(s)?; let s = parse_char(s, b'-')?; let (day, s) = parse_two_digit_numeric(s)?; Some((year, month, day, s)) } /// Parses a time in RFC 3339 format from ASCII string `s`, returning the hour, minute, second, and /// nanos. /// /// The date is not validated according to a calendar. fn parse_time(s: &str) -> Option<(u8, u8, u8, u32, &str)> { debug_assert!(s.is_ascii()); let (hour, s) = parse_two_digit_numeric(s)?; let s = parse_char(s, b':')?; let (minute, s) = parse_two_digit_numeric(s)?; let s = parse_char(s, b':')?; let (second, s) = parse_two_digit_numeric(s)?; let (nanos, s) = parse_nanos(s)?; Some((hour, minute, second, nanos, s)) } /// Parses an optional nanosecond time from ASCII string `s`, returning the nanos and remaining /// string. fn parse_nanos(s: &str) -> Option<(u32, &str)> { debug_assert!(s.is_ascii()); // Parse the nanoseconds, if present. 
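    // The fractional digits are scaled to nanoseconds by how many digits are
    // present, e.g. ".52" parses as 52 * 10^(9 - 2) = 520_000_000 nanos.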
let (nanos, s) = if let Some(s) = parse_char(s, b'.') { let (digits, s) = parse_digits(s); ensure!(digits.len() <= 9); let nanos = 10u32.pow(9 - digits.len() as u32) * digits.parse::<u32>().ok()?; (nanos, s) } else { (0, s) }; Some((nanos, s)) } /// Parses a timezone offset in RFC 3339 format from ASCII string `s`, returning the offset hour, /// offset minute, and remaining input. fn parse_offset(s: &str) -> Option<(i8, i8, &str)> { debug_assert!(s.is_ascii()); if s.is_empty() { // If no timezone specified, assume UTC. return Some((0, 0, s)); } // Snowflake's timestamp format contains a space separator before the offset. let s = parse_char(s, b' ').unwrap_or(s); if let Some(s) = parse_char_ignore_case(s, b'Z') { Some((0, 0, s)) } else { let (is_positive, s) = if let Some(s) = parse_char(s, b'+') { (true, s) } else if let Some(s) = parse_char(s, b'-') { (false, s) } else { return None; }; let (hour, s) = parse_two_digit_numeric(s)?; let (minute, s) = if s.is_empty() { // No offset minutes are specified, e.g. +00 or +07. (0, s) } else { // Optional colon separator between the hour and minute digits. let s = parse_char(s, b':').unwrap_or(s); let (minute, s) = parse_two_digit_numeric(s)?; (minute, s) }; // '-00:00' indicates an unknown local offset. ensure!(is_positive || hour > 0 || minute > 0); ensure!(hour < 24 && minute < 60); let hour = hour as i8; let minute = minute as i8; if is_positive { Some((hour, minute, s)) } else { Some((-hour, -minute, s)) } } } /// Parses a two-digit base-10 number from ASCII string `s`, returning the number and the remaining /// string. fn parse_two_digit_numeric(s: &str) -> Option<(u8, &str)> { debug_assert!(s.is_ascii()); let (digits, s) = s.split_at(2); Some((digits.parse().ok()?, s)) } /// Splits ASCII string `s` at the first occurrence of a non-digit character. fn parse_digits(s: &str) -> (&str, &str) { debug_assert!(s.is_ascii()); let idx = s .as_bytes() .iter() .position(|c| !c.is_ascii_digit()) .unwrap_or(s.len()); s.split_at(idx) } /// Attempts to parse ASCII character `c` from ASCII string `s`, returning the remaining string. If /// the character can not be parsed, returns `None`. fn parse_char(s: &str, c: u8) -> Option<&str> { debug_assert!(s.is_ascii()); ensure!(*s.as_bytes().first()? == c); Some(&s[1..]) } /// Attempts to parse ASCII character `c` from ASCII string `s`, ignoring ASCII case, returning the /// remaining string. If the character can not be parsed, returns `None`. fn parse_char_ignore_case(s: &str, c: u8) -> Option<&str> { debug_assert!(s.is_ascii()); ensure!(s.as_bytes().first()?.eq_ignore_ascii_case(&c)); Some(&s[1..]) } /// Returns the offset in seconds from the Unix epoch of the date time. /// /// This is musl's [`__tm_to_secs`][1] converted to Rust via [c2rust][2] and then cleaned up by /// hand. /// /// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__tm_to_secs.c /// [2]: https://c2rust.com/ fn date_time_to_seconds(tm: &DateTime) -> i64 { let (start_of_year, is_leap) = year_to_seconds(tm.year); let seconds_within_year = month_to_seconds(tm.month, is_leap) + 86400 * u32::from(tm.day - 1) + 3600 * u32::from(tm.hour) + 60 * u32::from(tm.minute) + u32::from(tm.second); (start_of_year + i128::from(seconds_within_year)) as i64 } /// Returns the number of seconds in the year prior to the start of the provided month. /// /// This is musl's [`__month_to_secs`][1] converted to Rust via c2rust and then cleaned up by hand.
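/// For example (illustrative), `month_to_seconds(3, false)` is `59 * 86400`
/// (January plus February), and in a leap year the months after February each
/// gain one extra day's worth of seconds.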
/// /// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__month_to_secs.c fn month_to_seconds(month: u8, is_leap: bool) -> u32 { const SECS_THROUGH_MONTH: [u32; 12] = [ 0, 31 * 86400, 59 * 86400, 90 * 86400, 120 * 86400, 151 * 86400, 181 * 86400, 212 * 86400, 243 * 86400, 273 * 86400, 304 * 86400, 334 * 86400, ]; let t = SECS_THROUGH_MONTH[usize::from(month - 1)]; if is_leap && month > 2 { t + 86400 } else { t } } /// Returns the offset in seconds from the Unix epoch of the start of a year. /// /// musl's [`__year_to_secs`][1] converted to Rust via c2rust and then cleaned up by hand. /// /// Returns an i128 because the start of the earliest supported year underflows i64. /// /// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__year_to_secs.c pub(crate) fn year_to_seconds(year: i64) -> (i128, bool) { let is_leap; let year = year - 1900; // Fast path for years 1900 - 2038. if year as u64 <= 138 { let mut leaps: i64 = (year - 68) >> 2; if (year - 68).trailing_zeros() >= 2 { leaps -= 1; is_leap = true; } else { is_leap = false; } return ( i128::from(31_536_000 * (year - 70) + 86400 * leaps), is_leap, ); } let centuries: i64; let mut leaps: i64; let mut cycles: i64 = (year - 100) / 400; let mut rem: i64 = (year - 100) % 400; if rem < 0 { cycles -= 1; rem += 400 } if rem == 0 { is_leap = true; centuries = 0; leaps = 0; } else { if rem >= 200 { if rem >= 300 { centuries = 3; rem -= 300; } else { centuries = 2; rem -= 200; } } else if rem >= 100 { centuries = 1; rem -= 100; } else { centuries = 0; } if rem == 0 { is_leap = false; leaps = 0; } else { leaps = rem / 4; rem %= 4; is_leap = rem == 0; } } leaps += 97 * cycles + 24 * centuries - i64::from(is_leap); ( i128::from((year - 100) * 31_536_000) + i128::from(leaps * 86400 + 946_684_800 + 86400), is_leap, ) } /// Parses a timestamp in RFC 3339 format from `s`. pub(crate) fn parse_timestamp(s: &str) -> Option<Timestamp> { // Check that the string is ASCII, since subsequent parsing steps use byte-level indexing. ensure!(s.is_ascii()); let (year, month, day, s) = parse_date(s)?; if s.is_empty() { // The string only contained a date. let date_time = DateTime { year, month, day, ..DateTime::default() }; ensure!(date_time.is_valid()); return Some(Timestamp::from(date_time)); } // Accept either 'T' or ' ' as delimiter between date and time. let s = parse_char_ignore_case(s, b'T').or_else(|| parse_char(s, b' '))?; let (hour, minute, mut second, nanos, s) = parse_time(s)?; let (offset_hour, offset_minute, s) = parse_offset(s)?; ensure!(s.is_empty()); // Detect whether the timestamp falls in a leap second. If this is the case, roll it back // to the previous second. To be maximally conservative, this should be checking that the // timestamp is the last second in the UTC day (23:59:60), and even potentially checking // that it's the final day of the UTC month, however these checks are non-trivial because // at this point we have, in effect, a local date time, since the offset has not been // applied. if second == 60 { second = 59; } let date_time = DateTime { year, month, day, hour, minute, second, nanos, }; ensure!(date_time.is_valid()); let Timestamp { seconds, nanos } = Timestamp::from(date_time); let seconds = seconds.checked_sub(i64::from(offset_hour) * 3600 + i64::from(offset_minute) * 60)?; Some(Timestamp { seconds, nanos }) } /// Parse a duration in the [Protobuf JSON encoding spec format][1].
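/// For example (illustrative): `"3.000000001s"` parses to 3 seconds and 1
/// nanosecond, and `"-1.5s"` to -1 seconds and -500_000_000 nanos, matching
/// the sign convention used by `Duration::normalize`.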
/// /// [1]: https://developers.google.com/protocol-buffers/docs/proto3#json pub(crate) fn parse_duration(s: &str) -> Option<Duration> { // Check that the string is ASCII, since subsequent parsing steps use byte-level indexing. ensure!(s.is_ascii()); let (is_negative, s) = match parse_char(s, b'-') { Some(s) => (true, s), None => (false, s), }; let (digits, s) = parse_digits(s); let seconds = digits.parse::<i64>().ok()?; let (nanos, s) = parse_nanos(s)?; let s = parse_char(s, b's')?; ensure!(s.is_empty()); ensure!(nanos < crate::NANOS_PER_SECOND as u32); // If the duration is negative, also flip the nanos sign. let (seconds, nanos) = if is_negative { (-seconds, -(nanos as i32)) } else { (seconds, nanos as i32) }; Some(Duration { seconds, nanos }) } impl From<DateTime> for Timestamp { fn from(date_time: DateTime) -> Timestamp { let seconds = date_time_to_seconds(&date_time); let nanos = date_time.nanos; Timestamp { seconds, nanos: nanos as i32, } } } #[cfg(test)] mod tests { use super::*; use proptest::prelude::*; #[test] fn test_min_max() { assert_eq!( DateTime::MIN, DateTime::from(Timestamp { seconds: i64::MIN, nanos: 0 }), ); assert_eq!( DateTime::MAX, DateTime::from(Timestamp { seconds: i64::MAX, nanos: 999_999_999 }), ); } #[cfg(feature = "std")] #[test] fn test_datetime_from_timestamp() { let case = |expected: &str, secs: i64, nanos: i32| { let timestamp = Timestamp { seconds: secs, nanos, }; assert_eq!( expected, format!("{}", DateTime::from(timestamp.clone())), "timestamp: {:?}", timestamp ); }; // Mostly generated with: // - date -jur +"%Y-%m-%dT%H:%M:%S.000000000Z" // - http://unixtimestamp.50x.eu/ case("1970-01-01T00:00:00Z", 0, 0); case("1970-01-01T00:00:00.000000001Z", 0, 1); case("1970-01-01T00:00:00.123450Z", 0, 123_450_000); case("1970-01-01T00:00:00.050Z", 0, 50_000_000); case("1970-01-01T00:00:01.000000001Z", 1, 1); case("1970-01-01T00:01:01.000000001Z", 60 + 1, 1); case("1970-01-01T01:01:01.000000001Z", 60 * 60 + 60 + 1, 1); case( "1970-01-02T01:01:01.000000001Z", 24 * 60 * 60 + 60 * 60 + 60 + 1, 1, ); case("1969-12-31T23:59:59Z", -1, 0); case("1969-12-31T23:59:59.000001Z", -1, 1_000); case("1969-12-31T23:59:59.500Z", -1, 500_000_000); case("1969-12-31T23:58:59.000001Z", -60 - 1, 1_000); case("1969-12-31T22:58:59.000001Z", -60 * 60 - 60 - 1, 1_000); case( "1969-12-30T22:58:59.000000001Z", -24 * 60 * 60 - 60 * 60 - 60 - 1, 1, ); case("2038-01-19T03:14:07Z", i32::MAX as i64, 0); case("2038-01-19T03:14:08Z", i32::MAX as i64 + 1, 0); case("1901-12-13T20:45:52Z", i32::MIN as i64, 0); case("1901-12-13T20:45:51Z", i32::MIN as i64 - 1, 0); // Skipping these tests on windows as std::time::SystemTime range is low // on Windows compared with that of Unix which can cause the following // high date value tests to panic #[cfg(not(target_os = "windows"))] { case("+292277026596-12-04T15:30:07Z", i64::MAX, 0); case("+292277026596-12-04T15:30:06Z", i64::MAX - 1, 0); case("-292277022657-01-27T08:29:53Z", i64::MIN + 1, 0); } case("1900-01-01T00:00:00Z", -2_208_988_800, 0); case("1899-12-31T23:59:59Z", -2_208_988_801, 0); case("0000-01-01T00:00:00Z", -62_167_219_200, 0); case("-0001-12-31T23:59:59Z", -62_167_219_201, 0); case("1234-05-06T07:08:09Z", -23_215_049_511, 0); case("-1234-05-06T07:08:09Z", -101_097_651_111, 0); case("2345-06-07T08:09:01Z", 11_847_456_541, 0); case("-2345-06-07T08:09:01Z", -136_154_620_259, 0); } #[test] fn test_parse_timestamp() { // RFC 3339 Section 5.8 Examples assert_eq!( "1985-04-12T23:20:50.52Z".parse::<Timestamp>(), Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), );
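        // Note: offsets are normalized to UTC by the parser, so
        // 1996-12-19T16:39:57-08:00 is expected as 1996-12-20T00:39:57Z below.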
"1996-12-19T16:39:57-08:00".parse::(), Timestamp::date_time(1996, 12, 20, 0, 39, 57), ); assert_eq!( "1996-12-19T16:39:57-08:00".parse::(), Timestamp::date_time(1996, 12, 20, 0, 39, 57), ); assert_eq!( "1990-12-31T23:59:60Z".parse::(), Timestamp::date_time(1990, 12, 31, 23, 59, 59), ); assert_eq!( "1990-12-31T15:59:60-08:00".parse::(), Timestamp::date_time(1990, 12, 31, 23, 59, 59), ); assert_eq!( "1937-01-01T12:00:27.87+00:20".parse::(), Timestamp::date_time_nanos(1937, 1, 1, 11, 40, 27, 870_000_000), ); // Date assert_eq!( "1937-01-01".parse::(), Timestamp::date(1937, 1, 1), ); // Negative year assert_eq!( "-0008-01-01".parse::(), Timestamp::date(-8, 1, 1), ); // Plus year assert_eq!( "+19370-01-01".parse::(), Timestamp::date(19370, 1, 1), ); // Full nanos assert_eq!( "2020-02-03T01:02:03.123456789Z".parse::(), Timestamp::date_time_nanos(2020, 2, 3, 1, 2, 3, 123_456_789), ); // Leap day assert_eq!( "2020-02-29T01:02:03.00Z".parse::().unwrap(), Timestamp::from(DateTime { year: 2020, month: 2, day: 29, hour: 1, minute: 2, second: 3, nanos: 0, }), ); // Test extensions to RFC 3339. // ' ' instead of 'T' as date/time separator. assert_eq!( "1985-04-12 23:20:50.52Z".parse::(), Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), ); // No time zone specified. assert_eq!( "1985-04-12T23:20:50.52".parse::(), Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), ); // Offset without minutes specified. assert_eq!( "1996-12-19T16:39:57-08".parse::(), Timestamp::date_time(1996, 12, 20, 0, 39, 57), ); // Snowflake stage style. assert_eq!( "2015-09-12 00:47:19.591 Z".parse::(), Timestamp::date_time_nanos(2015, 9, 12, 0, 47, 19, 591_000_000), ); assert_eq!( "2020-06-15 00:01:02.123 +0800".parse::(), Timestamp::date_time_nanos(2020, 6, 14, 16, 1, 2, 123_000_000), ); } #[test] fn test_parse_duration() { let case = |s: &str, seconds: i64, nanos: i32| { assert_eq!( s.parse::().unwrap(), Duration { seconds, nanos }, "duration: {}", s ); }; case("0s", 0, 0); case("0.0s", 0, 0); case("0.000s", 0, 0); case("-0s", 0, 0); case("-0.0s", 0, 0); case("-0.000s", 0, 0); case("-0s", 0, 0); case("-0.0s", 0, 0); case("-0.000s", 0, 0); case("0.05s", 0, 50_000_000); case("0.050s", 0, 50_000_000); case("-0.05s", 0, -50_000_000); case("-0.050s", 0, -50_000_000); case("1s", 1, 0); case("1.0s", 1, 0); case("1.000s", 1, 0); case("-1s", -1, 0); case("-1.0s", -1, 0); case("-1.000s", -1, 0); case("15s", 15, 0); case("15.1s", 15, 100_000_000); case("15.100s", 15, 100_000_000); case("-15s", -15, 0); case("-15.1s", -15, -100_000_000); case("-15.100s", -15, -100_000_000); case("100.000000009s", 100, 9); case("-100.000000009s", -100, -9); } #[test] fn test_parse_non_ascii() { assert!("2021️⃣-06-15 00:01:02.123 +0800" .parse::() .is_err()); assert!("1️⃣s".parse::().is_err()); } proptest! 
{ #[cfg(feature = "std")] #[test] fn check_timestamp_parse_to_string_roundtrip( system_time in std::time::SystemTime::arbitrary(), ) { let ts = Timestamp::from(system_time); assert_eq!( ts, ts.to_string().parse::().unwrap(), ) } #[cfg(feature = "std")] #[test] fn check_duration_parse_to_string_roundtrip( duration in core::time::Duration::arbitrary(), ) { let duration = match Duration::try_from(duration) { Ok(duration) => duration, Err(_) => return Err(TestCaseError::reject("duration out of range")), }; prop_assert_eq!( &duration, &duration.to_string().parse::().unwrap(), "{}", duration.to_string() ); } } } prost-types-0.12.6/src/duration.rs000064400000000000000000000330501046102023000152060ustar 00000000000000use super::*; #[cfg(feature = "std")] impl std::hash::Hash for Duration { fn hash(&self, state: &mut H) { self.seconds.hash(state); self.nanos.hash(state); } } impl Duration { /// Normalizes the duration to a canonical format. /// /// Based on [`google::protobuf::util::CreateNormalized`][1]. /// /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L79-L100 pub fn normalize(&mut self) { // Make sure nanos is in the range. if self.nanos <= -NANOS_PER_SECOND || self.nanos >= NANOS_PER_SECOND { if let Some(seconds) = self .seconds .checked_add((self.nanos / NANOS_PER_SECOND) as i64) { self.seconds = seconds; self.nanos %= NANOS_PER_SECOND; } else if self.nanos < 0 { // Negative overflow! Set to the least normal value. self.seconds = i64::MIN; self.nanos = -NANOS_MAX; } else { // Positive overflow! Set to the greatest normal value. self.seconds = i64::MAX; self.nanos = NANOS_MAX; } } // nanos should have the same sign as seconds. if self.seconds < 0 && self.nanos > 0 { if let Some(seconds) = self.seconds.checked_add(1) { self.seconds = seconds; self.nanos -= NANOS_PER_SECOND; } else { // Positive overflow! Set to the greatest normal value. debug_assert_eq!(self.seconds, i64::MAX); self.nanos = NANOS_MAX; } } else if self.seconds > 0 && self.nanos < 0 { if let Some(seconds) = self.seconds.checked_sub(1) { self.seconds = seconds; self.nanos += NANOS_PER_SECOND; } else { // Negative overflow! Set to the least normal value. debug_assert_eq!(self.seconds, i64::MIN); self.nanos = -NANOS_MAX; } } // TODO: should this be checked? // debug_assert!(self.seconds >= -315_576_000_000 && self.seconds <= 315_576_000_000, // "invalid duration: {:?}", self); } } impl Name for Duration { const PACKAGE: &'static str = PACKAGE; const NAME: &'static str = "Duration"; fn type_url() -> String { type_url_for::() } } impl TryFrom for Duration { type Error = DurationError; /// Converts a `std::time::Duration` to a `Duration`, failing if the duration is too large. fn try_from(duration: time::Duration) -> Result { let seconds = i64::try_from(duration.as_secs()).map_err(|_| DurationError::OutOfRange)?; let nanos = duration.subsec_nanos() as i32; let mut duration = Duration { seconds, nanos }; duration.normalize(); Ok(duration) } } impl TryFrom for time::Duration { type Error = DurationError; /// Converts a `Duration` to a `std::time::Duration`, failing if the duration is negative. 
fn try_from(mut duration: Duration) -> Result<time::Duration, DurationError> { duration.normalize(); if duration.seconds >= 0 && duration.nanos >= 0 { Ok(time::Duration::new( duration.seconds as u64, duration.nanos as u32, )) } else { Err(DurationError::NegativeDuration(time::Duration::new( (-duration.seconds) as u64, (-duration.nanos) as u32, ))) } } } impl fmt::Display for Duration { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = self.clone(); d.normalize(); if self.seconds < 0 && self.nanos < 0 { write!(f, "-")?; } write!(f, "{}", d.seconds.abs())?; // Format subseconds to either nothing, millis, micros, or nanos. let nanos = d.nanos.abs(); if nanos == 0 { write!(f, "s") } else if nanos % 1_000_000 == 0 { write!(f, ".{:03}s", nanos / 1_000_000) } else if nanos % 1_000 == 0 { write!(f, ".{:06}s", nanos / 1_000) } else { write!(f, ".{:09}s", nanos) } } } /// A duration handling error. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, PartialEq)] #[non_exhaustive] pub enum DurationError { /// Indicates failure to parse a [`Duration`] from a string. /// /// The [`Duration`] string format is specified in the [Protobuf JSON mapping specification][1]. /// /// [1]: https://developers.google.com/protocol-buffers/docs/proto3#json ParseFailure, /// Indicates failure to convert a `prost_types::Duration` to a `std::time::Duration` because /// the duration is negative. The included `std::time::Duration` matches the magnitude of the /// original negative `prost_types::Duration`. NegativeDuration(time::Duration), /// Indicates failure to convert a `std::time::Duration` to a `prost_types::Duration`. /// /// Converting a `std::time::Duration` to a `prost_types::Duration` fails if the magnitude /// exceeds that representable by `prost_types::Duration`. OutOfRange, } impl fmt::Display for DurationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { DurationError::ParseFailure => write!(f, "failed to parse duration"), DurationError::NegativeDuration(duration) => { write!(f, "failed to convert negative duration: {:?}", duration) } DurationError::OutOfRange => { write!(f, "failed to convert duration out of range") } } } } #[cfg(feature = "std")] impl std::error::Error for DurationError {} impl FromStr for Duration { type Err = DurationError; fn from_str(s: &str) -> Result<Duration, DurationError> { datetime::parse_duration(s).ok_or(DurationError::ParseFailure) } } #[cfg(test)] mod tests { use super::*; #[cfg(feature = "std")] use proptest::prelude::*; #[cfg(feature = "std")] proptest!
{ #[test] fn check_duration_roundtrip( seconds in u64::arbitrary(), nanos in 0u32..1_000_000_000u32, ) { let std_duration = time::Duration::new(seconds, nanos); let prost_duration = match Duration::try_from(std_duration) { Ok(duration) => duration, Err(_) => return Err(TestCaseError::reject("duration out of range")), }; prop_assert_eq!(time::Duration::try_from(prost_duration.clone()).unwrap(), std_duration); if std_duration != time::Duration::default() { let neg_prost_duration = Duration { seconds: -prost_duration.seconds, nanos: -prost_duration.nanos, }; prop_assert!( matches!( time::Duration::try_from(neg_prost_duration), Err(DurationError::NegativeDuration(d)) if d == std_duration, ) ) } } #[test] fn check_duration_roundtrip_nanos( nanos in u32::arbitrary(), ) { let seconds = 0; let std_duration = std::time::Duration::new(seconds, nanos); let prost_duration = match Duration::try_from(std_duration) { Ok(duration) => duration, Err(_) => return Err(TestCaseError::reject("duration out of range")), }; prop_assert_eq!(time::Duration::try_from(prost_duration.clone()).unwrap(), std_duration); if std_duration != time::Duration::default() { let neg_prost_duration = Duration { seconds: -prost_duration.seconds, nanos: -prost_duration.nanos, }; prop_assert!( matches!( time::Duration::try_from(neg_prost_duration), Err(DurationError::NegativeDuration(d)) if d == std_duration, ) ) } } } #[cfg(feature = "std")] #[test] fn check_duration_try_from_negative_nanos() { let seconds: u64 = 0; let nanos: u32 = 1; let std_duration = std::time::Duration::new(seconds, nanos); let neg_prost_duration = Duration { seconds: 0, nanos: -1, }; assert!(matches!( time::Duration::try_from(neg_prost_duration), Err(DurationError::NegativeDuration(d)) if d == std_duration, )) } #[test] fn check_duration_normalize() { #[rustfmt::skip] // Don't mangle the table formatting. 
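        // Each row is (source line, input seconds, input nanos, expected
        // seconds, expected nanos); the i64::MIN / i64::MAX rows exercise the
        // saturating overflow paths in normalize().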
let cases = [ // --- Table of test cases --- // test seconds test nanos expected seconds expected nanos (line!(), 0, 0, 0, 0), (line!(), 1, 1, 1, 1), (line!(), -1, -1, -1, -1), (line!(), 0, 999_999_999, 0, 999_999_999), (line!(), 0, -999_999_999, 0, -999_999_999), (line!(), 0, 1_000_000_000, 1, 0), (line!(), 0, -1_000_000_000, -1, 0), (line!(), 0, 1_000_000_001, 1, 1), (line!(), 0, -1_000_000_001, -1, -1), (line!(), -1, 1, 0, -999_999_999), (line!(), 1, -1, 0, 999_999_999), (line!(), -1, 1_000_000_000, 0, 0), (line!(), 1, -1_000_000_000, 0, 0), (line!(), i64::MIN , 0, i64::MIN , 0), (line!(), i64::MIN + 1, 0, i64::MIN + 1, 0), (line!(), i64::MIN , 1, i64::MIN + 1, -999_999_999), (line!(), i64::MIN , 1_000_000_000, i64::MIN + 1, 0), (line!(), i64::MIN , -1_000_000_000, i64::MIN , -999_999_999), (line!(), i64::MIN + 1, -1_000_000_000, i64::MIN , 0), (line!(), i64::MIN + 2, -1_000_000_000, i64::MIN + 1, 0), (line!(), i64::MIN , -1_999_999_998, i64::MIN , -999_999_999), (line!(), i64::MIN + 1, -1_999_999_998, i64::MIN , -999_999_998), (line!(), i64::MIN + 2, -1_999_999_998, i64::MIN + 1, -999_999_998), (line!(), i64::MIN , -1_999_999_999, i64::MIN , -999_999_999), (line!(), i64::MIN + 1, -1_999_999_999, i64::MIN , -999_999_999), (line!(), i64::MIN + 2, -1_999_999_999, i64::MIN + 1, -999_999_999), (line!(), i64::MIN , -2_000_000_000, i64::MIN , -999_999_999), (line!(), i64::MIN + 1, -2_000_000_000, i64::MIN , -999_999_999), (line!(), i64::MIN + 2, -2_000_000_000, i64::MIN , 0), (line!(), i64::MIN , -999_999_998, i64::MIN , -999_999_998), (line!(), i64::MIN + 1, -999_999_998, i64::MIN + 1, -999_999_998), (line!(), i64::MAX , 0, i64::MAX , 0), (line!(), i64::MAX - 1, 0, i64::MAX - 1, 0), (line!(), i64::MAX , -1, i64::MAX - 1, 999_999_999), (line!(), i64::MAX , 1_000_000_000, i64::MAX , 999_999_999), (line!(), i64::MAX - 1, 1_000_000_000, i64::MAX , 0), (line!(), i64::MAX - 2, 1_000_000_000, i64::MAX - 1, 0), (line!(), i64::MAX , 1_999_999_998, i64::MAX , 999_999_999), (line!(), i64::MAX - 1, 1_999_999_998, i64::MAX , 999_999_998), (line!(), i64::MAX - 2, 1_999_999_998, i64::MAX - 1, 999_999_998), (line!(), i64::MAX , 1_999_999_999, i64::MAX , 999_999_999), (line!(), i64::MAX - 1, 1_999_999_999, i64::MAX , 999_999_999), (line!(), i64::MAX - 2, 1_999_999_999, i64::MAX - 1, 999_999_999), (line!(), i64::MAX , 2_000_000_000, i64::MAX , 999_999_999), (line!(), i64::MAX - 1, 2_000_000_000, i64::MAX , 999_999_999), (line!(), i64::MAX - 2, 2_000_000_000, i64::MAX , 0), (line!(), i64::MAX , 999_999_998, i64::MAX , 999_999_998), (line!(), i64::MAX - 1, 999_999_998, i64::MAX - 1, 999_999_998), ]; for case in cases.iter() { let mut test_duration = Duration { seconds: case.1, nanos: case.2, }; test_duration.normalize(); assert_eq!( test_duration, Duration { seconds: case.3, nanos: case.4, }, "test case on line {} doesn't match", case.0, ); } } } prost-types-0.12.6/src/lib.rs000064400000000000000000000030011046102023000141200ustar 00000000000000#![doc(html_root_url = "https://docs.rs/prost-types/0.12.6")] //! Protocol Buffers well-known types. //! //! Note that the documentation for the types defined in this crate are generated from the Protobuf //! definitions, so code examples are not in Rust. //! //! See the [Protobuf reference][1] for more information about well-known types. //! //! ## Feature Flags //! - `std`: Enable integration with standard library. Disable this feature for `no_std` support. This feature is enabled by default. //! //! 
[1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf #![cfg_attr(not(feature = "std"), no_std)] #[rustfmt::skip] pub mod compiler; mod datetime; #[rustfmt::skip] mod protobuf; use core::convert::TryFrom; use core::fmt; use core::i32; use core::i64; use core::str::FromStr; use core::time; use prost::alloc::format; use prost::alloc::string::String; use prost::alloc::vec::Vec; use prost::{DecodeError, EncodeError, Message, Name}; pub use protobuf::*; // The Protobuf `Duration` and `Timestamp` types can't delegate to the standard library equivalents // because the Protobuf versions are signed. To make them easier to work with, `From` conversions // are defined in both directions. const NANOS_PER_SECOND: i32 = 1_000_000_000; const NANOS_MAX: i32 = NANOS_PER_SECOND - 1; const PACKAGE: &str = "google.protobuf"; mod any; mod duration; pub use duration::DurationError; mod timestamp; pub use timestamp::TimestampError; mod type_url; pub(crate) use type_url::{type_url_for, TypeUrl}; prost-types-0.12.6/src/protobuf.rs000064400000000000000000002752641046102023000152400ustar 00000000000000// This file is @generated by prost-build. /// The protocol compiler can output a FileDescriptorSet containing the .proto /// files it parses. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileDescriptorSet { #[prost(message, repeated, tag = "1")] pub file: ::prost::alloc::vec::Vec, } /// Describes a complete .proto file. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileDescriptorProto { /// file name, relative to root of source tree #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, /// e.g. "foo", "foo.bar", etc. #[prost(string, optional, tag = "2")] pub package: ::core::option::Option<::prost::alloc::string::String>, /// Names of files imported by this file. #[prost(string, repeated, tag = "3")] pub dependency: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Indexes of the public imported files in the dependency list above. #[prost(int32, repeated, packed = "false", tag = "10")] pub public_dependency: ::prost::alloc::vec::Vec, /// Indexes of the weak imported files in the dependency list. /// For Google-internal migration only. Do not use. #[prost(int32, repeated, packed = "false", tag = "11")] pub weak_dependency: ::prost::alloc::vec::Vec, /// All top-level definitions in this file. #[prost(message, repeated, tag = "4")] pub message_type: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "5")] pub enum_type: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub service: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "7")] pub extension: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "8")] pub options: ::core::option::Option, /// This field contains optional information about the original source code. /// You may safely remove this entire field without harming runtime /// functionality of the descriptors -- the information is needed only by /// development tools. #[prost(message, optional, tag = "9")] pub source_code_info: ::core::option::Option, /// The syntax of the proto file. /// The supported values are "proto2" and "proto3". #[prost(string, optional, tag = "12")] pub syntax: ::core::option::Option<::prost::alloc::string::String>, } /// Describes a message type. 
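/// A `DescriptorProto` corresponds to one `message` block in a .proto file:
/// its fields, nested messages and enums, extension ranges, and reserved
/// ranges (orientation note added here; see descriptor.proto for the source).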
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, repeated, tag = "2")] pub field: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub extension: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "3")] pub nested_type: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "4")] pub enum_type: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "5")] pub extension_range: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "8")] pub oneof_decl: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "7")] pub options: ::core::option::Option, #[prost(message, repeated, tag = "9")] pub reserved_range: ::prost::alloc::vec::Vec, /// Reserved field names, which may not be used by fields in the same message. /// A given name may only be reserved once. #[prost(string, repeated, tag = "10")] pub reserved_name: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `DescriptorProto`. pub mod descriptor_proto { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExtensionRange { /// Inclusive. #[prost(int32, optional, tag = "1")] pub start: ::core::option::Option, /// Exclusive. #[prost(int32, optional, tag = "2")] pub end: ::core::option::Option, #[prost(message, optional, tag = "3")] pub options: ::core::option::Option, } /// Range of reserved tag numbers. Reserved tag numbers may not be used by /// fields or extension ranges in the same message. Reserved ranges may /// not overlap. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReservedRange { /// Inclusive. #[prost(int32, optional, tag = "1")] pub start: ::core::option::Option, /// Exclusive. #[prost(int32, optional, tag = "2")] pub end: ::core::option::Option, } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExtensionRangeOptions { /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } /// Describes a field within a message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FieldDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(int32, optional, tag = "3")] pub number: ::core::option::Option, #[prost(enumeration = "field_descriptor_proto::Label", optional, tag = "4")] pub label: ::core::option::Option, /// If type_name is set, this need not be set. If both this and type_name /// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. #[prost(enumeration = "field_descriptor_proto::Type", optional, tag = "5")] pub r#type: ::core::option::Option, /// For message and enum types, this is the name of the type. If the name /// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping /// rules are used to find the type (i.e. first the nested types within this /// message are searched, then within the parent, on up to the root /// namespace). #[prost(string, optional, tag = "6")] pub type_name: ::core::option::Option<::prost::alloc::string::String>, /// For extensions, this is the name of the type being extended. 
It is /// resolved in the same manner as type_name. #[prost(string, optional, tag = "2")] pub extendee: ::core::option::Option<::prost::alloc::string::String>, /// For numeric types, contains the original text representation of the value. /// For booleans, "true" or "false". /// For strings, contains the default text contents (not escaped in any way). /// For bytes, contains the C escaped value. All bytes >= 128 are escaped. /// TODO(kenton): Base-64 encode? #[prost(string, optional, tag = "7")] pub default_value: ::core::option::Option<::prost::alloc::string::String>, /// If set, gives the index of a oneof in the containing type's oneof_decl /// list. This field is a member of that oneof. #[prost(int32, optional, tag = "9")] pub oneof_index: ::core::option::Option, /// JSON name of this field. The value is set by protocol compiler. If the /// user has set a "json_name" option on this field, that option's value /// will be used. Otherwise, it's deduced from the field's name by converting /// it to camelCase. #[prost(string, optional, tag = "10")] pub json_name: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, optional, tag = "8")] pub options: ::core::option::Option, /// If true, this is a proto3 "optional". When a proto3 field is optional, it /// tracks presence regardless of field type. /// /// When proto3_optional is true, this field must be belong to a oneof to /// signal to old proto3 clients that presence is tracked for this field. This /// oneof is known as a "synthetic" oneof, and this field must be its sole /// member (each proto3 optional field gets its own synthetic oneof). Synthetic /// oneofs exist in the descriptor only, and do not generate any API. Synthetic /// oneofs must be ordered after all "real" oneofs. /// /// For message fields, proto3_optional doesn't create any semantic change, /// since non-repeated message fields always track presence. However it still /// indicates the semantic detail of whether the user wrote "optional" or not. /// This can be useful for round-tripping the .proto file. For consistency we /// give message fields a synthetic oneof also, even though it is not required /// to track presence. This is especially important because the parser can't /// tell if a field is a message or an enum, so it must always create a /// synthetic oneof. /// /// Proto2 optional fields do not set this flag, because they already indicate /// optional with `LABEL_OPTIONAL`. #[prost(bool, optional, tag = "17")] pub proto3_optional: ::core::option::Option, } /// Nested message and enum types in `FieldDescriptorProto`. pub mod field_descriptor_proto { #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum Type { /// 0 is reserved for errors. /// Order is weird for historical reasons. Double = 1, Float = 2, /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if /// negative values are likely. Int64 = 3, Uint64 = 4, /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if /// negative values are likely. Int32 = 5, Fixed64 = 6, Fixed32 = 7, Bool = 8, String = 9, /// Tag-delimited aggregate. /// Group type is deprecated and not supported in proto3. However, Proto3 /// implementations should still be able to parse the group wire format and /// treat group fields as unknown fields. Group = 10, /// Length-delimited aggregate. Message = 11, /// New in version 2. Bytes = 12, Uint32 = 13, Enum = 14, Sfixed32 = 15, Sfixed64 = 16, /// Uses ZigZag encoding. 
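///
/// ZigZag maps signed integers to unsigned ones so that values with small
/// magnitude stay small on the wire (0 → 0, -1 → 1, 1 → 2, -2 → 3, and so on).
/// A sketch of the 32-bit mapping, for illustration only:
///
/// ```rust
/// fn zigzag32(n: i32) -> u32 {
///     // the arithmetic right shift replicates the sign bit across all 32 bits
///     ((n as u32) << 1) ^ ((n >> 31) as u32)
/// }
///
/// assert_eq!(zigzag32(0), 0);
/// assert_eq!(zigzag32(-1), 1);
/// assert_eq!(zigzag32(1), 2);
/// ```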
Sint32 = 17, /// Uses ZigZag encoding. Sint64 = 18, } impl Type { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Type::Double => "TYPE_DOUBLE", Type::Float => "TYPE_FLOAT", Type::Int64 => "TYPE_INT64", Type::Uint64 => "TYPE_UINT64", Type::Int32 => "TYPE_INT32", Type::Fixed64 => "TYPE_FIXED64", Type::Fixed32 => "TYPE_FIXED32", Type::Bool => "TYPE_BOOL", Type::String => "TYPE_STRING", Type::Group => "TYPE_GROUP", Type::Message => "TYPE_MESSAGE", Type::Bytes => "TYPE_BYTES", Type::Uint32 => "TYPE_UINT32", Type::Enum => "TYPE_ENUM", Type::Sfixed32 => "TYPE_SFIXED32", Type::Sfixed64 => "TYPE_SFIXED64", Type::Sint32 => "TYPE_SINT32", Type::Sint64 => "TYPE_SINT64", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "TYPE_DOUBLE" => Some(Self::Double), "TYPE_FLOAT" => Some(Self::Float), "TYPE_INT64" => Some(Self::Int64), "TYPE_UINT64" => Some(Self::Uint64), "TYPE_INT32" => Some(Self::Int32), "TYPE_FIXED64" => Some(Self::Fixed64), "TYPE_FIXED32" => Some(Self::Fixed32), "TYPE_BOOL" => Some(Self::Bool), "TYPE_STRING" => Some(Self::String), "TYPE_GROUP" => Some(Self::Group), "TYPE_MESSAGE" => Some(Self::Message), "TYPE_BYTES" => Some(Self::Bytes), "TYPE_UINT32" => Some(Self::Uint32), "TYPE_ENUM" => Some(Self::Enum), "TYPE_SFIXED32" => Some(Self::Sfixed32), "TYPE_SFIXED64" => Some(Self::Sfixed64), "TYPE_SINT32" => Some(Self::Sint32), "TYPE_SINT64" => Some(Self::Sint64), _ => None, } } } #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum Label { /// 0 is reserved for errors Optional = 1, Required = 2, Repeated = 3, } impl Label { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Label::Optional => "LABEL_OPTIONAL", Label::Required => "LABEL_REQUIRED", Label::Repeated => "LABEL_REPEATED", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "LABEL_OPTIONAL" => Some(Self::Optional), "LABEL_REQUIRED" => Some(Self::Required), "LABEL_REPEATED" => Some(Self::Repeated), _ => None, } } } } /// Describes a oneof. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OneofDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, optional, tag = "2")] pub options: ::core::option::Option, } /// Describes an enum type. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, repeated, tag = "2")] pub value: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "3")] pub options: ::core::option::Option, /// Range of reserved numeric values. Reserved numeric values may not be used /// by enum values in the same enum declaration. Reserved ranges may not /// overlap. 
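///
/// A sketch of filling in such a range programmatically; note that, unlike
/// message reserved ranges, `start` and `end` are both inclusive here:
///
/// ```rust
/// use prost_types::{enum_descriptor_proto::EnumReservedRange, EnumDescriptorProto};
///
/// let mut desc = EnumDescriptorProto::default();
/// desc.reserved_range.push(EnumReservedRange {
///     start: Some(5), // first reserved numeric value
///     end: Some(15),  // last reserved numeric value (inclusive)
/// });
/// ```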
#[prost(message, repeated, tag = "4")] pub reserved_range: ::prost::alloc::vec::Vec< enum_descriptor_proto::EnumReservedRange, >, /// Reserved enum value names, which may not be reused. A given name may only /// be reserved once. #[prost(string, repeated, tag = "5")] pub reserved_name: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `EnumDescriptorProto`. pub mod enum_descriptor_proto { /// Range of reserved numeric values. Reserved values may not be used by /// entries in the same enum. Reserved ranges may not overlap. /// /// Note that this is distinct from DescriptorProto.ReservedRange in that it /// is inclusive such that it can appropriately represent the entire int32 /// domain. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumReservedRange { /// Inclusive. #[prost(int32, optional, tag = "1")] pub start: ::core::option::Option, /// Inclusive. #[prost(int32, optional, tag = "2")] pub end: ::core::option::Option, } } /// Describes a value within an enum. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumValueDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(int32, optional, tag = "2")] pub number: ::core::option::Option, #[prost(message, optional, tag = "3")] pub options: ::core::option::Option, } /// Describes a service. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, repeated, tag = "2")] pub method: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "3")] pub options: ::core::option::Option, } /// Describes a method of a service. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MethodDescriptorProto { #[prost(string, optional, tag = "1")] pub name: ::core::option::Option<::prost::alloc::string::String>, /// Input and output type names. These are resolved in the same way as /// FieldDescriptorProto.type_name, but must refer to a message type. #[prost(string, optional, tag = "2")] pub input_type: ::core::option::Option<::prost::alloc::string::String>, #[prost(string, optional, tag = "3")] pub output_type: ::core::option::Option<::prost::alloc::string::String>, #[prost(message, optional, tag = "4")] pub options: ::core::option::Option, /// Identifies if client streams multiple client messages #[prost(bool, optional, tag = "5", default = "false")] pub client_streaming: ::core::option::Option, /// Identifies if server streams multiple server messages #[prost(bool, optional, tag = "6", default = "false")] pub server_streaming: ::core::option::Option, } /// Each of the definitions above may have "options" attached. These are /// just annotations which may cause code to be generated slightly differently /// or may contain hints for code that manipulates protocol messages. /// /// Clients may define custom options as extensions of the \*Options messages. /// These extensions may not yet be known at parsing time, so the parser cannot /// store the values in them. Instead it stores them in a field in the \*Options /// message called uninterpreted_option. This field must have the same name /// across all \*Options messages. 
We then use this field to populate the /// extensions when we build a descriptor, at which point all protos have been /// parsed and so all extensions are known. /// /// Extension numbers for custom options may be chosen as follows: /// /// * For options which will only be used within a single application or /// organization, or for experimental options, use field numbers 50000 /// through 99999. It is up to you to ensure that you do not use the /// same number for multiple options. /// * For options which will be published and used publicly by multiple /// independent entities, e-mail protobuf-global-extension-registry@google.com /// to reserve extension numbers. Simply provide your project name (e.g. /// Objective-C plugin) and your project website (if available) -- there's no /// need to explain how you intend to use them. Usually you only need one /// extension number. You can declare multiple options with only one extension /// number by putting them in a sub-message. See the Custom Options section of /// the docs for examples: /// /// If this turns out to be popular, a web service will be set up /// to automatically assign option numbers. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileOptions { /// Sets the Java package where classes generated from this .proto will be /// placed. By default, the proto package is used, but this is often /// inappropriate because proto packages do not normally start with backwards /// domain names. #[prost(string, optional, tag = "1")] pub java_package: ::core::option::Option<::prost::alloc::string::String>, /// Controls the name of the wrapper Java class generated for the .proto file. /// That class will always contain the .proto file's getDescriptor() method as /// well as any top-level extensions defined in the .proto file. /// If java_multiple_files is disabled, then all the other classes from the /// .proto file will be nested inside the single wrapper outer class. #[prost(string, optional, tag = "8")] pub java_outer_classname: ::core::option::Option<::prost::alloc::string::String>, /// If enabled, then the Java code generator will generate a separate .java /// file for each top-level message, enum, and service defined in the .proto /// file. Thus, these types will *not* be nested inside the wrapper class /// named by java_outer_classname. However, the wrapper class will still be /// generated to contain the file's getDescriptor() method as well as any /// top-level extensions defined in the file. #[prost(bool, optional, tag = "10", default = "false")] pub java_multiple_files: ::core::option::Option, /// This option does nothing. #[deprecated] #[prost(bool, optional, tag = "20")] pub java_generate_equals_and_hash: ::core::option::Option, /// If set true, then the Java2 code generator will generate code that /// throws an exception whenever an attempt is made to assign a non-UTF-8 /// byte sequence to a string field. /// Message reflection will do the same. /// However, an extension field still accepts non-UTF-8 byte sequences. /// This option has no effect on when used with the lite runtime. #[prost(bool, optional, tag = "27", default = "false")] pub java_string_check_utf8: ::core::option::Option, #[prost( enumeration = "file_options::OptimizeMode", optional, tag = "9", default = "Speed" )] pub optimize_for: ::core::option::Option, /// Sets the Go package where structs generated from this .proto will be /// placed. 
If omitted, the Go package will be derived from the following: /// /// * The basename of the package import path, if provided. /// * Otherwise, the package statement in the .proto file, if present. /// * Otherwise, the basename of the .proto file, without extension. #[prost(string, optional, tag = "11")] pub go_package: ::core::option::Option<::prost::alloc::string::String>, /// Should generic services be generated in each language? "Generic" services /// are not specific to any particular RPC system. They are generated by the /// main code generators in each language (without additional plugins). /// Generic services were the only kind of service generation supported by /// early versions of google.protobuf. /// /// Generic services are now considered deprecated in favor of using plugins /// that generate code specific to your particular RPC system. Therefore, /// these default to false. Old code which depends on generic services should /// explicitly set them to true. #[prost(bool, optional, tag = "16", default = "false")] pub cc_generic_services: ::core::option::Option, #[prost(bool, optional, tag = "17", default = "false")] pub java_generic_services: ::core::option::Option, #[prost(bool, optional, tag = "18", default = "false")] pub py_generic_services: ::core::option::Option, #[prost(bool, optional, tag = "42", default = "false")] pub php_generic_services: ::core::option::Option, /// Is this file deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for everything in the file, or it will be completely ignored; in the very /// least, this is a formalization for deprecating files. #[prost(bool, optional, tag = "23", default = "false")] pub deprecated: ::core::option::Option, /// Enables the use of arenas for the proto messages in this file. This applies /// only to generated classes for C++. #[prost(bool, optional, tag = "31", default = "true")] pub cc_enable_arenas: ::core::option::Option, /// Sets the objective c class prefix which is prepended to all objective c /// generated classes from this .proto. There is no default. #[prost(string, optional, tag = "36")] pub objc_class_prefix: ::core::option::Option<::prost::alloc::string::String>, /// Namespace for generated classes; defaults to the package. #[prost(string, optional, tag = "37")] pub csharp_namespace: ::core::option::Option<::prost::alloc::string::String>, /// By default Swift generators will take the proto package and CamelCase it /// replacing '.' with underscore and use that to prefix the types/symbols /// defined. When this options is provided, they will use this value instead /// to prefix the types/symbols defined. #[prost(string, optional, tag = "39")] pub swift_prefix: ::core::option::Option<::prost::alloc::string::String>, /// Sets the php class prefix which is prepended to all php generated classes /// from this .proto. Default is empty. #[prost(string, optional, tag = "40")] pub php_class_prefix: ::core::option::Option<::prost::alloc::string::String>, /// Use this option to change the namespace of php generated classes. Default /// is empty. When this option is empty, the package name will be used for /// determining the namespace. #[prost(string, optional, tag = "41")] pub php_namespace: ::core::option::Option<::prost::alloc::string::String>, /// Use this option to change the namespace of php generated metadata classes. /// Default is empty. When this option is empty, the proto file name will be /// used for determining the namespace. 
#[prost(string, optional, tag = "44")] pub php_metadata_namespace: ::core::option::Option<::prost::alloc::string::String>, /// Use this option to change the package of ruby generated classes. Default /// is empty. When this option is not set, the package name will be used for /// determining the ruby package. #[prost(string, optional, tag = "45")] pub ruby_package: ::core::option::Option<::prost::alloc::string::String>, /// The parser stores options it doesn't recognize here. /// See the documentation for the "Options" section above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `FileOptions`. pub mod file_options { /// Generated classes can be optimized for speed or code size. #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum OptimizeMode { /// Generate complete code for parsing, serialization, Speed = 1, /// etc. /// /// Use ReflectionOps to implement these methods. CodeSize = 2, /// Generate code using MessageLite and the lite runtime. LiteRuntime = 3, } impl OptimizeMode { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { OptimizeMode::Speed => "SPEED", OptimizeMode::CodeSize => "CODE_SIZE", OptimizeMode::LiteRuntime => "LITE_RUNTIME", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "SPEED" => Some(Self::Speed), "CODE_SIZE" => Some(Self::CodeSize), "LITE_RUNTIME" => Some(Self::LiteRuntime), _ => None, } } } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageOptions { /// Set true to use the old proto1 MessageSet wire format for extensions. /// This is provided for backwards-compatibility with the MessageSet wire /// format. You should not use this for any other reason: It's less /// efficient, has fewer features, and is more complicated. /// /// The message must be defined exactly as follows: /// message Foo { /// option message_set_wire_format = true; /// extensions 4 to max; /// } /// Note that the message cannot have any defined fields; MessageSets only /// have extensions. /// /// All extensions of your type must be singular messages; e.g. they cannot /// be int32s, enums, or repeated messages. /// /// Because this is an option, the above two restrictions are not enforced by /// the protocol compiler. #[prost(bool, optional, tag = "1", default = "false")] pub message_set_wire_format: ::core::option::Option, /// Disables the generation of the standard "descriptor()" accessor, which can /// conflict with a field of the same name. This is meant to make migration /// from proto1 easier; new code should avoid fields named "descriptor". #[prost(bool, optional, tag = "2", default = "false")] pub no_standard_descriptor_accessor: ::core::option::Option, /// Is this message deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the message, or it will be completely ignored; in the very least, /// this is a formalization for deprecating messages. 
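///
/// A sketch of reading this flag through the prost-generated accessor, which
/// returns the declared default (`false`) when the option is unset:
///
/// ```rust
/// fn is_deprecated(opts: &prost_types::MessageOptions) -> bool {
///     // the field itself is `Option<bool>`; the getter applies the default
///     opts.deprecated()
/// }
/// ```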
    #[prost(bool, optional, tag = "3", default = "false")]
    pub deprecated: ::core::option::Option<bool>,
    /// Whether the message is an automatically generated map entry type for the
    /// maps field.
    ///
    /// For maps fields:
    ///     map\<KeyType, ValueType\> map_field = 1;
    /// The parsed descriptor looks like:
    ///     message MapFieldEntry {
    ///         option map_entry = true;
    ///         optional KeyType key = 1;
    ///         optional ValueType value = 2;
    ///     }
    ///     repeated MapFieldEntry map_field = 1;
    ///
    /// Implementations may choose not to generate the map_entry=true message, but
    /// use a native map in the target language to hold the keys and values.
    /// The reflection APIs in such implementations still need to work as
    /// if the field is a repeated message field.
    ///
    /// NOTE: Do not set the option in .proto files. Always use the maps syntax
    /// instead. The option should only be implicitly set by the proto compiler
    /// parser.
    #[prost(bool, optional, tag = "7")]
    pub map_entry: ::core::option::Option<bool>,
    /// The parser stores options it doesn't recognize here. See above.
    #[prost(message, repeated, tag = "999")]
    pub uninterpreted_option: ::prost::alloc::vec::Vec<UninterpretedOption>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FieldOptions {
    /// The ctype option instructs the C++ code generator to use a different
    /// representation of the field than it normally would. See the specific
    /// options below. This option is not yet implemented in the open source
    /// release -- sorry, we'll try to include it in a future version!
    #[prost(
        enumeration = "field_options::CType",
        optional,
        tag = "1",
        default = "String"
    )]
    pub ctype: ::core::option::Option<i32>,
    /// The packed option can be enabled for repeated primitive fields to enable
    /// a more efficient representation on the wire. Rather than repeatedly
    /// writing the tag and type for each element, the entire array is encoded as
    /// a single length-delimited blob. In proto3, only explicit setting it to
    /// false will avoid using packed encoding.
    #[prost(bool, optional, tag = "2")]
    pub packed: ::core::option::Option<bool>,
    /// The jstype option determines the JavaScript type used for values of the
    /// field. The option is permitted only for 64 bit integral and fixed types
    /// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
    /// is represented as JavaScript string, which avoids loss of precision that
    /// can happen when a large value is converted to a floating point JavaScript.
    /// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
    /// use the JavaScript "number" type. The behavior of the default option
    /// JS_NORMAL is implementation dependent.
    ///
    /// This option is an enum to permit additional types to be added, e.g.
    /// goog.math.Integer.
    #[prost(
        enumeration = "field_options::JsType",
        optional,
        tag = "6",
        default = "JsNormal"
    )]
    pub jstype: ::core::option::Option<i32>,
    /// Should this field be parsed lazily? Lazy applies only to message-type
    /// fields. It means that when the outer message is initially parsed, the
    /// inner message's contents will not be parsed but instead stored in encoded
    /// form. The inner message will actually be parsed when it is first accessed.
    ///
    /// This is only a hint. Implementations are free to choose whether to use
    /// eager or lazy parsing regardless of the value of this option. However,
    /// setting this option true suggests that the protocol author believes that
    /// using lazy parsing on this field is worth the additional bookkeeping
    /// overhead typically needed to implement it.
/// /// This option does not affect the public interface of any generated code; /// all method signatures remain the same. Furthermore, thread-safety of the /// interface is not affected by this option; const methods remain safe to /// call from multiple threads concurrently, while non-const methods continue /// to require exclusive access. /// /// Note that implementations may choose not to check required fields within /// a lazy sub-message. That is, calling IsInitialized() on the outer message /// may return true even if the inner message has missing required fields. /// This is necessary because otherwise the inner message would have to be /// parsed in order to perform the check, defeating the purpose of lazy /// parsing. An implementation which chooses not to check required fields /// must be consistent about it. That is, for any particular sub-message, the /// implementation must either *always* check its required fields, or *never* /// check its required fields, regardless of whether or not the message has /// been parsed. #[prost(bool, optional, tag = "5", default = "false")] pub lazy: ::core::option::Option, /// Is this field deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for accessors, or it will be completely ignored; in the very least, this /// is a formalization for deprecating fields. #[prost(bool, optional, tag = "3", default = "false")] pub deprecated: ::core::option::Option, /// For Google-internal migration only. Do not use. #[prost(bool, optional, tag = "10", default = "false")] pub weak: ::core::option::Option, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `FieldOptions`. pub mod field_options { #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum CType { /// Default mode. String = 0, Cord = 1, StringPiece = 2, } impl CType { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { CType::String => "STRING", CType::Cord => "CORD", CType::StringPiece => "STRING_PIECE", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "STRING" => Some(Self::String), "CORD" => Some(Self::Cord), "STRING_PIECE" => Some(Self::StringPiece), _ => None, } } } #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum JsType { /// Use the default type. JsNormal = 0, /// Use JavaScript strings. JsString = 1, /// Use JavaScript numbers. JsNumber = 2, } impl JsType { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { JsType::JsNormal => "JS_NORMAL", JsType::JsString => "JS_STRING", JsType::JsNumber => "JS_NUMBER", } } /// Creates an enum from field names used in the ProtoBuf definition. 
pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "JS_NORMAL" => Some(Self::JsNormal), "JS_STRING" => Some(Self::JsString), "JS_NUMBER" => Some(Self::JsNumber), _ => None, } } } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OneofOptions { /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumOptions { /// Set this option to true to allow mapping different tag names to the same /// value. #[prost(bool, optional, tag = "2")] pub allow_alias: ::core::option::Option, /// Is this enum deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the enum, or it will be completely ignored; in the very least, this /// is a formalization for deprecating enums. #[prost(bool, optional, tag = "3", default = "false")] pub deprecated: ::core::option::Option, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumValueOptions { /// Is this enum value deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the enum value, or it will be completely ignored; in the very least, /// this is a formalization for deprecating enum values. #[prost(bool, optional, tag = "1", default = "false")] pub deprecated: ::core::option::Option, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceOptions { /// Is this service deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the service, or it will be completely ignored; in the very least, /// this is a formalization for deprecating services. #[prost(bool, optional, tag = "33", default = "false")] pub deprecated: ::core::option::Option, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MethodOptions { /// Is this method deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the method, or it will be completely ignored; in the very least, /// this is a formalization for deprecating methods. #[prost(bool, optional, tag = "33", default = "false")] pub deprecated: ::core::option::Option, #[prost( enumeration = "method_options::IdempotencyLevel", optional, tag = "34", default = "IdempotencyUnknown" )] pub idempotency_level: ::core::option::Option, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag = "999")] pub uninterpreted_option: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `MethodOptions`. pub mod method_options { /// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, /// or neither? 
HTTP based RPC implementation may choose GET verb for safe /// methods, and PUT verb for idempotent methods instead of the default POST. #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration )] #[repr(i32)] pub enum IdempotencyLevel { IdempotencyUnknown = 0, /// implies idempotent NoSideEffects = 1, /// idempotent, but may have side effects Idempotent = 2, } impl IdempotencyLevel { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { IdempotencyLevel::IdempotencyUnknown => "IDEMPOTENCY_UNKNOWN", IdempotencyLevel::NoSideEffects => "NO_SIDE_EFFECTS", IdempotencyLevel::Idempotent => "IDEMPOTENT", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "IDEMPOTENCY_UNKNOWN" => Some(Self::IdempotencyUnknown), "NO_SIDE_EFFECTS" => Some(Self::NoSideEffects), "IDEMPOTENT" => Some(Self::Idempotent), _ => None, } } } } /// A message representing a option the parser does not recognize. This only /// appears in options protos created by the compiler::Parser class. /// DescriptorPool resolves these when building Descriptor objects. Therefore, /// options protos in descriptor objects (e.g. returned by Descriptor::options(), /// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions /// in them. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UninterpretedOption { #[prost(message, repeated, tag = "2")] pub name: ::prost::alloc::vec::Vec, /// The value of the uninterpreted option, in whatever type the tokenizer /// identified it as during parsing. Exactly one of these should be set. #[prost(string, optional, tag = "3")] pub identifier_value: ::core::option::Option<::prost::alloc::string::String>, #[prost(uint64, optional, tag = "4")] pub positive_int_value: ::core::option::Option, #[prost(int64, optional, tag = "5")] pub negative_int_value: ::core::option::Option, #[prost(double, optional, tag = "6")] pub double_value: ::core::option::Option, #[prost(bytes = "vec", optional, tag = "7")] pub string_value: ::core::option::Option<::prost::alloc::vec::Vec>, #[prost(string, optional, tag = "8")] pub aggregate_value: ::core::option::Option<::prost::alloc::string::String>, } /// Nested message and enum types in `UninterpretedOption`. pub mod uninterpreted_option { /// The name of the uninterpreted option. Each string represents a segment in /// a dot-separated name. is_extension is true iff a segment represents an /// extension (denoted with parentheses in options specs in .proto files). /// E.g.,{ \["foo", false\], \["bar.baz", true\], \["qux", false\] } represents /// "foo.(bar.baz).qux". #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NamePart { #[prost(string, required, tag = "1")] pub name_part: ::prost::alloc::string::String, #[prost(bool, required, tag = "2")] pub is_extension: bool, } } /// Encapsulates information about the original source file from which a /// FileDescriptorProto was generated. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceCodeInfo { /// A Location identifies a piece of source code in a .proto file which /// corresponds to a particular definition. This information is intended /// to be useful to IDEs, code indexers, documentation generators, and similar /// tools. /// /// For example, say we have a file like: /// message Foo { /// optional string foo = 1; /// } /// Let's look at just the field definition: /// optional string foo = 1; /// ^ ^^ ^^ ^ ^^^ /// a bc de f ghi /// We have the following locations: /// span path represents /// \[a,i) \[ 4, 0, 2, 0 \] The whole field definition. /// \[a,b) \[ 4, 0, 2, 0, 4 \] The label (optional). /// \[c,d) \[ 4, 0, 2, 0, 5 \] The type (string). /// \[e,f) \[ 4, 0, 2, 0, 1 \] The name (foo). /// \[g,h) \[ 4, 0, 2, 0, 3 \] The number (1). /// /// Notes: /// /// * A location may refer to a repeated field itself (i.e. not to any /// particular index within it). This is used whenever a set of elements are /// logically enclosed in a single code segment. For example, an entire /// extend block (possibly containing multiple extension definitions) will /// have an outer location whose path refers to the "extensions" repeated /// field without an index. /// * Multiple locations may have the same path. This happens when a single /// logical declaration is spread out across multiple places. The most /// obvious example is the "extend" block again -- there may be multiple /// extend blocks in the same scope, each of which will have the same path. /// * A location's span is not always a subset of its parent's span. For /// example, the "extendee" of an extension declaration appears at the /// beginning of the "extend" block and is shared by all extensions within /// the block. /// * Just because a location's span is a subset of some other location's span /// does not mean that it is a descendant. For example, a "group" defines /// both a type and a field in a single declaration. Thus, the locations /// corresponding to the type and field and their components will overlap. /// * Code which tries to interpret locations should probably be designed to /// ignore those that it doesn't understand, as more types of locations could /// be recorded in the future. #[prost(message, repeated, tag = "1")] pub location: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `SourceCodeInfo`. pub mod source_code_info { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Location { /// Identifies which part of the FileDescriptorProto was defined at this /// location. /// /// Each element is a field number or an index. They form a path from /// the root FileDescriptorProto to the place where the definition. For /// example, this path: /// \[ 4, 3, 2, 7, 1 \] /// refers to: /// file.message_type(3) // 4, 3 /// .field(7) // 2, 7 /// .name() // 1 /// This is because FileDescriptorProto.message_type has field number 4: /// repeated DescriptorProto message_type = 4; /// and DescriptorProto.field has field number 2: /// repeated FieldDescriptorProto field = 2; /// and FieldDescriptorProto.name has field number 1: /// optional string name = 1; /// /// Thus, the above path gives the location of a field name. If we removed /// the last element: /// \[ 4, 3, 2, 7 \] /// this path refers to the whole field declaration (from the beginning /// of the label to the terminating semicolon). 
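///
/// A sketch of resolving the documented example path `[ 4, 3, 2, 7, 1 ]` by
/// hand (illustrative only; direct indexing panics when out of range):
///
/// ```rust
/// fn field_name(file: &prost_types::FileDescriptorProto) -> &str {
///     // message_type(3).field(7).name()
///     file.message_type[3].field[7].name()
/// }
/// ```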
#[prost(int32, repeated, tag = "1")] pub path: ::prost::alloc::vec::Vec, /// Always has exactly three or four elements: start line, start column, /// end line (optional, otherwise assumed same as start line), end column. /// These are packed into a single field for efficiency. Note that line /// and column numbers are zero-based -- typically you will want to add /// 1 to each before displaying to a user. #[prost(int32, repeated, tag = "2")] pub span: ::prost::alloc::vec::Vec, /// If this SourceCodeInfo represents a complete declaration, these are any /// comments appearing before and after the declaration which appear to be /// attached to the declaration. /// /// A series of line comments appearing on consecutive lines, with no other /// tokens appearing on those lines, will be treated as a single comment. /// /// leading_detached_comments will keep paragraphs of comments that appear /// before (but not connected to) the current element. Each paragraph, /// separated by empty lines, will be one comment element in the repeated /// field. /// /// Only the comment content is provided; comment markers (e.g. //) are /// stripped out. For block comments, leading whitespace and an asterisk /// will be stripped from the beginning of each line other than the first. /// Newlines are included in the output. /// /// Examples: /// /// optional int32 foo = 1; // Comment attached to foo. /// // Comment attached to bar. /// optional int32 bar = 2; /// /// optional string baz = 3; /// // Comment attached to baz. /// // Another line attached to baz. /// /// // Comment attached to qux. /// // /// // Another line attached to qux. /// optional double qux = 4; /// /// // Detached comment for corge. This is not leading or trailing comments /// // to qux or corge because there are blank lines separating it from /// // both. /// /// // Detached comment for corge paragraph 2. /// /// optional string corge = 5; /// /\* Block comment attached /// \* to corge. Leading asterisks /// \* will be removed. */ /// /* Block comment attached to /// \* grault. \*/ /// optional int32 grault = 6; /// /// // ignored detached comments. #[prost(string, optional, tag = "3")] pub leading_comments: ::core::option::Option<::prost::alloc::string::String>, #[prost(string, optional, tag = "4")] pub trailing_comments: ::core::option::Option<::prost::alloc::string::String>, #[prost(string, repeated, tag = "6")] pub leading_detached_comments: ::prost::alloc::vec::Vec< ::prost::alloc::string::String, >, } } /// Describes the relationship between generated code and its original source /// file. A GeneratedCodeInfo message is associated with only one generated /// source file, but may contain references to different source .proto files. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeneratedCodeInfo { /// An Annotation connects some span of text in generated code to an element /// of its generating .proto file. #[prost(message, repeated, tag = "1")] pub annotation: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `GeneratedCodeInfo`. pub mod generated_code_info { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Annotation { /// Identifies the element in the original source .proto file. This field /// is formatted the same as SourceCodeInfo.Location.path. #[prost(int32, repeated, tag = "1")] pub path: ::prost::alloc::vec::Vec, /// Identifies the filesystem path to the original source .proto. 
#[prost(string, optional, tag = "2")] pub source_file: ::core::option::Option<::prost::alloc::string::String>, /// Identifies the starting offset in bytes in the generated code /// that relates to the identified object. #[prost(int32, optional, tag = "3")] pub begin: ::core::option::Option, /// Identifies the ending offset in bytes in the generated code that /// relates to the identified offset. The end offset should be one past /// the last relevant byte (so the length of the text = end - begin). #[prost(int32, optional, tag = "4")] pub end: ::core::option::Option, } } /// `Any` contains an arbitrary serialized protocol buffer message along with a /// URL that describes the type of the serialized message. /// /// Protobuf library provides support to pack/unpack Any values in the form /// of utility functions or additional generated methods of the Any type. /// /// Example 1: Pack and unpack a message in C++. /// /// ```text /// Foo foo = ...; /// Any any; /// any.PackFrom(foo); /// ... /// if (any.UnpackTo(&foo)) { /// ... /// } /// ``` /// /// Example 2: Pack and unpack a message in Java. /// /// ```text /// Foo foo = ...; /// Any any = Any.pack(foo); /// ... /// if (any.is(Foo.class)) { /// foo = any.unpack(Foo.class); /// } /// ``` /// /// Example 3: Pack and unpack a message in Python. /// /// ```text /// foo = Foo(...) /// any = Any() /// any.Pack(foo) /// ... /// if any.Is(Foo.DESCRIPTOR): /// any.Unpack(foo) /// ... /// ``` /// /// Example 4: Pack and unpack a message in Go /// /// ```text /// foo := &pb.Foo{...} /// any, err := anypb.New(foo) /// if err != nil { /// ... /// } /// ... /// foo := &pb.Foo{} /// if err := any.UnmarshalTo(foo); err != nil { /// ... /// } /// ``` /// /// The pack methods provided by protobuf library will by default use /// 'type.googleapis.com/full.type.name' as the type URL and the unpack /// methods only use the fully qualified type name after the last '/' /// in the type URL, for example "foo.bar.com/x/y.z" will yield type /// name "y.z". /// /// # JSON /// /// The JSON representation of an `Any` value uses the regular /// representation of the deserialized, embedded message, with an /// additional field `@type` which contains the type URL. Example: /// /// ```text /// package google.profile; /// message Person { /// string first_name = 1; /// string last_name = 2; /// } /// /// { /// "@type": "type.googleapis.com/google.profile.Person", /// "firstName": , /// "lastName": /// } /// ``` /// /// If the embedded message type is well-known and has a custom JSON /// representation, that representation will be embedded adding a field /// `value` which holds the custom JSON in addition to the `@type` /// field. Example (for message \[google.protobuf.Duration\]\[\]): /// /// ```text /// { /// "@type": "type.googleapis.com/google.protobuf.Duration", /// "value": "1.212s" /// } /// ``` #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Any { /// A URL/resource name that uniquely identifies the type of the serialized /// protocol buffer message. This string must contain at least /// one "/" character. The last segment of the URL's path must represent /// the fully qualified name of the type (as in /// `path/google.protobuf.Duration`). The name should be in a canonical form /// (e.g., leading "." is not accepted). /// /// In practice, teams usually precompile into the binary all types that they /// expect it to use in the context of Any. 
However, for URLs which use the /// scheme `http`, `https`, or no scheme, one can optionally set up a type /// server that maps type URLs to message definitions as follows: /// /// * If no scheme is provided, `https` is assumed. /// * An HTTP GET on the URL must yield a \[google.protobuf.Type\]\[\] /// value in binary format, or produce an error. /// * Applications are allowed to cache lookup results based on the /// URL, or have them precompiled into a binary to avoid any /// lookup. Therefore, binary compatibility needs to be preserved /// on changes to types. (Use versioned type names to manage /// breaking changes.) /// /// Note: this functionality is not currently available in the official /// protobuf release, and it is not used for type URLs beginning with /// type.googleapis.com. /// /// Schemes other than `http`, `https` (or the empty scheme) might be /// used with implementation specific semantics. #[prost(string, tag = "1")] pub type_url: ::prost::alloc::string::String, /// Must be a valid serialized protocol buffer of the above specified type. #[prost(bytes = "vec", tag = "2")] pub value: ::prost::alloc::vec::Vec, } /// `SourceContext` represents information about the source of a /// protobuf element, like the file in which it is defined. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceContext { /// The path-qualified name of the .proto file that contained the associated /// protobuf element. For example: `"google/protobuf/source_context.proto"`. #[prost(string, tag = "1")] pub file_name: ::prost::alloc::string::String, } /// A protocol buffer message type. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Type { /// The fully qualified message name. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// The list of fields. #[prost(message, repeated, tag = "2")] pub fields: ::prost::alloc::vec::Vec, /// The list of types appearing in `oneof` definitions in this type. #[prost(string, repeated, tag = "3")] pub oneofs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// The protocol buffer options. #[prost(message, repeated, tag = "4")] pub options: ::prost::alloc::vec::Vec