gix-protocol-0.47.0/.cargo_vcs_info.json0000644000000001520000000000100135500ustar { "git": { "sha1": "beb0ea8c4ff94c64b7773772a9d388ccb403f3c1" }, "path_in_vcs": "gix-protocol" }gix-protocol-0.47.0/Cargo.toml0000644000000123260000000000100115540ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.65" name = "gix-protocol" version = "0.47.0" authors = ["Sebastian Thiel "] build = false include = [ "src/**/*", "LICENSE-*", "!**/tests/**/*", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A crate of the gitoxide project for implementing git protocols" readme = false license = "MIT OR Apache-2.0" repository = "https://github.com/GitoxideLabs/gitoxide" [package.metadata.docs.rs] features = [ "blocking-client", "document-features", "serde", ] [lib] name = "gix_protocol" path = "src/lib.rs" doctest = false [dependencies.async-trait] version = "0.1.51" optional = true [dependencies.bstr] version = "1.3.0" features = [ "std", "unicode", ] default-features = false [dependencies.document-features] version = "0.2.0" optional = true [dependencies.futures-io] version = "0.3.16" optional = true [dependencies.futures-lite] version = "2.1.0" optional = true [dependencies.gix-credentials] version = "^0.26.0" optional = true [dependencies.gix-date] version = "^0.9.3" [dependencies.gix-features] version = "^0.39.1" features = ["progress"] [dependencies.gix-hash] version = "^0.15.1" [dependencies.gix-lock] version = "^15.0.0" optional = true [dependencies.gix-negotiate] version = "^0.17.0" optional = true [dependencies.gix-object] version = "^0.46.1" optional = true [dependencies.gix-ref] version = "^0.49.1" [dependencies.gix-refspec] version = "^0.27.0" optional = true [dependencies.gix-revwalk] version = "^0.17.0" optional = true [dependencies.gix-shallow] version = "^0.1.0" [dependencies.gix-trace] version = "^0.1.11" optional = true [dependencies.gix-transport] version = "^0.44.0" [dependencies.gix-utils] version = "^0.1.13" [dependencies.maybe-async] version = "0.2.6" [dependencies.serde] version = "1.0.114" features = ["derive"] optional = true default-features = false [dependencies.thiserror] version = "2.0.0" [dependencies.winnow] version = "0.6" features = ["simd"] [dev-dependencies.async-std] version = "1.9.0" features = ["attributes"] [dev-dependencies.gix-packetline] version = "^0.18.2" [features] async-client = [ "gix-transport/async-client", "dep:async-trait", "dep:futures-io", "dep:futures-lite", "handshake", "fetch", ] blocking-client = [ "gix-transport/blocking-client", "maybe-async/is_sync", "handshake", "fetch", ] fetch = [ "dep:gix-negotiate", "dep:gix-object", "dep:gix-revwalk", "dep:gix-lock", "dep:gix-refspec", "dep:gix-trace", ] handshake = ["dep:gix-credentials"] serde = [ "dep:serde", "bstr/serde", "gix-transport/serde", "gix-hash/serde", "gix-shallow/serde", ] [lints.clippy] bool_to_int_with_if = "allow" borrow_as_ptr = "allow" cast_lossless = "allow" cast_possible_truncation = "allow" cast_possible_wrap = "allow" cast_precision_loss = "allow" cast_sign_loss = "allow" 
checked_conversions = "allow" copy_iterator = "allow" default_trait_access = "allow" doc_markdown = "allow" empty_docs = "allow" enum_glob_use = "allow" explicit_deref_methods = "allow" explicit_into_iter_loop = "allow" explicit_iter_loop = "allow" filter_map_next = "allow" fn_params_excessive_bools = "allow" from_iter_instead_of_collect = "allow" if_not_else = "allow" ignored_unit_patterns = "allow" implicit_clone = "allow" inconsistent_struct_constructor = "allow" inefficient_to_string = "allow" inline_always = "allow" items_after_statements = "allow" iter_not_returning_iterator = "allow" iter_without_into_iter = "allow" manual_assert = "allow" manual_is_variant_and = "allow" manual_let_else = "allow" manual_string_new = "allow" many_single_char_names = "allow" match_bool = "allow" match_same_arms = "allow" match_wild_err_arm = "allow" match_wildcard_for_single_variants = "allow" missing_errors_doc = "allow" missing_panics_doc = "allow" module_name_repetitions = "allow" must_use_candidate = "allow" mut_mut = "allow" naive_bytecount = "allow" needless_for_each = "allow" needless_pass_by_value = "allow" needless_raw_string_hashes = "allow" no_effect_underscore_binding = "allow" option_option = "allow" range_plus_one = "allow" redundant_else = "allow" return_self_not_must_use = "allow" should_panic_without_expect = "allow" similar_names = "allow" single_match_else = "allow" stable_sort_primitive = "allow" struct_excessive_bools = "allow" struct_field_names = "allow" too_long_first_doc_paragraph = "allow" too_many_lines = "allow" transmute_ptr_to_ptr = "allow" trivially_copy_pass_by_ref = "allow" unnecessary_join = "allow" unnecessary_wraps = "allow" unreadable_literal = "allow" unused_self = "allow" used_underscore_binding = "allow" wildcard_imports = "allow" [lints.clippy.pedantic] level = "warn" priority = -1 [lints.rust] gix-protocol-0.47.0/Cargo.toml.orig000064400000000000000000000072241046102023000152360ustar 00000000000000lints.workspace = true [package] name = "gix-protocol" version = "0.47.0" repository = "https://github.com/GitoxideLabs/gitoxide" license = "MIT OR Apache-2.0" description = "A crate of the gitoxide project for implementing git protocols" authors = ["Sebastian Thiel "] edition = "2021" include = ["src/**/*", "LICENSE-*", "!**/tests/**/*"] rust-version = "1.65" [lib] doctest = false [features] #! ### _Mutually exclusive client _ #! The _client_ portion of the protocol uses `gix-transport` to communicate to a server. For it to be available, one of the following features must #! be selected. #! #! Specifying both causes a compile error, preventing the use of `--all-features`. ## If set, blocking command implementations are available and will use the blocking version of the `gix-transport` crate. blocking-client = [ "gix-transport/blocking-client", "maybe-async/is_sync", "handshake", "fetch" ] ## As above, but provides async implementations instead. async-client = [ "gix-transport/async-client", "dep:async-trait", "dep:futures-io", "dep:futures-lite", "handshake", "fetch" ] ## Add implementations for performing a `handshake` along with the dependencies needed for it. handshake = ["dep:gix-credentials"] ## Add implementations for performing a `fetch` (for packs) along with the dependencies needed for it. fetch = [ "dep:gix-negotiate", "dep:gix-object", "dep:gix-revwalk", "dep:gix-lock", "dep:gix-refspec", "dep:gix-trace", ] #! ### Other ## Data structures implement `serde::Serialize` and `serde::Deserialize`. 
serde = ["dep:serde", "bstr/serde", "gix-transport/serde", "gix-hash/serde", "gix-shallow/serde"] [[test]] name = "blocking" path = "tests/blocking-protocol.rs" required-features = ["blocking-client"] [[test]] name = "async" path = "tests/async-protocol.rs" required-features = ["async-client"] [dependencies] gix-features = { version = "^0.39.1", path = "../gix-features", features = [ "progress", ] } gix-transport = { version = "^0.44.0", path = "../gix-transport" } gix-hash = { version = "^0.15.1", path = "../gix-hash" } gix-shallow = { version = "^0.1.0", path = "../gix-shallow" } gix-date = { version = "^0.9.3", path = "../gix-date" } gix-utils = { version = "^0.1.13", path = "../gix-utils" } gix-ref = { version = "^0.49.1", path = "../gix-ref" } gix-trace = { version = "^0.1.11", path = "../gix-trace", optional = true } gix-negotiate = { version = "^0.17.0", path = "../gix-negotiate", optional = true } gix-object = { version = "^0.46.1", path = "../gix-object", optional = true } gix-revwalk = { version = "^0.17.0", path = "../gix-revwalk", optional = true } gix-credentials = { version = "^0.26.0", path = "../gix-credentials", optional = true } gix-refspec = { version = "^0.27.0", path = "../gix-refspec", optional = true } gix-lock = { version = "^15.0.0", path = "../gix-lock", optional = true } thiserror = "2.0.0" serde = { version = "1.0.114", optional = true, default-features = false, features = [ "derive", ] } bstr = { version = "1.3.0", default-features = false, features = [ "std", "unicode", ] } winnow = { version = "0.6", features = ["simd"] } # for async-client async-trait = { version = "0.1.51", optional = true } futures-io = { version = "0.3.16", optional = true } futures-lite = { version = "2.1.0", optional = true } maybe-async = "0.2.6" document-features = { version = "0.2.0", optional = true } [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } gix-packetline = { path = "../gix-packetline", version = "^0.18.2" } [package.metadata.docs.rs] features = ["blocking-client", "document-features", "serde"] gix-protocol-0.47.0/LICENSE-APACHE000064400000000000000000000247461046102023000143030ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. gix-protocol-0.47.0/LICENSE-MIT000064400000000000000000000017771046102023000140120ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. gix-protocol-0.47.0/src/command.rs000064400000000000000000000240341046102023000151200ustar 00000000000000//! V2 command abstraction to validate invocations and arguments, like a database of what we know about them. use std::borrow::Cow; use super::Command; /// A key value pair of values known at compile time. pub type Feature = (&'static str, Option>); impl Command { /// Produce the name of the command as known by the server side. 
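    ///
    /// For instance, [`Command::LsRefs`] maps to `"ls-refs"` and [`Command::Fetch`] to `"fetch"`,
    /// i.e. the command names used on the V2 wire; a tiny sketch:
    ///
    /// ```ignore
    /// assert_eq!(Command::Fetch.as_str(), "fetch");
    /// ```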
pub fn as_str(&self) -> &'static str { match self { Command::LsRefs => "ls-refs", Command::Fetch => "fetch", } } } #[cfg(any(test, feature = "async-client", feature = "blocking-client"))] mod with_io { use bstr::{BString, ByteSlice}; use gix_transport::client::Capabilities; use crate::{command::Feature, Command}; impl Command { /// Only V2 fn all_argument_prefixes(&self) -> &'static [&'static str] { match self { Command::LsRefs => &["symrefs", "peel", "ref-prefix ", "unborn"], Command::Fetch => &[ "want ", // hex oid "have ", // hex oid "done", "thin-pack", "no-progress", "include-tag", "ofs-delta", // Shallow feature/capability "shallow ", // hex oid "deepen ", // commit depth "deepen-relative", "deepen-since ", // time-stamp "deepen-not ", // rev // filter feature/capability "filter ", // filter-spec // ref-in-want feature "want-ref ", // ref path // sideband-all feature "sideband-all", // packfile-uris feature "packfile-uris ", // protocols // wait-for-done feature "wait-for-done", ], } } fn all_features(&self, version: gix_transport::Protocol) -> &'static [&'static str] { match self { Command::LsRefs => &[], Command::Fetch => match version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => &[ "multi_ack", "thin-pack", "side-band", "side-band-64k", "ofs-delta", "shallow", "deepen-since", "deepen-not", "deepen-relative", "no-progress", "include-tag", "multi_ack_detailed", "allow-tip-sha1-in-want", "allow-reachable-sha1-in-want", "no-done", "filter", ], gix_transport::Protocol::V2 => &[ "shallow", "filter", "ref-in-want", "sideband-all", "packfile-uris", "wait-for-done", ], }, } } /// Provide the initial arguments based on the given `features`. /// They are typically provided by the [`Self::default_features`] method. /// Only useful for V2, and based on heuristics/experimentation. pub fn initial_v2_arguments(&self, features: &[Feature]) -> Vec { match self { Command::Fetch => ["thin-pack", "ofs-delta"] .iter() .map(|s| s.as_bytes().as_bstr().to_owned()) .chain( [ "sideband-all", /* "packfile-uris" */ // packfile-uris must be configurable and can't just be used. Some servers advertise it and reject it later. ] .iter() .filter(|f| features.iter().any(|(sf, _)| sf == *f)) .map(|f| f.as_bytes().as_bstr().to_owned()), ) .collect(), Command::LsRefs => vec![b"symrefs".as_bstr().to_owned(), b"peel".as_bstr().to_owned()], } } /// Turns on all modern features for V1 and all supported features for V2, returning them as a vector of features. /// Note that this is the basis for any fetch operation as these features fulfil basic requirements and reasonably up-to-date servers. 
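        ///
        /// A minimal sketch of the typical call site, assuming a `handshake` outcome was obtained beforehand
        /// (for instance via `crate::fetch::handshake()`):
        ///
        /// ```ignore
        /// let mut features = Command::Fetch.default_features(handshake.server_protocol_version, &handshake.capabilities);
        /// features.push(("agent", Some("my-agent".into())));
        /// ```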
pub fn default_features( &self, version: gix_transport::Protocol, server_capabilities: &Capabilities, ) -> Vec { match self { Command::Fetch => match version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { let has_multi_ack_detailed = server_capabilities.contains("multi_ack_detailed"); let has_sideband_64k = server_capabilities.contains("side-band-64k"); self.all_features(version) .iter() .copied() .filter(|feature| match *feature { "side-band" if has_sideband_64k => false, "multi_ack" if has_multi_ack_detailed => false, "no-progress" => false, feature => server_capabilities.contains(feature), }) .map(|s| (s, None)) .collect() } gix_transport::Protocol::V2 => { let supported_features: Vec<_> = server_capabilities .iter() .find_map(|c| { if c.name() == Command::Fetch.as_str() { c.values().map(|v| v.map(ToOwned::to_owned).collect()) } else { None } }) .unwrap_or_default(); self.all_features(version) .iter() .copied() .filter(|feature| supported_features.iter().any(|supported| supported == feature)) .map(|s| (s, None)) .collect() } }, Command::LsRefs => vec![], } } /// Return an error if the given `arguments` and `features` don't match what's statically known. pub fn validate_argument_prefixes( &self, version: gix_transport::Protocol, server: &Capabilities, arguments: &[BString], features: &[Feature], ) -> Result<(), validate_argument_prefixes::Error> { use validate_argument_prefixes::Error; let allowed = self.all_argument_prefixes(); for arg in arguments { if allowed.iter().any(|allowed| arg.starts_with(allowed.as_bytes())) { continue; } return Err(Error::UnsupportedArgument { command: self.as_str(), argument: arg.clone(), }); } match version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { for (feature, _) in features { if server .iter() .any(|c| feature.starts_with(c.name().to_str_lossy().as_ref())) { continue; } return Err(Error::UnsupportedCapability { command: self.as_str(), feature: feature.to_string(), }); } } gix_transport::Protocol::V2 => { let allowed = server .iter() .find_map(|c| { if c.name() == self.as_str() { c.values().map(|v| v.map(ToString::to_string).collect::>()) } else { None } }) .unwrap_or_default(); for (feature, _) in features { if allowed.iter().any(|allowed| feature == allowed) { continue; } match *feature { "agent" => {} _ => { return Err(Error::UnsupportedCapability { command: self.as_str(), feature: feature.to_string(), }) } } } } } Ok(()) } } /// pub mod validate_argument_prefixes { use bstr::BString; /// The error returned by [Command::validate_argument_prefixes()](super::Command::validate_argument_prefixes()). #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("{command}: argument {argument} is not known or allowed")] UnsupportedArgument { command: &'static str, argument: BString }, #[error("{command}: capability {feature} is not supported")] UnsupportedCapability { command: &'static str, feature: String }, } } } #[cfg(any(test, feature = "async-client", feature = "blocking-client"))] pub use with_io::validate_argument_prefixes; gix-protocol-0.47.0/src/fetch/arguments/async_io.rs000064400000000000000000000046001046102023000204010ustar 00000000000000use futures_lite::io::AsyncWriteExt; use gix_transport::{client, client::TransportV2Ext}; use crate::{fetch::Arguments, Command}; impl Arguments { /// Send fetch arguments to the server, and indicate this is the end of negotiations only if `add_done_argument` is present. 
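    ///
    /// A hedged sketch of one negotiation round; `transport`, the previously added wants/haves and `is_done`
    /// are assumed to be prepared by the caller, as done in [`fetch()`](crate::fetch()):
    ///
    /// ```ignore
    /// let mut reader = arguments.send(&mut transport, is_done).await?;
    /// // read acknowledgements (and possibly the pack) from `reader`
    /// ```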
pub async fn send<'a, T: client::Transport + 'a>( &mut self, transport: &'a mut T, add_done_argument: bool, ) -> Result + Unpin + 'a>, client::Error> { if self.haves.is_empty() { assert!(add_done_argument, "If there are no haves, is_done must be true."); } match self.version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { let (on_into_read, retained_state) = self.prepare_v1( transport.connection_persists_across_multiple_requests(), add_done_argument, )?; let mut line_writer = transport.request( client::WriteMode::OneLfTerminatedLinePerWriteCall, on_into_read, self.trace, )?; let had_args = !self.args.is_empty(); for arg in self.args.drain(..) { line_writer.write_all(&arg).await?; } if had_args { line_writer.write_message(client::MessageKind::Flush).await?; } for line in self.haves.drain(..) { line_writer.write_all(&line).await?; } if let Some(next_args) = retained_state { self.args = next_args; } Ok(line_writer.into_read().await?) } gix_transport::Protocol::V2 => { let retained_state = self.args.clone(); self.args.append(&mut self.haves); if add_done_argument { self.args.push("done".into()); } transport .invoke( Command::Fetch.as_str(), self.features.iter().filter(|(_, v)| v.is_some()).cloned(), Some(std::mem::replace(&mut self.args, retained_state).into_iter()), self.trace, ) .await } } } } gix-protocol-0.47.0/src/fetch/arguments/blocking_io.rs000064400000000000000000000044161046102023000210610ustar 00000000000000use std::io::Write; use gix_transport::{client, client::TransportV2Ext}; use crate::{fetch::Arguments, Command}; impl Arguments { /// Send fetch arguments to the server, and indicate this is the end of negotiations only if `add_done_argument` is present. pub fn send<'a, T: client::Transport + 'a>( &mut self, transport: &'a mut T, add_done_argument: bool, ) -> Result + Unpin + 'a>, client::Error> { if self.haves.is_empty() { assert!(add_done_argument, "If there are no haves, is_done must be true."); } match self.version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { let (on_into_read, retained_state) = self.prepare_v1( transport.connection_persists_across_multiple_requests(), add_done_argument, )?; let mut line_writer = transport.request( client::WriteMode::OneLfTerminatedLinePerWriteCall, on_into_read, self.trace, )?; let had_args = !self.args.is_empty(); for arg in self.args.drain(..) { line_writer.write_all(&arg)?; } if had_args { line_writer.write_message(client::MessageKind::Flush)?; } for line in self.haves.drain(..) { line_writer.write_all(&line)?; } if let Some(next_args) = retained_state { self.args = next_args; } Ok(line_writer.into_read()?) } gix_transport::Protocol::V2 => { let retained_state = self.args.clone(); self.args.append(&mut self.haves); if add_done_argument { self.args.push("done".into()); } transport.invoke( Command::Fetch.as_str(), self.features.iter().filter(|(_, v)| v.is_some()).cloned(), Some(std::mem::replace(&mut self.args, retained_state).into_iter()), self.trace, ) } } } } gix-protocol-0.47.0/src/fetch/arguments/mod.rs000064400000000000000000000270521046102023000173620ustar 00000000000000use std::fmt; use bstr::{BStr, BString, ByteSlice, ByteVec}; /// The arguments passed to a server command. 
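///
/// An instance is created with `Arguments::new()` from the features agreed upon during the handshake and is
/// then filled with wants and haves before being sent off; a rough sketch with assumed surrounding values:
///
/// ```ignore
/// let mut arguments = Arguments::new(protocol_version, features, /* trace */ false);
/// arguments.want(&want_id);
/// arguments.have(&have_id);
/// ```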
#[derive(Debug)] pub struct Arguments { /// The active features/capabilities of the fetch invocation #[cfg(any(feature = "async-client", feature = "blocking-client"))] features: Vec, args: Vec, haves: Vec, filter: bool, shallow: bool, deepen_since: bool, deepen_not: bool, deepen_relative: bool, ref_in_want: bool, supports_include_tag: bool, features_for_first_want: Option>, #[cfg(any(feature = "async-client", feature = "blocking-client"))] version: gix_transport::Protocol, #[cfg(any(feature = "async-client", feature = "blocking-client"))] trace: bool, } impl Arguments { /// Return true if there is no argument at all. /// /// This can happen if callers assure that they won't add 'wants' if their 'have' is the same, i.e. if the remote has nothing /// new for them. pub fn is_empty(&self) -> bool { self.haves.is_empty() && !self.args.iter().rev().any(|arg| arg.starts_with_str("want ")) } /// Return true if ref filters is supported. pub fn can_use_filter(&self) -> bool { self.filter } /// Return true if shallow refs are supported. /// /// This is relevant for partial clones when using `--depth X`. pub fn can_use_shallow(&self) -> bool { self.shallow } /// Return true if the 'deepen' capability is supported. /// /// This is relevant for partial clones when using `--depth X` and retrieving additional history. pub fn can_use_deepen(&self) -> bool { self.shallow } /// Return true if the '`deepen_since`' capability is supported. /// /// This is relevant for partial clones when using `--depth X` and retrieving additional history /// based on a date beyond which all history should be present. pub fn can_use_deepen_since(&self) -> bool { self.deepen_since } /// Return true if the '`deepen_not`' capability is supported. /// /// This is relevant for partial clones when using `--depth X`. pub fn can_use_deepen_not(&self) -> bool { self.deepen_not } /// Return true if the '`deepen_relative`' capability is supported. /// /// This is relevant for partial clones when using `--depth X`. pub fn can_use_deepen_relative(&self) -> bool { self.deepen_relative } /// Return true if the 'ref-in-want' capability is supported. /// /// This can be used to bypass 'ls-refs' entirely in protocol v2. pub fn can_use_ref_in_want(&self) -> bool { self.ref_in_want } /// Return true if the 'include-tag' capability is supported. pub fn can_use_include_tag(&self) -> bool { self.supports_include_tag } /// Return true if we will use a stateless mode of operation, which can be decided in conjunction with `transport_is_stateless`. /// /// * we are always stateless if the transport is stateless, i.e. doesn't support multiple interactions with a single connection. /// * we are always stateless if the protocol version is `2` /// * otherwise we may be stateful. pub fn is_stateless(&self, transport_is_stateless: bool) -> bool { #[cfg(any(feature = "async-client", feature = "blocking-client"))] let res = transport_is_stateless || self.version == gix_transport::Protocol::V2; #[cfg(not(any(feature = "async-client", feature = "blocking-client")))] let res = transport_is_stateless; res } /// Add the given `id` pointing to a commit to the 'want' list. /// /// As such it should be included in the server response as it's not present on the client. pub fn want(&mut self, id: impl AsRef) { match self.features_for_first_want.take() { Some(features) => self.prefixed("want ", format!("{} {}", id.as_ref(), features.join(" "))), None => self.prefixed("want ", id.as_ref()), } } /// Add the given ref to the 'want-ref' list. 
/// /// The server should respond with a corresponding 'wanted-refs' section if it will include the /// wanted ref in the packfile response. pub fn want_ref(&mut self, ref_path: &BStr) { let mut arg = BString::from("want-ref "); arg.push_str(ref_path); self.args.push(arg); } /// Add the given `id` pointing to a commit to the 'have' list. /// /// As such it should _not_ be included in the server response as it's already present on the client. pub fn have(&mut self, id: impl AsRef) { self.haves.push(format!("have {}", id.as_ref()).into()); } /// Add the given `id` pointing to a commit to the 'shallow' list. pub fn shallow(&mut self, id: impl AsRef) { debug_assert!(self.shallow, "'shallow' feature required for 'shallow '"); if self.shallow { self.prefixed("shallow ", id.as_ref()); } } /// Deepen the commit history by `depth` amount of commits. pub fn deepen(&mut self, depth: usize) { debug_assert!(self.shallow, "'shallow' feature required for deepen"); if self.shallow { self.prefixed("deepen ", depth); } } /// Deepen the commit history to include all commits from now to (and including) `seconds` as passed since UNIX epoch. pub fn deepen_since(&mut self, seconds: gix_date::SecondsSinceUnixEpoch) { debug_assert!(self.deepen_since, "'deepen-since' feature required"); if self.deepen_since { self.prefixed("deepen-since ", seconds); } } /// Deepen the commit history in a relative instead of absolute fashion. pub fn deepen_relative(&mut self) { debug_assert!(self.deepen_relative, "'deepen-relative' feature required"); if self.deepen_relative { self.args.push("deepen-relative".into()); } } /// Do not include commits reachable by the given `ref_path` when deepening the history. pub fn deepen_not(&mut self, ref_path: &BStr) { debug_assert!(self.deepen_not, "'deepen-not' feature required"); if self.deepen_not { let mut line = BString::from("deepen-not "); line.extend_from_slice(ref_path); self.args.push(line); } } /// Set the given filter `spec` when listing references. pub fn filter(&mut self, spec: &str) { debug_assert!(self.filter, "'filter' feature required"); if self.filter { self.prefixed("filter ", spec); } } /// Permanently allow the server to include tags that point to commits or objects it would return. /// /// Needs to only be called once. #[cfg(any(feature = "async-client", feature = "blocking-client"))] pub fn use_include_tag(&mut self) { debug_assert!(self.supports_include_tag, "'include-tag' feature required"); if self.supports_include_tag { self.add_feature("include-tag"); } } /// Add the given `feature`, unconditionally. /// /// Note that sending an unknown or unsupported feature may cause the remote to terminate /// the connection. Use this method if you know what you are doing *and* there is no specialized /// method for this, e.g. [`Self::use_include_tag()`]. #[cfg(any(feature = "async-client", feature = "blocking-client"))] pub fn add_feature(&mut self, feature: &str) { match self.version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { let features = self .features_for_first_want .as_mut() .expect("call add_feature before first want()"); features.push(feature.into()); } gix_transport::Protocol::V2 => { self.args.push(feature.into()); } } } fn prefixed(&mut self, prefix: &str, value: impl fmt::Display) { self.args.push(format!("{prefix}{value}").into()); } /// Create a new instance to help setting up arguments to send to the server as part of a `fetch` operation /// for which `features` are the available and configured features to use. 
/// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `gix-trace` crate. #[cfg(any(feature = "async-client", feature = "blocking-client"))] pub fn new(version: gix_transport::Protocol, features: Vec, trace: bool) -> Self { use crate::Command; let has = |name: &str| features.iter().any(|f| f.0 == name); let filter = has("filter"); let shallow = has("shallow"); let ref_in_want = has("ref-in-want"); let mut deepen_since = shallow; let mut deepen_not = shallow; let mut deepen_relative = shallow; let supports_include_tag; let (initial_arguments, features_for_first_want) = match version { gix_transport::Protocol::V0 | gix_transport::Protocol::V1 => { deepen_since = has("deepen-since"); deepen_not = has("deepen-not"); deepen_relative = has("deepen-relative"); supports_include_tag = has("include-tag"); let baked_features = features .iter() .filter( |(f, _)| *f != "include-tag", /* not a capability in that sense, needs to be turned on by caller later */ ) .map(|(n, v)| match v { Some(v) => format!("{n}={v}"), None => n.to_string(), }) .collect::>(); (Vec::new(), Some(baked_features)) } gix_transport::Protocol::V2 => { supports_include_tag = true; (Command::Fetch.initial_v2_arguments(&features), None) } }; Arguments { features, version, args: initial_arguments, haves: Vec::new(), filter, shallow, supports_include_tag, deepen_not, deepen_relative, ref_in_want, deepen_since, features_for_first_want, trace, } } } #[cfg(any(feature = "blocking-client", feature = "async-client"))] mod shared { use bstr::{BString, ByteSlice}; use gix_transport::{client, client::MessageKind}; use crate::fetch::Arguments; impl Arguments { pub(in crate::fetch::arguments) fn prepare_v1( &mut self, transport_is_stateful: bool, add_done_argument: bool, ) -> Result<(MessageKind, Option>), client::Error> { if self.haves.is_empty() { assert!(add_done_argument, "If there are no haves, is_done must be true."); } let on_into_read = if add_done_argument { client::MessageKind::Text(&b"done"[..]) } else { client::MessageKind::Flush }; let retained_state = if transport_is_stateful { None } else { Some(self.args.clone()) }; if let Some(first_arg_position) = self.args.iter().position(|l| l.starts_with_str("want ")) { self.args.swap(first_arg_position, 0); } Ok((on_into_read, retained_state)) } } } #[cfg(feature = "async-client")] mod async_io; #[cfg(feature = "blocking-client")] mod blocking_io; gix-protocol-0.47.0/src/fetch/error.rs000064400000000000000000000031371046102023000157250ustar 00000000000000/// The error returned by [`fetch()`](crate::fetch()). 
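///
/// Note that it implements [`IsSpuriousError`](crate::transport::IsSpuriousError), which can help callers
/// decide whether retrying the entire fetch is worthwhile.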
#[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("Could not decode server reply")] FetchResponse(#[from] crate::fetch::response::Error), #[error(transparent)] Negotiate(#[from] crate::fetch::negotiate::Error), #[error(transparent)] Client(#[from] crate::transport::client::Error), #[error("Server lack feature {feature:?}: {description}")] MissingServerFeature { feature: &'static str, description: &'static str, }, #[error("Could not write 'shallow' file to incorporate remote updates after fetching")] WriteShallowFile(#[from] gix_shallow::write::Error), #[error("Could not read 'shallow' file to send current shallow boundary")] ReadShallowFile(#[from] gix_shallow::read::Error), #[error("'shallow' file could not be locked in preparation for writing changes")] LockShallowFile(#[from] gix_lock::acquire::Error), #[error("Receiving objects from shallow remotes is prohibited due to the value of `clone.rejectShallow`")] RejectShallowRemote, #[error("Failed to consume the pack sent by the remote")] ConsumePack(#[source] Box), #[error("Failed to read remaining bytes in stream")] ReadRemainingBytes(#[source] std::io::Error), } impl crate::transport::IsSpuriousError for Error { fn is_spurious(&self) -> bool { match self { Error::FetchResponse(err) => err.is_spurious(), Error::Client(err) => err.is_spurious(), _ => false, } } } gix-protocol-0.47.0/src/fetch/function.rs000064400000000000000000000264361046102023000164300ustar 00000000000000use crate::fetch::{ negotiate, Context, Error, Negotiate, NegotiateOutcome, Options, Outcome, ProgressId, Shallow, Tags, }; use crate::{fetch::Arguments, transport::packetline::read::ProgressAction}; use gix_features::progress::DynNestedProgress; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; /// Perform one fetch operation, relying on a `transport`. /// `negotiate` is used to run the negotiation of objects that should be contained in the pack, *if* one is to be received. /// `progress` and `should_interrupt` is passed to all potentially long-running parts of the operation. /// /// `consume_pack(pack_read, progress, interrupt) -> bool` is always called to consume all bytes that are sent by the server, returning `true` if we should assure the pack is read to the end, /// or `false` to do nothing. Dropping the reader without reading to EOF (i.e. returning `false`) is an offense to the server, and /// `transport` won't be in the correct state to perform additional operations, or indicate the end of operation. /// Note that the passed reader blocking as the pack-writing is blocking as well. /// /// The `Context` and `Options` further define parts of this `fetch` operation. /// /// As opposed to a full `git fetch`, this operation does *not*… /// /// * …update local refs /// * …end the interaction after the fetch /// /// **Note that the interaction will never be ended**, even on error or failure, leaving it up to the caller to do that, maybe /// with the help of [`SendFlushOnDrop`](crate::SendFlushOnDrop) which can wrap `transport`. /// Generally, the `transport` is left in a state that allows for more commands to be run. /// /// Return `Ok(None)` if there was nothing to do because all remote refs are at the same state as they are locally, /// or there was nothing wanted, or `Ok(Some(outcome))` to inform about all the changes that were made. 
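///
/// A hedged sketch of a blocking call site; the `negotiate` implementation, `transport`, `handshake` outcome,
/// `progress`, the interrupt flag and the path to the `shallow` file are all assumed to be prepared by the caller:
///
/// ```ignore
/// let outcome = fetch(
///     &mut negotiate,
///     |pack, progress, should_interrupt| -> std::io::Result<bool> {
///         // consume the pack stream here, e.g. write and index it,
///         Ok(true) // and ask `fetch()` to drain the stream up to the final flush packet.
///     },
///     progress,
///     &should_interrupt,
///     Context {
///         handshake: &mut handshake,
///         transport: &mut transport,
///         user_agent: ("agent", Some("my-agent".into())),
///         trace_packetlines: false,
///     },
///     Options {
///         shallow_file,
///         shallow: &Shallow::NoChange,
///         tags: Tags::Included,
///         reject_shallow_remote: false,
///     },
/// )?;
/// ```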
#[maybe_async::maybe_async] pub async fn fetch( negotiate: &mut impl Negotiate, consume_pack: impl FnOnce(&mut dyn std::io::BufRead, &mut dyn DynNestedProgress, &AtomicBool) -> Result, mut progress: P, should_interrupt: &AtomicBool, Context { handshake, transport, user_agent, trace_packetlines, }: Context<'_, T>, Options { shallow_file, shallow, tags, reject_shallow_remote, }: Options<'_>, ) -> Result, Error> where P: gix_features::progress::NestedProgress, P::SubProgress: 'static, T: gix_transport::client::Transport, E: Into>, { let _span = gix_trace::coarse!("gix_protocol::fetch()"); let v1_shallow_updates = handshake.v1_shallow_updates.take(); let protocol_version = handshake.server_protocol_version; let fetch = crate::Command::Fetch; let fetch_features = { let mut f = fetch.default_features(protocol_version, &handshake.capabilities); f.push(user_agent); f }; crate::fetch::Response::check_required_features(protocol_version, &fetch_features)?; let sideband_all = fetch_features.iter().any(|(n, _)| *n == "sideband-all"); let mut arguments = Arguments::new(protocol_version, fetch_features, trace_packetlines); if matches!(tags, Tags::Included) { if !arguments.can_use_include_tag() { return Err(Error::MissingServerFeature { feature: "include-tag", description: // NOTE: if this is an issue, we could probably do what's proposed here. "To make this work we would have to implement another pass to fetch attached tags separately", }); } arguments.use_include_tag(); } let (shallow_commits, mut shallow_lock) = add_shallow_args(&mut arguments, shallow, &shallow_file)?; let negotiate_span = gix_trace::detail!( "negotiate", protocol_version = handshake.server_protocol_version as usize ); let action = negotiate.mark_complete_and_common_ref()?; let mut previous_response = None::; match &action { negotiate::Action::NoChange | negotiate::Action::SkipToRefUpdate => Ok(None), negotiate::Action::MustNegotiate { remote_ref_target_known, } => { if !negotiate.add_wants(&mut arguments, remote_ref_target_known) { return Ok(None); } let mut rounds = Vec::new(); let is_stateless = arguments.is_stateless(!transport.connection_persists_across_multiple_requests()); let mut state = negotiate::one_round::State::new(is_stateless); let mut reader = 'negotiation: loop { let _round = gix_trace::detail!("negotiate round", round = rounds.len() + 1); progress.step(); progress.set_name(format!("negotiate (round {})", rounds.len() + 1)); if should_interrupt.load(Ordering::Relaxed) { return Err(Error::Negotiate(negotiate::Error::NegotiationFailed { rounds: rounds.len(), })); } let is_done = match negotiate.one_round(&mut state, &mut arguments, previous_response.as_ref()) { Ok((round, is_done)) => { rounds.push(round); is_done } Err(err) => { return Err(err.into()); } }; let mut reader = arguments.send(transport, is_done).await?; if sideband_all { setup_remote_progress(&mut progress, &mut reader, should_interrupt); } let response = crate::fetch::Response::from_line_reader(protocol_version, &mut reader, is_done, !is_done).await?; let has_pack = response.has_pack(); previous_response = Some(response); if has_pack { progress.step(); progress.set_name("receiving pack".into()); if !sideband_all { setup_remote_progress(&mut progress, &mut reader, should_interrupt); } break 'negotiation reader; } }; drop(negotiate_span); let mut previous_response = previous_response.expect("knowledge of a pack means a response was received"); previous_response.append_v1_shallow_updates(v1_shallow_updates); if !previous_response.shallow_updates().is_empty() 
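                // The server sent shallow boundary updates even though no shallow change was requested (so no
                // lock was acquired yet): either reject the shallow remote, or lock the `shallow` file now so
                // the updates can be persisted further below.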
&& shallow_lock.is_none() { if reject_shallow_remote { return Err(Error::RejectShallowRemote); } shallow_lock = acquire_shallow_lock(&shallow_file).map(Some)?; } #[cfg(feature = "async-client")] let mut rd = crate::futures_lite::io::BlockOn::new(reader); #[cfg(not(feature = "async-client"))] let mut rd = reader; let may_read_to_end = consume_pack(&mut rd, &mut progress, should_interrupt).map_err(|err| Error::ConsumePack(err.into()))?; #[cfg(feature = "async-client")] { reader = rd.into_inner(); } #[cfg(not(feature = "async-client"))] { reader = rd; } if may_read_to_end { // Assure the final flush packet is consumed. let has_read_to_end = reader.stopped_at().is_some(); #[cfg(feature = "async-client")] { if !has_read_to_end { futures_lite::io::copy(&mut reader, &mut futures_lite::io::sink()) .await .map_err(Error::ReadRemainingBytes)?; } } #[cfg(not(feature = "async-client"))] { if !has_read_to_end { std::io::copy(&mut reader, &mut std::io::sink()).map_err(Error::ReadRemainingBytes)?; } } } drop(reader); if let Some(shallow_lock) = shallow_lock { if !previous_response.shallow_updates().is_empty() { gix_shallow::write(shallow_lock, shallow_commits, previous_response.shallow_updates())?; } } Ok(Some(Outcome { last_response: previous_response, negotiate: NegotiateOutcome { action, rounds }, })) } } } fn acquire_shallow_lock(shallow_file: &Path) -> Result { gix_lock::File::acquire_to_update_resource(shallow_file, gix_lock::acquire::Fail::Immediately, None) .map_err(Into::into) } fn add_shallow_args( args: &mut Arguments, shallow: &Shallow, shallow_file: &std::path::Path, ) -> Result<(Option>, Option), Error> { let expect_change = *shallow != Shallow::NoChange; let shallow_lock = expect_change.then(|| acquire_shallow_lock(shallow_file)).transpose()?; let shallow_commits = gix_shallow::read(shallow_file)?; if (shallow_commits.is_some() || expect_change) && !args.can_use_shallow() { // NOTE: if this is an issue, we can always unshallow the repo ourselves. 
return Err(Error::MissingServerFeature { feature: "shallow", description: "shallow clones need server support to remain shallow, otherwise bigger than expected packs are sent effectively unshallowing the repository", }); } if let Some(shallow_commits) = &shallow_commits { for commit in shallow_commits.iter() { args.shallow(commit); } } match shallow { Shallow::NoChange => {} Shallow::DepthAtRemote(commits) => args.deepen(commits.get() as usize), Shallow::Deepen(commits) => { args.deepen(*commits as usize); args.deepen_relative(); } Shallow::Since { cutoff } => { args.deepen_since(cutoff.seconds); } Shallow::Exclude { remote_refs, since_cutoff, } => { if let Some(cutoff) = since_cutoff { args.deepen_since(cutoff.seconds); } for ref_ in remote_refs { args.deepen_not(ref_.as_ref().as_bstr()); } } } Ok((shallow_commits, shallow_lock)) } fn setup_remote_progress<'a>( progress: &mut dyn gix_features::progress::DynNestedProgress, reader: &mut Box + Unpin + 'a>, should_interrupt: &'a AtomicBool, ) { use crate::transport::client::ExtendedBufRead; reader.set_progress_handler(Some(Box::new({ let mut remote_progress = progress.add_child_with_id("remote".to_string(), ProgressId::RemoteProgress.into()); move |is_err: bool, data: &[u8]| { crate::RemoteProgress::translate_to_progress(is_err, data, &mut remote_progress); if should_interrupt.load(Ordering::Relaxed) { ProgressAction::Interrupt } else { ProgressAction::Continue } } }) as crate::transport::client::HandleProgress<'a>)); } gix-protocol-0.47.0/src/fetch/handshake.rs000064400000000000000000000017641046102023000165260ustar 00000000000000use gix_features::progress::Progress; use gix_transport::{client, Service}; use maybe_async::maybe_async; use crate::{ credentials, handshake::{Error, Outcome}, }; /// Perform a handshake with the server on the other side of `transport`, with `authenticate` being used if authentication /// turns out to be required. `extra_parameters` are the parameters `(name, optional value)` to add to the handshake, /// each time it is performed in case authentication is required. /// `progress` is used to inform about what's currently happening. #[allow(clippy::result_large_err)] #[maybe_async] pub async fn upload_pack( transport: T, authenticate: AuthFn, extra_parameters: Vec<(String, Option)>, progress: &mut impl Progress, ) -> Result where AuthFn: FnMut(credentials::helper::Action) -> credentials::protocol::Result, T: client::Transport, { crate::handshake(transport, Service::UploadPack, authenticate, extra_parameters, progress).await } gix-protocol-0.47.0/src/fetch/mod.rs000064400000000000000000000037721046102023000153600ustar 00000000000000/// A module providing low-level primitives to flexibly perform various `fetch` related activities. Note that the typesystem isn't used /// to assure they are always performed in the right order, the caller has to follow some parts of the protocol itself. /// /// ### Order for receiving a pack /// /// * [handshake](handshake()) /// * **ls-refs** /// * [get available refs by refspecs](RefMap::new()) /// * **fetch pack** /// * `negotiate` until a pack can be received (TBD) /// * [officially terminate the connection](crate::indicate_end_of_interaction()) /// - Consider wrapping the transport in [`SendFlushOnDrop`](crate::SendFlushOnDrop) to be sure the connection is terminated /// gracefully even if there is an application error. /// /// Note that this flow doesn't involve actually writing the pack, or indexing it. 
Nor does it contain machinery /// to write or update references based on the fetched remote references. /// /// Also, when the server supports [version 2](crate::transport::Protocol::V2) of the protocol, then each of the listed commands, /// `ls-refs` and `fetch` can be invoked multiple times in any order. // Note: for ease of use, this is tested in `gix` itself. The test-suite here uses a legacy implementation. mod arguments; pub use arguments::Arguments; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "fetch")] mod error; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "fetch")] pub use error::Error; /// pub mod response; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "fetch")] pub(crate) mod function; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "handshake")] mod handshake; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "handshake")] pub use handshake::upload_pack as handshake; #[cfg(feature = "fetch")] pub mod negotiate; /// #[cfg(feature = "fetch")] pub mod refmap; mod types; pub use types::*; gix-protocol-0.47.0/src/fetch/negotiate.rs000064400000000000000000000552771046102023000165670ustar 00000000000000//! A modules with primitives to perform negotiation as part of a fetch operation. //! //! The functions provided are called in a certain order: //! //! 1. [`mark_complete_and_common_ref()`] - initialize the [`negotiator`](gix_negotiate::Negotiator) with all state known on the remote. //! 2. [`add_wants()`] is called if the call at 1) returned [`Action::MustNegotiate`]. //! 3. [`one_round()`] is called for each negotiation round, providing information if the negotiation is done. use gix_date::SecondsSinceUnixEpoch; use gix_negotiate::Flags; use gix_ref::file::ReferenceExt; use std::borrow::Cow; use crate::fetch::{refmap, RefMap, Shallow, Tags}; type Queue = gix_revwalk::PriorityQueue; /// The error returned during [`one_round()`] or [`mark_complete_and_common_ref()`]. #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("We were unable to figure out what objects the server should send after {rounds} round(s)")] NegotiationFailed { rounds: usize }, #[error(transparent)] LookupCommitInGraph(#[from] gix_revwalk::graph::get_or_insert_default::Error), #[error(transparent)] OpenPackedRefsBuffer(#[from] gix_ref::packed::buffer::open::Error), #[error(transparent)] IO(#[from] std::io::Error), #[error(transparent)] InitRefIter(#[from] gix_ref::file::iter::loose_then_packed::Error), #[error(transparent)] PeelToId(#[from] gix_ref::peel::to_id::Error), #[error(transparent)] AlternateRefsAndObjects(Box), } /// Determines what should be done after [preparing the commit-graph for negotiation](mark_complete_and_common_ref). #[must_use] #[derive(Debug, Clone)] pub enum Action { /// None of the remote refs moved compared to our last recorded state (via tracking refs), so there is nothing to do at all, /// not even a ref update. NoChange, /// Don't negotiate, don't fetch the pack, skip right to updating the references. /// /// This happens if we already have all local objects even though the server seems to have changed. SkipToRefUpdate, /// We can't know for sure if fetching *is not* needed, so we go ahead and negotiate. MustNegotiate { /// Each `ref_map.mapping` has a slot here which is `true` if we have the object the remote ref points to, locally. 
remote_ref_target_known: Vec, }, } /// Key information about each round in the pack-negotiation, as produced by [`one_round()`]. #[derive(Debug, Clone, Copy)] pub struct Round { /// The amount of `HAVE` lines sent this round. /// /// Each `HAVE` is an object that we tell the server about which would acknowledge each one it has as well. pub haves_sent: usize, /// A total counter, over all previous rounds, indicating how many `HAVE`s we sent without seeing a single acknowledgement, /// i.e. the indication of a common object. /// /// This number maybe zero or be lower compared to the previous round if we have received at least one acknowledgement. pub in_vain: usize, /// The amount of haves we should send in this round. /// /// If the value is lower than `haves_sent` (the `HAVE` lines actually sent), the negotiation algorithm has run out of options /// which typically indicates the end of the negotiation phase. pub haves_to_send: usize, /// If `true`, the server reported, as response to our previous `HAVE`s, that at least one of them is in common by acknowledging it. /// /// This may also lead to the server responding with a pack. pub previous_response_had_at_least_one_in_common: bool, } /// This function is modeled after the similarly named one in the git codebase to mark known refs in a commit-graph. /// /// It to do the following: /// /// * figure out all advertised refs on the remote *that we already have* and keep track of the oldest one as cutoff date. /// * mark all of our own refs as tips for a traversal. /// * mark all their parents, recursively, up to (and including) the cutoff date up to which we have seen the servers commit that we have. /// * pass all known-to-be-common-with-remote commits to the negotiator as common commits. /// /// This is done so that we already find the most recent common commits, even if we are ahead, which is *potentially* better than /// what we would get if we would rely on tracking refs alone, particularly if one wouldn't trust the tracking refs for some reason. /// /// Note that git doesn't trust its own tracking refs as the server *might* have changed completely, for instance by force-pushing, so /// marking our local tracking refs as known is something that's actually not proven to be correct so it's not done. /// /// Additionally, it does what's done in `transport.c` and we check if a fetch is actually needed as at least one advertised ref changed. /// /// Finally, we also mark tips in the `negotiator` in one go to avoid traversing all refs twice, since we naturally encounter all tips during /// our own walk. /// /// Return whether we should negotiate, along with a queue for later use. /// /// # Parameters /// /// * `objects` /// - Access to the object database. *Note* that the `exists()` calls must not trigger a refresh of the ODB packs as plenty of them might fail, i.e. find on object. /// * `refs` /// - Access to the git references database. /// * `alternates` /// - A function that returns an iterator over `(refs, objects)` for each alternate repository, to assure all known objects are added also according to their tips. /// * `negotiator` /// - The implementation that performs the negotiation later, i.e. prepare wants and haves. /// * `graph` /// - The commit-graph for use by the `negotiator` - we populate it with tips to initialize the graph traversal. /// * `ref_map` /// - The references known on the remote, as previously obtained with [`RefMap::new()`]. /// * `shallow` /// - How to deal with shallow repositories. 
It does affect how negotiations are performed. /// * `mapping_is_ignored` /// - `f(mapping) -> bool` returns `true` if the given mapping should not participate in change tracking. /// - [`make_refmapping_ignore_predicate()`] is a typical implementation for this. #[allow(clippy::too_many_arguments)] pub fn mark_complete_and_common_ref<Out, F, E>( objects: &(impl gix_object::Find + gix_object::FindHeader + gix_object::Exists), refs: &gix_ref::file::Store, alternates: impl FnOnce() -> Result<Out, E>, negotiator: &mut dyn gix_negotiate::Negotiator, graph: &mut gix_negotiate::Graph<'_, '_>, ref_map: &RefMap, shallow: &Shallow, mapping_is_ignored: impl Fn(&refmap::Mapping) -> bool, ) -> Result<Action, Error> where E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>, Out: Iterator<Item = (gix_ref::file::Store, F)>, F: gix_object::Find, { let _span = gix_trace::detail!("mark_complete_and_common_ref", mappings = ref_map.mappings.len()); if ref_map.mappings.is_empty() { return Ok(Action::NoChange); } if let Shallow::Deepen(0) = shallow { // Avoid deepening (relative) with zero as it seems to upset the server. Git also doesn't actually // perform the negotiation for some reason (couldn't find it in code). return Ok(Action::NoChange); } if let Some(refmap::Mapping { remote: refmap::Source::Ref(crate::handshake::Ref::Unborn { .. }), .. }) = ref_map.mappings.last().filter(|_| ref_map.mappings.len() == 1) { // There is only an unborn branch, as the remote has an empty repository. This means there is nothing to do except for // possibly reproducing the unborn branch locally. return Ok(Action::SkipToRefUpdate); } // Compute the cut-off date by checking which of the refs advertised (and matched in refspecs) by the remote we have, // and keep the oldest one. let mut cutoff_date = None::<SecondsSinceUnixEpoch>; let mut num_mappings_with_change = 0; let mut remote_ref_target_known: Vec<bool> = std::iter::repeat(false).take(ref_map.mappings.len()).collect(); let mut remote_ref_included: Vec<bool> = std::iter::repeat(false).take(ref_map.mappings.len()).collect(); for (mapping_idx, mapping) in ref_map.mappings.iter().enumerate() { let want_id = mapping.remote.as_id(); let have_id = mapping.local.as_ref().and_then(|name| { // this is the only time git uses the peer-id. let r = refs.find(name).ok()?; r.target.try_id().map(ToOwned::to_owned) }); // Even for ignored mappings we want to know if the `want` is already present locally, so skip nothing else. if !mapping_is_ignored(mapping) { remote_ref_included[mapping_idx] = true; // Like git, we don't let known unchanged mappings participate in the tree traversal if want_id.zip(have_id).map_or(true, |(want, have)| want != have) { num_mappings_with_change += 1; } } if let Some(commit) = want_id .and_then(|id| graph.get_or_insert_commit(id.into(), |_| {}).transpose()) .transpose()? { remote_ref_target_known[mapping_idx] = true; cutoff_date = cutoff_date.unwrap_or_default().max(commit.commit_time).into(); } else if want_id.map_or(false, |maybe_annotated_tag| objects.exists(maybe_annotated_tag)) { remote_ref_target_known[mapping_idx] = true; } } if matches!(shallow, Shallow::NoChange) { if num_mappings_with_change == 0 { return Ok(Action::NoChange); } else if remote_ref_target_known .iter() .zip(remote_ref_included) .filter_map(|(known, included)| included.then_some(known)) .all(|known| *known) { return Ok(Action::SkipToRefUpdate); } } // color our commits as complete as identified by references, unconditionally // (`git` is conditional here based on `deepen`, but it doesn't make sense and it's hard to extract from history when that happened).
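// The queue is keyed by commit-time, so `mark_recent_complete_commits()` below can pop the most recent commits first and stop once it falls below the cutoff date.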
let mut queue = Queue::new(); mark_all_refs_in_repo(refs, objects, graph, &mut queue, Flags::COMPLETE)?; for (alt_refs, alt_objs) in alternates().map_err(|err| Error::AlternateRefsAndObjects(err.into()))? { mark_all_refs_in_repo(&alt_refs, &alt_objs, graph, &mut queue, Flags::COMPLETE)?; } // Keep track of the tips, which happen to be on our queue right, before we traverse the graph with cutoff. let tips = if let Some(cutoff) = cutoff_date { let tips = Cow::Owned(queue.clone()); // color all their parents up to the cutoff date, the oldest commit we know the server has. mark_recent_complete_commits(&mut queue, graph, cutoff)?; tips } else { Cow::Borrowed(&queue) }; gix_trace::detail!("mark known_common").into_scope(|| -> Result<_, Error> { // mark all complete advertised refs as common refs. for mapping in ref_map .mappings .iter() .zip(remote_ref_target_known.iter().copied()) // We need this filter as the graph wouldn't contain annotated tags. .filter_map(|(mapping, known)| (!known).then_some(mapping)) { let want_id = mapping.remote.as_id(); if let Some(common_id) = want_id .and_then(|id| graph.get(id).map(|c| (c, id))) .filter(|(c, _)| c.data.flags.contains(Flags::COMPLETE)) .map(|(_, id)| id) { negotiator.known_common(common_id.into(), graph)?; } } Ok(()) })?; // As negotiators currently may rely on getting `known_common` calls first and tips after, we adhere to that which is the only // reason we cached the set of tips. gix_trace::detail!("mark tips", num_tips = tips.len()).into_scope(|| -> Result<_, Error> { for tip in tips.iter_unordered() { negotiator.add_tip(*tip, graph)?; } Ok(()) })?; Ok(Action::MustNegotiate { remote_ref_target_known, }) } /// Create a predicate that checks if a refspec mapping should be ignored. /// /// We want to ignore mappings during negotiation if they would be handled implicitly by the server, which is the case /// when tags would be sent implicitly due to `Tags::Included`. pub fn make_refmapping_ignore_predicate(fetch_tags: Tags, ref_map: &RefMap) -> impl Fn(&refmap::Mapping) -> bool + '_ { // With included tags, we have to keep mappings of tags to handle them later when updating refs, but we don't want to // explicitly `want` them as the server will determine by itself which tags are pointing to a commit it wants to send. // If we would not exclude implicit tag mappings like this, we would get too much of the graph. let tag_refspec_to_ignore = matches!(fetch_tags, Tags::Included) .then(|| fetch_tags.to_refspec()) .flatten(); move |mapping| { tag_refspec_to_ignore.map_or(false, |tag_spec| { mapping .spec_index .implicit_index() .and_then(|idx| ref_map.extra_refspecs.get(idx)) .map_or(false, |spec| spec.to_ref() == tag_spec) }) } } /// Add all 'wants' to `arguments` once it's known negotiation is necessary. /// /// This is a call to be made when [`mark_complete_and_common_ref()`] returned [`Action::MustNegotiate`]. /// That variant also contains the `remote_ref_target_known` field which is supposed to be passed here. /// /// `objects` are used to see if remote ids are known here and are tags, in which case they are also added as 'haves' as /// [negotiators](gix_negotiate::Negotiator) don't see tags at all. /// /// * `ref_map` is the state of refs as known on the remote. /// * `shallow` defines if the history should be shallow. /// * `mapping_is_ignored` is typically initialized with [`make_refmapping_ignore_predicate`]. /// /// Returns `true` if at least one [want](crate::fetch::Arguments::want()) was added, or `false` otherwise. 
/// Note that not adding a single want can make the remote hang, so it's avoided on the client side by ending the fetch operation. pub fn add_wants( objects: &impl gix_object::FindHeader, arguments: &mut crate::fetch::Arguments, ref_map: &RefMap, remote_ref_target_known: &[bool], shallow: &Shallow, mapping_is_ignored: impl Fn(&refmap::Mapping) -> bool, ) -> bool { // When using shallow, we can't exclude `wants` as the remote won't send anything then. Thus, we have to resend everything // we have as want instead to get exactly the same graph, but possibly deepened. let is_shallow = !matches!(shallow, Shallow::NoChange); let mut has_want = false; let wants = ref_map .mappings .iter() .zip(remote_ref_target_known) .filter_map(|(m, known)| (is_shallow || !*known).then_some(m)) .filter(|m| !mapping_is_ignored(m)); for want in wants { let id_on_remote = want.remote.as_id(); if !arguments.can_use_ref_in_want() || matches!(want.remote, refmap::Source::ObjectId(_)) { if let Some(id) = id_on_remote { arguments.want(id); has_want = true; } } else { arguments.want_ref( want.remote .as_name() .expect("name available if this isn't an object id"), ); has_want = true; } let id_is_annotated_tag_we_have = id_on_remote .and_then(|id| objects.try_header(id).ok().flatten().map(|h| (id, h))) .filter(|(_, h)| h.kind == gix_object::Kind::Tag) .map(|(id, _)| id); if let Some(tag_on_remote) = id_is_annotated_tag_we_have { // Annotated tags are not handled at all by negotiators in the commit-graph - they only see commits and thus won't // ever add `have`s for tags. To correct for that, we add these haves here to avoid getting them sent again. arguments.have(tag_on_remote); } } has_want } /// Remove all commits that are more recent than the cut-off, which is the commit time of the oldest common commit we have with the server. fn mark_recent_complete_commits( queue: &mut Queue, graph: &mut gix_negotiate::Graph<'_, '_>, cutoff: SecondsSinceUnixEpoch, ) -> Result<(), Error> { let _span = gix_trace::detail!("mark_recent_complete", queue_len = queue.len()); while let Some(id) = queue .peek() .and_then(|(commit_time, id)| (commit_time >= &cutoff).then_some(*id)) { queue.pop_value(); let commit = graph.get(&id).expect("definitely set when adding tips or parents"); for parent_id in commit.parents.clone() { let mut was_complete = false; if let Some(parent) = graph .get_or_insert_commit(parent_id, |md| { was_complete = md.flags.contains(Flags::COMPLETE); md.flags |= Flags::COMPLETE; })? .filter(|_| !was_complete) { queue.insert(parent.commit_time, parent_id); } } } Ok(()) } fn mark_all_refs_in_repo( store: &gix_ref::file::Store, objects: &impl gix_object::Find, graph: &mut gix_negotiate::Graph<'_, '_>, queue: &mut Queue, mark: Flags, ) -> Result<(), Error> { let _span = gix_trace::detail!("mark_all_refs"); for local_ref in store.iter()?.all()? { let mut local_ref = local_ref?; let id = local_ref.peel_to_id_in_place_packed( store, objects, store.cached_packed_buffer()?.as_ref().map(|b| &***b), )?; let mut is_complete = false; if let Some(commit) = graph .get_or_insert_commit(id, |md| { is_complete = md.flags.contains(Flags::COMPLETE); md.flags |= mark; })? .filter(|_| !is_complete) { queue.insert(commit.commit_time, id); }; } Ok(()) } /// pub mod one_round { /// State to keep between individual [rounds](super::one_round()). #[derive(Clone, Debug)] pub struct State { /// The amount of haves to send the next round. /// It's initialized with the standard window size for negotations. 
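/// It is adjusted after each round via `gix_negotiate::window_size()`, depending on whether the connection is stateless.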
pub haves_to_send: usize, /// Is turned `true` if the remote as confirmed any common commit so far. pub(super) seen_ack: bool, /// The amount of haves we have sent that didn't have a match on the remote. /// /// The higher this number, the more time was wasted. pub(super) in_vain: usize, /// Commits we have in common. /// /// Only set when we are stateless as we have to resend known common commits each round. pub(super) common_commits: Option>, } impl State { /// Create a new instance. /// /// setting `connection_is_stateless` accordingly which affects the amount of haves to send. pub fn new(connection_is_stateless: bool) -> Self { State { haves_to_send: gix_negotiate::window_size(connection_is_stateless, None), seen_ack: false, in_vain: 0, common_commits: connection_is_stateless.then(Vec::new), } } } impl State { /// Return `true` if the transports connection is stateless. fn connection_is_stateless(&self) -> bool { self.common_commits.is_some() } pub(super) fn adjust_window_size(&mut self) { self.haves_to_send = gix_negotiate::window_size(self.connection_is_stateless(), Some(self.haves_to_send)); } } } /// Prepare to negotiate a single round in the process of letting the remote know what we have, and have in common. /// /// Note that this function only configures `arguments`, no IO is performed. /// /// The operation is performed with `negotiator` and `graph`, sending the amount of `haves_to_send` after possibly /// making the common commits (as sent by the remote) known to `negotiator` using `previous_response`, if this isn't the first round. /// All [commits we have](crate::fetch::Arguments::have()) are added to `arguments` accordingly. /// /// Returns information about this round, and `true` if we are done and should stop negotiating *after* the `arguments` have /// been sent to the remote one last time. pub fn one_round( negotiator: &mut dyn gix_negotiate::Negotiator, graph: &mut gix_negotiate::Graph<'_, '_>, state: &mut one_round::State, arguments: &mut crate::fetch::Arguments, previous_response: Option<&crate::fetch::Response>, ) -> Result<(Round, bool), Error> { let mut seen_ack = false; if let Some(response) = previous_response { use crate::fetch::response::Acknowledgement; for ack in response.acknowledgements() { match ack { Acknowledgement::Common(id) => { seen_ack = true; negotiator.in_common_with_remote(*id, graph)?; if let Some(common) = &mut state.common_commits { common.push(*id); } } Acknowledgement::Ready => { // NOTE: In git, there is some logic dealing with whether to expect a DELIM or FLUSH package, // but we handle this with peeking. } Acknowledgement::Nak => {} } } } // `common` is set only if this is a stateless transport, and we repeat previously confirmed common commits as HAVE, because // we are not going to repeat them otherwise. if let Some(common) = &mut state.common_commits { for have_id in common { arguments.have(have_id); } } let mut haves_added = 0; for have_id in (0..state.haves_to_send).map_while(|_| negotiator.next_have(graph)) { arguments.have(have_id?); haves_added += 1; } // Note that we are differing from the git implementation, which does an extra-round of with no new haves sent at all. // For us, it seems better to just say we are done when we know we are done, as potentially additional acks won't affect the // queue of our implementation at all (so the negotiator won't come up with more haves next time either). 
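// An acknowledged commit resets the in-vain counter, otherwise every `HAVE` sent this round counts towards the give-up threshold checked below.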
if seen_ack { state.in_vain = 0; } state.seen_ack |= seen_ack; state.in_vain += haves_added; let round = Round { haves_sent: haves_added, in_vain: state.in_vain, haves_to_send: state.haves_to_send, previous_response_had_at_least_one_in_common: seen_ack, }; let is_done = haves_added != state.haves_to_send || (state.seen_ack && state.in_vain >= 256); state.adjust_window_size(); Ok((round, is_done)) } gix-protocol-0.47.0/src/fetch/refmap/init.rs000064400000000000000000000154411046102023000170120ustar 00000000000000use std::collections::HashSet; use crate::fetch; use crate::fetch::refmap::{Mapping, Source, SpecIndex}; use crate::fetch::RefMap; use crate::transport::client::Transport; use bstr::{BString, ByteVec}; use gix_features::progress::Progress; /// The error returned by [`RefMap::new()`]. #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("The object format {format:?} as used by the remote is unsupported")] UnknownObjectFormat { format: BString }, #[error(transparent)] MappingValidation(#[from] gix_refspec::match_group::validate::Error), #[error(transparent)] ListRefs(#[from] crate::ls_refs::Error), } /// For use in [`RefMap::new()`]. #[derive(Debug, Clone)] pub struct Options { /// Use a two-component prefix derived from the ref-spec's source, like `refs/heads/` to let the server pre-filter refs /// with great potential for savings in traffic and local CPU time. Defaults to `true`. pub prefix_from_spec_as_filter_on_remote: bool, /// A list of refspecs to use as implicit refspecs which won't be saved or otherwise be part of the remote in question. /// /// This is useful for handling `remote..tagOpt` for example. pub extra_refspecs: Vec, } impl Default for Options { fn default() -> Self { Options { prefix_from_spec_as_filter_on_remote: true, extra_refspecs: Vec::new(), } } } impl RefMap { /// Create a new instance by obtaining all references on the remote that have been filtered through our remote's /// for _fetching_. /// /// A [context](fetch::Context) is provided to bundle what would be additional parameters, /// and [options](Options) are used to further configure the call. /// /// * `progress` is used if `ls-refs` is invoked on the remote. Always the case when V2 is used. /// * `fetch_refspecs` are all explicit refspecs to identify references on the remote that you are interested in. /// Note that these are copied to [`RefMap::refspecs`] for convenience, as `RefMap::mappings` refer to them by index. #[allow(clippy::result_large_err)] #[maybe_async::maybe_async] pub async fn new( mut progress: impl Progress, fetch_refspecs: &[gix_refspec::RefSpec], fetch::Context { handshake, transport, user_agent, trace_packetlines, }: fetch::Context<'_, T>, Options { prefix_from_spec_as_filter_on_remote, extra_refspecs, }: Options, ) -> Result where T: Transport, { let _span = gix_trace::coarse!("gix_protocol::fetch::RefMap::new()"); let null = gix_hash::ObjectId::null(gix_hash::Kind::Sha1); // OK to hardcode Sha1, it's not supposed to match, ever. 
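// Explicit refspecs come first so that match indices below `num_explicit_specs` can later be classified as `SpecIndex::ExplicitInRemote`.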
let all_refspecs = { let mut s: Vec<_> = fetch_refspecs.to_vec(); s.extend(extra_refspecs.clone()); s }; let remote_refs = match handshake.refs.take() { Some(refs) => refs, None => { crate::ls_refs( transport, &handshake.capabilities, |_capabilities, arguments, features| { features.push(user_agent); if prefix_from_spec_as_filter_on_remote { let mut seen = HashSet::new(); for spec in &all_refspecs { let spec = spec.to_ref(); if seen.insert(spec.instruction()) { let mut prefixes = Vec::with_capacity(1); spec.expand_prefixes(&mut prefixes); for mut prefix in prefixes { prefix.insert_str(0, "ref-prefix "); arguments.push(prefix); } } } } Ok(crate::ls_refs::Action::Continue) }, &mut progress, trace_packetlines, ) .await? } }; let num_explicit_specs = fetch_refspecs.len(); let group = gix_refspec::MatchGroup::from_fetch_specs(all_refspecs.iter().map(gix_refspec::RefSpec::to_ref)); let (res, fixes) = group .match_remotes(remote_refs.iter().map(|r| { let (full_ref_name, target, object) = r.unpack(); gix_refspec::match_group::Item { full_ref_name, target: target.unwrap_or(&null), object, } })) .validated()?; let mappings = res.mappings; let mappings = mappings .into_iter() .map(|m| Mapping { remote: m.item_index.map_or_else( || { Source::ObjectId(match m.lhs { gix_refspec::match_group::SourceRef::ObjectId(id) => id, _ => unreachable!("no item index implies having an object id"), }) }, |idx| Source::Ref(remote_refs[idx].clone()), ), local: m.rhs.map(std::borrow::Cow::into_owned), spec_index: if m.spec_index < num_explicit_specs { SpecIndex::ExplicitInRemote(m.spec_index) } else { SpecIndex::Implicit(m.spec_index - num_explicit_specs) }, }) .collect(); let object_hash = extract_object_format(handshake)?; Ok(RefMap { mappings, refspecs: fetch_refspecs.to_vec(), extra_refspecs, fixes, remote_refs, object_hash, }) } } /// Assume sha1 if server says nothing, otherwise configure anything beyond sha1 in the local repo configuration #[allow(clippy::result_large_err)] fn extract_object_format(outcome: &crate::handshake::Outcome) -> Result { use bstr::ByteSlice; let object_hash = if let Some(object_format) = outcome.capabilities.capability("object-format").and_then(|c| c.value()) { let object_format = object_format.to_str().map_err(|_| Error::UnknownObjectFormat { format: object_format.into(), })?; match object_format { "sha1" => gix_hash::Kind::Sha1, unknown => return Err(Error::UnknownObjectFormat { format: unknown.into() }), } } else { gix_hash::Kind::Sha1 }; Ok(object_hash) } gix-protocol-0.47.0/src/fetch/refmap/mod.rs000064400000000000000000000102511046102023000166200ustar 00000000000000/// #[cfg(any(feature = "blocking-client", feature = "async-client"))] pub mod init; /// Either an object id that the remote has or the matched remote ref itself. #[derive(Debug, Clone)] pub enum Source { /// An object id, as the matched ref-spec was an object id itself. ObjectId(gix_hash::ObjectId), /// The remote reference that matched the ref-specs name. Ref(crate::handshake::Ref), } impl Source { /// Return either the direct object id we refer to or the direct target that a reference refers to. /// The latter may be a direct or a symbolic reference. /// If unborn, `None` is returned. pub fn as_id(&self) -> Option<&gix_hash::oid> { match self { Source::ObjectId(id) => Some(id), Source::Ref(r) => r.unpack().1, } } /// Return the target that this symbolic ref is pointing to, or `None` if it is no symbolic ref. 
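/// Note that unborn refs also count as symbolic here, as they carry the name of the ref they will eventually point to.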
pub fn as_target(&self) -> Option<&bstr::BStr> { match self { Source::ObjectId(_) => None, Source::Ref(r) => match r { crate::handshake::Ref::Peeled { .. } | crate::handshake::Ref::Direct { .. } => None, crate::handshake::Ref::Symbolic { target, .. } | crate::handshake::Ref::Unborn { target, .. } => { Some(target.as_ref()) } }, } } /// Returns the peeled id of this instance, that is the object that can't be de-referenced anymore. pub fn peeled_id(&self) -> Option<&gix_hash::oid> { match self { Source::ObjectId(id) => Some(id), Source::Ref(r) => { let (_name, target, peeled) = r.unpack(); peeled.or(target) } } } /// Return ourselves as the full name of the reference we represent, or `None` if this source isn't a reference but an object. pub fn as_name(&self) -> Option<&bstr::BStr> { match self { Source::ObjectId(_) => None, Source::Ref(r) => match r { crate::handshake::Ref::Unborn { full_ref_name, .. } | crate::handshake::Ref::Symbolic { full_ref_name, .. } | crate::handshake::Ref::Direct { full_ref_name, .. } | crate::handshake::Ref::Peeled { full_ref_name, .. } => Some(full_ref_name.as_ref()), }, } } } /// An index into various lists of refspecs that have been used in a [Mapping] of remote references to local ones. #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum SpecIndex { /// An index into the _refspecs of the remote_ that triggered a fetch operation. /// These refspecs are explicit and visible to the user. ExplicitInRemote(usize), /// An index into the list of [extra refspecs](crate::fetch::RefMap::extra_refspecs) that are implicit /// to a particular fetch operation. Implicit(usize), } impl SpecIndex { /// Depending on our index variant, get the index either from `refspecs` or from `extra_refspecs` for `Implicit` variants. pub fn get<'a>( self, refspecs: &'a [gix_refspec::RefSpec], extra_refspecs: &'a [gix_refspec::RefSpec], ) -> Option<&'a gix_refspec::RefSpec> { match self { SpecIndex::ExplicitInRemote(idx) => refspecs.get(idx), SpecIndex::Implicit(idx) => extra_refspecs.get(idx), } } /// If this is an `Implicit` variant, return its index. pub fn implicit_index(self) -> Option { match self { SpecIndex::Implicit(idx) => Some(idx), SpecIndex::ExplicitInRemote(_) => None, } } } /// A mapping between a single remote reference and its advertised objects to a local destination which may or may not exist. #[derive(Debug, Clone)] pub struct Mapping { /// The reference on the remote side, along with information about the objects they point to as advertised by the server. pub remote: Source, /// The local tracking reference to update after fetching the object visible via `remote`. pub local: Option, /// The index into the fetch ref-specs used to produce the mapping, allowing it to be recovered. pub spec_index: SpecIndex, } gix-protocol-0.47.0/src/fetch/response/async_io.rs000064400000000000000000000175201046102023000202370ustar 00000000000000use std::io; use gix_transport::{client, Protocol}; use crate::fetch::{ response, response::shallow_update_from_line, response::{Acknowledgement, ShallowUpdate, WantedRef}, Response, }; async fn parse_v2_section( line: &mut String, reader: &mut (impl client::ExtendedBufRead<'_> + Unpin), res: &mut Vec, parse: impl Fn(&str) -> Result, ) -> Result { line.clear(); while reader.readline_str(line).await? != 0 { res.push(parse(line)?); line.clear(); } // End of message, or end of section? 
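// A delimiter packet merely separates the sections of a single V2 response, while anything else (typically a flush packet) ends it.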
Ok(if reader.stopped_at() == Some(client::MessageKind::Delimiter) { // try reading more sections reader.reset(Protocol::V2); false } else { // we are done, there is no pack true }) } impl Response { /// Parse a response of the given `version` of the protocol from `reader`. /// /// `client_expects_pack` is only relevant for V1 stateful connections, and if `false`, causes us to stop parsing when seeing `NAK`, /// and if `true` we will keep parsing until we get a pack as the client already signalled to the server that it's done. /// This way of doing things allows us to exploit knowledge about more recent versions of the protocol, which keeps code easier /// and more localized without having to support all the cruft that there is. /// /// `wants_to_negotiate` should be `false` for clones which is when we don't have sent any haves. The reason for this flag to exist /// is to predict how to parse V1 output only, and neither `client_expects_pack` nor `wants_to_negotiate` are relevant for V2. /// This ugliness is in place to avoid having to resort to an [an even more complex ugliness](https://github.com/git/git/blob/9e49351c3060e1fa6e0d2de64505b7becf157f28/fetch-pack.c#L583-L594) /// that `git` has to use to predict how many acks are supposed to be read. We also genuinely hope that this covers it all…. pub async fn from_line_reader( version: Protocol, reader: &mut (impl client::ExtendedBufRead<'_> + Unpin), client_expects_pack: bool, wants_to_negotiate: bool, ) -> Result { match version { Protocol::V0 | Protocol::V1 => { let mut line = String::new(); let mut acks = Vec::::new(); let mut shallows = Vec::::new(); let mut saw_ready = false; let has_pack = 'lines: loop { line.clear(); let peeked_line = match reader.peek_data_line().await { Some(Ok(Ok(line))) => String::from_utf8_lossy(line), // This special case (hang/block forever) deals with a single NAK being a legitimate EOF sometimes // Note that this might block forever in stateful connections as there it's not really clear // if something will be following or not by just looking at the response. Instead you have to know // [a lot](https://github.com/git/git/blob/9e49351c3060e1fa6e0d2de64505b7becf157f28/fetch-pack.c#L583-L594) // to deal with this correctly. // For now this is acceptable, as V2 can be used as a workaround, which also is the default. Some(Err(err)) if err.kind() == io::ErrorKind::UnexpectedEof => break 'lines false, Some(Err(err)) => return Err(err.into()), Some(Ok(Err(err))) => return Err(err.into()), None => { // maybe we saw a shallow flush packet, let's reset and retry debug_assert_eq!( reader.stopped_at(), Some(client::MessageKind::Flush), "If this isn't a flush packet, we don't know what's going on" ); reader.readline_str(&mut line).await?; reader.reset(Protocol::V1); match reader.peek_data_line().await { Some(Ok(Ok(line))) => String::from_utf8_lossy(line), Some(Err(err)) => return Err(err.into()), Some(Ok(Err(err))) => return Err(err.into()), None => break 'lines false, // EOF } } }; if Response::parse_v1_ack_or_shallow_or_assume_pack(&mut acks, &mut shallows, &peeked_line) { break 'lines true; } assert_ne!( reader.readline_str(&mut line).await?, 0, "consuming a peeked line works" ); // When the server sends ready, we know there is going to be a pack so no need to stop early. 
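// A trailing NAK usually ends this round, except during clones (nothing to negotiate) where we keep reading until the pack arrives.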
saw_ready |= matches!(acks.last(), Some(Acknowledgement::Ready)); if let Some(Acknowledgement::Nak) = acks.last().filter(|_| !client_expects_pack || !saw_ready) { if !wants_to_negotiate { continue; } break 'lines false; } }; Ok(Response { acks, shallows, wanted_refs: vec![], has_pack, }) } Protocol::V2 => { // NOTE: We only read acknowledgements and scrub to the pack file, until we have use for the other features let mut line = String::new(); reader.reset(Protocol::V2); let mut acks = Vec::::new(); let mut shallows = Vec::::new(); let mut wanted_refs = Vec::::new(); let has_pack = 'section: loop { line.clear(); if reader.readline_str(&mut line).await? == 0 { return Err(response::Error::Io(io::Error::new( io::ErrorKind::UnexpectedEof, "Could not read message headline", ))); }; match line.trim_end() { "acknowledgments" => { if parse_v2_section(&mut line, reader, &mut acks, Acknowledgement::from_line).await? { break 'section false; } } "shallow-info" => { if parse_v2_section(&mut line, reader, &mut shallows, shallow_update_from_line).await? { break 'section false; } } "wanted-refs" => { if parse_v2_section(&mut line, reader, &mut wanted_refs, WantedRef::from_line).await? { break 'section false; } } "packfile" => { // what follows is the packfile itself, which can be read with a sideband enabled reader break 'section true; } _ => return Err(response::Error::UnknownSectionHeader { header: line }), } }; Ok(Response { acks, shallows, wanted_refs, has_pack, }) } } } } gix-protocol-0.47.0/src/fetch/response/blocking_io.rs000064400000000000000000000172621046102023000207150ustar 00000000000000use std::io; use gix_transport::{client, Protocol}; use crate::fetch::response::shallow_update_from_line; use crate::fetch::{ response, response::{Acknowledgement, ShallowUpdate, WantedRef}, Response, }; fn parse_v2_section<'a, T>( line: &mut String, reader: &mut impl client::ExtendedBufRead<'a>, res: &mut Vec, parse: impl Fn(&str) -> Result, ) -> Result { line.clear(); while reader.readline_str(line)? != 0 { res.push(parse(line)?); line.clear(); } // End of message, or end of section? Ok(if reader.stopped_at() == Some(client::MessageKind::Delimiter) { // try reading more sections reader.reset(Protocol::V2); false } else { // we are done, there is no pack true }) } impl Response { /// Parse a response of the given `version` of the protocol from `reader`. /// /// `client_expects_pack` is only relevant for V1 stateful connections, and if `false`, causes us to stop parsing when seeing `NAK`, /// and if `true` we will keep parsing until we get a pack as the client already signalled to the server that it's done. /// This way of doing things allows us to exploit knowledge about more recent versions of the protocol, which keeps code easier /// and more localized without having to support all the cruft that there is. /// /// `wants_to_negotiate` should be `false` for clones which is when we don't have sent any haves. The reason for this flag to exist /// is to predict how to parse V1 output only, and neither `client_expects_pack` nor `wants_to_negotiate` are relevant for V2. /// This ugliness is in place to avoid having to resort to an [an even more complex ugliness](https://github.com/git/git/blob/9e49351c3060e1fa6e0d2de64505b7becf157f28/fetch-pack.c#L583-L594) /// that `git` has to use to predict how many acks are supposed to be read. We also genuinely hope that this covers it all…. 
pub fn from_line_reader<'a>( version: Protocol, reader: &mut impl client::ExtendedBufRead<'a>, client_expects_pack: bool, wants_to_negotiate: bool, ) -> Result { match version { Protocol::V0 | Protocol::V1 => { let mut line = String::new(); let mut acks = Vec::::new(); let mut shallows = Vec::::new(); let mut saw_ready = false; let has_pack = 'lines: loop { line.clear(); let peeked_line = match reader.peek_data_line() { Some(Ok(Ok(line))) => String::from_utf8_lossy(line), // This special case (hang/block forever) deals with a single NAK being a legitimate EOF sometimes // Note that this might block forever in stateful connections as there it's not really clear // if something will be following or not by just looking at the response. Instead you have to know // [a lot](https://github.com/git/git/blob/9e49351c3060e1fa6e0d2de64505b7becf157f28/fetch-pack.c#L583-L594) // to deal with this correctly. // For now this is acceptable, as V2 can be used as a workaround, which also is the default. Some(Err(err)) if err.kind() == io::ErrorKind::UnexpectedEof => break 'lines false, Some(Err(err)) => return Err(err.into()), Some(Ok(Err(err))) => return Err(err.into()), None => { // maybe we saw a shallow flush packet, let's reset and retry debug_assert_eq!( reader.stopped_at(), Some(client::MessageKind::Flush), "If this isn't a flush packet, we don't know what's going on" ); reader.readline_str(&mut line)?; reader.reset(Protocol::V1); match reader.peek_data_line() { Some(Ok(Ok(line))) => String::from_utf8_lossy(line), Some(Err(err)) => return Err(err.into()), Some(Ok(Err(err))) => return Err(err.into()), None => break 'lines false, // EOF } } }; if Response::parse_v1_ack_or_shallow_or_assume_pack(&mut acks, &mut shallows, &peeked_line) { break 'lines true; } assert_ne!(reader.readline_str(&mut line)?, 0, "consuming a peeked line works"); // When the server sends ready, we know there is going to be a pack so no need to stop early. saw_ready |= matches!(acks.last(), Some(Acknowledgement::Ready)); if let Some(Acknowledgement::Nak) = acks.last().filter(|_| !client_expects_pack || !saw_ready) { if !wants_to_negotiate { continue; } break 'lines false; } }; Ok(Response { acks, shallows, wanted_refs: vec![], has_pack, }) } Protocol::V2 => { // NOTE: We only read acknowledgements and scrub to the pack file, until we have use for the other features let mut line = String::new(); reader.reset(Protocol::V2); let mut acks = Vec::::new(); let mut shallows = Vec::::new(); let mut wanted_refs = Vec::::new(); let has_pack = 'section: loop { line.clear(); if reader.readline_str(&mut line)? == 0 { return Err(response::Error::Io(io::Error::new( io::ErrorKind::UnexpectedEof, "Could not read message headline", ))); }; match line.trim_end() { "acknowledgments" => { if parse_v2_section(&mut line, reader, &mut acks, Acknowledgement::from_line)? { break 'section false; } } "shallow-info" => { if parse_v2_section(&mut line, reader, &mut shallows, shallow_update_from_line)? { break 'section false; } } "wanted-refs" => { if parse_v2_section(&mut line, reader, &mut wanted_refs, WantedRef::from_line)? 
{ break 'section false; } } "packfile" => { // what follows is the packfile itself, which can be read with a sideband enabled reader break 'section true; } _ => return Err(response::Error::UnknownSectionHeader { header: line }), } }; Ok(Response { acks, shallows, wanted_refs, has_pack, }) } } } } gix-protocol-0.47.0/src/fetch/response/mod.rs000064400000000000000000000222141046102023000172060ustar 00000000000000use bstr::BString; use gix_transport::{client, Protocol}; use crate::command::Feature; use crate::fetch::Response; /// The error returned in the [response module][crate::fetch::response]. #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("Failed to read from line reader")] Io(#[source] std::io::Error), #[error(transparent)] UploadPack(#[from] gix_transport::packetline::read::Error), #[error(transparent)] Transport(#[from] client::Error), #[error("Currently we require feature {feature:?}, which is not supported by the server")] MissingServerCapability { feature: &'static str }, #[error("Encountered an unknown line prefix in {line:?}")] UnknownLineType { line: String }, #[error("Unknown or unsupported header: {header:?}")] UnknownSectionHeader { header: String }, } impl From for Error { fn from(err: std::io::Error) -> Self { if err.kind() == std::io::ErrorKind::Other { match err.into_inner() { Some(err) => match err.downcast::() { Ok(err) => Error::UploadPack(*err), Err(err) => Error::Io(std::io::Error::new(std::io::ErrorKind::Other, err)), }, None => Error::Io(std::io::ErrorKind::Other.into()), } } else { Error::Io(err) } } } impl gix_transport::IsSpuriousError for Error { fn is_spurious(&self) -> bool { match self { Error::Io(err) => err.is_spurious(), Error::Transport(err) => err.is_spurious(), _ => false, } } } /// An 'ACK' line received from the server. #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Acknowledgement { /// The contained `id` is in common. Common(gix_hash::ObjectId), /// The server is ready to receive more lines. Ready, /// The server isn't ready yet. Nak, } pub use gix_shallow::Update as ShallowUpdate; /// A wanted-ref line received from the server. #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct WantedRef { /// The object id of the wanted ref, as seen by the server. pub id: gix_hash::ObjectId, /// The name of the ref, as requested by the client as a `want-ref` argument. pub path: BString, } /// Parse a `ShallowUpdate` from a `line` as received to the server. pub fn shallow_update_from_line(line: &str) -> Result { match line.trim_end().split_once(' ') { Some((prefix, id)) => { let id = gix_hash::ObjectId::from_hex(id.as_bytes()) .map_err(|_| Error::UnknownLineType { line: line.to_owned() })?; Ok(match prefix { "shallow" => ShallowUpdate::Shallow(id), "unshallow" => ShallowUpdate::Unshallow(id), _ => return Err(Error::UnknownLineType { line: line.to_owned() }), }) } None => Err(Error::UnknownLineType { line: line.to_owned() }), } } impl Acknowledgement { /// Parse an `Acknowledgement` from a `line` as received to the server. 
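/// Recognized lines are `ready` (V2), `NAK` (V1), and `ACK <id>` optionally followed by `common` or `ready`. /// For example, `Acknowledgement::from_line("NAK")` yields `Ok(Acknowledgement::Nak)`.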
pub fn from_line(line: &str) -> Result { let mut tokens = line.trim_end().splitn(3, ' '); match (tokens.next(), tokens.next(), tokens.next()) { (Some(first), id, description) => Ok(match first { "ready" => Acknowledgement::Ready, // V2 "NAK" => Acknowledgement::Nak, // V1 "ACK" => { let id = match id { Some(id) => gix_hash::ObjectId::from_hex(id.as_bytes()) .map_err(|_| Error::UnknownLineType { line: line.to_owned() })?, None => return Err(Error::UnknownLineType { line: line.to_owned() }), }; if let Some(description) = description { match description { "common" => {} "ready" => return Ok(Acknowledgement::Ready), _ => return Err(Error::UnknownLineType { line: line.to_owned() }), } } Acknowledgement::Common(id) } _ => return Err(Error::UnknownLineType { line: line.to_owned() }), }), (None, _, _) => Err(Error::UnknownLineType { line: line.to_owned() }), } } /// Returns the hash of the acknowledged object if this instance acknowledges a common one. pub fn id(&self) -> Option<&gix_hash::ObjectId> { match self { Acknowledgement::Common(id) => Some(id), _ => None, } } } impl WantedRef { /// Parse a `WantedRef` from a `line` as received from the server. pub fn from_line(line: &str) -> Result { match line.trim_end().split_once(' ') { Some((id, path)) => { let id = gix_hash::ObjectId::from_hex(id.as_bytes()) .map_err(|_| Error::UnknownLineType { line: line.to_owned() })?; Ok(WantedRef { id, path: path.into() }) } None => Err(Error::UnknownLineType { line: line.to_owned() }), } } } impl Response { /// Return true if the response has a pack which can be read next. pub fn has_pack(&self) -> bool { self.has_pack } /// Return an error if the given `features` don't contain the required ones (the ones this implementation needs) /// for the given `version` of the protocol. /// /// Even though technically any set of features supported by the server could work, we only implement the ones that /// make it easy to maintain all versions with a single code base that aims to be and remain maintainable. pub fn check_required_features(version: Protocol, features: &[Feature]) -> Result<(), Error> { match version { Protocol::V0 | Protocol::V1 => { let has = |name: &str| features.iter().any(|f| f.0 == name); // Let's focus on V2 standards, and simply not support old servers to keep our code simpler if !has("multi_ack_detailed") { return Err(Error::MissingServerCapability { feature: "multi_ack_detailed", }); } // It's easy to NOT do sideband for us, but then again, everyone supports it. // CORRECTION: If side-band is off, it would send the packfile without packet line encoding, // which is nothing we ever want to deal with (despite it being more efficient). In V2, this // is not even an option anymore, sidebands are always present. if !has("side-band") && !has("side-band-64k") { return Err(Error::MissingServerCapability { feature: "side-band OR side-band-64k", }); } } Protocol::V2 => {} } Ok(()) } /// Return all acknowledgements [parsed previously][Response::from_line_reader()]. pub fn acknowledgements(&self) -> &[Acknowledgement] { &self.acks } /// Return all shallow update lines [parsed previously][Response::from_line_reader()]. pub fn shallow_updates(&self) -> &[ShallowUpdate] { &self.shallows } /// Append the given `updates` which may have been obtained from a /// (handshake::Outcome)[crate::handshake::Outcome::v1_shallow_updates]. /// /// In V2, these are received as part of the pack, but V1 sends them early, so we /// offer to re-integrate them here. 
pub fn append_v1_shallow_updates(&mut self, updates: Option>) { self.shallows.extend(updates.into_iter().flatten()); } /// Return all wanted-refs [parsed previously][Response::from_line_reader()]. pub fn wanted_refs(&self) -> &[WantedRef] { &self.wanted_refs } } #[cfg(any(feature = "async-client", feature = "blocking-client"))] impl Response { /// with a friendly server, we just assume that a non-ack line is a pack line /// which is our hint to stop here. fn parse_v1_ack_or_shallow_or_assume_pack( acks: &mut Vec, shallows: &mut Vec, peeked_line: &str, ) -> bool { match Acknowledgement::from_line(peeked_line) { Ok(ack) => match ack.id() { Some(id) => { if !acks.iter().any(|a| a.id() == Some(id)) { acks.push(ack); } } None => acks.push(ack), }, Err(_) => match shallow_update_from_line(peeked_line) { Ok(shallow) => { shallows.push(shallow); } Err(_) => return true, }, }; false } } #[cfg(feature = "async-client")] mod async_io; #[cfg(feature = "blocking-client")] mod blocking_io; gix-protocol-0.47.0/src/fetch/types.rs000064400000000000000000000263421046102023000157430ustar 00000000000000use crate::fetch::response::{Acknowledgement, ShallowUpdate, WantedRef}; use std::path::PathBuf; /// Options for use in [`fetch()`](`crate::fetch()`) #[derive(Debug, Clone)] pub struct Options<'a> { /// The path to the file containing the shallow commit boundary. /// /// When needed, it will be locked in preparation for being modified. pub shallow_file: PathBuf, /// How to deal with shallow repositories. It does affect how negotiations are performed. pub shallow: &'a Shallow, /// Describe how to handle tags when fetching. pub tags: Tags, /// If `true`, if we fetch from a remote that only offers shallow clones, the operation will fail with an error /// instead of writing the shallow boundary to the shallow file. pub reject_shallow_remote: bool, } /// For use in [`RefMap::new()`] and [`fetch`](crate::fetch()). #[cfg(feature = "handshake")] pub struct Context<'a, T> { /// The outcome of the handshake performed with the remote. /// /// Note that it's mutable as depending on the protocol, it may contain refs that have been sent unconditionally. pub handshake: &'a mut crate::handshake::Outcome, /// The transport to use when making an `ls-refs` or `fetch` call. /// /// This is always done if the underlying protocol is V2, which is implied by the absence of refs in the `handshake` outcome. pub transport: &'a mut T, /// How to self-identify during the `ls-refs` call in [`RefMap::new()`] or the `fetch` call in [`fetch()`](crate::fetch()). /// /// This could be read from the `gitoxide.userAgent` configuration variable. pub user_agent: (&'static str, Option>), /// If `true`, output all packetlines using the the `gix-trace` machinery. pub trace_packetlines: bool, } #[cfg(feature = "fetch")] mod with_fetch { use crate::fetch; use crate::fetch::{negotiate, refmap}; /// For use in [`fetch`](crate::fetch()). pub struct NegotiateContext<'a, 'b, 'c, Objects, Alternates, AlternatesOut, AlternatesErr, Find> where Objects: gix_object::Find + gix_object::FindHeader + gix_object::Exists, Alternates: FnOnce() -> Result, AlternatesErr: Into>, AlternatesOut: Iterator, Find: gix_object::Find, { /// Access to the object database. /// *Note* that the `exists()` calls must not trigger a refresh of the ODB packs as plenty of them might fail, i.e. find on object. pub objects: &'a Objects, /// Access to the git references database. 
pub refs: &'a gix_ref::file::Store, /// A function that returns an iterator over `(refs, objects)` for each alternate repository, to assure all known objects are added also according to their tips. pub alternates: Alternates, /// The implementation that performs the negotiation later, i.e. prepare wants and haves. pub negotiator: &'a mut dyn gix_negotiate::Negotiator, /// The commit-graph for use by the `negotiator` - we populate it with tips to initialize the graph traversal. pub graph: &'a mut gix_negotiate::Graph<'b, 'c>, } /// A trait to encapsulate steps to negotiate the contents of the pack. /// /// Typical implementations use the utilities found in the [`negotiate`] module. pub trait Negotiate { /// Typically invokes [`negotiate::mark_complete_and_common_ref()`]. fn mark_complete_and_common_ref(&mut self) -> Result; /// Typically invokes [`negotiate::add_wants()`]. /// Returns `true` if wants were added, or `false` if the negotiation should be aborted. #[must_use] fn add_wants(&mut self, arguments: &mut fetch::Arguments, remote_ref_target_known: &[bool]) -> bool; /// Typically invokes [`negotiate::one_round()`]. fn one_round( &mut self, state: &mut negotiate::one_round::State, arguments: &mut fetch::Arguments, previous_response: Option<&fetch::Response>, ) -> Result<(negotiate::Round, bool), negotiate::Error>; } /// The outcome of [`fetch()`](crate::fetch()). #[derive(Debug, Clone)] pub struct Outcome { /// The most recent server response. /// /// Useful to obtain information about new shallow boundaries. pub last_response: fetch::Response, /// Information about the negotiation to receive the new pack. pub negotiate: NegotiateOutcome, } /// The negotiation-specific outcome of [`fetch()`](crate::fetch()). #[derive(Debug, Clone)] pub struct NegotiateOutcome { /// The outcome of the negotiation stage of the fetch operation. /// /// If it is… /// /// * [`negotiate::Action::MustNegotiate`] there will always be a `pack`. /// * [`negotiate::Action::SkipToRefUpdate`] there is no `pack` but references can be updated right away. /// /// Note that this is never [negotiate::Action::NoChange`] as this would mean there is no negotiation information at all /// so this structure wouldn't be present. pub action: negotiate::Action, /// Additional information for each round of negotiation. pub rounds: Vec, } /// Information about the relationship between our refspecs, and remote references with their local counterparts. /// /// It's the first stage that offers connection to the server, and is typically required to perform one or more fetch operations. #[derive(Default, Debug, Clone)] pub struct RefMap { /// A mapping between a remote reference and a local tracking branch. pub mappings: Vec, /// The explicit refspecs that were supposed to be used for fetching. /// /// Typically, they are configured by the remote and are referred to by /// [`refmap::SpecIndex::ExplicitInRemote`] in [`refmap::Mapping`]. pub refspecs: Vec, /// Refspecs which have been added implicitly due to settings of the `remote`, usually pre-initialized from /// [`extra_refspecs` in RefMap options](refmap::init::Options). /// They are referred to by [`refmap::SpecIndex::Implicit`] in [`refmap::Mapping`]. /// /// They are never persisted nor are they typically presented to the user. pub extra_refspecs: Vec, /// Information about the fixes applied to the `mapping` due to validation and sanitization. pub fixes: Vec, /// All refs advertised by the remote. 
pub remote_refs: Vec, /// The kind of hash used for all data sent by the server, if understood by this client implementation. /// /// It was extracted from the `handshake` as advertised by the server. pub object_hash: gix_hash::Kind, } } #[cfg(feature = "fetch")] pub use with_fetch::*; /// Describe how shallow clones are handled when fetching, with variants defining how the *shallow boundary* is handled. /// /// The *shallow boundary* is a set of commits whose parents are not present in the repository. #[derive(Default, Debug, Clone, PartialEq, Eq)] pub enum Shallow { /// Fetch all changes from the remote without affecting the shallow boundary at all. /// /// This also means that repositories that aren't shallow will remain like that. #[default] NoChange, /// Receive update to `depth` commits in the history of the refs to fetch (from the viewpoint of the remote), /// with the value of `1` meaning to receive only the commit a ref is pointing to. /// /// This may update the shallow boundary to increase or decrease the amount of available history. DepthAtRemote(std::num::NonZeroU32), /// Increase the number of commits and thus expand the shallow boundary by `depth` commits as seen from our local /// shallow boundary, with a value of `0` having no effect. Deepen(u32), /// Set the shallow boundary at the `cutoff` time, meaning that there will be no commits beyond that time. Since { /// The date beyond which there will be no history. cutoff: gix_date::Time, }, /// Receive all history excluding all commits reachable from `remote_refs`. These can be long or short /// ref names or tag names. Exclude { /// The ref names to exclude, short or long. Note that ambiguous short names will cause the remote to abort /// without an error message being transferred (because the protocol does not support it) remote_refs: Vec, /// If some, this field has the same meaning as [`Shallow::Since`] which can be used in combination /// with excluded references. since_cutoff: Option, }, } impl Shallow { /// Produce a variant that causes the repository to loose its shallow boundary, effectively by extending it /// beyond all limits. pub fn undo() -> Self { Shallow::DepthAtRemote((i32::MAX as u32).try_into().expect("valid at compile time")) } } /// Describe how to handle tags when fetching #[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] pub enum Tags { /// Fetch all tags from the remote, even if these are not reachable from objects referred to by our refspecs. All, /// Fetch only the tags that point to the objects being sent. /// That way, annotated tags that point to an object we receive are automatically transmitted and their refs are created. /// The same goes for lightweight tags. #[default] Included, /// Do not fetch any tags. None, } impl Tags { /// Obtain a refspec that determines whether or not to fetch all tags, depending on this variant. /// /// The returned refspec is the default refspec for tags, but won't overwrite local tags ever. #[cfg(feature = "fetch")] pub fn to_refspec(&self) -> Option> { match self { Tags::All | Tags::Included => Some( gix_refspec::parse("refs/tags/*:refs/tags/*".into(), gix_refspec::parse::Operation::Fetch) .expect("valid"), ), Tags::None => None, } } } /// A representation of a complete fetch response #[derive(Debug, Clone)] pub struct Response { pub(crate) acks: Vec, pub(crate) shallows: Vec, pub(crate) wanted_refs: Vec, pub(crate) has_pack: bool, } /// The progress ids used in during various steps of the fetch operation. 
/// /// Note that tagged progress isn't very widely available yet, but support can be improved as needed. /// /// Use this information to selectively extract the progress of interest in case the parent application has custom visualization. #[derive(Debug, Copy, Clone)] pub enum ProgressId { /// The progress name is defined by the remote and the progress messages it sets, along with their progress values and limits. RemoteProgress, } impl From for gix_features::progress::Id { fn from(v: ProgressId) -> Self { match v { ProgressId::RemoteProgress => *b"FERP", } } } gix-protocol-0.47.0/src/handshake/function.rs000064400000000000000000000110341046102023000172510ustar 00000000000000use gix_features::{progress, progress::Progress}; use gix_transport::{client, client::SetServiceResponse, Service}; use maybe_async::maybe_async; use super::{Error, Outcome}; use crate::{credentials, handshake::refs}; /// Perform a handshake with the server on the other side of `transport`, with `authenticate` being used if authentication /// turns out to be required. `extra_parameters` are the parameters `(name, optional value)` to add to the handshake, /// each time it is performed in case authentication is required. /// `progress` is used to inform about what's currently happening. #[allow(clippy::result_large_err)] #[maybe_async] pub async fn handshake( mut transport: T, service: Service, mut authenticate: AuthFn, extra_parameters: Vec<(String, Option)>, progress: &mut impl Progress, ) -> Result where AuthFn: FnMut(credentials::helper::Action) -> credentials::protocol::Result, T: client::Transport, { let _span = gix_features::trace::detail!("gix_protocol::handshake()", service = ?service, extra_parameters = ?extra_parameters); let (server_protocol_version, refs, capabilities) = { progress.init(None, progress::steps()); progress.set_name("handshake".into()); progress.step(); let extra_parameters: Vec<_> = extra_parameters .iter() .map(|(k, v)| (k.as_str(), v.as_deref())) .collect(); let supported_versions: Vec<_> = transport.supported_protocol_versions().into(); let result = transport.handshake(service, &extra_parameters).await; let SetServiceResponse { actual_protocol, capabilities, refs, } = match result { Ok(v) => Ok(v), Err(client::Error::Io(ref err)) if err.kind() == std::io::ErrorKind::PermissionDenied => { drop(result); // needed to workaround this: https://github.com/rust-lang/rust/issues/76149 let url = transport.to_url().into_owned(); progress.set_name("authentication".into()); let credentials::protocol::Outcome { identity, next } = authenticate(credentials::helper::Action::get_for_url(url.clone()))? .ok_or(Error::EmptyCredentials)?; transport.set_identity(identity)?; progress.step(); progress.set_name("handshake (authenticated)".into()); match transport.handshake(service, &extra_parameters).await { Ok(v) => { authenticate(next.store())?; Ok(v) } // Still no permission? Reject the credentials. Err(client::Error::Io(err)) if err.kind() == std::io::ErrorKind::PermissionDenied => { authenticate(next.erase())?; return Err(Error::InvalidCredentials { url, source: err }); } // Otherwise, do nothing, as we don't know if it actually got to try the credentials. // If they were previously stored, they remain. In the worst case, the user has to enter them again // next time they try. 
Err(err) => Err(err), } } Err(err) => Err(err), }?; if !supported_versions.is_empty() && !supported_versions.contains(&actual_protocol) { return Err(Error::TransportProtocolPolicyViolation { actual_version: actual_protocol, }); } let parsed_refs = match refs { Some(mut refs) => { assert!( matches!( actual_protocol, gix_transport::Protocol::V0 | gix_transport::Protocol::V1 ), "Only V(0|1) auto-responds with refs" ); Some( refs::from_v1_refs_received_as_part_of_handshake_and_capabilities(&mut refs, capabilities.iter()) .await?, ) } None => None, }; (actual_protocol, parsed_refs, capabilities) }; // this scope is needed, see https://github.com/rust-lang/rust/issues/76149 let (refs, v1_shallow_updates) = refs .map(|(refs, shallow)| (Some(refs), Some(shallow))) .unwrap_or_default(); Ok(Outcome { server_protocol_version, refs, v1_shallow_updates, capabilities, }) } gix-protocol-0.47.0/src/handshake/mod.rs000064400000000000000000000110231046102023000162010ustar 00000000000000use bstr::BString; /// pub mod refs; /// A git reference, commonly referred to as 'ref', as returned by a git server before sending a pack. #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Ref { /// A ref pointing to a `tag` object, which in turns points to an `object`, usually a commit Peeled { /// The name at which the ref is located, like `refs/tags/1.0`. full_ref_name: BString, /// The hash of the tag the ref points to. tag: gix_hash::ObjectId, /// The hash of the object the `tag` points to. object: gix_hash::ObjectId, }, /// A ref pointing to a commit object Direct { /// The name at which the ref is located, like `refs/heads/main` or `refs/tags/v1.0` for lightweight tags. full_ref_name: BString, /// The hash of the object the ref points to. object: gix_hash::ObjectId, }, /// A symbolic ref pointing to `target` ref, which in turn, ultimately after possibly following `tag`, points to an `object` Symbolic { /// The name at which the symbolic ref is located, like `HEAD`. full_ref_name: BString, /// The path of the ref the symbolic ref points to, like `refs/heads/main`. /// /// See issue [#205] for details /// /// [#205]: https://github.com/GitoxideLabs/gitoxide/issues/205 target: BString, /// The hash of the annotated tag the ref points to, if present. /// /// Note that this field is also `None` if `full_ref_name` is a lightweight tag. tag: Option, /// The hash of the object the `target` ref ultimately points to. object: gix_hash::ObjectId, }, /// A ref is unborn on the remote and just points to the initial, unborn branch, as is the case in a newly initialized repository /// or dangling symbolic refs. Unborn { /// The name at which the ref is located, typically `HEAD`. full_ref_name: BString, /// The path of the ref the symbolic ref points to, like `refs/heads/main`, even though the `target` does not yet exist. target: BString, }, } /// The result of the [`handshake()`][super::handshake()] function. #[derive(Default, Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg(feature = "handshake")] pub struct Outcome { /// The protocol version the server responded with. It might have downgraded the desired version. pub server_protocol_version: gix_transport::Protocol, /// The references reported as part of the `Protocol::V1` handshake, or `None` otherwise as V2 requires a separate request. pub refs: Option>, /// Shallow updates as part of the `Protocol::V1`, to shallow a particular object. 
/// Note that unshallowing isn't supported here. pub v1_shallow_updates: Option>, /// The server capabilities. pub capabilities: gix_transport::client::Capabilities, } #[cfg(feature = "handshake")] mod error { use bstr::BString; use gix_transport::client; use crate::{credentials, handshake::refs}; /// The error returned by [`handshake()`][crate::fetch::handshake()]. #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error("Failed to obtain credentials")] Credentials(#[from] credentials::protocol::Error), #[error("No credentials were returned at all as if the credential helper isn't functioning unknowingly")] EmptyCredentials, #[error("Credentials provided for \"{url}\" were not accepted by the remote")] InvalidCredentials { url: BString, source: std::io::Error }, #[error(transparent)] Transport(#[from] client::Error), #[error("The transport didn't accept the advertised server version {actual_version:?} and closed the connection client side")] TransportProtocolPolicyViolation { actual_version: gix_transport::Protocol }, #[error(transparent)] ParseRefs(#[from] refs::parse::Error), } impl gix_transport::IsSpuriousError for Error { fn is_spurious(&self) -> bool { match self { Error::Transport(err) => err.is_spurious(), _ => false, } } } } #[cfg(feature = "handshake")] pub use error::Error; #[cfg(any(feature = "blocking-client", feature = "async-client"))] #[cfg(feature = "handshake")] pub(crate) mod function; gix-protocol-0.47.0/src/handshake/refs/async_io.rs000064400000000000000000000035651046102023000202010ustar 00000000000000use crate::fetch::response::ShallowUpdate; use crate::handshake::{refs, refs::parse::Error, Ref}; /// Parse refs from the given input line by line. Protocol V2 is required for this to succeed. pub async fn from_v2_refs(in_refs: &mut dyn gix_transport::client::ReadlineBufRead) -> Result, Error> { let mut out_refs = Vec::new(); while let Some(line) = in_refs .readline() .await .transpose()? .transpose()? .and_then(|l| l.as_bstr()) { out_refs.push(refs::shared::parse_v2(line)?); } Ok(out_refs) } /// Parse refs from the return stream of the handshake as well as the server capabilities, also received as part of the /// handshake. /// Together they form a complete set of refs. /// /// # Note /// /// Symbolic refs are shoe-horned into server capabilities whereas refs (without symbolic ones) are sent automatically as /// part of the handshake. Both symbolic and peeled refs need to be combined to fit into the [`Ref`] type provided here. pub async fn from_v1_refs_received_as_part_of_handshake_and_capabilities<'a>( in_refs: &mut dyn gix_transport::client::ReadlineBufRead, capabilities: impl Iterator>, ) -> Result<(Vec, Vec), refs::parse::Error> { let mut out_refs = refs::shared::from_capabilities(capabilities)?; let mut out_shallow = Vec::new(); let number_of_possible_symbolic_refs_for_lookup = out_refs.len(); while let Some(line) = in_refs .readline() .await .transpose()? .transpose()? .and_then(|l| l.as_bstr()) { refs::shared::parse_v1( number_of_possible_symbolic_refs_for_lookup, &mut out_refs, &mut out_shallow, line, )?; } Ok((out_refs.into_iter().map(Into::into).collect(), out_shallow)) } gix-protocol-0.47.0/src/handshake/refs/blocking_io.rs000064400000000000000000000033561046102023000206520ustar 00000000000000use crate::fetch::response::ShallowUpdate; use crate::handshake::{refs, refs::parse::Error, Ref}; /// Parse refs from the given input line by line. Protocol V2 is required for this to succeed. 
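///
/// For orientation only: an illustrative (not captured) V2 ref listing could consist of packet lines
/// shaped like the following, where `peeled:` and `symref-target:` are the recognized attributes:
///
/// ```text
/// 1111111111111111111111111111111111111111 HEAD symref-target:refs/heads/main
/// 1111111111111111111111111111111111111111 refs/heads/main
/// 2222222222222222222222222222222222222222 refs/tags/v1.0 peeled:1111111111111111111111111111111111111111
/// ```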
pub fn from_v2_refs(in_refs: &mut dyn gix_transport::client::ReadlineBufRead) -> Result, Error> { let mut out_refs = Vec::new(); while let Some(line) = in_refs.readline().transpose()?.transpose()?.and_then(|l| l.as_bstr()) { out_refs.push(refs::shared::parse_v2(line)?); } Ok(out_refs) } /// Parse refs from the return stream of the handshake as well as the server capabilities, also received as part of the /// handshake. /// Together they form a complete set of refs. /// /// # Note /// /// Symbolic refs are shoe-horned into server capabilities whereas refs (without symbolic ones) are sent automatically as /// part of the handshake. Both symbolic and peeled refs need to be combined to fit into the [`Ref`] type provided here. pub fn from_v1_refs_received_as_part_of_handshake_and_capabilities<'a>( in_refs: &mut dyn gix_transport::client::ReadlineBufRead, capabilities: impl Iterator>, ) -> Result<(Vec, Vec), Error> { let mut out_refs = refs::shared::from_capabilities(capabilities)?; let mut out_shallow = Vec::new(); let number_of_possible_symbolic_refs_for_lookup = out_refs.len(); while let Some(line) = in_refs.readline().transpose()?.transpose()?.and_then(|l| l.as_bstr()) { refs::shared::parse_v1( number_of_possible_symbolic_refs_for_lookup, &mut out_refs, &mut out_shallow, line, )?; } Ok((out_refs.into_iter().map(Into::into).collect(), out_shallow)) } gix-protocol-0.47.0/src/handshake/refs/mod.rs000064400000000000000000000054361046102023000171530ustar 00000000000000use bstr::BStr; use super::Ref; /// pub mod parse { use bstr::BString; /// The error returned when parsing References/refs from the server response. #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), #[error(transparent)] DecodePacketline(#[from] gix_transport::packetline::decode::Error), #[error(transparent)] Id(#[from] gix_hash::decode::Error), #[error("{symref:?} could not be parsed. A symref is expected to look like :.")] MalformedSymref { symref: BString }, #[error("{0:?} could not be parsed. A V1 ref line should be ' '.")] MalformedV1RefLine(BString), #[error( "{0:?} could not be parsed. A V2 ref line should be ' [ (peeled|symref-target):'." )] MalformedV2RefLine(BString), #[error("The ref attribute {attribute:?} is unknown. Found in line {line:?}")] UnknownAttribute { attribute: BString, line: BString }, #[error("{message}")] InvariantViolation { message: &'static str }, } } impl Ref { /// Provide shared fields referring to the ref itself, namely `(name, target, [peeled])`. /// In case of peeled refs, the tag object itself is returned as it is what the ref directly refers to, and target of the tag is returned /// as `peeled`. /// If `unborn`, the first object id will be the null oid. pub fn unpack(&self) -> (&BStr, Option<&gix_hash::oid>, Option<&gix_hash::oid>) { match self { Ref::Direct { full_ref_name, object } => (full_ref_name.as_ref(), Some(object), None), Ref::Symbolic { full_ref_name, tag, object, .. 
} => ( full_ref_name.as_ref(), Some(tag.as_deref().unwrap_or(object)), tag.as_deref().map(|_| object.as_ref()), ), Ref::Peeled { full_ref_name, tag: object, object: peeled, } => (full_ref_name.as_ref(), Some(object), Some(peeled)), Ref::Unborn { full_ref_name, target: _, } => (full_ref_name.as_ref(), None, None), } } } #[cfg(any(feature = "blocking-client", feature = "async-client"))] pub(crate) mod shared; #[cfg(feature = "async-client")] mod async_io; #[cfg(feature = "async-client")] pub use async_io::{from_v1_refs_received_as_part_of_handshake_and_capabilities, from_v2_refs}; #[cfg(feature = "blocking-client")] mod blocking_io; #[cfg(feature = "blocking-client")] pub use blocking_io::{from_v1_refs_received_as_part_of_handshake_and_capabilities, from_v2_refs}; gix-protocol-0.47.0/src/handshake/refs/shared.rs000064400000000000000000000300671046102023000176400ustar 00000000000000use crate::fetch::response::ShallowUpdate; use crate::handshake::{refs::parse::Error, Ref}; use bstr::{BStr, BString, ByteSlice}; impl From for Ref { fn from(v: InternalRef) -> Self { match v { InternalRef::Symbolic { path, target: Some(target), tag, object, } => Ref::Symbolic { full_ref_name: path, target, tag, object, }, InternalRef::Symbolic { path, target: None, tag: None, object, } => Ref::Direct { full_ref_name: path, object, }, InternalRef::Symbolic { path, target: None, tag: Some(tag), object, } => Ref::Peeled { full_ref_name: path, tag, object, }, InternalRef::Peeled { path, tag, object } => Ref::Peeled { full_ref_name: path, tag, object, }, InternalRef::Direct { path, object } => Ref::Direct { full_ref_name: path, object, }, InternalRef::SymbolicForLookup { .. } => { unreachable!("this case should have been removed during processing") } } } } #[cfg_attr(test, derive(PartialEq, Eq, Debug, Clone))] pub(crate) enum InternalRef { /// A ref pointing to a `tag` object, which in turns points to an `object`, usually a commit Peeled { path: BString, tag: gix_hash::ObjectId, object: gix_hash::ObjectId, }, /// A ref pointing to a commit object Direct { path: BString, object: gix_hash::ObjectId }, /// A symbolic ref pointing to `target` ref, which in turn points to an `object` Symbolic { path: BString, /// It is `None` if the target is unreachable as it points to another namespace than the one is currently set /// on the server (i.e. based on the repository at hand or the user performing the operation). /// /// The latter is more of an edge case, please [this issue][#205] for details. target: Option, tag: Option, object: gix_hash::ObjectId, }, /// extracted from V1 capabilities, which contain some important symbolic refs along with their targets /// These don't contain the Id SymbolicForLookup { path: BString, target: Option }, } impl InternalRef { fn unpack_direct(self) -> Option<(BString, gix_hash::ObjectId)> { match self { InternalRef::Direct { path, object } => Some((path, object)), _ => None, } } fn lookup_symbol_has_path(&self, predicate_path: &BStr) -> bool { matches!(self, InternalRef::SymbolicForLookup { path, .. 
} if path == predicate_path) } } pub(crate) fn from_capabilities<'a>( capabilities: impl Iterator>, ) -> Result, Error> { let mut out_refs = Vec::new(); let symref_values = capabilities.filter_map(|c| { if c.name() == b"symref".as_bstr() { c.value().map(ToOwned::to_owned) } else { None } }); for symref in symref_values { let (left, right) = symref.split_at(symref.find_byte(b':').ok_or_else(|| Error::MalformedSymref { symref: symref.to_owned(), })?); if left.is_empty() || right.is_empty() { return Err(Error::MalformedSymref { symref: symref.to_owned(), }); } out_refs.push(InternalRef::SymbolicForLookup { path: left.into(), target: match &right[1..] { b"(null)" => None, name => Some(name.into()), }, }); } Ok(out_refs) } pub(in crate::handshake::refs) fn parse_v1( num_initial_out_refs: usize, out_refs: &mut Vec, out_shallow: &mut Vec, line: &BStr, ) -> Result<(), Error> { let trimmed = line.trim_end(); let (hex_hash, path) = trimmed.split_at( trimmed .find(b" ") .ok_or_else(|| Error::MalformedV1RefLine(trimmed.to_owned().into()))?, ); let path = &path[1..]; if path.is_empty() { return Err(Error::MalformedV1RefLine(trimmed.to_owned().into())); } match path.strip_suffix(b"^{}") { Some(stripped) => { if hex_hash.iter().all(|b| *b == b'0') && stripped == b"capabilities" { // this is a special dummy-ref just for the sake of getting capabilities across in a repo that is empty. return Ok(()); } let (previous_path, tag) = out_refs .pop() .and_then(InternalRef::unpack_direct) .ok_or(Error::InvariantViolation { message: "Expecting peeled refs to be preceded by direct refs", })?; if previous_path != stripped { return Err(Error::InvariantViolation { message: "Expecting peeled refs to have the same base path as the previous, unpeeled one", }); } out_refs.push(InternalRef::Peeled { path: previous_path, tag, object: gix_hash::ObjectId::from_hex(hex_hash.as_bytes())?, }); } None => { let object = match gix_hash::ObjectId::from_hex(hex_hash.as_bytes()) { Ok(id) => id, Err(_) if hex_hash.as_bstr() == "shallow" => { let id = gix_hash::ObjectId::from_hex(path)?; out_shallow.push(ShallowUpdate::Shallow(id)); return Ok(()); } Err(err) => return Err(err.into()), }; match out_refs .iter() .take(num_initial_out_refs) .position(|r| r.lookup_symbol_has_path(path.into())) { Some(position) => match out_refs.swap_remove(position) { InternalRef::SymbolicForLookup { path: _, target } => out_refs.push(InternalRef::Symbolic { path: path.into(), tag: None, // TODO: figure out how annotated tags work here. object, target, }), _ => unreachable!("Bug in lookup_symbol_has_path - must return lookup symbols"), }, None => out_refs.push(InternalRef::Direct { object, path: path.into(), }), }; } } Ok(()) } pub(in crate::handshake::refs) fn parse_v2(line: &BStr) -> Result { let trimmed = line.trim_end(); let mut tokens = trimmed.splitn(4, |b| *b == b' '); match (tokens.next(), tokens.next()) { (Some(hex_hash), Some(path)) => { let id = if hex_hash == b"unborn" { None } else { Some(gix_hash::ObjectId::from_hex(hex_hash.as_bytes())?) 
}; if path.is_empty() { return Err(Error::MalformedV2RefLine(trimmed.to_owned().into())); } let mut symref_target = None; let mut peeled = None; for attribute in tokens.by_ref().take(2) { let mut tokens = attribute.splitn(2, |b| *b == b':'); match (tokens.next(), tokens.next()) { (Some(attribute), Some(value)) => { if value.is_empty() { return Err(Error::MalformedV2RefLine(trimmed.to_owned().into())); } match attribute { b"peeled" => { peeled = Some(gix_hash::ObjectId::from_hex(value.as_bytes())?); } b"symref-target" => { symref_target = Some(value); } _ => { return Err(Error::UnknownAttribute { attribute: attribute.to_owned().into(), line: trimmed.to_owned().into(), }) } } } _ => return Err(Error::MalformedV2RefLine(trimmed.to_owned().into())), } } if tokens.next().is_some() { return Err(Error::MalformedV2RefLine(trimmed.to_owned().into())); } Ok(match (symref_target, peeled) { (Some(target_name), peeled) => match target_name { b"(null)" => match peeled { None => Ref::Direct { full_ref_name: path.into(), object: id.ok_or(Error::InvariantViolation { message: "got 'unborn' while (null) was a symref target", })?, }, Some(peeled) => Ref::Peeled { full_ref_name: path.into(), object: peeled, tag: id.ok_or(Error::InvariantViolation { message: "got 'unborn' while (null) was a symref target", })?, }, }, name => match id { Some(id) => Ref::Symbolic { full_ref_name: path.into(), tag: peeled.map(|_| id), object: peeled.unwrap_or(id), target: name.into(), }, None => Ref::Unborn { full_ref_name: path.into(), target: name.into(), }, }, }, (None, Some(peeled)) => Ref::Peeled { full_ref_name: path.into(), object: peeled, tag: id.ok_or(Error::InvariantViolation { message: "got 'unborn' as tag target", })?, }, (None, None) => Ref::Direct { object: id.ok_or(Error::InvariantViolation { message: "got 'unborn' as object name of direct reference", })?, full_ref_name: path.into(), }, }) } _ => Err(Error::MalformedV2RefLine(trimmed.to_owned().into())), } } #[cfg(test)] mod tests { use gix_transport::client; use crate::handshake::{refs, refs::shared::InternalRef}; #[test] fn extract_symbolic_references_from_capabilities() -> Result<(), client::Error> { let caps = client::Capabilities::from_bytes( b"\0unrelated symref=HEAD:refs/heads/main symref=ANOTHER:refs/heads/foo symref=MISSING_NAMESPACE_TARGET:(null) agent=git/2.28.0", )? .0; let out = refs::shared::from_capabilities(caps.iter()).expect("a working example"); assert_eq!( out, vec![ InternalRef::SymbolicForLookup { path: "HEAD".into(), target: Some("refs/heads/main".into()) }, InternalRef::SymbolicForLookup { path: "ANOTHER".into(), target: Some("refs/heads/foo".into()) }, InternalRef::SymbolicForLookup { path: "MISSING_NAMESPACE_TARGET".into(), target: None } ] ); Ok(()) } } gix-protocol-0.47.0/src/lib.rs000064400000000000000000000044741046102023000142560ustar 00000000000000//! An abstraction over [fetching][fetch()] a pack from the server. //! //! Generally, there is the following order of operations. //! //! * create a [`Transport`](gix_transport::client::Transport) //! * perform a [`handshake()`] //! * execute a [`Command`] //! - [list references](ls_refs()) //! - create a mapping between [refspecs and references](fetch::RefMap) //! - [receive a pack](fetch()) //! //! 
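//! A condensed sketch of that sequence for the blocking client (`blocking-client` feature) is shown below.
//! Transport setup, authentication, progress handling and error handling are elided or assumed, so the
//! snippet is illustrative rather than copy-paste ready:
//!
//! ```ignore
//! // 1) obtain any implementation of `gix_transport::client::Transport`
//! let mut transport = /* connect to the remote, e.g. via `gix_transport` */;
//! // 2) perform the protocol handshake to learn the server's protocol version and capabilities
//! let outcome = handshake(/* &mut transport, authenticate, extra_parameters, &mut progress */)?;
//! // 3) list references with the V2 `ls-refs` command
//! let refs = ls_refs(
//!     &mut transport,
//!     &outcome.capabilities,
//!     |_capabilities, _arguments, _features| Ok(ls_refs::Action::Continue),
//!     &mut progress,
//!     false, /* trace */
//! )?;
//! // 4) map refspecs to `refs` via `fetch::RefMap`, then negotiate and receive a pack with `fetch(..)`
//! ```
//!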
//! ## Feature Flags
#![cfg_attr(
    all(doc, feature = "document-features"),
    doc = ::document_features::document_features!()
)]
#![cfg_attr(all(doc, feature = "document-features"), feature(doc_cfg, doc_auto_cfg))]
#![deny(missing_docs, rust_2018_idioms, unsafe_code)]

/// A function that performs a given credential action, trying to obtain credentials for an operation that needs them.
///
/// Useful for both `fetch` and `push`.
#[cfg(feature = "handshake")]
pub type AuthenticateFn<'a> = Box<dyn FnMut(gix_credentials::helper::Action) -> gix_credentials::protocol::Result + 'a>;

/// A selector for V2 commands to invoke on the server for the purpose of pre-invocation validation.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
pub enum Command {
    /// List references.
    LsRefs,
    /// Fetch a pack.
    Fetch,
}
pub mod command;

#[cfg(feature = "async-client")]
pub use async_trait;
#[cfg(feature = "async-client")]
pub use futures_io;
#[cfg(feature = "async-client")]
pub use futures_lite;

#[cfg(feature = "handshake")]
pub use gix_credentials as credentials;
/// A convenience export allowing users of gix-protocol to use the transport layer without their own cargo dependency.
pub use gix_transport as transport;
pub use maybe_async;

///
pub mod fetch;
#[cfg(any(feature = "blocking-client", feature = "async-client"))]
pub use fetch::function::fetch;

mod remote_progress;
pub use remote_progress::RemoteProgress;

#[cfg(all(feature = "blocking-client", feature = "async-client"))]
compile_error!("Cannot set both 'blocking-client' and 'async-client' features as they are mutually exclusive");

///
pub mod handshake;
#[cfg(any(feature = "blocking-client", feature = "async-client"))]
#[cfg(feature = "handshake")]
pub use handshake::function::handshake;

///
pub mod ls_refs;
#[cfg(any(feature = "blocking-client", feature = "async-client"))]
pub use ls_refs::function::ls_refs;

mod util;
pub use util::*;
gix-protocol-0.47.0/src/ls_refs.rs000064400000000000000000000105701046102023000151370ustar 00000000000000
#[cfg(any(feature = "blocking-client", feature = "async-client"))]
mod error {
    use crate::handshake::refs::parse;

    /// The error returned by [`ls_refs()`][crate::ls_refs()].
    #[derive(Debug, thiserror::Error)]
    #[allow(missing_docs)]
    pub enum Error {
        #[error(transparent)]
        Io(#[from] std::io::Error),
        #[error(transparent)]
        Transport(#[from] gix_transport::client::Error),
        #[error(transparent)]
        Parse(#[from] parse::Error),
        #[error(transparent)]
        ArgumentValidation(#[from] crate::command::validate_argument_prefixes::Error),
    }

    impl gix_transport::IsSpuriousError for Error {
        fn is_spurious(&self) -> bool {
            match self {
                Error::Io(err) => err.is_spurious(),
                Error::Transport(err) => err.is_spurious(),
                _ => false,
            }
        }
    }
}
#[cfg(any(feature = "blocking-client", feature = "async-client"))]
pub use error::Error;

/// What to do after preparing ls-refs in [`ls_refs()`][crate::ls_refs()].
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)]
pub enum Action {
    /// Continue by sending a 'ls-refs' command.
    Continue,
    /// Skip 'ls-refs' entirely.
    ///
    /// This is useful if the `ref-in-want` capability is taken advantage of. When fetching, one must then send
    /// `want-ref`s during the negotiation phase.
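    ///
    /// A minimal sketch of a `prepare_ls_refs` callback (the closure handed to
    /// [`ls_refs()`][crate::ls_refs()]) that opts out of the ref listing; the surrounding
    /// setup is assumed and not shown:
    ///
    /// ```ignore
    /// |_capabilities, _arguments, _features| {
    ///     // Rely on `want-ref`s sent during negotiation instead of a server-side ref listing.
    ///     Ok(ls_refs::Action::Skip)
    /// }
    /// ```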
Skip, } #[cfg(any(feature = "blocking-client", feature = "async-client"))] pub(crate) mod function { use std::borrow::Cow; use bstr::BString; use gix_features::progress::Progress; use gix_transport::client::{Capabilities, Transport, TransportV2Ext}; use maybe_async::maybe_async; use super::{Action, Error}; use crate::{ handshake::{refs::from_v2_refs, Ref}, indicate_end_of_interaction, Command, }; /// Invoke an ls-refs V2 command on `transport`, which requires a prior handshake that yielded /// server `capabilities`. `prepare_ls_refs(capabilities, arguments, features)` can be used to alter the _ls-refs_. `progress` is used to provide feedback. /// Note that `prepare_ls_refs()` is expected to add the `(agent, Some(name))` to the list of `features`. /// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `gix-trace` crate. #[maybe_async] pub async fn ls_refs( mut transport: impl Transport, capabilities: &Capabilities, prepare_ls_refs: impl FnOnce( &Capabilities, &mut Vec, &mut Vec<(&str, Option>)>, ) -> std::io::Result, progress: &mut impl Progress, trace: bool, ) -> Result, Error> { let _span = gix_features::trace::detail!("gix_protocol::ls_refs()", capabilities = ?capabilities); let ls_refs = Command::LsRefs; let mut ls_features = ls_refs.default_features(gix_transport::Protocol::V2, capabilities); let mut ls_args = ls_refs.initial_v2_arguments(&ls_features); if capabilities .capability("ls-refs") .and_then(|cap| cap.supports("unborn")) .unwrap_or_default() { ls_args.push("unborn".into()); } let refs = match prepare_ls_refs(capabilities, &mut ls_args, &mut ls_features) { Ok(Action::Skip) => Vec::new(), Ok(Action::Continue) => { ls_refs.validate_argument_prefixes( gix_transport::Protocol::V2, capabilities, &ls_args, &ls_features, )?; progress.step(); progress.set_name("list refs".into()); let mut remote_refs = transport .invoke( ls_refs.as_str(), ls_features.into_iter(), if ls_args.is_empty() { None } else { Some(ls_args.into_iter()) }, trace, ) .await?; from_v2_refs(&mut remote_refs).await? } Err(err) => { indicate_end_of_interaction(transport, trace).await?; return Err(err.into()); } }; Ok(refs) } } gix-protocol-0.47.0/src/remote_progress.rs000064400000000000000000000073271046102023000167270ustar 00000000000000use bstr::ByteSlice; use winnow::{ combinator::{opt, preceded, terminated}, prelude::*, token::take_till, }; /// The information usually found in remote progress messages as sent by a git server during /// fetch, clone and push operations. #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct RemoteProgress<'a> { #[cfg_attr(feature = "serde", serde(borrow))] /// The name of the action, like "clone". pub action: &'a bstr::BStr, /// The percentage to indicate progress, between 0 and 100. pub percent: Option, /// The amount of items already processed. pub step: Option, /// The maximum expected amount of items. `step` / `max` * 100 = `percent`. pub max: Option, } impl RemoteProgress<'_> { /// Parse the progress from a typical git progress `line` as sent by the remote. pub fn from_bytes(mut line: &[u8]) -> Option> { parse_progress(&mut line).ok().and_then(|r| { if r.percent.is_none() && r.step.is_none() && r.max.is_none() { None } else { Some(r) } }) } /// Parse `text`, which is interpreted as error if `is_error` is true, as [`RemoteProgress`] and call the respective /// methods on the given `progress` instance. 
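    ///
    /// For reference, a (made-up) progress line such as `Counting objects:  50% (5/10)` would be
    /// interpreted as the action `Counting objects` with a percentage of `50`, a step of `5` and a
    /// maximum of `10`, which this function then forwards to `progress` as name, position and upper bound.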
pub fn translate_to_progress(is_error: bool, text: &[u8], progress: &mut impl gix_features::progress::Progress) { fn progress_name(current: Option, action: &[u8]) -> String { match current { Some(current) => format!( "{}: {}", current.split_once(':').map_or(&*current, |x| x.0), action.as_bstr() ), None => action.as_bstr().to_string(), } } if is_error { // ignore keep-alive packages sent with 'sideband-all' if !text.is_empty() { progress.fail(progress_name(None, text)); } } else { match RemoteProgress::from_bytes(text) { Some(RemoteProgress { action, percent: _, step, max, }) => { progress.set_name(progress_name(progress.name(), action)); progress.init(max, gix_features::progress::count("objects")); if let Some(step) = step { progress.set(step); } } None => progress.set_name(progress_name(progress.name(), text)), }; } } } fn parse_number(i: &mut &[u8]) -> PResult { take_till(0.., |c: u8| !c.is_ascii_digit()) .try_map(gix_utils::btoi::to_signed) .parse_next(i) } fn next_optional_percentage(i: &mut &[u8]) -> PResult, ()> { opt(terminated( preceded( take_till(0.., |c: u8| c.is_ascii_digit()), parse_number.try_map(u32::try_from), ), b"%", )) .parse_next(i) } fn next_optional_number(i: &mut &[u8]) -> PResult, ()> { opt(preceded(take_till(0.., |c: u8| c.is_ascii_digit()), parse_number)).parse_next(i) } fn parse_progress<'i>(line: &mut &'i [u8]) -> PResult, ()> { let action = take_till(1.., |c| c == b':').parse_next(line)?; let percent = next_optional_percentage.parse_next(line)?; let step = next_optional_number.parse_next(line)?; let max = next_optional_number.parse_next(line)?; Ok(RemoteProgress { action: action.into(), percent, step, max, }) } gix-protocol-0.47.0/src/util.rs000064400000000000000000000075151046102023000144640ustar 00000000000000/// The name of the `git` client in a format suitable for presentation to a `git` server, using `name` as user-defined portion of the value. pub fn agent(name: impl Into) -> String { let mut name = name.into(); if !name.starts_with("git/") { name.insert_str(0, "git/"); } name } #[cfg(any(feature = "blocking-client", feature = "async-client"))] mod with_transport { use gix_transport::client::Transport; /// Send a message to indicate the remote side that there is nothing more to expect from us, indicating a graceful shutdown. /// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `gix-trace` crate. #[maybe_async::maybe_async] pub async fn indicate_end_of_interaction( mut transport: impl gix_transport::client::Transport, trace: bool, ) -> Result<(), gix_transport::client::Error> { // An empty request marks the (early) end of the interaction. Only relevant in stateful transports though. if transport.connection_persists_across_multiple_requests() { transport .request( gix_transport::client::WriteMode::Binary, gix_transport::client::MessageKind::Flush, trace, )? .into_read() .await?; } Ok(()) } /// A utility to automatically send a flush packet when the instance is dropped, assuring a graceful termination of any /// interaction with the server. pub struct SendFlushOnDrop where T: Transport, { /// The actual transport instance. pub inner: T, /// If `true`, the packetline used to indicate the end of interaction will be traced using `gix-trace`. trace_packetlines: bool, /// If `true`, we should not send another flush packet. flush_packet_sent: bool, } impl SendFlushOnDrop where T: Transport, { /// Create a new instance with `transport`, while optionally tracing packetlines with `trace_packetlines`. 
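        ///
        /// A minimal usage sketch; `connect()` is a stand-in for whatever produces the transport and
        /// is not part of this crate:
        ///
        /// ```ignore
        /// let transport = connect()?; // hypothetical helper returning an `impl Transport`
        /// let mut transport = SendFlushOnDrop::new(transport, false /* trace_packetlines */);
        /// // …drive the protocol through `transport.inner`…
        /// // On drop, a flush packet is sent unless `indicate_end_of_interaction()` was called first.
        /// ```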
        pub fn new(transport: T, trace_packetlines: bool) -> Self {
            Self {
                inner: transport,
                trace_packetlines,
                flush_packet_sent: false,
            }
        }

        /// Useful to explicitly invalidate the connection by sending a flush-packet.
        /// This will happen exactly once, and it is not considered an error to call it multiple times.
        ///
        /// For convenience, this is not consuming, but it could be, to assure the underlying transport isn't used anymore.
        #[maybe_async::maybe_async]
        pub async fn indicate_end_of_interaction(&mut self) -> Result<(), gix_transport::client::Error> {
            if self.flush_packet_sent {
                return Ok(());
            }
            self.flush_packet_sent = true;
            indicate_end_of_interaction(&mut self.inner, self.trace_packetlines).await
        }
    }

    impl<T> Drop for SendFlushOnDrop<T>
    where
        T: Transport,
    {
        fn drop(&mut self) {
            #[cfg(feature = "async-client")]
            {
                // TODO: this should be an async drop once the feature is available.
                // Right now we block the executor by forcing this communication, but that only
                // happens if the user didn't actually try to receive a pack, which consumes the
                // connection in an async context.
                crate::futures_lite::future::block_on(self.indicate_end_of_interaction()).ok();
            }
            #[cfg(not(feature = "async-client"))]
            {
                self.indicate_end_of_interaction().ok();
            }
        }
    }
}

#[cfg(any(feature = "blocking-client", feature = "async-client"))]
pub use with_transport::*;
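
// A small, hedged sanity check added for illustration (not part of the crate's published test suite);
// it only exercises behavior visible in `agent()` above.
#[cfg(test)]
mod agent_name_tests {
    use super::agent;

    #[test]
    fn agent_prepends_the_git_prefix_exactly_once() {
        // A plain client name gets the `git/` prefix prepended…
        assert_eq!(agent("oxide-1.0"), "git/oxide-1.0");
        // …while a name that already starts with it is passed through unchanged.
        assert_eq!(agent("git/2.42.0"), "git/2.42.0");
    }
}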