gix-worktree-state-0.16.0/.cargo_vcs_info.json0000644000000001600000000000100146620ustar { "git": { "sha1": "beb0ea8c4ff94c64b7773772a9d388ccb403f3c1" }, "path_in_vcs": "gix-worktree-state" }gix-worktree-state-0.16.0/Cargo.toml0000644000000072340000000000100126710ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.65" name = "gix-worktree-state" version = "0.16.0" authors = ["Sebastian Thiel "] build = false include = [ "src/**/*", "LICENSE-*", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A crate of the gitoxide project implementing setting the worktree to a particular state" readme = false license = "MIT OR Apache-2.0" repository = "https://github.com/GitoxideLabs/gitoxide" [lib] name = "gix_worktree_state" path = "src/lib.rs" doctest = false [dependencies.bstr] version = "1.3.0" default-features = false [dependencies.gix-features] version = "^0.39.1" [dependencies.gix-filter] version = "^0.16.0" [dependencies.gix-fs] version = "^0.12.1" [dependencies.gix-glob] version = "^0.17.1" [dependencies.gix-hash] version = "^0.15.1" [dependencies.gix-index] version = "^0.37.0" [dependencies.gix-object] version = "^0.46.1" [dependencies.gix-path] version = "^0.10.13" [dependencies.gix-worktree] version = "^0.38.0" features = ["attributes"] default-features = false [dependencies.io-close] version = "0.3.7" [dependencies.thiserror] version = "2.0.0" [lints.clippy] bool_to_int_with_if = "allow" borrow_as_ptr = "allow" cast_lossless = "allow" cast_possible_truncation = "allow" cast_possible_wrap = "allow" cast_precision_loss = "allow" cast_sign_loss = "allow" checked_conversions = "allow" copy_iterator = "allow" default_trait_access = "allow" doc_markdown = "allow" empty_docs = "allow" enum_glob_use = "allow" explicit_deref_methods = "allow" explicit_into_iter_loop = "allow" explicit_iter_loop = "allow" filter_map_next = "allow" fn_params_excessive_bools = "allow" from_iter_instead_of_collect = "allow" if_not_else = "allow" ignored_unit_patterns = "allow" implicit_clone = "allow" inconsistent_struct_constructor = "allow" inefficient_to_string = "allow" inline_always = "allow" items_after_statements = "allow" iter_not_returning_iterator = "allow" iter_without_into_iter = "allow" manual_assert = "allow" manual_is_variant_and = "allow" manual_let_else = "allow" manual_string_new = "allow" many_single_char_names = "allow" match_bool = "allow" match_same_arms = "allow" match_wild_err_arm = "allow" match_wildcard_for_single_variants = "allow" missing_errors_doc = "allow" missing_panics_doc = "allow" module_name_repetitions = "allow" must_use_candidate = "allow" mut_mut = "allow" naive_bytecount = "allow" needless_for_each = "allow" needless_pass_by_value = "allow" needless_raw_string_hashes = "allow" no_effect_underscore_binding = "allow" option_option = "allow" range_plus_one = "allow" redundant_else = "allow" return_self_not_must_use = "allow" should_panic_without_expect = "allow" similar_names = "allow" single_match_else = "allow" stable_sort_primitive = "allow" struct_excessive_bools = 
"allow" struct_field_names = "allow" too_long_first_doc_paragraph = "allow" too_many_lines = "allow" transmute_ptr_to_ptr = "allow" trivially_copy_pass_by_ref = "allow" unnecessary_join = "allow" unnecessary_wraps = "allow" unreadable_literal = "allow" unused_self = "allow" used_underscore_binding = "allow" wildcard_imports = "allow" [lints.clippy.pedantic] level = "warn" priority = -1 [lints.rust] gix-worktree-state-0.16.0/Cargo.toml.orig000064400000000000000000000021721046102023000163460ustar 00000000000000lints.workspace = true [package] name = "gix-worktree-state" version = "0.16.0" repository = "https://github.com/GitoxideLabs/gitoxide" license = "MIT OR Apache-2.0" description = "A crate of the gitoxide project implementing setting the worktree to a particular state" authors = ["Sebastian Thiel "] edition = "2021" include = ["src/**/*", "LICENSE-*"] rust-version = "1.65" autotests = false [lib] doctest = false [dependencies] gix-worktree = { version = "^0.38.0", path = "../gix-worktree", default-features = false, features = ["attributes"] } gix-index = { version = "^0.37.0", path = "../gix-index" } gix-fs = { version = "^0.12.1", path = "../gix-fs" } gix-hash = { version = "^0.15.1", path = "../gix-hash" } gix-object = { version = "^0.46.1", path = "../gix-object" } gix-glob = { version = "^0.17.1", path = "../gix-glob" } gix-path = { version = "^0.10.13", path = "../gix-path" } gix-features = { version = "^0.39.1", path = "../gix-features" } gix-filter = { version = "^0.16.0", path = "../gix-filter" } io-close = "0.3.7" thiserror = "2.0.0" bstr = { version = "1.3.0", default-features = false } gix-worktree-state-0.16.0/LICENSE-APACHE000064400000000000000000000247461046102023000154160ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. gix-worktree-state-0.16.0/LICENSE-MIT000064400000000000000000000017771046102023000151250ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
gix-worktree-state-0.16.0/src/checkout/chunk.rs

use std::{
    collections::BTreeSet,
    sync::atomic::{AtomicUsize, Ordering},
};

use bstr::{BStr, BString};
use gix_worktree::Stack;

use crate::{checkout, checkout::entry};

mod reduce {
    use crate::checkout;

    pub struct Reduce<'entry> {
        pub aggregate: super::Outcome<'entry>,
    }

    impl<'entry> gix_features::parallel::Reduce for Reduce<'entry> {
        type Input = Result<super::Outcome<'entry>, checkout::Error>;
        type FeedProduce = ();
        type Output = super::Outcome<'entry>;
        type Error = checkout::Error;

        fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
            let item = item?;
            let super::Outcome {
                bytes_written,
                files,
                delayed_symlinks,
                errors,
                collisions,
                delayed_paths_unknown,
                delayed_paths_unprocessed,
            } = item;
            self.aggregate.bytes_written += bytes_written;
            self.aggregate.files += files;
            self.aggregate.delayed_symlinks.extend(delayed_symlinks);
            self.aggregate.errors.extend(errors);
            self.aggregate.collisions.extend(collisions);
            self.aggregate.delayed_paths_unknown.extend(delayed_paths_unknown);
            self.aggregate
                .delayed_paths_unprocessed
                .extend(delayed_paths_unprocessed);
            Ok(())
        }

        fn finalize(self) -> Result<Self::Output, Self::Error> {
            Ok(self.aggregate)
        }
    }
}
pub use reduce::Reduce;

use crate::checkout::entry::DelayedFilteredStream;

#[derive(Default)]
pub struct Outcome<'a> {
    pub collisions: Vec<checkout::Collision>,
    pub errors: Vec<checkout::ErrorRecord>,
    pub delayed_symlinks: Vec<(&'a mut gix_index::Entry, &'a BStr)>,
    // all (immediately) written bytes
    pub bytes_written: u64,
    // the number of files we processed
    pub files: usize,
    /// Relative paths that the process listed as 'delayed' even though we never passed them.
    pub delayed_paths_unknown: Vec<BString>,
    /// All paths that were left unprocessed, because they were never listed by the process even though we passed them.
    pub delayed_paths_unprocessed: Vec<BString>,
}

#[derive(Clone)]
pub struct Context<Find: Clone> {
    pub objects: Find,
    pub path_cache: Stack,
    pub filters: gix_filter::Pipeline,
    pub buf: Vec<u8>,
    pub options: Options,
}

#[derive(Clone, Copy)]
pub struct Options {
    pub fs: gix_fs::Capabilities,
    pub destination_is_initially_empty: bool,
    pub overwrite_existing: bool,
    pub keep_going: bool,
    pub filter_process_delay: gix_filter::driver::apply::Delay,
}

impl From<&checkout::Options> for Options {
    fn from(opts: &checkout::Options) -> Self {
        Options {
            fs: opts.fs,
            destination_is_initially_empty: opts.destination_is_initially_empty,
            overwrite_existing: opts.overwrite_existing,
            keep_going: opts.keep_going,
            filter_process_delay: opts.filter_process_delay,
        }
    }
}

pub fn process<'entry, Find>(
    entries_with_paths: impl Iterator<Item = (&'entry mut gix_index::Entry, &'entry BStr)>,
    files: &AtomicUsize,
    bytes: &AtomicUsize,
    delayed_filter_results: &mut Vec<DelayedFilteredStream<'entry>>,
    ctx: &mut Context<Find>,
) -> Result<Outcome<'entry>, checkout::Error>
where
    Find: gix_object::Find + Clone,
{
    let mut delayed_symlinks = Vec::new();
    let mut collisions = Vec::new();
    let mut errors = Vec::new();
    let mut bytes_written = 0;
    let mut files_in_chunk = 0;

    for (entry, entry_path) in entries_with_paths {
        // TODO: write test for that
        if entry.flags.contains(gix_index::entry::Flags::SKIP_WORKTREE) {
            files.fetch_add(1, Ordering::Relaxed);
            files_in_chunk += 1;
            continue;
        }

        // Symlinks always have to be delayed on windows as they have to point to something that exists on creation.
        // And even if not, there is a distinction between file and directory symlinks, hence we have to check what the target is
        // before creating it.
        // And to keep things sane, we just do the same on non-windows as well, which is similar to what git does and adds some safety
        // around writing through symlinks (even though we handle this).
        // This also means that we prefer content in files over symlinks in case of collisions, which probably is for the better, too.
        if entry.mode == gix_index::entry::Mode::SYMLINK {
            delayed_symlinks.push((entry, entry_path));
            continue;
        }

        match checkout_entry_handle_result(entry, entry_path, &mut errors, &mut collisions, files, bytes, ctx)? {
            entry::Outcome::Written { bytes } => {
                bytes_written += bytes as u64;
                files_in_chunk += 1;
            }
            entry::Outcome::Delayed(delayed) => delayed_filter_results.push(delayed),
        }
    }

    Ok(Outcome {
        bytes_written,
        files: files_in_chunk,
        errors,
        collisions,
        delayed_symlinks,
        delayed_paths_unknown: Vec::new(),
        delayed_paths_unprocessed: Vec::new(),
    })
}

pub fn process_delayed_filter_results<Find>(
    mut delayed_filter_results: Vec<DelayedFilteredStream<'_>>,
    files: &AtomicUsize,
    bytes: &AtomicUsize,
    out: &mut Outcome<'_>,
    ctx: &mut Context<Find>,
) -> Result<(), checkout::Error>
where
    Find: gix_object::Find + Clone,
{
    let Options {
        destination_is_initially_empty,
        overwrite_existing,
        keep_going,
        ..
    } = ctx.options;
    let mut bytes_written = 0;
    let mut delayed_files = 0;

    // Sort by path for fast lookups
    delayed_filter_results.sort_by(|a, b| a.entry_path.cmp(b.entry_path));

    // We process each key and do as the filter process tells us, while collecting data about the overall progress.
    let keys: BTreeSet<_> = delayed_filter_results.iter().map(|d| d.key.clone()).collect();
    let mut unknown_paths = Vec::new();
    let mut rela_path_as_path = Default::default();
    for key in keys {
        loop {
            let rela_paths = ctx.filters.driver_state_mut().list_delayed_paths(&key)?;
            if rela_paths.is_empty() {
                break;
            }

            for rela_path in rela_paths {
                let delayed = match delayed_filter_results.binary_search_by(|d| d.entry_path.cmp(rela_path.as_ref())) {
                    Ok(idx) => &mut delayed_filter_results[idx],
                    Err(_) => {
                        if keep_going {
                            unknown_paths.push(rela_path);
                            continue;
                        } else {
                            return Err(checkout::Error::FilterPathUnknown { rela_path });
                        }
                    }
                };
                let mut read = std::io::BufReader::with_capacity(
                    512 * 1024,
                    ctx.filters.driver_state_mut().fetch_delayed(
                        &key,
                        rela_path.as_ref(),
                        gix_filter::driver::Operation::Smudge,
                    )?,
                );
                let (file, set_executable_after_creation) = match entry::open_file(
                    &std::mem::take(&mut delayed.validated_file_path), // mark it as seen, relevant for `unprocessed_paths`
                    destination_is_initially_empty,
                    overwrite_existing,
                    delayed.needs_executable_bit,
                    delayed.entry.mode,
                ) {
                    Ok(res) => res,
                    Err(err) => {
                        if !is_collision(&err, delayed.entry_path, &mut out.collisions, files) {
                            handle_error(err, delayed.entry_path, files, &mut out.errors, ctx.options.keep_going)?;
                        }
                        std::io::copy(&mut read, &mut std::io::sink())?;
                        continue;
                    }
                };

                let mut write = WriteWithProgress {
                    inner: std::io::BufWriter::with_capacity(512 * 1024, file),
                    progress: bytes,
                };
                bytes_written += std::io::copy(&mut read, &mut write)?;
                entry::finalize_entry(
                    delayed.entry,
                    write.inner.into_inner().map_err(std::io::IntoInnerError::into_error)?,
                    set_executable_after_creation.then(|| {
                        rela_path_as_path = gix_path::from_bstr(delayed.entry_path);
                        rela_path_as_path.as_ref()
                    }),
                )?;
                delayed_files += 1;
                files.fetch_add(1, Ordering::Relaxed);
            }
        }
    }

    let unprocessed_paths = delayed_filter_results
        .into_iter()
        .filter_map(|d| (!d.validated_file_path.as_os_str().is_empty()).then(|| d.entry_path.to_owned()))
        .collect();
    if !keep_going && !unprocessed_paths.is_empty() {
        return Err(checkout::Error::FilterPathsUnprocessed {
            rela_paths: unprocessed_paths,
        });
    }
    out.delayed_paths_unknown = unknown_paths;
    out.delayed_paths_unprocessed = unprocessed_paths;

    out.bytes_written += bytes_written;
    out.files += delayed_files;
    Ok(())
}

pub struct WriteWithProgress<'a, T> {
    pub inner: T,
    pub progress: &'a AtomicUsize,
}

impl<T> std::io::Write for WriteWithProgress<'_, T>
where
    T: std::io::Write,
{
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.progress
            .fetch_add(written as gix_features::progress::Step, Ordering::SeqCst);
        Ok(written)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}

pub fn checkout_entry_handle_result<'entry, Find>(
    entry: &'entry mut gix_index::Entry,
    entry_path: &'entry BStr,
    errors: &mut Vec<checkout::ErrorRecord>,
    collisions: &mut Vec<checkout::Collision>,
    files: &AtomicUsize,
    bytes: &AtomicUsize,
    Context {
        objects,
        path_cache,
        filters,
        buf,
        options,
    }: &mut Context<Find>,
) -> Result<entry::Outcome<'entry>, checkout::Error>
where
    Find: gix_object::Find + Clone,
{
    let res = entry::checkout(
        entry,
        entry_path,
        entry::Context {
            objects,
            path_cache,
            filters,
            buf,
        },
        *options,
    );
    match res {
        Ok(out) => {
            if let Some(num) = out.as_bytes() {
                bytes.fetch_add(num, Ordering::Relaxed);
                files.fetch_add(1, Ordering::Relaxed);
            }
            Ok(out)
        }
        Err(checkout::Error::Io(err)) if is_collision(&err, entry_path, collisions, files) => {
            Ok(entry::Outcome::Written { bytes: 0 })
        }
        Err(err) => handle_error(err, entry_path, files, errors, options.keep_going)
            .map(|()| entry::Outcome::Written { bytes: 0 }),
    }
}

fn handle_error<E>(
    err: E,
    entry_path: &BStr,
    files: &AtomicUsize,
    errors: &mut Vec<checkout::ErrorRecord>,
    keep_going: bool,
) -> Result<(), E>
where
    E: std::error::Error + Send + Sync + 'static,
{
    if keep_going {
        errors.push(checkout::ErrorRecord {
            path: entry_path.into(),
            error: Box::new(err),
        });
        files.fetch_add(1, Ordering::Relaxed);
        Ok(())
    } else {
        Err(err)
    }
}

fn is_collision(
    err: &std::io::Error,
    entry_path: &BStr,
    collisions: &mut Vec<checkout::Collision>,
    files: &AtomicUsize,
) -> bool {
    if !gix_fs::symlink::is_collision_error(err) {
        return false;
    }
    // We are here because a file existed or was blocked by a directory, which shouldn't be possible unless
    // we are on a case-insensitive file system.
    gix_features::trace::error!("{entry_path}: collided ({:?})", err.kind());
    collisions.push(checkout::Collision {
        path: entry_path.into(),
        error_kind: err.kind(),
    });
    files.fetch_add(1, Ordering::Relaxed);
    true
}
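The `WriteWithProgress` type above simply forwards writes to an inner writer while adding the number of bytes written to a shared atomic counter. A minimal, self-contained sketch of the same pattern using only the standard library (the `CountingWriter` name and the `main` driver are illustrative, not part of this crate):

use std::io::Write;
use std::sync::atomic::{AtomicUsize, Ordering};

/// Forwards writes to `inner` and records how many bytes went through.
struct CountingWriter<'a, W> {
    inner: W,
    bytes: &'a AtomicUsize,
}

impl<W: Write> Write for CountingWriter<'_, W> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.bytes.fetch_add(written, Ordering::Relaxed);
        Ok(written)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}

fn main() -> std::io::Result<()> {
    let bytes = AtomicUsize::new(0);
    let mut out = CountingWriter { inner: Vec::new(), bytes: &bytes };
    out.write_all(b"hello worktree")?;
    assert_eq!(bytes.load(Ordering::Relaxed), 14);
    Ok(())
}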
gix-worktree-state-0.16.0/src/checkout/entry.rs

use std::borrow::Cow;
use std::{
    fs::OpenOptions,
    io::Write,
    path::{Path, PathBuf},
};

use bstr::BStr;
use gix_filter::{driver::apply::MaybeDelayed, pipeline::convert::ToWorktreeOutcome};
use gix_index::{entry::Stat, Entry};
use gix_object::FindExt;
use gix_worktree::Stack;
use io_close::Close;

pub struct Context<'a, Find> {
    pub objects: &'a mut Find,
    pub path_cache: &'a mut Stack,
    pub filters: &'a mut gix_filter::Pipeline,
    pub buf: &'a mut Vec<u8>,
}

/// A delayed result of a long-running filter process, which is made available as a stream.
pub struct DelayedFilteredStream<'a> {
    /// The key identifying the driver program.
    pub key: gix_filter::driver::Key,
    /// If the file is going to be an executable.
    pub needs_executable_bit: bool,
    /// The validated path on disk at which the file should be placed.
    pub validated_file_path: PathBuf,
    /// The entry to adjust with the file we will write.
    pub entry: &'a mut gix_index::Entry,
    /// The relative path at which the entry resides (for use when querying the delayed entry).
    pub entry_path: &'a BStr,
}

pub enum Outcome<'a> {
    /// The file was written.
    Written {
        /// The number of bytes written.
        bytes: usize,
    },
    /// The file will be ready later.
    Delayed(DelayedFilteredStream<'a>),
}

impl Outcome<'_> {
    /// Return ourselves as (in-memory) bytes if possible.
    pub fn as_bytes(&self) -> Option<usize> {
        match self {
            Outcome::Written { bytes } => Some(*bytes),
            Outcome::Delayed { .. } => None,
        }
    }
}

#[cfg_attr(not(unix), allow(unused_variables))]
pub fn checkout<'entry, Find>(
    entry: &'entry mut Entry,
    entry_path: &'entry BStr,
    Context {
        objects,
        filters,
        path_cache,
        buf,
    }: Context<'_, Find>,
    crate::checkout::chunk::Options {
        fs:
            gix_fs::Capabilities {
                symlink,
                executable_bit,
                ..
            },
        destination_is_initially_empty,
        overwrite_existing,
        filter_process_delay,
        ..
    }: crate::checkout::chunk::Options,
) -> Result<Outcome<'entry>, crate::checkout::Error>
where
    Find: gix_object::Find,
{
    let dest_relative = gix_path::try_from_bstr(entry_path).map_err(|_| crate::checkout::Error::IllformedUtf8 {
        path: entry_path.to_owned(),
    })?;
    let path_cache = path_cache.at_path(dest_relative, Some(entry.mode), &*objects)?;
    let dest = path_cache.path();

    let object_size = match entry.mode {
        gix_index::entry::Mode::FILE | gix_index::entry::Mode::FILE_EXECUTABLE => {
            let obj = (*objects)
                .find_blob(&entry.id, buf)
                .map_err(|err| crate::checkout::Error::Find {
                    err,
                    path: dest.to_path_buf(),
                })?;

            let filtered = filters.convert_to_worktree(
                obj.data,
                entry_path,
                &mut |_, attrs| {
                    path_cache.matching_attributes(attrs);
                },
                filter_process_delay,
            )?;
            let (num_bytes, file, set_executable_after_creation) = match filtered {
                ToWorktreeOutcome::Unchanged(buf) | ToWorktreeOutcome::Buffer(buf) => {
                    let (mut file, flag) = open_file(
                        dest,
                        destination_is_initially_empty,
                        overwrite_existing,
                        executable_bit,
                        entry.mode,
                    )?;
                    file.write_all(buf)?;
                    (buf.len(), file, flag)
                }
                ToWorktreeOutcome::Process(MaybeDelayed::Immediate(mut filtered)) => {
                    let (mut file, flag) = open_file(
                        dest,
                        destination_is_initially_empty,
                        overwrite_existing,
                        executable_bit,
                        entry.mode,
                    )?;
                    let num_bytes = std::io::copy(&mut filtered, &mut file)? as usize;
                    (num_bytes, file, flag)
                }
                ToWorktreeOutcome::Process(MaybeDelayed::Delayed(key)) => {
                    return Ok(Outcome::Delayed(DelayedFilteredStream {
                        key,
                        needs_executable_bit: false,
                        validated_file_path: dest.to_owned(),
                        entry,
                        entry_path,
                    }))
                }
            };

            // For possibly existing, overwritten files, we must change the file mode explicitly.
            finalize_entry(entry, file, set_executable_after_creation.then_some(dest))?;
            num_bytes
        }
        gix_index::entry::Mode::SYMLINK => {
            let obj = (*objects)
                .find_blob(&entry.id, buf)
                .map_err(|err| crate::checkout::Error::Find {
                    err,
                    path: dest.to_path_buf(),
                })?;
            if symlink {
                #[cfg_attr(not(windows), allow(unused_mut))]
                let mut symlink_destination = Cow::Borrowed(
                    gix_path::try_from_byte_slice(obj.data)
                        .map_err(|_| crate::checkout::Error::IllformedUtf8 { path: obj.data.into() })?,
                );

                #[cfg(windows)]
                {
                    symlink_destination = gix_path::to_native_path_on_windows(gix_path::into_bstr(symlink_destination))
                }

                try_op_or_unlink(dest, overwrite_existing, |p| {
                    gix_fs::symlink::create(symlink_destination.as_ref(), p)
                })?;
            } else {
                let mut file = try_op_or_unlink(dest, overwrite_existing, |p| {
                    open_options(p, destination_is_initially_empty, overwrite_existing).open(dest)
                })?;
                file.write_all(obj.data)?;
                file.close()?;
            }

            entry.stat = Stat::from_fs(&gix_index::fs::Metadata::from_path_no_follow(dest)?)?;
            obj.data.len()
        }
        gix_index::entry::Mode::DIR => {
            gix_features::trace::warn!(
                "Skipped sparse directory at '{entry_path}' ({id}) as it cannot yet be handled",
                id = entry.id
            );
            0
        }
        gix_index::entry::Mode::COMMIT => {
            gix_features::trace::warn!(
                "Skipped submodule at '{entry_path}' ({id}) as it cannot yet be handled",
                id = entry.id
            );
            0
        }
        _ => unreachable!(),
    };
    Ok(Outcome::Written { bytes: object_size })
}

/// Note that this works only because we assume not to race ourselves when symlinks are involved, and we do this by
/// delaying symlink creation to the end and always performing it sequentially.
/// It's still possible to fall for a race if other actors create symlinks in our path, but that's nothing to defend against.
fn try_op_or_unlink<T>(
    path: &Path,
    overwrite_existing: bool,
    op: impl Fn(&Path) -> std::io::Result<T>,
) -> std::io::Result<T> {
    if overwrite_existing {
        match op(path) {
            Ok(res) => Ok(res),
            Err(err) if gix_fs::symlink::is_collision_error(&err) => {
                try_unlink_path_recursively(path, &std::fs::symlink_metadata(path)?)?;
                op(path)
            }
            Err(err) => Err(err),
        }
    } else {
        op(path)
    }
}

fn try_unlink_path_recursively(path: &Path, path_meta: &std::fs::Metadata) -> std::io::Result<()> {
    if path_meta.is_dir() {
        std::fs::remove_dir_all(path)
    } else if path_meta.file_type().is_symlink() {
        gix_fs::symlink::remove(path)
    } else {
        std::fs::remove_file(path)
    }
}

#[cfg(not(debug_assertions))]
fn debug_assert_dest_is_no_symlink(_path: &Path) {}

/// This is a debug assertion as we expect the machinery calling this to prevent this possibility in the first place.
#[cfg(debug_assertions)]
fn debug_assert_dest_is_no_symlink(path: &Path) {
    if let Ok(meta) = path.metadata() {
        debug_assert!(
            !meta.file_type().is_symlink(),
            "BUG: should not ever allow to overwrite/write-into the target of a symbolic link: {}",
            path.display()
        );
    }
}

fn open_options(path: &Path, destination_is_initially_empty: bool, overwrite_existing: bool) -> OpenOptions {
    if overwrite_existing || !destination_is_initially_empty {
        debug_assert_dest_is_no_symlink(path);
    }
    let mut options = gix_features::fs::open_options_no_follow();
    options
        .create_new(destination_is_initially_empty && !overwrite_existing)
        .create(!destination_is_initially_empty || overwrite_existing)
        .write(true)
        .truncate(true);
    options
}

pub(crate) fn open_file(
    path: &Path,
    destination_is_initially_empty: bool,
    overwrite_existing: bool,
    fs_supports_executable_bit: bool,
    entry_mode: gix_index::entry::Mode,
) -> std::io::Result<(std::fs::File, bool)> {
    #[cfg_attr(windows, allow(unused_mut))]
    let mut options = open_options(path, destination_is_initially_empty, overwrite_existing);
    let needs_executable_bit = fs_supports_executable_bit && entry_mode == gix_index::entry::Mode::FILE_EXECUTABLE;
    #[cfg(unix)]
    let set_executable_after_creation = if needs_executable_bit && destination_is_initially_empty {
        use std::os::unix::fs::OpenOptionsExt;
        // Note that this only works if the file was newly created, but won't if it already
        // exists, possibly without the executable bit set. Thus we do this only if the file is new.
        options.mode(0o777);
        false
    } else {
        needs_executable_bit
    };
    // not supported on windows
    #[cfg(windows)]
    let set_executable_after_creation = needs_executable_bit;
    try_op_or_unlink(path, overwrite_existing, |p| options.open(p)).map(|f| (f, set_executable_after_creation))
}

/// Close `file` and store its stats in `entry`, possibly setting `file` executable depending on `set_executable_after_creation`.
#[cfg_attr(windows, allow(unused_variables))]
pub(crate) fn finalize_entry(
    entry: &mut gix_index::Entry,
    file: std::fs::File,
    set_executable_after_creation: Option<&Path>,
) -> Result<(), crate::checkout::Error> {
    // For possibly existing, overwritten files, we must change the file mode explicitly.
    #[cfg(unix)]
    if let Some(path) = set_executable_after_creation {
        use std::os::unix::fs::PermissionsExt;
        let mut perm = std::fs::symlink_metadata(path)?.permissions();
        perm.set_mode(0o777);
        std::fs::set_permissions(path, perm)?;
    }
    // NOTE: we don't call `file.sync_all()` here knowing that some filesystems don't handle this well.
    // revisit this once there is a bug to fix.
    entry.stat = Stat::from_fs(&gix_index::fs::Metadata::from_file(&file)?)?;
    file.close()?;
    Ok(())
}
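The `open_options` helper above encodes the collision-detection strategy: when the destination starts out empty and overwriting is not allowed, `create_new` makes the OS report an already-existing file as an error, which the checkout machinery records as a collision; in all other cases the file is created or truncated in place. A small standard-library sketch of that decision, assuming a plain `OpenOptions::new()` is good enough for illustration (the crate itself uses `gix_features::fs::open_options_no_follow()`), with illustrative names throughout:

use std::fs::OpenOptions;

/// Mirrors the decision in `open_options`: exclusive creation only when the
/// destination started out empty and we are not asked to overwrite.
fn open(
    path: &std::path::Path,
    destination_is_initially_empty: bool,
    overwrite_existing: bool,
) -> std::io::Result<std::fs::File> {
    let mut options = OpenOptions::new();
    options
        .create_new(destination_is_initially_empty && !overwrite_existing)
        .create(!destination_is_initially_empty || overwrite_existing)
        .write(true)
        .truncate(true);
    options.open(path)
}

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("gix-open-options-demo");
    let _ = std::fs::remove_file(&path);
    // Fresh checkout into an empty directory: the second attempt fails with `AlreadyExists`,
    // which the checkout machinery treats as a collision.
    let _first = open(&path, true, false)?;
    assert!(open(&path, true, false).is_err());
    // Forced checkout: overwriting the existing file is fine.
    let _forced = open(&path, true, true)?;
    std::fs::remove_file(&path)?;
    Ok(())
}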
gix-worktree-state-0.16.0/src/checkout/function.rs

use std::sync::atomic::AtomicBool;

use gix_features::{interrupt, parallel::in_parallel_with_finalize};
use gix_worktree::{stack, Stack};

use crate::checkout::chunk;

/// Checkout the entire `index` into `dir`, and resolve objects found in index entries with `objects` to write
/// their content to their respective path in `dir`.
/// Use `files` to count each fully checked-out file and `bytes` to count the number of bytes written.
/// If `should_interrupt` is `true`, the operation will abort.
/// `options` provides a lot of context on how to perform the operation.
///
/// ### Handling the return value
///
/// Note that an interruption still produces an `Ok(…)` value, so the caller should check `should_interrupt`
/// to tell whether the checkout actually ran to completion.
#[allow(clippy::too_many_arguments)]
pub fn checkout<Find>(
    index: &mut gix_index::State,
    dir: impl Into<std::path::PathBuf>,
    objects: Find,
    files: &dyn gix_features::progress::Count,
    bytes: &dyn gix_features::progress::Count,
    should_interrupt: &AtomicBool,
    options: crate::checkout::Options,
) -> Result<crate::checkout::Outcome, crate::checkout::Error>
where
    Find: gix_object::Find + Send + Clone,
{
    let paths = index.take_path_backing();
    let res = checkout_inner(index, &paths, dir, objects, files, bytes, should_interrupt, options);
    index.return_path_backing(paths);
    res
}

#[allow(clippy::too_many_arguments)]
fn checkout_inner<Find>(
    index: &mut gix_index::State,
    paths: &gix_index::PathStorage,
    dir: impl Into<std::path::PathBuf>,
    objects: Find,
    files: &dyn gix_features::progress::Count,
    bytes: &dyn gix_features::progress::Count,
    should_interrupt: &AtomicBool,
    mut options: crate::checkout::Options,
) -> Result<crate::checkout::Outcome, crate::checkout::Error>
where
    Find: gix_object::Find + Send + Clone,
{
    let num_files = files.counter();
    let num_bytes = bytes.counter();
    let dir = dir.into();
    let (chunk_size, thread_limit, num_threads) = gix_features::parallel::optimize_chunk_size_and_thread_limit(
        100,
        index.entries().len().into(),
        options.thread_limit,
        None,
    );

    let mut ctx = chunk::Context {
        buf: Vec::new(),
        options: (&options).into(),
        path_cache: Stack::from_state_and_ignore_case(
            dir,
            options.fs.ignore_case,
            stack::State::for_checkout(
                options.overwrite_existing,
                options.validate,
                std::mem::take(&mut options.attributes),
            ),
            index,
            paths,
        ),
        filters: options.filters,
        objects,
    };

    let chunk::Outcome {
        mut collisions,
        mut errors,
        mut bytes_written,
        files: files_updated,
        delayed_symlinks,
        delayed_paths_unknown,
        delayed_paths_unprocessed,
    } = if num_threads == 1 {
        let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
        let mut delayed_filter_results = Vec::new();
        let mut out = chunk::process(
            entries_with_paths,
            &num_files,
            &num_bytes,
            &mut delayed_filter_results,
            &mut ctx,
        )?;
        chunk::process_delayed_filter_results(delayed_filter_results, &num_files, &num_bytes, &mut out, &mut ctx)?;
        out
    } else {
        let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
        in_parallel_with_finalize(
            gix_features::iter::Chunks {
                inner: entries_with_paths,
                size: chunk_size,
            },
            thread_limit,
            {
                let ctx = ctx.clone();
                move |_| (Vec::new(), ctx)
            },
            |chunk, (delayed_filter_results, ctx)| {
                chunk::process(chunk.into_iter(), &num_files, &num_bytes, delayed_filter_results, ctx)
            },
            |(delayed_filter_results, mut ctx)| {
                let mut out = chunk::Outcome::default();
                chunk::process_delayed_filter_results(
                    delayed_filter_results,
                    &num_files,
                    &num_bytes,
                    &mut out,
                    &mut ctx,
                )?;
                Ok(out)
            },
            chunk::Reduce {
                aggregate: Default::default(),
            },
        )?
    };

    for (entry, entry_path) in delayed_symlinks {
        bytes_written += chunk::checkout_entry_handle_result(
            entry,
            entry_path,
            &mut errors,
            &mut collisions,
            &num_files,
            &num_bytes,
            &mut ctx,
        )?
        .as_bytes()
        .expect("only symlinks are delayed here, they are never filtered (or delayed again)") as u64;
    }

    Ok(crate::checkout::Outcome {
        files_updated,
        collisions,
        errors,
        bytes_written,
        delayed_paths_unknown,
        delayed_paths_unprocessed,
    })
}
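For orientation, the following is roughly how a caller might drive the `checkout()` function defined above. It is a sketch only: the empty index, the no-op object store, and the discarded progress keep it self-contained, and the exact shapes of `gix_index::State::new`, `gix_features::progress::Discard`, and the `gix_object::Find` trait are assumptions that may differ between releases; a real caller would load the repository's index and object database instead.

use std::sync::atomic::AtomicBool;

// A stand-in object database that never finds anything; real callers would use e.g. gix-odb.
#[derive(Clone)]
struct NoObjects;

impl gix_object::Find for NoObjects {
    fn try_find<'a>(
        &self,
        _id: &gix_hash::oid,
        _buffer: &'a mut Vec<u8>,
    ) -> Result<Option<gix_object::Data<'a>>, gix_object::find::Error> {
        Ok(None)
    }
}

fn main() -> Result<(), gix_worktree_state::checkout::Error> {
    // An empty in-memory index; a real caller would read the repository's index file instead.
    let mut index = gix_index::State::new(gix_hash::Kind::Sha1);
    let opts = gix_worktree_state::checkout::Options {
        destination_is_initially_empty: true, // cloning into a fresh directory
        keep_going: true,                     // collect errors instead of aborting on the first one
        ..Default::default()
    };
    let should_interrupt = AtomicBool::new(false);
    let outcome = gix_worktree_state::checkout(
        &mut index,
        std::env::temp_dir().join("checkout-demo"),
        NoObjects,
        &gix_features::progress::Discard,
        &gix_features::progress::Discard,
        &should_interrupt,
        opts,
    )?;
    assert_eq!(outcome.files_updated, 0, "an empty index checks out nothing");
    Ok(())
}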
gix-worktree-state-0.16.0/src/checkout/mod.rs

use bstr::BString;
use gix_index::entry::stat;

/// Information about a path that failed to check out as something else was already present.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Collision {
    /// The path that collided with something already present on disk.
    pub path: BString,
    /// The io error we encountered when checking out `path`.
    pub error_kind: std::io::ErrorKind,
}

/// A path that encountered an IO error.
#[derive(Debug)]
pub struct ErrorRecord {
    /// The path that encountered the error.
    pub path: BString,
    /// The error itself.
    pub error: Box<dyn std::error::Error + Send + Sync + 'static>,
}

/// The outcome of checking out an entire index.
#[derive(Debug, Default)]
pub struct Outcome {
    /// The number of files updated or created.
    pub files_updated: usize,
    /// The number of bytes written to disk.
    pub bytes_written: u64,
    /// The encountered collisions, which can happen on a case-insensitive filesystem.
    pub collisions: Vec<Collision>,
    /// Other errors that happened during checkout.
    pub errors: Vec<ErrorRecord>,
    /// Relative paths that the process listed as 'delayed' even though we never passed them.
    pub delayed_paths_unknown: Vec<BString>,
    /// All paths that were left unprocessed, because they were never listed by the process even though we passed them.
    pub delayed_paths_unprocessed: Vec<BString>,
}

/// Options to further configure the checkout operation.
#[derive(Clone, Default)]
pub struct Options {
    /// Capabilities of the file system.
    pub fs: gix_fs::Capabilities,
    /// Options to configure how to validate path components.
    pub validate: gix_worktree::validate::path::component::Options,
    /// If set, don't use more than this amount of threads.
    /// Otherwise, usually use as many threads as there are logical cores.
    /// A value of 0 is interpreted as no limit.
    pub thread_limit: Option<usize>,
    /// If true, we assume no files exist in the target directory and that we want exclusive access to it.
    /// This should be enabled when cloning to avoid checks for freshness of files. This also enables
    /// detection of collisions based on whether exclusive file creation succeeds or fails.
    pub destination_is_initially_empty: bool,
    /// If true (default false), worktree entries on disk will be overwritten with content from the index
    /// even if they appear to be changed. When creating directories that clash with existing worktree entries,
    /// the existing entries will be deleted first.
    /// This is similar in behaviour to `git checkout --force`.
    pub overwrite_existing: bool,
    /// If true (default false), try to check out as much as possible and don't abort on the first error which isn't
    /// due to a conflict.
    /// The checkout operation will then never fail; instead it counts the encountered errors along with their paths.
    pub keep_going: bool,
    /// Control how stat comparisons are made when checking if a file is fresh.
    pub stat_options: stat::Options,
    /// A stack of attributes to use with the filesystem cache to use as driver for filters.
    pub attributes: gix_worktree::stack::state::Attributes,
    /// The filter pipeline to use for applying mandatory filters before writing to the worktree.
    pub filters: gix_filter::Pipeline,
    /// Control how long-running processes may use the 'delay' capability.
    pub filter_process_delay: gix_filter::driver::apply::Delay,
}

/// The error returned by the [checkout()][crate::checkout()] function.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
    #[error("Could not convert path to UTF8: {}", .path)]
    IllformedUtf8 { path: BString },
    #[error("The clock was off when reading file related metadata after updating a file on disk")]
    Time(#[from] std::time::SystemTimeError),
    #[error("IO error while writing blob or reading file metadata or changing filetype")]
    Io(#[from] std::io::Error),
    #[error("object for checkout at {} could not be retrieved from object database", .path.display())]
    Find {
        #[source]
        err: gix_object::find::existing_object::Error,
        path: std::path::PathBuf,
    },
    #[error(transparent)]
    Filter(#[from] gix_filter::pipeline::convert::to_worktree::Error),
    #[error(transparent)]
    FilterListDelayed(#[from] gix_filter::driver::delayed::list::Error),
    #[error(transparent)]
    FilterFetchDelayed(#[from] gix_filter::driver::delayed::fetch::Error),
    #[error("The entry at path '{rela_path}' was listed as delayed by the filter process, but we never passed it")]
    FilterPathUnknown { rela_path: BString },
    #[error("The following paths were delayed and apparently forgotten to be processed by the filter driver: ")]
    FilterPathsUnprocessed { rela_paths: Vec<BString> },
}

mod chunk;
mod entry;
pub(crate) mod function;

gix-worktree-state-0.16.0/src/lib.rs

//! A crate to help setting the worktree to a particular state.
#![deny(missing_docs, rust_2018_idioms, unsafe_code)]

///
pub mod checkout;
pub use checkout::function::checkout;
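Because `keep_going` converts hard failures into per-path records, callers usually inspect the returned `Outcome` afterwards. A small illustrative helper (not part of the crate) that summarizes it, using only fields defined in `checkout/mod.rs` above:

/// Prints a short report for a finished checkout and returns `true` if everything went smoothly.
fn report(outcome: &gix_worktree_state::checkout::Outcome) -> bool {
    println!(
        "checked out {} file(s), wrote {} byte(s)",
        outcome.files_updated, outcome.bytes_written
    );
    for collision in &outcome.collisions {
        eprintln!("collision at '{}': {:?}", collision.path, collision.error_kind);
    }
    for error in &outcome.errors {
        eprintln!("failed to write '{}': {}", error.path, error.error);
    }
    outcome.collisions.is_empty() && outcome.errors.is_empty()
}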