pax_global_header00006660000000000000000000000064150431160140014505gustar00rootroot0000000000000052 comment=cf796758bea4bac29a4b1f71e805b9dc606eb50c coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/000077500000000000000000000000001504311601400207445ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.busybox-config000066400000000000000000000001311504311601400236760ustar00rootroot00000000000000CONFIG_FEATURE_FANCY_HEAD=y CONFIG_UNICODE_SUPPORT=y CONFIG_DESKTOP=y CONFIG_LONG_OPTS=y coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.cargo/000077500000000000000000000000001504311601400221155ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.cargo/config.toml000066400000000000000000000011551504311601400242610ustar00rootroot00000000000000# Note: keep in mind that this file is completely ignored in several use-cases # like e.g. out-of-tree builds ( https://github.com/rust-lang/cargo/issues/2930 ). # For this reason this file should be avoided as much as possible when there are alternatives. 
[target.x86_64-unknown-redox] linker = "x86_64-unknown-redox-gcc" [env] # See feat_external_libstdbuf in src/uu/stdbuf/Cargo.toml LIBSTDBUF_DIR = "/usr/local/libexec/coreutils" # libstdbuf must be a shared library, so musl libc can't be linked statically # https://github.com/rust-lang/rust/issues/82193 [build] rustflags = ["-C", "target-feature=-crt-static"] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.clippy.toml000066400000000000000000000002261504311601400232170ustar00rootroot00000000000000msrv = "1.85.0" avoid-breaking-exported-api = false check-private-items = true cognitive-complexity-threshold = 24 missing-docs-in-crate-items = true coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.codecov.yml000066400000000000000000000003121504311601400231630ustar00rootroot00000000000000comment: false coverage: status: project: default: informational: true changes: default: informational: true patch: default: informational: true coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.codespell.rc000066400000000000000000000001701504311601400233200ustar00rootroot00000000000000[codespell] ignore-words-list = crate skip = ./.git/**,./.vscode/cspell.dictionaries/**,./target/**,./tests/fixtures/** coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.config/000077500000000000000000000000001504311601400222675ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.config/nextest.toml000066400000000000000000000004051504311601400246550ustar00rootroot00000000000000[profile.ci] retries = 2 status-level = "all" final-status-level = "skip" failure-output = "immediate-final" fail-fast = false [profile.coverage] retries = 0 status-level = "all" final-status-level = "skip" failure-output = "immediate-final" fail-fast = false coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.editorconfig000066400000000000000000000040511504311601400234210ustar00rootroot00000000000000# EditorConfig (is awesome!; ref: http://EditorConfig.org; v2022.02.11 [rivy]) # 
spell-checker:ignore akefile shellcheck vcproj # * top-most EditorConfig file root = true [*] # default ~ utf-8, unix-style newlines with a newline ending every file, 4 space indentation charset = utf-8 end_of_line = lf indent_size = 4 indent_style = space insert_final_newline = true max_line_length = 100 trim_trailing_whitespace = true [{[Mm]akefile{,.*},*.{mak,mk,[Mm][Aa][Kk],[Mm][Kk]},[Gg][Nn][Uu]makefile}] # makefiles ~ TAB-style indentation indent_style = tab [*.bash] # `bash` shell scripts indent_size = 4 indent_style = space # * ref: # shell_variant = bash ## allow `shellcheck` to decide via script hash-bang/sha-bang line switch_case_indent = true [*.{bat,cmd,[Bb][Aa][Tt],[Cc][Mm][Dd]}] # BAT/CMD ~ DOS/Win requires BAT/CMD files to have CRLF EOLNs end_of_line = crlf [*.{cjs,cjx,cts,ctx,js,jsx,mjs,mts,mtx,ts,tsx,json,jsonc}] # js/ts/json ~ Prettier/XO-style == TAB indention + SPACE alignment indent_size = 2 indent_style = tab [*.go] # go ~ TAB-style indentation (SPACE-style alignment); ref: @@ indent_style = tab [*.{markdown,md,mkd,[Mm][Dd],[Mm][Kk][Dd],[Mm][Dd][Oo][Ww][Nn],[Mm][Kk][Dd][Oo][Ww][Nn],[Mm][Aa][Rr][Kk][Dd][Oo][Ww][Nn]}] # markdown indent_size = 2 indent_style = space [*.sh] # POSIX shell scripts indent_size = 4 indent_style = space # * ref: # shell_variant = posix ## allow `shellcheck` to decide via script hash-bang/sha-bang line switch_case_indent = true [*.{sln,vc{,x}proj{,.*},[Ss][Ln][Nn],[Vv][Cc]{,[Xx]}[Pp][Rr][Oo][Jj]{,.*}}] # MSVC sln/vcproj/vcxproj files, when used, will persistently revert to CRLF EOLNs and eat final EOLs end_of_line = crlf insert_final_newline = false [*.{yaml,yml,[Yy][Mm][Ll],[Yy][Aa][Mm][Ll]}] # YAML indent_size = 2 indent_style = space coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.envrc000066400000000000000000000003741504311601400220660ustar00rootroot00000000000000# spell-checker:ignore direnv if ! has nix_direnv_version || ! 
nix_direnv_version 3.0.6; then source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM=" fi use flake coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/000077500000000000000000000000001504311601400223045ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/FUNDING.yml000066400000000000000000000000171504311601400241170ustar00rootroot00000000000000github: uutils coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/fluent_linter_config.yml000066400000000000000000000010241504311601400272230ustar00rootroot00000000000000--- ID01: enabled: true exclusions: messages: [] files: [] ID02: enabled: true min_length: 7 VC: disabled: true # Disable: # TE01: single quote instead of apostrophe for genitive (foo's) TE01: enabled: false # TE03: single quotes ('foo') TE03: enabled: false # TE04: Double-quoted strings should use Unicode " instead of "foo". TE04: enabled: false # Disable: TE05: 3 dots for ellipsis ("...") TE05: enabled: false # Should be fixed VC01: disabled: true ID03: enabled: true coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/stale.yml000066400000000000000000000013651504311601400241440ustar00rootroot00000000000000# spell-checker:ignore (labels) wontfix # Number of days of inactivity before an issue/PR becomes stale daysUntilStale: 1095 # Number of days of inactivity before a stale issue/PR is closed daysUntilClose: 1095 # Issues with these labels will never be considered stale exemptLabels: - pinned - security - "Good first bug" # Label to use when marking an issue as stale staleLabel: wontfix # Comment to post when marking an issue as stale. Set to `false` to disable markComment: > This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions. # Comment to post when closing a stale issue. 
Set to `false` to disable closeComment: false coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/000077500000000000000000000000001504311601400243415ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/CICD.yml000066400000000000000000001463321504311601400255770ustar00rootroot00000000000000name: CICD # spell-checker:ignore (abbrev/names) CACHEDIR CICD CodeCOV MacOS MinGW MSVC musl taiki # spell-checker:ignore (env/flags) Awarnings Ccodegen Coverflow Cpanic Dwarnings RUSTDOCFLAGS RUSTFLAGS Zpanic CARGOFLAGS # spell-checker:ignore (jargon) SHAs deps dequote softprops subshell toolchain fuzzers dedupe devel profdata # spell-checker:ignore (people) Peltoche rivy dtolnay Anson dawidd # spell-checker:ignore (shell/tools) binutils choco clippy dmake dpkg esac fakeroot fdesc fdescfs gmake grcov halium lcov libclang libfuse libssl limactl mkdir nextest nocross pacman popd printf pushd redoxer rsync rustc rustfmt rustup shopt sccache utmpdump xargs # spell-checker:ignore (misc) aarch alnum armhf bindir busytest coreutils defconfig DESTDIR gecos getenforce gnueabihf issuecomment maint manpages msys multisize noconfirm nofeatures nullglob onexitbegin onexitend pell runtest Swatinem tempfile testsuite toybox uutils env: PROJECT_NAME: coreutils PROJECT_DESC: "Core universal (cross-platform) utilities" PROJECT_AUTH: "uutils" RUST_MIN_SRV: "1.85.0" # * style job configuration STYLE_FAIL_ON_FAULT: true ## (bool) fail the build if a style job contains a fault (error or warning); may be overridden on a per-job basis on: pull_request: push: tags: - '*' branches: - '*' permissions: contents: read # to fetch code (actions/checkout) # End the current execution if there is a new changeset in the PR. 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: cargo-deny: name: Style/cargo-deny runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: EmbarkStudios/cargo-deny-action@v2 style_deps: ## ToDO: [2021-11-10; rivy] 'Style/deps' needs more informative output and better integration of results into the GHA dashboard name: Style/deps runs-on: ${{ matrix.job.os }} strategy: fail-fast: false matrix: job: # note: `cargo-udeps` panics when processing stdbuf/libstdbuf ("uu_stdbuf_libstdbuf"); either b/c of the 'cpp' crate or 'libstdbuf' itself # ... b/c of the panic, a more limited feature set is tested (though only excluding `stdbuf`) - { os: ubuntu-latest , features: "feat_Tier1,feat_require_unix,feat_require_unix_utmpx" } - { os: macos-latest , features: "feat_Tier1,feat_require_unix,feat_require_unix_utmpx" } - { os: windows-latest , features: feat_os_windows } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@nightly ## note: requires 'nightly' toolchain b/c `cargo-udeps` uses the `rustc` '-Z save-analysis' option ## * ... 
ref: - uses: taiki-e/install-action@cargo-udeps - uses: Swatinem/rust-cache@v2 - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # failure mode unset FAIL_ON_FAULT ; case '${{ env.STYLE_FAIL_ON_FAULT }}' in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; outputs FAIL_ON_FAULT FAULT_TYPE # target-specific options # * CARGO_FEATURES_OPTION CARGO_FEATURES_OPTION='' ; if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features "${{ matrix.job.features }}"' ; fi outputs CARGO_FEATURES_OPTION - name: Detect unused dependencies shell: bash run: | ## Detect unused dependencies unset fault fault_type="${{ steps.vars.outputs.FAULT_TYPE }}" fault_prefix=$(echo "$fault_type" | tr '[:lower:]' '[:upper:]') # cargo +nightly udeps ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} --all-targets &> udeps.log || cat udeps.log grep --ignore-case "all deps seem to have been used" udeps.log || { printf "%s\n" "::${fault_type} ::${fault_prefix}: \`cargo udeps\`: style violation (unused dependency found)" ; fault=true ; } if [ -n "${{ steps.vars.outputs.FAIL_ON_FAULT }}" ] && [ -n "$fault" ]; then exit 1 ; fi doc_warnings: name: Documentation/warnings runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } # for now, don't build it on mac & windows because the doc is only published from linux # + it needs a bunch of duplication for build # and I don't want to add a doc step in the regular build to avoid long builds # - { os: macos-latest , features: feat_os_macos } # - { os: windows-latest , features: feat_os_windows } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: 
dtolnay/rust-toolchain@master with: toolchain: stable components: clippy - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Install/setup prerequisites shell: bash run: | sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # failure mode unset FAIL_ON_FAULT ; case '${{ env.STYLE_FAIL_ON_FAULT }}' in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; outputs FAIL_ON_FAULT FAULT_TYPE # target-specific options # * CARGO_FEATURES_OPTION CARGO_FEATURES_OPTION='--all-features' ; if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features ${{ matrix.job.features }}' ; fi outputs CARGO_FEATURES_OPTION # * determine sub-crate utility list UTILITY_LIST="$(./util/show-utils.sh ${CARGO_FEATURES_OPTION})" echo UTILITY_LIST=${UTILITY_LIST} CARGO_UTILITY_LIST_OPTIONS="$(for u in ${UTILITY_LIST}; do echo -n "-puu_${u} "; done;)" outputs CARGO_UTILITY_LIST_OPTIONS - name: "`cargo doc` with warnings" shell: bash run: | RUSTDOCFLAGS="-Dwarnings" cargo doc ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} --no-deps --workspace --document-private-items - uses: DavidAnson/markdownlint-cli2-action@v20 with: fix: "true" globs: | *.md docs/src/*.md src/uu/*/*.md min_version: name: MinRustV # Minimum supported rust version (aka, MinSRV or MSRV) runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_MIN_SRV }} components: rustfmt - uses: taiki-e/install-action@nextest - uses: 
Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # target-specific options # * CARGO_FEATURES_OPTION unset CARGO_FEATURES_OPTION if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features "${{ matrix.job.features }}"' ; fi outputs CARGO_FEATURES_OPTION - name: Confirm MinSRV compatible '*/Cargo.lock' shell: bash run: | ## Confirm MinSRV compatible '*/Cargo.lock' # * '*/Cargo.lock' is required to be in a format that `cargo` of MinSRV can interpret (eg, v1-format for MinSRV < v1.38) for dir in "." "fuzz"; do ( cd "$dir" && cargo fetch --locked --quiet ) || { echo "::error file=$dir/Cargo.lock::Incompatible (or out-of-date) '$dir/Cargo.lock' file; update using \`cd '$dir' && cargo +${{ env.RUST_MIN_SRV }} update\`" ; exit 1 ; } done - name: Install/setup prerequisites shell: bash run: | # Install a package for one of the tests sudo apt-get -y update ; sudo apt-get -y install attr - name: Info shell: bash run: | ## Info # environment echo "## environment" echo "CI='${CI}'" # tooling info display echo "## tooling" which gcc >/dev/null 2>&1 && (gcc --version | head -1) || true rustup -V 2>/dev/null rustup show active-toolchain cargo -V rustc -V cargo tree -V # dependencies echo "## dependency list" ## * using the 'stable' toolchain is necessary to avoid "unexpected '--filter-platform'" errors RUSTUP_TOOLCHAIN=stable cargo fetch --locked --quiet RUSTUP_TOOLCHAIN=stable cargo tree --no-dedupe --locked -e=no-dev --prefix=none ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} | grep -vE "$PWD" | sort --unique - name: Test run: cargo nextest run --hide-progress-bar --profile ci ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} -p uucore -p coreutils env: RUSTFLAGS: "-Awarnings" 
RUST_BACKTRACE: "1" deps: name: Dependencies runs-on: ${{ matrix.job.os }} strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: "`cargo update` testing" shell: bash run: | ## `cargo update` testing # * convert any errors/warnings to GHA UI annotations; ref: for dir in "." "fuzz"; do ( cd "$dir" && cargo fetch --locked --quiet ) || { echo "::error file=$dir/Cargo.lock::'$dir/Cargo.lock' file requires update (use \`cd '$dir' && cargo +${{ env.RUST_MIN_SRV }} update\`)" ; exit 1 ; } done build_makefile: name: Build/Makefile needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 - name: Install/setup prerequisites shell: bash run: | sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: "`make build`" # Also check that target/CACHEDIR.TAG is created on a fresh checkout shell: bash run: | set -x # Target directory must not exist to start with, otherwise cargo # will not create target/CACHEDIR.TAG. 
if [[ -d target ]]; then mv -T target target.cache fi # Actually do the build make build echo "Check that target directory will be ignored by backup tools" test -f target/CACHEDIR.TAG # Restore cache for target/release (we only did a debug build) mv -t target/ target.cache/release 2>/dev/null || true - name: "`make nextest`" shell: bash run: make nextest CARGOFLAGS="--profile ci --hide-progress-bar" env: RUST_BACKTRACE: "1" - name: "`make install COMPLETIONS=n MANPAGES=n`" shell: bash run: | set -x DESTDIR=/tmp/ make PROFILE=release COMPLETIONS=n MANPAGES=n install # Check that the utils are present test -f /tmp/usr/local/bin/tty # Check that the manpage is not present ! test -f /tmp/usr/local/share/man/man1/whoami.1 # Check that the completion is not present ! test -f /tmp/usr/local/share/zsh/site-functions/_install ! test -f /tmp/usr/local/share/bash-completion/completions/head ! test -f /tmp/usr/local/share/fish/vendor_completions.d/cat.fish env: RUST_BACKTRACE: "1" - name: "`make install`" shell: bash run: | set -x DESTDIR=/tmp/ make PROFILE=release install # Check that the utils are present test -f /tmp/usr/local/bin/tty # Check that the manpage is present test -f /tmp/usr/local/share/man/man1/whoami.1 # Check that the completion is present test -f /tmp/usr/local/share/zsh/site-functions/_install test -f /tmp/usr/local/share/bash-completion/completions/head test -f /tmp/usr/local/share/fish/vendor_completions.d/cat.fish env: RUST_BACKTRACE: "1" - name: "`make uninstall`" shell: bash run: | set -x DESTDIR=/tmp/ make uninstall # Check that the utils are not present ! test -f /tmp/usr/local/bin/tty # Check that the manpage is not present ! test -f /tmp/usr/local/share/man/man1/whoami.1 # Check that the completion is not present ! test -f /tmp/usr/local/share/zsh/site-functions/_install ! test -f /tmp/usr/local/share/bash-completion/completions/head ! 
test -f /tmp/usr/local/share/fish/vendor_completions.d/cat.fish build_rust_stable: name: Build/stable needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} timeout-minutes: 90 env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } - { os: macos-latest , features: feat_os_macos } - { os: windows-latest , features: feat_os_windows } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Test run: cargo nextest run --hide-progress-bar --profile ci --features ${{ matrix.job.features }} env: RUST_BACKTRACE: "1" build_rust_nightly: name: Build/nightly needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} timeout-minutes: 90 env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } - { os: macos-latest , features: feat_os_macos } - { os: windows-latest , features: feat_os_windows } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@nightly - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Test run: cargo nextest run --hide-progress-bar --profile ci --features ${{ matrix.job.features }} env: RUST_BACKTRACE: "1" compute_size: name: Binary sizes needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: 
mozilla-actions/sccache-action@v0.0.9 - name: Install dependencies shell: bash run: | ## Install dependencies sudo apt-get update sudo apt-get install jq libselinux1-dev - name: "`make install`" shell: bash run: | ## `make install` make install DESTDIR=target/size-release/ make install MULTICALL=y DESTDIR=target/size-multi-release/ # strip the results strip target/size*/usr/local/bin/* - name: Compute uutil release sizes shell: bash run: | ## Compute uutil release sizes DATE=$(date --rfc-email) find target/size-release/usr/local/bin -type f -printf '%f\0' | sort -z | while IFS= read -r -d '' name; do size=$(du -s target/size-release/usr/local/bin/$name | awk '{print $1}') echo "\"$name\"" echo "$size" done | \ jq -n \ --arg date "$DATE" \ --arg sha "$GITHUB_SHA" \ 'reduce inputs as $name ({}; . + { ($name): input }) | { ($date): {sha: $sha, sizes: map_values(.)} }' > individual-size-result.json SIZE=$(cat individual-size-result.json | jq '[.[] | .sizes | .[]] | reduce .[] as $num (0; . + $num)') SIZE_MULTI=$(du -s target/size-multi-release/usr/local/bin/coreutils | awk '{print $1}') jq -n \ --arg date "$DATE" \ --arg sha "$GITHUB_SHA" \ --arg size "$SIZE" \ --arg multisize "$SIZE_MULTI" \ '{($date): { sha: $sha, size: $size, multisize: $multisize, }}' > size-result.json - name: Download the previous individual size result uses: dawidd6/action-download-artifact@v11 with: workflow: CICD.yml name: individual-size-result repo: uutils/coreutils path: dl - name: Download the previous size result uses: dawidd6/action-download-artifact@v11 with: workflow: CICD.yml name: size-result repo: uutils/coreutils path: dl - name: Check uutil release sizes shell: bash run: | check() { # Warn if the size increases by more than 5% threshold='1.05' if [[ "$2" -eq 0 || "$3" -eq 0 ]]; then echo "::warning file=$4::Invalid size for $1. Sizes cannot be 0." 
return fi ratio=$(jq -n "$2 / $3") echo "$1: size=$2, previous_size=$3, ratio=$ratio, threshold=$threshold" if [[ "$(jq -n "$ratio > $threshold")" == 'true' ]]; then echo "::warning file=$4::Size of $1 increases by more than 5%" fi } ## Check individual size result while read -r name previous_size; do size=$(cat individual-size-result.json | jq -r ".[] | .sizes | .\"$name\"") check "\`$name\` binary" "$size" "$previous_size" 'individual-size-result.json' done < <(cat dl/individual-size-result.json | jq -r '.[] | .sizes | to_entries[] | "\(.key) \(.value)"') ## Check size result size=$(cat size-result.json | jq -r '.[] | .size') previous_size=$(cat dl/size-result.json | jq -r '.[] | .size') check 'multiple binaries' "$size" "$previous_size" 'size-result.json' multisize=$(cat size-result.json | jq -r '.[] | .multisize') previous_multisize=$(cat dl/size-result.json | jq -r '.[] | .multisize') check 'multicall binary' "$multisize" "$previous_multisize" 'size-result.json' - name: Upload the individual size result uses: actions/upload-artifact@v4 with: name: individual-size-result path: individual-size-result.json - name: Upload the size result uses: actions/upload-artifact@v4 with: name: size-result path: size-result.json build: permissions: contents: write # to create GitHub release (softprops/action-gh-release) name: Build needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} timeout-minutes: 90 env: DOCKER_OPTS: '--volume /etc/passwd:/etc/passwd --volume /etc/group:/etc/group' SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: # - { os , target , cargo-options , default-features, features , use-cross , toolchain, skip-tests, workspace-tests, skip-package, skip-publish } - { os: ubuntu-latest , target: arm-unknown-linux-gnueabihf , features: feat_os_unix_gnueabihf , use-cross: use-cross , skip-tests: true } - { os: ubuntu-24.04-arm , target: aarch64-unknown-linux-gnu , features: feat_os_unix_gnueabihf } - { os: 
ubuntu-latest , target: aarch64-unknown-linux-musl , features: feat_os_unix , use-cross: use-cross , skip-tests: true } # - { os: ubuntu-latest , target: x86_64-unknown-linux-gnu , features: feat_selinux , use-cross: use-cross } - { os: ubuntu-latest , target: i686-unknown-linux-gnu , features: "feat_os_unix,test_risky_names", use-cross: use-cross } - { os: ubuntu-latest , target: i686-unknown-linux-musl , features: feat_os_unix , use-cross: use-cross } - { os: ubuntu-latest , target: x86_64-unknown-linux-gnu , features: "feat_os_unix,test_risky_names", use-cross: use-cross } - { os: ubuntu-latest , target: x86_64-unknown-linux-gnu , features: "feat_os_unix,uudoc" , use-cross: no, workspace-tests: true } - { os: ubuntu-latest , target: x86_64-unknown-linux-musl , features: feat_os_unix , use-cross: use-cross } - { os: ubuntu-latest , target: x86_64-unknown-redox , features: feat_os_unix_redox , use-cross: redoxer , skip-tests: true } - { os: ubuntu-latest , target: wasm32-unknown-unknown , default-features: false, features: uucore/format, skip-tests: true, skip-package: true, skip-publish: true } - { os: macos-latest , target: aarch64-apple-darwin , features: feat_os_macos, workspace-tests: true } # M1 CPU # PR #7964: Mac should still build even if the feature is not enabled - { os: macos-latest , target: aarch64-apple-darwin , workspace-tests: true } # M1 CPU - { os: macos-13 , target: x86_64-apple-darwin , features: feat_os_macos, workspace-tests: true } - { os: windows-latest , target: i686-pc-windows-msvc , features: feat_os_windows } - { os: windows-latest , target: x86_64-pc-windows-gnu , features: feat_os_windows } - { os: windows-latest , target: x86_64-pc-windows-msvc , features: feat_os_windows } - { os: windows-latest , target: aarch64-pc-windows-msvc , features: feat_os_windows, use-cross: use-cross , skip-tests: true } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ 
env.RUST_MIN_SRV }} targets: ${{ matrix.job.target }} - uses: Swatinem/rust-cache@v2 with: key: "${{ matrix.job.os }}_${{ matrix.job.target }}" - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # toolchain TOOLCHAIN="stable" ## default to "stable" toolchain # * specify alternate/non-default TOOLCHAIN for *-pc-windows-gnu targets; gnu targets on Windows are broken for the standard *-pc-windows-msvc toolchain (refs: GH:rust-lang/rust#47048, GH:rust-lang/rust#53454, GH:rust-lang/cargo#6754) case ${{ matrix.job.target }} in *-pc-windows-gnu) TOOLCHAIN="stable-${{ matrix.job.target }}" ;; esac; # * use requested TOOLCHAIN if specified if [ -n "${{ matrix.job.toolchain }}" ]; then TOOLCHAIN="${{ matrix.job.toolchain }}" ; fi outputs TOOLCHAIN # staging directory STAGING='_staging' outputs STAGING # determine EXE suffix EXE_suffix="" ; case '${{ matrix.job.target }}' in *-pc-windows-*) EXE_suffix=".exe" ;; esac; outputs EXE_suffix # parse commit reference info echo GITHUB_REF=${GITHUB_REF} echo GITHUB_SHA=${GITHUB_SHA} REF_NAME=${GITHUB_REF#refs/*/} unset REF_BRANCH ; case "${GITHUB_REF}" in refs/heads/*) REF_BRANCH=${GITHUB_REF#refs/heads/} ;; esac; unset REF_TAG ; case "${GITHUB_REF}" in refs/tags/*) REF_TAG=${GITHUB_REF#refs/tags/} ;; esac; REF_SHAS=${GITHUB_SHA:0:10} outputs REF_NAME REF_BRANCH REF_TAG REF_SHAS # parse target unset TARGET_ARCH case '${{ matrix.job.target }}' in aarch64-*) TARGET_ARCH=arm64 ;; arm-*-*hf) TARGET_ARCH=armhf ;; i586-*) TARGET_ARCH=i586 ;; i686-*) TARGET_ARCH=i686 ;; x86_64-*) TARGET_ARCH=x86_64 ;; esac; unset TARGET_OS case '${{ matrix.job.target }}' in *-linux-*) TARGET_OS=linux ;; *-apple-*) TARGET_OS=macos ;; *-windows-*) TARGET_OS=windows ;; *-redox*) TARGET_OS=redox ;; esac 
outputs TARGET_ARCH TARGET_OS # package name PKG_suffix=".tar.gz" ; case '${{ matrix.job.target }}' in *-pc-windows-*) PKG_suffix=".zip" ;; esac; PKG_BASENAME=${PROJECT_NAME}-${REF_TAG:-$REF_SHAS}-${{ matrix.job.target }} PKG_NAME=${PKG_BASENAME}${PKG_suffix} outputs PKG_suffix PKG_BASENAME PKG_NAME # deployable tag? (ie, leading "vM" or "M"; M == version number) unset DEPLOY ; if [[ $REF_TAG =~ ^[vV]?[0-9].* ]]; then DEPLOY='true' ; fi outputs DEPLOY # DPKG architecture? unset DPKG_ARCH case ${{ matrix.job.target }} in x86_64-*-linux-*) DPKG_ARCH=amd64 ;; *-linux-*) DPKG_ARCH=${TARGET_ARCH} ;; esac outputs DPKG_ARCH # DPKG version? unset DPKG_VERSION ; if [[ $REF_TAG =~ ^[vV]?[0-9].* ]]; then DPKG_VERSION=${REF_TAG/#[vV]/} ; fi outputs DPKG_VERSION # DPKG base name/conflicts? DPKG_BASENAME=${PROJECT_NAME} DPKG_CONFLICTS=${PROJECT_NAME}-musl case ${{ matrix.job.target }} in *-musl) DPKG_BASENAME=${PROJECT_NAME}-musl ; DPKG_CONFLICTS=${PROJECT_NAME} ;; esac; outputs DPKG_BASENAME DPKG_CONFLICTS # DPKG name unset DPKG_NAME; if [[ -n $DPKG_ARCH && -n $DPKG_VERSION ]]; then DPKG_NAME="${DPKG_BASENAME}_${DPKG_VERSION}_${DPKG_ARCH}.deb" ; fi outputs DPKG_NAME # target-specific options # * CARGO_FEATURES_OPTION CARGO_FEATURES_OPTION='' ; if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features=${{ matrix.job.features }}' ; fi outputs CARGO_FEATURES_OPTION # * CARGO_DEFAULT_FEATURES_OPTION CARGO_DEFAULT_FEATURES_OPTION='' ; if [ "${{ matrix.job.default-features }}" == "false" ]; then CARGO_DEFAULT_FEATURES_OPTION='--no-default-features' ; fi outputs CARGO_DEFAULT_FEATURES_OPTION # * CARGO_CMD CARGO_CMD='cross' CARGO_CMD_OPTIONS='+${{ env.RUST_MIN_SRV }}' # Added suffix for artifacts, needed when multiple jobs use the same target. 
ARTIFACTS_SUFFIX='' case '${{ matrix.job.use-cross }}' in ''|0|f|false|n|no) CARGO_CMD='cargo' ARTIFACTS_SUFFIX='-nocross' ;; redoxer) CARGO_CMD='redoxer' CARGO_CMD_OPTIONS='' ;; esac # needed for target "aarch64-apple-darwin". There are two jobs, and the difference between them is whether "features" is set if [ -z "${{ matrix.job.features }}" ]; then ARTIFACTS_SUFFIX='-nofeatures' ; fi outputs CARGO_CMD outputs CARGO_CMD_OPTIONS outputs ARTIFACTS_SUFFIX CARGO_TEST_OPTIONS='' case '${{ matrix.job.workspace-tests }}' in 1|t|true|y|yes) # This also runs tests in other packages in the source directory (e.g. uucore). # We cannot enable this everywhere as some platforms are currently broken, and # we cannot use `cross` as its Docker image is ancient (Ubuntu 16.04) and is # missing required system dependencies (e.g. recent libclang-dev). CARGO_TEST_OPTIONS='--workspace' ;; esac outputs CARGO_TEST_OPTIONS # * executable for `strip`? STRIP="strip" case ${{ matrix.job.target }} in aarch64-*-linux-*) STRIP="aarch64-linux-gnu-strip" ;; arm-*-linux-gnueabihf) STRIP="arm-linux-gnueabihf-strip" ;; *-pc-windows-msvc) STRIP="" ;; esac; outputs STRIP - uses: taiki-e/install-action@v2 if: steps.vars.outputs.CARGO_CMD == 'cross' with: tool: cross@0.2.5 - name: Create all needed build/work directories shell: bash run: | ## Create build/work space mkdir -p '${{ steps.vars.outputs.STAGING }}' mkdir -p '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}' mkdir -p '${{ steps.vars.outputs.STAGING }}/dpkg' - name: Install/setup prerequisites shell: bash run: | ## Install/setup prerequisites case '${{ matrix.job.target }}' in arm-unknown-linux-gnueabihf) sudo apt-get -y update sudo apt-get -y install gcc-arm-linux-gnueabihf ;; aarch64-unknown-linux-*) sudo apt-get -y update sudo apt-get -y install gcc-aarch64-linux-gnu ;; *-redox*) sudo apt-get -y update sudo apt-get -y install fuse3 libfuse-dev ;; esac case '${{ matrix.job.os }}' in macos-latest) brew install coreutils 
;; # needed for testing esac case '${{ matrix.job.os }}' in ubuntu-*) # selinux headers needed to build tests sudo apt-get -y update sudo apt-get -y install libselinux1-dev # pinky is a tool to show logged-in users from utmp, and gecos fields from /etc/passwd. # In GitHub Action *nix VMs, no accounts log in, even the "runner" account that runs the commands. The account also has empty gecos fields. # To work around this for pinky tests, we create a fake login entry for the GH runner account... FAKE_UTMP='[7] [999999] [tty2] [runner] [tty2] [] [0.0.0.0] [2022-02-22T22:22:22,222222+00:00]' # ... by dumping the login records, adding our fake line, then reverse dumping ... (utmpdump /var/run/utmp ; echo $FAKE_UTMP) | sudo utmpdump -r -o /var/run/utmp # ... and add a full name to each account with a gecos field but no full name. sudo sed -i 's/:,/:runner name,/' /etc/passwd # We also create a couple optional files pinky looks for touch /home/runner/.project echo "foo" > /home/runner/.plan ;; esac - uses: taiki-e/install-action@v2 if: steps.vars.outputs.CARGO_CMD == 'redoxer' with: tool: redoxer@0.2.37 - name: Initialize toolchain-dependent workflow variables id: dep_vars shell: bash run: | ## Dependent VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # * determine sub-crate utility list UTILITY_LIST="$(./util/show-utils.sh ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }})" echo UTILITY_LIST=${UTILITY_LIST} CARGO_UTILITY_LIST_OPTIONS="$(for u in ${UTILITY_LIST}; do echo -n "-puu_${u} "; done;)" outputs CARGO_UTILITY_LIST_OPTIONS - name: Info shell: bash run: | ## Info # commit info echo "## commit" echo GITHUB_REF=${GITHUB_REF} echo GITHUB_SHA=${GITHUB_SHA} # environment echo "## environment" echo "CI='${CI}'" # tooling info display echo "## tooling" which gcc >/dev/null 2>&1 && (gcc --version | head -1) || true rustup -V 2>/dev/null rustup show 
active-toolchain cargo -V rustc -V cargo tree -V # dependencies echo "## dependency list" cargo fetch --locked --quiet cargo tree --locked --target=${{ matrix.job.target }} ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} ${{ steps.vars.outputs.CARGO_DEFAULT_FEATURES_OPTION }} --no-dedupe -e=no-dev --prefix=none | grep -vE "$PWD" | sort --unique - name: Build shell: bash run: | ## Build ${{ steps.vars.outputs.CARGO_CMD }} ${{ steps.vars.outputs.CARGO_CMD_OPTIONS }} build --release \ --target=${{ matrix.job.target }} ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} ${{ steps.vars.outputs.CARGO_DEFAULT_FEATURES_OPTION }} - name: Test if: matrix.job.skip-tests != true shell: bash run: | ## Test ${{ steps.vars.outputs.CARGO_CMD }} ${{ steps.vars.outputs.CARGO_CMD_OPTIONS }} test --target=${{ matrix.job.target }} \ ${{ steps.vars.outputs.CARGO_TEST_OPTIONS}} ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} ${{ steps.vars.outputs.CARGO_DEFAULT_FEATURES_OPTION }} env: RUST_BACKTRACE: "1" - name: Test individual utilities if: matrix.job.skip-tests != true shell: bash run: | ## Test individual utilities ${{ steps.vars.outputs.CARGO_CMD }} ${{ steps.vars.outputs.CARGO_CMD_OPTIONS }} test --target=${{ matrix.job.target }} \ ${{ matrix.job.cargo-options }} ${{ steps.dep_vars.outputs.CARGO_UTILITY_LIST_OPTIONS }} env: RUST_BACKTRACE: "1" - name: Archive executable artifacts uses: actions/upload-artifact@v4 with: name: ${{ env.PROJECT_NAME }}-${{ matrix.job.target }}${{ steps.vars.outputs.ARTIFACTS_SUFFIX }} path: target/${{ matrix.job.target }}/release/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }} - name: Package if: matrix.job.skip-package != true shell: bash run: | ## Package artifact(s) # binary cp 'target/${{ matrix.job.target }}/release/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}' '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME 
}}/' # `strip` binary (if needed) if [ -n "${{ steps.vars.outputs.STRIP }}" ]; then "${{ steps.vars.outputs.STRIP }}" '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}' ; fi # README and LICENSE # * spell-checker:ignore EADME ICENSE (shopt -s nullglob; for f in [R]"EADME"{,.*}; do cp $f '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/' ; done) (shopt -s nullglob; for f in [L]"ICENSE"{-*,}{,.*}; do cp $f '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/' ; done) # core compressed package pushd '${{ steps.vars.outputs.STAGING }}/' >/dev/null case '${{ matrix.job.target }}' in *-pc-windows-*) 7z -y a '${{ steps.vars.outputs.PKG_NAME }}' '${{ steps.vars.outputs.PKG_BASENAME }}'/* | tail -2 ;; *) tar czf '${{ steps.vars.outputs.PKG_NAME }}' '${{ steps.vars.outputs.PKG_BASENAME }}'/* ;; esac popd >/dev/null # dpkg if [ -n "${{ steps.vars.outputs.DPKG_NAME }}" ]; then DPKG_DIR="${{ steps.vars.outputs.STAGING }}/dpkg" # binary install -Dm755 'target/${{ matrix.job.target }}/release/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}' "${DPKG_DIR}/usr/bin/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}" if [ -n "${{ steps.vars.outputs.STRIP }}" ]; then "${{ steps.vars.outputs.STRIP }}" "${DPKG_DIR}/usr/bin/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}" ; fi # README and LICENSE (shopt -s nullglob; for f in [R]"EADME"{,.*}; do install -Dm644 "$f" "${DPKG_DIR}/usr/share/doc/${{ env.PROJECT_NAME }}/$f" ; done) (shopt -s nullglob; for f in [L]"ICENSE"{-*,}{,.*}; do install -Dm644 "$f" "${DPKG_DIR}/usr/share/doc/${{ env.PROJECT_NAME }}/$f" ; done) # control file mkdir -p "${DPKG_DIR}/DEBIAN" printf "Package: ${{ steps.vars.outputs.DPKG_BASENAME }}\nVersion: ${{ steps.vars.outputs.DPKG_VERSION }}\nSection: utils\nPriority: optional\nMaintainer: ${{ env.PROJECT_AUTH }}\nArchitecture: ${{ 
steps.vars.outputs.DPKG_ARCH }}\nProvides: ${{ env.PROJECT_NAME }}\nConflicts: ${{ steps.vars.outputs.DPKG_CONFLICTS }}\nDescription: ${{ env.PROJECT_DESC }}\n" > "${DPKG_DIR}/DEBIAN/control" # build dpkg fakeroot dpkg-deb --build "${DPKG_DIR}" "${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.DPKG_NAME }}" fi - name: Publish uses: softprops/action-gh-release@v2 if: steps.vars.outputs.DEPLOY && matrix.job.skip-publish != true with: draft: true files: | ${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_NAME }} ${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.DPKG_NAME }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} test_busybox: name: Tests/BusyBox test suite needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest } steps: - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup echo "TEST_SUMMARY_FILE=busybox-result.json" >> $GITHUB_OUTPUT - uses: actions/checkout@v4 with: persist-credentials: false - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Install/setup prerequisites shell: bash run: | sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev ## Install/setup prerequisites make prepare-busytest - name: Run BusyBox test suite id: summary shell: bash run: | ## Run BusyBox test suite set -v cp .busybox-config target/debug/.config ## Run BusyBox test suite bindir=$(pwd)/target/debug cd tmp/busybox-*/testsuite output=$(bindir=$bindir ./runtest 2>&1 || true) printf "%s\n" "${output}" FAIL=$(echo "$output" | grep "^FAIL:\s" | wc --lines) PASS=$(echo "$output" | grep "^PASS:\s" | wc --lines) SKIP=$(echo "$output" | grep "^SKIPPED:\s" | wc --lines) TOTAL=`expr $FAIL + $PASS + $SKIP` echo "FAIL $FAIL" echo "SKIP $SKIP" echo "PASS $PASS" echo "TOTAL $TOTAL" cd - output="Busybox tests summary = TOTAL: $TOTAL / PASS: $PASS / FAIL: 
$FAIL / SKIP: $SKIP" echo "${output}" if [[ "$FAIL" -gt 0 || "$ERROR" -gt 0 ]]; then echo "::warning ::${output}" ; fi jq -n \ --arg date "$(date --rfc-email)" \ --arg sha "$GITHUB_SHA" \ --arg total "$TOTAL" \ --arg pass "$PASS" \ --arg skip "$SKIP" \ --arg fail "$FAIL" \ '{($date): { sha: $sha, total: $total, pass: $pass, skip: $skip, fail: $fail, }}' > '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' HASH=$(sha1sum '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' | cut --delim=" " -f 1) echo "HASH=${HASH}" >> $GITHUB_OUTPUT - name: Reserve SHA1/ID of 'test-summary' uses: actions/upload-artifact@v4 with: name: "${{ steps.summary.outputs.HASH }}" path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Reserve test results summary uses: actions/upload-artifact@v4 with: name: busybox-test-summary path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Upload json results uses: actions/upload-artifact@v4 with: name: busybox-result.json path: ${{ steps.vars.outputs.TEST_SUMMARY_FILE }} test_toybox: name: Tests/Toybox test suite needs: [ min_version, deps ] runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest } steps: - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } TEST_SUMMARY_FILE="toybox-result.json" outputs TEST_SUMMARY_FILE - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_MIN_SRV }} components: rustfmt - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Install/setup prerequisites shell: bash run: | sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev - name: Build coreutils as multiple binaries shell: bash run: | ## Build 
individual uutil binaries set -v make - name: Run toybox src shell: bash run: | make toybox-src - name: Run Toybox test suite id: summary shell: bash run: | ## Run Toybox test suite set -v cd tmp/toybox-*/ make defconfig make tests &> tmp.log || true cat tmp.log FAIL=$(grep "FAIL" tmp.log | wc --lines) PASS=$(grep "PASS:" tmp.log| wc --lines) SKIP=$(grep " disabled$" tmp.log| wc --lines) TOTAL=`expr $FAIL + $PASS + $SKIP` echo "FAIL $FAIL" echo "SKIP $SKIP" echo "PASS $PASS" echo "TOTAL $TOTAL" cd - jq -n \ --arg date "$(date --rfc-email)" \ --arg sha "$GITHUB_SHA" \ --arg total "$TOTAL" \ --arg pass "$PASS" \ --arg skip "$SKIP" \ --arg fail "$FAIL" \ '{($date): { sha: $sha, total: $total, pass: $pass, skip: $skip, fail: $fail, }}' > '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' output="Toybox tests summary = TOTAL: $TOTAL / PASS: $PASS / FAIL: $FAIL / SKIP: $SKIP" echo "${output}" if [[ "$FAIL" -gt 0 || "$ERROR" -gt 0 ]]; then echo "::warning ::${output}" ; fi HASH=$(sha1sum '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' | cut --delim=" " -f 1) echo "HASH=${HASH}" >> $GITHUB_OUTPUT - name: Reserve SHA1/ID of 'test-summary' uses: actions/upload-artifact@v4 with: name: "${{ steps.summary.outputs.HASH }}" path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Reserve test results summary uses: actions/upload-artifact@v4 with: name: toybox-test-summary path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Upload json results uses: actions/upload-artifact@v4 with: name: toybox-result.json path: ${{ steps.vars.outputs.TEST_SUMMARY_FILE }} coverage: name: Code Coverage runs-on: ${{ matrix.job.os }} timeout-minutes: 90 env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: unix, toolchain: nightly } # FIXME: Re-enable macos code coverage # - { os: macos-latest , features: macos, toolchain: nightly } # FIXME: Re-enable Code Coverage on windows, which currently fails due to 
"profiler_builtins". See #6686. # - { os: windows-latest , features: windows, toolchain: nightly-x86_64-pc-windows-gnu } steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.job.toolchain }} components: rustfmt - uses: taiki-e/install-action@v2 with: tool: nextest,grcov@0.8.24 - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 # - name: Reattach HEAD ## may be needed for accurate code coverage info # run: git checkout ${{ github.head_ref }} - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # toolchain TOOLCHAIN="nightly" ## default to "nightly" toolchain (required for certain required unstable compiler flags) ## !maint: refactor when stable channel has needed support # * specify gnu-type TOOLCHAIN for windows; `grcov` requires gnu-style code coverage data files case ${{ matrix.job.os }} in windows-*) TOOLCHAIN="$TOOLCHAIN-x86_64-pc-windows-gnu" ;; esac; # * use requested TOOLCHAIN if specified if [ -n "${{ matrix.job.toolchain }}" ]; then TOOLCHAIN="${{ matrix.job.toolchain }}" ; fi outputs TOOLCHAIN # target-specific options # * CARGO_FEATURES_OPTION CARGO_FEATURES_OPTION='--all-features' ; ## default to '--all-features' for code coverage if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features=${{ matrix.job.features }}' ; fi outputs CARGO_FEATURES_OPTION # * CODECOV_FLAGS CODECOV_FLAGS=$( echo "${{ matrix.job.os }}" | sed 's/[^[:alnum:]]/_/g' ) outputs CODECOV_FLAGS - name: Install/setup prerequisites shell: bash run: | ## Install/setup prerequisites case '${{ matrix.job.os }}' in macos-latest) brew install coreutils ;; # needed for testing esac case '${{ matrix.job.os }}' in ubuntu-latest) sudo apt-get -y update ; sudo apt-get -y install libselinux1-dev # 
pinky is a tool to show logged-in users from utmp, and gecos fields from /etc/passwd. # In GitHub Action *nix VMs, no accounts log in, even the "runner" account that runs the commands. The account also has empty gecos fields. # To work around this for pinky tests, we create a fake login entry for the GH runner account... FAKE_UTMP='[7] [999999] [tty2] [runner] [tty2] [] [0.0.0.0] [2022-02-22T22:22:22,222222+00:00]' # ... by dumping the login records, adding our fake line, then reverse dumping ... (utmpdump /var/run/utmp ; echo $FAKE_UTMP) | sudo utmpdump -r -o /var/run/utmp # ... and add a full name to each account with a gecos field but no full name. sudo sed -i 's/:,/:runner name,/' /etc/passwd # We also create a couple optional files pinky looks for touch /home/runner/.project echo "foo" > /home/runner/.plan ;; esac case '${{ matrix.job.os }}' in # Update binutils if MinGW due to https://github.com/rust-lang/rust/issues/112368 windows-latest) C:/msys64/usr/bin/pacman.exe -Sy --needed mingw-w64-x86_64-gcc --noconfirm ; echo "C:\msys64\mingw64\bin" >> $GITHUB_PATH ;; esac ## Install the llvm-tools component to get access to `llvm-profdata` rustup component add llvm-tools - name: Run test and coverage id: run_test_cov run: | outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # Run the coverage script ./util/build-run-test-coverage-linux.sh outputs REPORT_FILE env: COVERAGE_DIR: ${{ github.workspace }}/coverage FEATURES_OPTION: ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} # RUSTUP_TOOLCHAIN: ${{ steps.vars.outputs.TOOLCHAIN }} - name: Upload coverage results (to Codecov.io) uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} files: ${{ steps.run_test_cov.outputs.report }} ## flags: IntegrationTests, UnitTests, ${{ steps.vars.outputs.CODECOV_FLAGS }} flags: ${{ steps.vars.outputs.CODECOV_FLAGS }} name: codecov-umbrella 
fail_ci_if_error: false test_separately: name: Separate Builds runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: build and test all programs individually shell: bash run: | for f in $(util/show-utils.sh) do echo "Building and testing $f" cargo test -p "uu_$f" || exit 1 done test_all_features: name: Test all features separately needs: [ min_version, deps ] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] # windows-latest - https://github.com/uutils/coreutils/issues/7044 steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: build and test all features individually shell: bash run: | for f in $(util/show-utils.sh) do echo "Running tests with --features=$f and --no-default-features" cargo test --features=$f --no-default-features done test_selinux: name: Build/SELinux needs: [ min_version, deps ] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@stable - name: Setup Lima uses: lima-vm/lima-actions/setup@v1 id: lima-actions-setup - name: Cache ~/.cache/lima uses: actions/cache@v4 with: path: ~/.cache/lima key: lima-${{ steps.lima-actions-setup.outputs.version }} - name: Start Fedora VM with SELinux run: limactl start --plain --name=default --cpus=4 --disk=30 --memory=4 --network=lima:user-v2 template://fedora - name: Setup SSH uses: lima-vm/lima-actions/ssh@v1 - run: rsync -v -a -e ssh . 
lima-default:~/work/ - name: Setup Rust and other build deps in VM run: | lima sudo dnf install gcc g++ git rustup libselinux-devel clang-devel attr -y lima rustup-init -y --default-toolchain stable - name: Verify SELinux Status run: | lima getenforce lima ls -laZ /etc/selinux - name: Build and Test with SELinux run: | lima ls lima bash -c "cd work && cargo test --features 'feat_selinux'" - name: Lint with SELinux run: lima bash -c "cd work && cargo clippy --all-targets --features 'feat_selinux' -- -D warnings" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/CheckScripts.yml000066400000000000000000000027161504311601400274570ustar00rootroot00000000000000name: CheckScripts # spell-checker:ignore ludeeus mfinelli shellcheck scandir shfmt env: SCRIPT_DIR: 'util' on: push: branches: - '*' paths: - 'util/**/*.sh' pull_request: branches: - main paths: - 'util/**/*.sh' # End the current execution if there is a new changeset in the PR. concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: shell_check: name: ShellScript/Check runs-on: ubuntu-latest permissions: contents: read steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Run ShellCheck uses: ludeeus/action-shellcheck@master env: SHELLCHECK_OPTS: -s bash with: severity: warning scandir: ${{ env.SCRIPT_DIR }} format: tty shell_fmt: name: ShellScript/Format runs-on: ubuntu-latest permissions: contents: read steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Setup shfmt uses: mfinelli/setup-shfmt@v3 - name: Run shfmt shell: bash run: | # fmt options: bash syntax, 4 spaces indent, indent for switch-case echo "## show the differences between formatted and original scripts..." 
find ${{ env.SCRIPT_DIR }} -name "*.sh" -print0 | xargs -0 shfmt -ln=bash -i 4 -ci -d || true coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/FixPR.yml000066400000000000000000000105011504311601400260510ustar00rootroot00000000000000name: FixPR # spell-checker:ignore Swatinem dtolnay dedupe # Trigger automated fixes for PRs being merged (with associated commits) env: BRANCH_TARGET: main on: # * only trigger on pull request closed to specific branches # ref: https://github.community/t/trigger-workflow-only-on-pull-request-merge/17359/9 pull_request: branches: - main # == env.BRANCH_TARGET ## unfortunately, env context variables are only available in jobs/steps (see ) types: [ closed ] jobs: code_deps: # Refresh dependencies (ie, 'Cargo.lock') and show updated dependency tree if: github.event.pull_request.merged == true ## only for PR merges name: Update/dependencies runs-on: ${{ matrix.job.os }} strategy: matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Initialize job variables id: vars shell: bash run: | # surface MSRV from CICD workflow RUST_MIN_SRV=$(grep -P "^\s+RUST_MIN_SRV:" .github/workflows/CICD.yml | grep -Po "(?<=\x22)\d+[.]\d+(?:[.]\d+)?(?=\x22)" ) echo "RUST_MIN_SRV=${RUST_MIN_SRV}" >> $GITHUB_OUTPUT - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ steps.vars.outputs.RUST_MIN_SRV }} - uses: Swatinem/rust-cache@v2 - name: Ensure updated 'Cargo.lock' shell: bash run: | # Ensure updated '*/Cargo.lock' # * '*/Cargo.lock' is required to be in a format that `cargo` of MinSRV can interpret (eg, v1-format for MinSRV < v1.38) for dir in "." 
"fuzz"; do ( cd "$dir" && (cargo fetch --locked --quiet || cargo +${{ steps.vars.outputs.RUST_MIN_SRV }} update) ) done - name: Info shell: bash run: | # Info ## environment echo "## environment" echo "CI='${CI}'" ## tooling info display echo "## tooling" which gcc >/dev/null 2>&1 && (gcc --version | head -1) || true rustup -V 2>/dev/null rustup show active-toolchain cargo -V rustc -V cargo tree -V ## dependencies echo "## dependency list" cargo fetch --locked --quiet ## * using the 'stable' toolchain is necessary to avoid "unexpected '--filter-platform'" errors RUSTUP_TOOLCHAIN=stable cargo tree --locked --no-dedupe -e=no-dev --prefix=none --features ${{ matrix.job.features }} | grep -vE "$PWD" | sort --unique - name: Commit any changes (to '${{ env.BRANCH_TARGET }}') uses: EndBug/add-and-commit@v9 with: new_branch: ${{ env.BRANCH_TARGET }} default_author: github_actions message: "maint ~ refresh 'Cargo.lock' 'fuzz/Cargo.lock'" add: Cargo.lock fuzz/Cargo.lock env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} code_format: # Recheck/refresh code formatting if: github.event.pull_request.merged == true ## only for PR merges name: Update/format runs-on: ${{ matrix.job.os }} strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Initialize job variables id: vars shell: bash run: | # target-specific options # * CARGO_FEATURES_OPTION CARGO_FEATURES_OPTION='' ; if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features "${{ matrix.job.features }}"' ; fi echo "CARGO_FEATURES_OPTION=${CARGO_FEATURES_OPTION}" >> $GITHUB_OUTPUT - uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - uses: Swatinem/rust-cache@v2 - name: "`cargo fmt`" shell: bash run: | cargo fmt - name: "`cargo fmt` tests" shell: bash run: | # `cargo fmt` of tests find tests -name "*.rs" -print0 | xargs -0 cargo fmt -- - name: Commit any changes (to '${{ 
env.BRANCH_TARGET }}') uses: EndBug/add-and-commit@v9 with: new_branch: ${{ env.BRANCH_TARGET }} default_author: github_actions message: "maint ~ rustfmt (`cargo fmt`)" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/GnuComment.yml000066400000000000000000000042111504311601400271360ustar00rootroot00000000000000name: GnuComment # spell-checker:ignore zizmor backquote on: workflow_run: workflows: ["GnuTests"] types: - completed # zizmor: ignore[dangerous-triggers] permissions: {} jobs: post-comment: permissions: actions: read # to list workflow runs artifacts pull-requests: write # to comment on pr runs-on: ubuntu-latest if: > github.event.workflow_run.event == 'pull_request' steps: - name: 'Download artifact' uses: actions/github-script@v7 with: script: | // List all artifacts from GnuTests var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ owner: context.repo.owner, repo: context.repo.repo, run_id: ${{ github.event.workflow_run.id }}, }); // Download the "comment" artifact, which contains a PR number (NR) and result.txt var matchArtifact = artifacts.data.artifacts.filter((artifact) => { return artifact.name == "comment" })[0]; var download = await github.rest.actions.downloadArtifact({ owner: context.repo.owner, repo: context.repo.repo, artifact_id: matchArtifact.id, archive_format: 'zip', }); var fs = require('fs'); fs.writeFileSync('${{ github.workspace }}/comment.zip', Buffer.from(download.data)); - run: unzip comment.zip - name: 'Comment on PR' uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | var fs = require('fs'); var issue_number = Number(fs.readFileSync('./NR')); var content = fs.readFileSync('./result.txt'); if (content.toString().trim().length > 7) { // 7 because we have backquote + \n await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: issue_number, body: 'GNU testsuite 
comparison:\n```\n' + content + '```' }); } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/GnuTests.yml000066400000000000000000000430171504311601400266450ustar00rootroot00000000000000name: GnuTests # spell-checker:ignore (abbrev/names) CodeCov gnulib GnuTests Swatinem # spell-checker:ignore (jargon) submodules devel # spell-checker:ignore (libs/utils) autopoint chksum getenforce gperf lcov libexpect limactl pyinotify setenforce shopt texinfo valgrind libattr libcap taiki-e # spell-checker:ignore (options) Ccodegen Coverflow Cpanic Zpanic # spell-checker:ignore (people) Dawid Dziurla * dawidd dtolnay # spell-checker:ignore (vars) FILESET SUBDIRS XPASS # * note: to run a single test => `REPO/util/run-gnu-test.sh PATH/TO/TEST/SCRIPT` on: pull_request: push: branches: - '*' permissions: contents: read # End the current execution if there is a new changeset in the PR. concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} env: DEFAULT_BRANCH: ${{ github.event.repository.default_branch }} TEST_FULL_SUMMARY_FILE: 'gnu-full-result.json' TEST_ROOT_FULL_SUMMARY_FILE: 'gnu-root-full-result.json' TEST_SELINUX_FULL_SUMMARY_FILE: 'selinux-gnu-full-result.json' TEST_SELINUX_ROOT_FULL_SUMMARY_FILE: 'selinux-root-gnu-full-result.json' REPO_GNU_REF: "v9.7" jobs: native: name: Run GNU tests (native) runs-on: ubuntu-24.04 steps: #### Get the code, setup cache - name: Checkout code (uutils) uses: actions/checkout@v4 with: path: 'uutils' persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - uses: Swatinem/rust-cache@v2 with: workspaces: "./uutils -> target" - name: Checkout code (GNU coreutils) uses: actions/checkout@v4 with: repository: 'coreutils/coreutils' path: 'gnu' ref: ${{ env.REPO_GNU_REF }} submodules: false persist-credentials: false - name: Override submodule URL and initialize submodules # Use github instead of upstream git server 
run: | git submodule sync --recursive git config submodule.gnulib.url https://github.com/coreutils/gnulib.git git submodule update --init --recursive --depth 1 working-directory: gnu #### Build environment setup - name: Install dependencies shell: bash run: | ## Install dependencies sudo apt-get update sudo apt-get install -y autoconf autopoint bison texinfo gperf gcc g++ gdb python3-pyinotify jq valgrind libexpect-perl libacl1-dev libattr1-dev libcap-dev libselinux1-dev attr quilt - name: Add various locales shell: bash run: | ## Add various locales echo "Before:" locale -a ## Some tests fail with 'cannot change locale (en_US.ISO-8859-1): No such file or directory' ## Some others need a French locale sudo locale-gen sudo locale-gen --keep-existing fr_FR sudo locale-gen --keep-existing fr_FR.UTF-8 sudo locale-gen --keep-existing es_ES.UTF-8 sudo locale-gen --keep-existing sv_SE sudo locale-gen --keep-existing sv_SE.UTF-8 sudo locale-gen --keep-existing en_US sudo locale-gen --keep-existing en_US.UTF-8 sudo locale-gen --keep-existing ru_RU.KOI8-R sudo update-locale echo "After:" locale -a ### Build - name: Build binaries shell: bash run: | ## Build binaries cd 'uutils' bash util/build-gnu.sh --release-build ### Run tests as user - name: Run GNU tests shell: bash run: | ## Run GNU tests path_GNU='gnu' path_UUTILS='uutils' bash "uutils/util/run-gnu-test.sh" - name: Extract testing info from individual logs into JSON shell: bash run : | path_UUTILS='uutils' python uutils/util/gnu-json-result.py gnu/tests > ${{ env.TEST_FULL_SUMMARY_FILE }} ### Run tests as root - name: Run GNU root tests shell: bash run: | ## Run GNU root tests path_GNU='gnu' path_UUTILS='uutils' bash "uutils/util/run-gnu-test.sh" run-root - name: Extract testing info from individual logs (run as root) into JSON shell: bash run : | path_UUTILS='uutils' python uutils/util/gnu-json-result.py gnu/tests > ${{ env.TEST_ROOT_FULL_SUMMARY_FILE }} ### Upload artifacts - name: Upload full json results uses: 
actions/upload-artifact@v4 with: name: gnu-full-result path: ${{ env.TEST_FULL_SUMMARY_FILE }} - name: Upload root json results uses: actions/upload-artifact@v4 with: name: gnu-root-full-result path: ${{ env.TEST_ROOT_FULL_SUMMARY_FILE }} - name: Compress test logs shell: bash run : | # Compress logs before upload (fails otherwise) gzip gnu/tests/*/*.log - name: Upload test logs uses: actions/upload-artifact@v4 with: name: test-logs path: | gnu/tests/*.log gnu/tests/*/*.log.gz selinux: name: Run GNU tests (SELinux) runs-on: ubuntu-24.04 steps: #### Get the code, setup cache - name: Checkout code (uutils) uses: actions/checkout@v4 with: path: 'uutils' persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - uses: Swatinem/rust-cache@v2 with: workspaces: "./uutils -> target" - name: Checkout code (GNU coreutils) uses: actions/checkout@v4 with: repository: 'coreutils/coreutils' path: 'gnu' ref: ${{ env.REPO_GNU_REF }} submodules: false persist-credentials: false - name: Override submodule URL and initialize submodules # Use github instead of upstream git server run: | git submodule sync --recursive git config submodule.gnulib.url https://github.com/coreutils/gnulib.git git submodule update --init --recursive --depth 1 working-directory: gnu #### Lima build environment setup - name: Setup Lima uses: lima-vm/lima-actions/setup@v1 id: lima-actions-setup - name: Cache ~/.cache/lima uses: actions/cache@v4 with: path: ~/.cache/lima key: lima-${{ steps.lima-actions-setup.outputs.version }} - name: Start Fedora VM with SELinux run: limactl start --plain --name=default --cpus=4 --disk=40 --memory=8 --network=lima:user-v2 template://fedora - name: Setup SSH uses: lima-vm/lima-actions/ssh@v1 - name: Verify SELinux Status and Configuration run: | lima getenforce lima ls -laZ /etc/selinux lima sudo sestatus # Ensure we're running in enforcing mode lima sudo setenforce 1 lima getenforce # Create test files with SELinux contexts 
for testing lima sudo mkdir -p /var/test_selinux lima sudo touch /var/test_selinux/test_file lima sudo chcon -t etc_t /var/test_selinux/test_file lima ls -Z /var/test_selinux/test_file # Verify context - name: Install dependencies in VM run: | lima sudo dnf -y update lima sudo dnf -y install git autoconf autopoint bison texinfo gperf gcc g++ gdb jq libacl-devel libattr-devel libcap-devel libselinux-devel attr rustup clang-devel texinfo-tex wget automake patch quilt lima rustup-init -y --default-toolchain stable - name: Copy the sources to VM run: | rsync -a -e ssh . lima-default:~/work/ ### Build - name: Build binaries run: | lima bash -c "cd ~/work/uutils/ && bash util/build-gnu.sh --release-build" ### Run tests as user - name: Generate SELinux tests list run: | # Find and list all tests that require SELinux lima bash -c "cd ~/work/gnu/ && grep -l 'require_selinux_' -r tests/ > ~/work/uutils/selinux-tests.txt" lima bash -c "cd ~/work/uutils/ && cat selinux-tests.txt" # Count the tests lima bash -c "cd ~/work/uutils/ && echo 'Found SELinux tests:'; wc -l selinux-tests.txt" - name: Run GNU SELinux tests run: | lima sudo setenforce 1 lima getenforce lima cat /proc/filesystems lima bash -c "cd ~/work/uutils/ && bash util/run-gnu-test.sh \$(cat selinux-tests.txt)" - name: Extract testing info from individual logs into JSON shell: bash run : | lima bash -c "cd ~/work/gnu/ && python3 ../uutils/util/gnu-json-result.py tests > ~/work/${{ env.TEST_SELINUX_FULL_SUMMARY_FILE }}" ### Run tests as root - name: Run GNU SELinux root tests run: | lima bash -c "cd ~/work/uutils/ && CI=1 bash util/run-gnu-test.sh run-root \$(cat selinux-tests.txt)" - name: Extract testing info from individual logs (run as root) into JSON shell: bash run : | lima bash -c "cd ~/work/gnu/ && python3 ../uutils/util/gnu-json-result.py tests > ~/work/${{ env.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }}" ### Upload artifacts - name: Collect test logs and test results from VM run: | mkdir -p gnu/tests-selinux # 
Copy the json output back from the Lima VM to the host rsync -v -a -e ssh lima-default:~/work/*.json ./ # Copy the test directory now rsync -v -a -e ssh lima-default:~/work/gnu/tests/ ./gnu/tests-selinux/ - name: Upload SELinux json results uses: actions/upload-artifact@v4 with: name: selinux-gnu-full-result path: ${{ env.TEST_SELINUX_FULL_SUMMARY_FILE }} - name: Upload SELinux root json results uses: actions/upload-artifact@v4 with: name: selinux-root-gnu-full-result path: ${{ env.TEST_SELINUX_ROOT_FULL_SUMMARY_FILE }} - name: Compress SELinux test logs shell: bash run : | # Compress logs before upload (fails otherwise) gzip gnu/tests-selinux/*/*.log - name: Upload SELinux test logs uses: actions/upload-artifact@v4 with: name: selinux-test-logs path: | gnu/tests-selinux/*.log gnu/tests-selinux/*/*.log.gz aggregate: needs: [native, selinux] permissions: actions: read # for dawidd6/action-download-artifact to query and download artifacts contents: read # for actions/checkout to fetch code pull-requests: read # for dawidd6/action-download-artifact to query commit hash name: Aggregate GNU test results runs-on: ubuntu-24.04 steps: - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # TEST_SUMMARY_FILE='gnu-result.json' AGGREGATED_SUMMARY_FILE='aggregated-result.json' outputs TEST_SUMMARY_FILE AGGREGATED_SUMMARY_FILE - name: Checkout code (uutils) uses: actions/checkout@v4 with: path: 'uutils' persist-credentials: false - name: Retrieve reference artifacts uses: dawidd6/action-download-artifact@v11 # ref: continue-on-error: true ## don't break the build for missing reference artifacts (may be expired or just not generated yet) with: workflow: GnuTests.yml branch: "${{ env.DEFAULT_BRANCH }}" # workflow_conclusion: success ## (default); * but, if commit with failed GnuTests is merged 
into the default branch, future commits will all show regression errors in GnuTests CI until o/w fixed workflow_conclusion: completed ## continually recalibrates to last commit of default branch with a successful GnuTests (ie, "self-heals" from GnuTest regressions, but needs more supervision for/of regressions) path: "reference" - name: Download full json results uses: actions/download-artifact@v4 with: name: gnu-full-result path: results merge-multiple: true - name: Download root json results uses: actions/download-artifact@v4 with: name: gnu-root-full-result path: results merge-multiple: true - name: Download selinux json results uses: actions/download-artifact@v4 with: name: selinux-gnu-full-result path: results merge-multiple: true - name: Download selinux root json results uses: actions/download-artifact@v4 with: name: selinux-root-gnu-full-result path: results merge-multiple: true - name: Extract/summarize testing info id: summary shell: bash run: | ## Extract/summarize testing info outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } path_UUTILS='uutils' json_count=$(ls -l results/*.json | wc -l) if [[ "$json_count" -ne 4 ]]; then echo "::error ::Failed to download all results json files (expected 4 files, found $json_count); failing early" ls -lR results || true exit 1 fi # Look at all individual results and summarize eval $(python3 uutils/util/analyze-gnu-results.py -o=${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }} results/*.json) if [[ "$TOTAL" -eq 0 || "$TOTAL" -eq 1 ]]; then echo "::error ::Failed to parse test results from '${{ env.TEST_FULL_SUMMARY_FILE }}'; failing early" exit 1 fi output="GNU tests summary = TOTAL: $TOTAL / PASS: $PASS / FAIL: $FAIL / ERROR: $ERROR / SKIP: $SKIP" echo "${output}" if [[ "$FAIL" -gt 0 || "$ERROR" -gt 0 ]]; then echo "::warning ::${output}" fi jq -n \ --arg date "$(date --rfc-email)" \ --arg sha "$GITHUB_SHA" 
\ --arg total "$TOTAL" \ --arg pass "$PASS" \ --arg skip "$SKIP" \ --arg fail "$FAIL" \ --arg xpass "$XPASS" \ --arg error "$ERROR" \ '{($date): { sha: $sha, total: $total, pass: $pass, skip: $skip, fail: $fail, xpass: $xpass, error: $error, }}' > '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' HASH=$(sha1sum '${{ steps.vars.outputs.TEST_SUMMARY_FILE }}' | cut --delim=" " -f 1) outputs HASH - name: Upload SHA1/ID of 'test-summary' uses: actions/upload-artifact@v4 with: name: "${{ steps.summary.outputs.HASH }}" path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Upload test results summary uses: actions/upload-artifact@v4 with: name: test-summary path: "${{ steps.vars.outputs.TEST_SUMMARY_FILE }}" - name: Upload aggregated json results uses: actions/upload-artifact@v4 with: name: aggregated-result path: ${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }} - name: Compare test failures VS reference shell: bash run: | ## Compare test failures VS reference using JSON files REF_SUMMARY_FILE='reference/aggregated-result/aggregated-result.json' CURRENT_SUMMARY_FILE='${{ steps.vars.outputs.AGGREGATED_SUMMARY_FILE }}' REPO_DEFAULT_BRANCH='${{ env.DEFAULT_BRANCH }}' path_UUTILS='uutils' # Path to ignore file for intermittent issues IGNORE_INTERMITTENT="uutils/.github/workflows/ignore-intermittent.txt" # Set up comment directory COMMENT_DIR="reference/comment" mkdir -p ${COMMENT_DIR} echo ${{ github.event.number }} > ${COMMENT_DIR}/NR COMMENT_LOG="${COMMENT_DIR}/result.txt" COMPARISON_RESULT=0 if test -f "${CURRENT_SUMMARY_FILE}"; then if test -f "${REF_SUMMARY_FILE}"; then echo "Reference summary SHA1/ID: $(sha1sum -- "${REF_SUMMARY_FILE}")" echo "Current summary SHA1/ID: $(sha1sum -- "${CURRENT_SUMMARY_FILE}")" python3 uutils/util/compare_test_results.py \ --ignore-file "${IGNORE_INTERMITTENT}" \ --output "${COMMENT_LOG}" \ "${CURRENT_SUMMARY_FILE}" "${REF_SUMMARY_FILE}" COMPARISON_RESULT=$? 
else echo "::warning ::Skipping test comparison; no prior reference summary is available at '${REF_SUMMARY_FILE}'." fi else echo "::error ::Failed to find summary of test results (missing '${CURRENT_SUMMARY_FILE}'); failing early" exit 1 fi if [ ${COMPARISON_RESULT} -eq 1 ]; then echo "ONLY_INTERMITTENT=false" >> $GITHUB_ENV echo "::error ::Found new non-intermittent test failures" exit 1 else echo "ONLY_INTERMITTENT=true" >> $GITHUB_ENV echo "::notice ::No new test failures detected" fi - name: Upload comparison log (for GnuComment workflow) if: success() || failure() # run regardless of prior step success/failure uses: actions/upload-artifact@v4 with: name: comment path: reference/comment/ - name: Compare test summary VS reference if: success() || failure() # run regardless of prior step success/failure shell: bash run: | ## Compare test summary VS reference REF_SUMMARY_FILE='reference/test-summary/gnu-result.json' if test -f "${REF_SUMMARY_FILE}"; then echo "Reference SHA1/ID: $(sha1sum -- "${REF_SUMMARY_FILE}")" mv "${REF_SUMMARY_FILE}" main-gnu-result.json python uutils/util/compare_gnu_result.py else echo "::warning ::Skipping test summary comparison; no prior reference summary is available." fi coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/android.yml000066400000000000000000000166371504311601400265210ustar00rootroot00000000000000name: Android # spell-checker:ignore (people) reactivecircus Swatinem dtolnay juliangruber # spell-checker:ignore (shell/tools) TERMUX nextest udevadm pkill # spell-checker:ignore (misc) swiftshader playstore DATALOSS noaudio on: pull_request: push: branches: - '*' permissions: contents: read # to fetch code (actions/checkout) # End the current execution if there is a new changeset in the PR. 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} env: TERMUX: v0.118.0 KEY_POSTFIX: nextest+rustc-hash+adb+sshd+upgrade+XGB+inc18 COMMON_EMULATOR_OPTIONS: -no-window -noaudio -no-boot-anim -camera-back none -gpu swiftshader_indirect -metrics-collection EMULATOR_DISK_SIZE: 12GB EMULATOR_HEAP_SIZE: 2048M EMULATOR_BOOT_TIMEOUT: 1200 # 20min jobs: test_android: name: Test builds timeout-minutes: 90 strategy: fail-fast: false matrix: os: [ubuntu-latest] # , macos-latest cores: [4] # , 6 ram: [4096, 8192] api-level: [28] target: [google_apis_playstore] arch: [x86, x86_64] # , arm64-v8a exclude: - ram: 8192 arch: x86 - ram: 4096 arch: x86_64 runs-on: ${{ matrix.os }} env: EMULATOR_RAM_SIZE: ${{ matrix.ram }} EMULATOR_CORES: ${{ matrix.cores }} RUNNER_OS: ${{ matrix.os }} AVD_CACHE_KEY: "set later due to limitations of github actions not able to concatenate env variables" steps: - name: Concatenate values to environment file run: | echo "AVD_CACHE_KEY=${{ matrix.os }}-${{ matrix.cores }}-${{ matrix.ram }}-${{ matrix.api-level }}-${{ matrix.target }}-${{ matrix.arch }}+termux-${{ env.TERMUX }}+${{ env.KEY_POSTFIX }}" >> $GITHUB_ENV - name: Collect information about runner if: always() continue-on-error: true run: | hostname uname -a free -mh df -Th cat /proc/cpuinfo - name: (Linux) create links from home to data partition if: ${{ runner.os == 'Linux' }} continue-on-error: true run: | ls -lah /mnt/ cat /mnt/DATALOSS_WARNING_README.txt sudo mkdir /mnt/data sudo chmod a+rwx /mnt/data mkdir /mnt/data/.android && ln -s /mnt/data/.android ~/.android mkdir /mnt/data/work && ln -s /mnt/data/work ~/work - name: Enable KVM group perms (linux hardware acceleration) if: ${{ runner.os == 'Linux' }} run: | echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules sudo udevadm control --reload-rules sudo udevadm trigger --name-match=kvm - uses: 
actions/checkout@v4 with: persist-credentials: false - name: Collect information about runner if: always() continue-on-error: true run: | free -mh df -Th - name: Restore AVD cache uses: actions/cache/restore@v4 id: avd-cache continue-on-error: true with: path: | ~/.android/avd/* ~/.android/avd/*/snapshots/* ~/.android/adb* ~/__rustc_hash__ key: avd-${{ env.AVD_CACHE_KEY }} - name: Collect information about runner after AVD cache if: always() continue-on-error: true run: | free -mh df -Th ls -lah /mnt/data du -sch /mnt/data - name: Delete AVD Lockfile when run from cache if: steps.avd-cache.outputs.cache-hit == 'true' run: | rm -f \ ~/.android/avd/*.avd/*.lock \ ~/.android/avd/*/*.lock - name: Create and cache emulator image if: steps.avd-cache.outputs.cache-hit != 'true' uses: reactivecircus/android-emulator-runner@v2.34.0 with: api-level: ${{ matrix.api-level }} target: ${{ matrix.target }} arch: ${{ matrix.arch }} ram-size: ${{ env.EMULATOR_RAM_SIZE }} heap-size: ${{ env.EMULATOR_HEAP_SIZE }} disk-size: ${{ env.EMULATOR_DISK_SIZE }} cores: ${{ env.EMULATOR_CORES }} force-avd-creation: true emulator-options: ${{ env.COMMON_EMULATOR_OPTIONS }} -no-snapshot-load emulator-boot-timeout: ${{ env.EMULATOR_BOOT_TIMEOUT }} script: | util/android-commands.sh init "${{ matrix.arch }}" "${{ matrix.api-level }}" "${{ env.TERMUX }}" - name: Save AVD cache if: steps.avd-cache.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: path: | ~/.android/avd/* ~/.android/avd/*/snapshots/* ~/.android/adb* ~/__rustc_hash__ key: avd-${{ env.AVD_CACHE_KEY }} - uses: juliangruber/read-file-action@v1 id: read_rustc_hash with: # ~ expansion didn't work path: ${{ runner.os == 'Linux' && '/home/runner/__rustc_hash__' || '/Users/runner/__rustc_hash__' }} trim: true - name: Restore rust cache id: rust-cache uses: actions/cache/restore@v4 with: path: ~/__rust_cache__ # The version vX at the end of the key is just a development version to avoid conflicts in # the github cache during the 
development of this workflow key: ${{ matrix.arch }}_${{ matrix.target}}_${{ steps.read_rustc_hash.outputs.content }}_${{ hashFiles('**/Cargo.toml', '**/Cargo.lock') }}_v3 - name: Collect information about runner resources if: always() continue-on-error: true run: | free -mh df -Th - name: Build and Test uses: reactivecircus/android-emulator-runner@v2.34.0 with: api-level: ${{ matrix.api-level }} target: ${{ matrix.target }} arch: ${{ matrix.arch }} ram-size: ${{ env.EMULATOR_RAM_SIZE }} heap-size: ${{ env.EMULATOR_HEAP_SIZE }} disk-size: ${{ env.EMULATOR_DISK_SIZE }} cores: ${{ env.EMULATOR_CORES }} force-avd-creation: false emulator-options: ${{ env.COMMON_EMULATOR_OPTIONS }} -no-snapshot-save -snapshot ${{ env.AVD_CACHE_KEY }} emulator-boot-timeout: ${{ env.EMULATOR_BOOT_TIMEOUT }} # This is not a usual script. Every line is executed in a separate shell with `sh -c`. If # one of the lines returns with error the whole script is failed (like running a script with # set -e) and in consequences the other lines (shells) are not executed. 
script: | util/android-commands.sh sync_host util/android-commands.sh build util/android-commands.sh tests if [ "${{ steps.rust-cache.outputs.cache-hit }}" != 'true' ]; then util/android-commands.sh sync_image; fi; exit 0 - name: Collect information about runner resources if: always() continue-on-error: true run: | free -mh df -Th - name: Save rust cache if: steps.rust-cache.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: path: ~/__rust_cache__ key: ${{ matrix.arch }}_${{ matrix.target}}_${{ steps.read_rustc_hash.outputs.content }}_${{ hashFiles('**/Cargo.toml', '**/Cargo.lock') }}_v3 - name: archive any output (error screenshots) if: always() uses: actions/upload-artifact@v4 with: name: test_output_${{ env.AVD_CACHE_KEY }} path: output - name: Collect information about runner resources if: always() continue-on-error: true run: | free -mh df -Th coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/code-quality.yml000066400000000000000000000224371504311601400274740ustar00rootroot00000000000000name: Code Quality # spell-checker:ignore (people) reactivecircus Swatinem dtolnay juliangruber pell taplo # spell-checker:ignore (misc) TERMUX noaudio pkill swiftshader esac sccache pcoreutils shopt subshell dequote on: pull_request: push: branches: - '*' env: # * style job configuration STYLE_FAIL_ON_FAULT: true ## (bool) fail the build if a style job contains a fault (error or warning); may be overridden on a per-job basis permissions: contents: read # to fetch code (actions/checkout) # End the current execution if there is a new changeset in the PR. 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: style_format: name: Style/format runs-on: ${{ matrix.job.os }} strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - uses: Swatinem/rust-cache@v2 - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # failure mode unset FAIL_ON_FAULT ; case "$STYLE_FAIL_ON_FAULT" in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; outputs FAIL_ON_FAULT FAULT_TYPE - name: "`cargo fmt` testing" shell: bash run: | ## `cargo fmt` testing unset fault fault_type="${{ steps.vars.outputs.FAULT_TYPE }}" fault_prefix=$(echo "$fault_type" | tr '[:lower:]' '[:upper:]') # * convert any errors/warnings to GHA UI annotations; ref: S=$(cargo fmt -- --check) && printf "%s\n" "$S" || { printf "%s\n" "$S" ; printf "%s\n" "$S" | sed -E -n -e "s/^Diff[[:space:]]+in[[:space:]]+${PWD//\//\\/}\/(.*)[[:space:]]+at[[:space:]]+[^0-9]+([0-9]+).*$/::${fault_type} file=\1,line=\2::${fault_prefix}: \`cargo fmt\`: style violation (file:'\1', line:\2; use \`cargo fmt -- \"\1\"\`)/p" ; fault=true ; } if [ -n "${{ steps.vars.outputs.FAIL_ON_FAULT }}" ] && [ -n "$fault" ]; then exit 1 ; fi - name: "cargo fmt on fuzz dir" shell: bash run: | cd fuzz cargo fmt --check style_lint: name: Style/lint runs-on: ${{ matrix.job.os }} env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" strategy: fail-fast: false matrix: job: - { os: ubuntu-latest , features: all , workspace: true } - { os: macos-latest , features: feat_os_macos } - { os: windows-latest , features: feat_os_windows } 
steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@master with: toolchain: stable components: clippy - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # failure mode unset FAIL_ON_FAULT ; case "$STYLE_FAIL_ON_FAULT" in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; outputs FAIL_ON_FAULT FAULT_TYPE - name: Install/setup prerequisites shell: bash run: | ## Install/setup prerequisites case '${{ matrix.job.os }}' in ubuntu-*) # selinux headers needed to enable all features sudo apt-get -y install libselinux1-dev ;; esac - name: "`cargo clippy` lint testing" uses: nick-fields/retry@v3 with: max_attempts: 3 retry_on: error timeout_minutes: 90 shell: bash command: | ## `cargo clippy` lint testing unset fault fault_type="${{ steps.vars.outputs.FAULT_TYPE }}" fault_prefix=$(echo "$fault_type" | tr '[:lower:]' '[:upper:]') # * convert any warnings to GHA UI annotations; ref: if [[ "${{ matrix.job.features }}" == "all" ]]; then extra="--all-features" else extra="--features ${{ matrix.job.features }}" fi case '${{ matrix.job.workspace-tests }}' in 1|t|true|y|yes) extra="${extra} --workspace" ;; esac S=$(cargo clippy --all-targets $extra --tests -pcoreutils -- -D warnings 2>&1) && printf "%s\n" "$S" || { printf "%s\n" "$S" ; printf "%s" "$S" | sed -E -n -e '/^error:/{' -e "N; s/^error:[[:space:]]+(.*)\\n[[:space:]]+-->[[:space:]]+(.*):([0-9]+):([0-9]+).*$/::${fault_type} file=\2,line=\3,col=\4::${fault_prefix}: \`cargo clippy\`: \1 (file:'\2', line:\3)/p;" -e '}' ; fault=true ; } if [ -n "${{ steps.vars.outputs.FAIL_ON_FAULT }}" ] && [ -n "$fault" ]; then exit 1 ; fi style_spellcheck: name: 
Style/spelling runs-on: ${{ matrix.job.os }} strategy: matrix: job: - { os: ubuntu-latest , features: feat_os_unix } steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Initialize workflow variables id: vars shell: bash run: | ## VARs setup outputs() { step_id="${{ github.action }}"; for var in "$@" ; do echo steps.${step_id}.outputs.${var}="${!var}"; echo "${var}=${!var}" >> $GITHUB_OUTPUT; done; } # failure mode unset FAIL_ON_FAULT ; case "$STYLE_FAIL_ON_FAULT" in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; outputs FAIL_ON_FAULT FAULT_TYPE - name: Install/setup prerequisites shell: bash run: | sudo apt-get -y update ; sudo apt-get -y install npm ; sudo npm install cspell -g ; - name: Run `cspell` shell: bash run: | ## Run `cspell` unset fault fault_type="${{ steps.vars.outputs.FAULT_TYPE }}" fault_prefix=$(echo "$fault_type" | tr '[:lower:]' '[:upper:]') # * find cspell configuration ; note: avoid quotes around ${cfg_file} b/c `cspell` (v4) doesn't correctly dequote the config argument (or perhaps a subshell expansion issue?) cfg_files=($(shopt -s nullglob ; echo {.vscode,.}/{,.}c[sS]pell{.json,.config{.js,.cjs,.json,.yaml,.yml},.yaml,.yml} ;)) cfg_file=${cfg_files[0]} unset CSPELL_CFG_OPTION ; if [ -n "$cfg_file" ]; then CSPELL_CFG_OPTION="--config $cfg_file" ; fi S=$(cspell ${CSPELL_CFG_OPTION} --no-summary --no-progress .) 
&& printf "%s\n" "$S" || { printf "%s\n" "$S" ; printf "%s" "$S" | sed -E -n "s/${PWD//\//\\/}\/(.*):(.*):(.*) - (.*)/::${fault_type} file=\1,line=\2,col=\3::${fault_type^^}: \4 (file:'\1', line:\2)/p" ; fault=true ; true ; } if [ -n "${{ steps.vars.outputs.FAIL_ON_FAULT }}" ] && [ -n "$fault" ]; then exit 1 ; fi toml_format: name: Style/toml runs-on: ubuntu-latest steps: - name: Clone repository uses: actions/checkout@v4 with: persist-credentials: false - name: Check run: npx --yes @taplo/cli fmt --check python: name: Style/Python runs-on: ubuntu-latest steps: - name: Clone repository uses: actions/checkout@v4 with: persist-credentials: false - name: ruff uses: astral-sh/ruff-action@v3 with: src: "./util" - name: ruff - format uses: astral-sh/ruff-action@v3 with: src: "./util" args: format --check - name: Run Python unit tests shell: bash run: | python3 -m unittest util/test_compare_test_results.py pre_commit: name: Pre-commit hooks runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 with: persist-credentials: false - name: Setup Rust toolchain uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt, clippy - name: Cache Rust dependencies uses: Swatinem/rust-cache@v2 - name: Setup Python uses: actions/setup-python@v5 with: python-version: '3.x' - name: Install pre-commit run: pip install pre-commit - name: Install cspell run: npm install -g cspell - name: Cache pre-commit environments uses: actions/cache@v4 with: path: ~/.cache/pre-commit key: pre-commit-${{ runner.os }}-${{ hashFiles('.pre-commit-config.yaml') }} restore-keys: | pre-commit-${{ runner.os }}- - name: Run pre-commit run: pre-commit run coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/freebsd.yml000066400000000000000000000201671504311601400265040ustar00rootroot00000000000000name: FreeBSD # spell-checker:ignore sshfs usesh vmactions taiki Swatinem esac fdescfs fdesc sccache nextest copyback env: # * style job configuration 
STYLE_FAIL_ON_FAULT: true ## (bool) fail the build if a style job contains a fault (error or warning); may be overridden on a per-job basis on: pull_request: push: branches: - '*' permissions: contents: read # to fetch code (actions/checkout) # End the current execution if there is a new changeset in the PR. concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: style: name: Style and Lint runs-on: ${{ matrix.job.os }} timeout-minutes: 45 strategy: fail-fast: false matrix: job: - { os: ubuntu-24.04 , features: unix } env: SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Prepare, build and test uses: vmactions/freebsd-vm@v1.2.1 with: usesh: true sync: rsync copyback: false # We need jq and GNU coreutils to run show-utils.sh and bash to use inline shell string replacement prepare: pkg install -y curl sudo jq coreutils bash run: | ## Prepare, build, and test # implementation modelled after ref: # * NOTE: All steps need to be run in this block, otherwise, we are operating back on the mac host set -e # TEST_USER=tester REPO_NAME=${GITHUB_WORKSPACE##*/} WORKSPACE_PARENT="/home/runner/work/${REPO_NAME}" WORKSPACE="${WORKSPACE_PARENT}/${REPO_NAME}" # pw adduser -n ${TEST_USER} -d /root/ -g wheel -c "Coreutils user to build" -w random chown -R ${TEST_USER}:wheel /root/ "${WORKSPACE_PARENT}"/ whoami # # Further work needs to be done in a sudo as we are changing users sudo -i -u ${TEST_USER} bash << EOF set -e whoami curl https://sh.rustup.rs -sSf --output rustup.sh sh rustup.sh -y -c rustfmt,clippy --profile=minimal -t stable . 
${HOME}/.cargo/env ## VARs setup cd "${WORKSPACE}" unset FAIL_ON_FAULT ; case '${{ env.STYLE_FAIL_ON_FAULT }}' in ''|0|f|false|n|no|off) FAULT_TYPE=warning ;; *) FAIL_ON_FAULT=true ; FAULT_TYPE=error ;; esac; FAULT_PREFIX=\$(echo "\${FAULT_TYPE}" | tr '[:lower:]' '[:upper:]') # * determine sub-crate utility list UTILITY_LIST="\$(./util/show-utils.sh --features ${{ matrix.job.features }})" CARGO_UTILITY_LIST_OPTIONS="\$(for u in \${UTILITY_LIST}; do echo -n "-puu_\${u} "; done;)" ## Info # environment echo "## environment" echo "CI='${CI}'" echo "REPO_NAME='${REPO_NAME}'" echo "TEST_USER='${TEST_USER}'" echo "WORKSPACE_PARENT='${WORKSPACE_PARENT}'" echo "WORKSPACE='${WORKSPACE}'" echo "FAULT_PREFIX='\${FAULT_PREFIX}'" echo "UTILITY_LIST='\${UTILITY_LIST}'" env | sort # tooling info echo "## tooling info" cargo -V rustc -V # # To ensure that files are cleaned up, we don't want to exit on error set +e unset FAULT ## cargo fmt testing echo "## cargo fmt testing" # * convert any errors/warnings to GHA UI annotations; ref: S=\$(cargo fmt -- --check) && printf "%s\n" "\$S" || { printf "%s\n" "\$S" ; printf "%s\n" "\$S" | sed -E -n -e "s/^Diff[[:space:]]+in[[:space:]]+\${PWD//\//\\\\/}\/(.*)[[:space:]]+at[[:space:]]+[^0-9]+([0-9]+).*\$/::\${FAULT_TYPE} file=\1,line=\2::\${FAULT_PREFIX}: \\\`cargo fmt\\\`: style violation (file:'\1', line:\2; use \\\`cargo fmt -- \"\1\"\\\`)/p" ; FAULT=true ; } ## cargo clippy lint testing if [ -z "\${FAULT}" ]; then echo "## cargo clippy lint testing" # * convert any warnings to GHA UI annotations; ref: S=\$(cargo clippy --all-targets \${CARGO_UTILITY_LIST_OPTIONS} -- -D warnings 2>&1) && printf "%s\n" "\$S" || { printf "%s\n" "\$S" ; printf "%s" "\$S" | sed -E -n -e '/^error:/{' -e "N; s/^error:[[:space:]]+(.*)\\n[[:space:]]+-->[[:space:]]+(.*):([0-9]+):([0-9]+).*\$/::\${FAULT_TYPE} file=\2,line=\3,col=\4::\${FAULT_PREFIX}: \\\`cargo clippy\\\`: \1 (file:'\2', line:\3)/p;" -e '}' ; FAULT=true ; } fi # Clean to avoid to rsync back the 
files cargo clean if [ -n "\${FAIL_ON_FAULT}" ] && [ -n "\${FAULT}" ]; then exit 1 ; fi EOF test: name: Tests runs-on: ${{ matrix.job.os }} timeout-minutes: 45 strategy: fail-fast: false matrix: job: - { os: ubuntu-24.04 , features: unix } env: mem: 4096 SCCACHE_GHA_ENABLED: "true" RUSTC_WRAPPER: "sccache" steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: Swatinem/rust-cache@v2 - name: Run sccache-cache uses: mozilla-actions/sccache-action@v0.0.9 - name: Prepare, build and test uses: vmactions/freebsd-vm@v1.2.1 with: usesh: true sync: rsync copyback: false prepare: pkg install -y curl gmake sudo run: | ## Prepare, build, and test # implementation modelled after ref: # * NOTE: All steps need to be run in this block, otherwise, we are operating back on the mac host set -e # # We need a file-descriptor file system to test test_ls::test_ls_io_errors mount -t fdescfs fdesc /dev/fd # TEST_USER=tester REPO_NAME=${GITHUB_WORKSPACE##*/} WORKSPACE_PARENT="/home/runner/work/${REPO_NAME}" WORKSPACE="${WORKSPACE_PARENT}/${REPO_NAME}" # pw adduser -n ${TEST_USER} -d /root/ -g wheel -c "Coreutils user to build" -w random # chown -R ${TEST_USER}:wheel /root/ "${WORKSPACE_PARENT}"/ chown -R ${TEST_USER}:wheel /root/ "${WORKSPACE_PARENT}"/ whoami # # Further work needs to be done in a sudo as we are changing users sudo -i -u ${TEST_USER} sh << EOF set -e whoami curl https://sh.rustup.rs -sSf --output rustup.sh sh rustup.sh -y --profile=minimal . 
$HOME/.cargo/env # Install nextest mkdir -p ~/.cargo/bin curl -LsSf https://get.nexte.st/latest/freebsd | tar zxf - -C ~/.cargo/bin ## Info # environment echo "## environment" echo "CI='${CI}'" echo "REPO_NAME='${REPO_NAME}'" echo "TEST_USER='${TEST_USER}'" echo "WORKSPACE_PARENT='${WORKSPACE_PARENT}'" echo "WORKSPACE='${WORKSPACE}'" env | sort # tooling info echo "## tooling info" cargo -V cargo nextest --version rustc -V # # To ensure that files are cleaned up, we don't want to exit on error set +e cd "${WORKSPACE}" unset FAULT cargo build || FAULT=1 export PATH=~/.cargo/bin:${PATH} export RUST_BACKTRACE=1 export CARGO_TERM_COLOR=always if (test -z "\$FAULT"); then cargo nextest run --hide-progress-bar --profile ci --features '${{ matrix.job.features }}' || FAULT=1 ; fi if (test -z "\$FAULT"); then cargo nextest run --hide-progress-bar --profile ci --all-features -p uucore || FAULT=1 ; fi # Clean to avoid to rsync back the files cargo clean if (test -n "\$FAULT"); then exit 1 ; fi EOF coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/fuzzing.yml000066400000000000000000000262441504311601400265700ustar00rootroot00000000000000name: Fuzzing # spell-checker:ignore fuzzer dtolnay Swatinem on: pull_request: push: branches: - '*' permissions: contents: read # to fetch code (actions/checkout) # End the current execution if there is a new changeset in the PR. 
concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: fuzz-build: name: Build the fuzzers runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@nightly - name: Install `cargo-fuzz` run: cargo install cargo-fuzz - uses: Swatinem/rust-cache@v2 with: shared-key: "cargo-fuzz-cache-key" cache-directories: "fuzz/target" - name: Run `cargo-fuzz build` run: cargo +nightly fuzz build fuzz-run: needs: fuzz-build name: Fuzz runs-on: ubuntu-latest timeout-minutes: 5 env: RUN_FOR: 60 strategy: matrix: test-target: - { name: fuzz_test, should_pass: true } # https://github.com/uutils/coreutils/issues/5311 - { name: fuzz_date, should_pass: false } - { name: fuzz_expr, should_pass: true } - { name: fuzz_printf, should_pass: true } - { name: fuzz_echo, should_pass: true } - { name: fuzz_seq, should_pass: false } - { name: fuzz_sort, should_pass: false } - { name: fuzz_wc, should_pass: false } - { name: fuzz_cut, should_pass: false } - { name: fuzz_split, should_pass: false } - { name: fuzz_tr, should_pass: false } - { name: fuzz_env, should_pass: false } - { name: fuzz_cksum, should_pass: false } - { name: fuzz_parse_glob, should_pass: true } - { name: fuzz_parse_size, should_pass: true } - { name: fuzz_parse_time, should_pass: true } - { name: fuzz_seq_parse_number, should_pass: true } steps: - uses: actions/checkout@v4 with: persist-credentials: false - uses: dtolnay/rust-toolchain@nightly - name: Install `cargo-fuzz` run: cargo install cargo-fuzz - uses: Swatinem/rust-cache@v2 with: shared-key: "cargo-fuzz-cache-key" cache-directories: "fuzz/target" - name: Restore Cached Corpus uses: actions/cache/restore@v4 with: key: corpus-cache-${{ matrix.test-target.name }} path: | fuzz/corpus/${{ matrix.test-target.name }} - name: Run ${{ matrix.test-target.name }} for XX seconds id: run_fuzzer shell: bash continue-on-error: ${{ 
!matrix.test-target.name.should_pass }} run: | mkdir -p fuzz/stats STATS_FILE="fuzz/stats/${{ matrix.test-target.name }}.txt" cargo +nightly fuzz run ${{ matrix.test-target.name }} -- -max_total_time=${{ env.RUN_FOR }} -timeout=${{ env.RUN_FOR }} -detect_leaks=0 -print_final_stats=1 2>&1 | tee "$STATS_FILE" # Extract key stats from the output if grep -q "stat::number_of_executed_units" "$STATS_FILE"; then RUNS=$(grep "stat::number_of_executed_units" "$STATS_FILE" | awk '{print $2}') echo "runs=$RUNS" >> "$GITHUB_OUTPUT" else echo "runs=unknown" >> "$GITHUB_OUTPUT" fi if grep -q "stat::average_exec_per_sec" "$STATS_FILE"; then EXEC_RATE=$(grep "stat::average_exec_per_sec" "$STATS_FILE" | awk '{print $2}') echo "exec_rate=$EXEC_RATE" >> "$GITHUB_OUTPUT" else echo "exec_rate=unknown" >> "$GITHUB_OUTPUT" fi if grep -q "stat::new_units_added" "$STATS_FILE"; then NEW_UNITS=$(grep "stat::new_units_added" "$STATS_FILE" | awk '{print $2}') echo "new_units=$NEW_UNITS" >> "$GITHUB_OUTPUT" else echo "new_units=unknown" >> "$GITHUB_OUTPUT" fi # Save should_pass value to file for summary job to use echo "${{ matrix.test-target.should_pass }}" > "fuzz/stats/${{ matrix.test-target.name }}.should_pass" # Print stats to job output for immediate visibility echo "----------------------------------------" echo "FUZZING STATISTICS FOR ${{ matrix.test-target.name }}" echo "----------------------------------------" echo "Runs: $(grep -q "stat::number_of_executed_units" "$STATS_FILE" && grep "stat::number_of_executed_units" "$STATS_FILE" | awk '{print $2}' || echo "unknown")" echo "Execution Rate: $(grep -q "stat::average_exec_per_sec" "$STATS_FILE" && grep "stat::average_exec_per_sec" "$STATS_FILE" | awk '{print $2}' || echo "unknown") execs/sec" echo "New Units: $(grep -q "stat::new_units_added" "$STATS_FILE" && grep "stat::new_units_added" "$STATS_FILE" | awk '{print $2}' || echo "unknown")" echo "Expected: ${{ matrix.test-target.name.should_pass }}" if grep -q "SUMMARY: " 
"$STATS_FILE"; then echo "Status: $(grep "SUMMARY: " "$STATS_FILE" | head -1)" else echo "Status: Completed" fi echo "----------------------------------------" # Add summary to GitHub step summary echo "### Fuzzing Results for ${{ matrix.test-target.name }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY if grep -q "stat::number_of_executed_units" "$STATS_FILE"; then echo "| Runs | $(grep "stat::number_of_executed_units" "$STATS_FILE" | awk '{print $2}') |" >> $GITHUB_STEP_SUMMARY fi if grep -q "stat::average_exec_per_sec" "$STATS_FILE"; then echo "| Execution Rate | $(grep "stat::average_exec_per_sec" "$STATS_FILE" | awk '{print $2}') execs/sec |" >> $GITHUB_STEP_SUMMARY fi if grep -q "stat::new_units_added" "$STATS_FILE"; then echo "| New Units | $(grep "stat::new_units_added" "$STATS_FILE" | awk '{print $2}') |" >> $GITHUB_STEP_SUMMARY fi echo "| Should pass | ${{ matrix.test-target.should_pass }} |" >> $GITHUB_STEP_SUMMARY if grep -q "SUMMARY: " "$STATS_FILE"; then echo "| Status | $(grep "SUMMARY: " "$STATS_FILE" | head -1) |" >> $GITHUB_STEP_SUMMARY else echo "| Status | Completed |" >> $GITHUB_STEP_SUMMARY fi echo "" >> $GITHUB_STEP_SUMMARY - name: Save Corpus Cache uses: actions/cache/save@v4 with: key: corpus-cache-${{ matrix.test-target.name }} path: | fuzz/corpus/${{ matrix.test-target.name }} - name: Upload Stats uses: actions/upload-artifact@v4 with: name: fuzz-stats-${{ matrix.test-target.name }} path: | fuzz/stats/${{ matrix.test-target.name }}.txt fuzz/stats/${{ matrix.test-target.name }}.should_pass retention-days: 5 fuzz-summary: needs: fuzz-run name: Fuzzing Summary runs-on: ubuntu-latest if: always() steps: - uses: actions/checkout@v4 with: persist-credentials: false - name: Download all stats uses: actions/download-artifact@v4 with: path: fuzz/stats-artifacts pattern: fuzz-stats-* merge-multiple: true - name: Prepare stats directory run: 
| mkdir -p fuzz/stats # Debug: List content of stats-artifacts directory echo "Contents of stats-artifacts directory:" find fuzz/stats-artifacts -type f | sort # Extract files from the artifact directories - handle nested directories find fuzz/stats-artifacts -type f -name "*.txt" -exec cp {} fuzz/stats/ \; find fuzz/stats-artifacts -type f -name "*.should_pass" -exec cp {} fuzz/stats/ \; # Debug information echo "Contents of stats directory after extraction:" ls -la fuzz/stats/ echo "Contents of should_pass files (if any):" cat fuzz/stats/*.should_pass 2>/dev/null || echo "No should_pass files found" - name: Generate Summary run: | echo "# Fuzzing Summary" > fuzzing_summary.md echo "" >> fuzzing_summary.md echo "| Target | Runs | Exec/sec | New Units | Should pass | Status |" >> fuzzing_summary.md echo "|--------|------|----------|-----------|-------------|--------|" >> fuzzing_summary.md TOTAL_RUNS=0 TOTAL_NEW_UNITS=0 for stat_file in fuzz/stats/*.txt; do TARGET=$(basename "$stat_file" .txt) SHOULD_PASS_FILE="${stat_file%.*}.should_pass" # Get expected status if [ -f "$SHOULD_PASS_FILE" ]; then EXPECTED=$(cat "$SHOULD_PASS_FILE") else EXPECTED="unknown" fi # Extract runs if grep -q "stat::number_of_executed_units" "$stat_file"; then RUNS=$(grep "stat::number_of_executed_units" "$stat_file" | awk '{print $2}') TOTAL_RUNS=$((TOTAL_RUNS + RUNS)) else RUNS="unknown" fi # Extract execution rate if grep -q "stat::average_exec_per_sec" "$stat_file"; then EXEC_RATE=$(grep "stat::average_exec_per_sec" "$stat_file" | awk '{print $2}') else EXEC_RATE="unknown" fi # Extract new units added if grep -q "stat::new_units_added" "$stat_file"; then NEW_UNITS=$(grep "stat::new_units_added" "$stat_file" | awk '{print $2}') if [[ "$NEW_UNITS" =~ ^[0-9]+$ ]]; then TOTAL_NEW_UNITS=$((TOTAL_NEW_UNITS + NEW_UNITS)) fi else NEW_UNITS="unknown" fi # Extract status if grep -q "SUMMARY: " "$stat_file"; then STATUS=$(grep "SUMMARY: " "$stat_file" | head -1) else STATUS="Completed" fi echo "| 
$TARGET | $RUNS | $EXEC_RATE | $NEW_UNITS | $EXPECTED | $STATUS |" >> fuzzing_summary.md done echo "" >> fuzzing_summary.md echo "## Overall Statistics" >> fuzzing_summary.md echo "" >> fuzzing_summary.md echo "- **Total runs:** $TOTAL_RUNS" >> fuzzing_summary.md echo "- **Total new units discovered:** $TOTAL_NEW_UNITS" >> fuzzing_summary.md echo "- **Average execution rate:** $(grep -h "stat::average_exec_per_sec" fuzz/stats/*.txt | awk '{sum += $2; count++} END {if (count > 0) print sum/count " execs/sec"; else print "unknown"}')" >> fuzzing_summary.md # Add count by expected status echo "- **Tests expected to pass:** $(find fuzz/stats -name "*.should_pass" -exec cat {} \; | grep -c "true")" >> fuzzing_summary.md echo "- **Tests expected to fail:** $(find fuzz/stats -name "*.should_pass" -exec cat {} \; | grep -c "false")" >> fuzzing_summary.md # Write to GitHub step summary cat fuzzing_summary.md >> $GITHUB_STEP_SUMMARY - name: Show Summary run: | cat fuzzing_summary.md - name: Upload Summary uses: actions/upload-artifact@v4 with: name: fuzzing-summary path: fuzzing_summary.md retention-days: 5 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.github/workflows/ignore-intermittent.txt000066400000000000000000000001771504311601400311160ustar00rootroot00000000000000tests/tail/inotify-dir-recreate tests/timeout/timeout tests/rm/rm1 tests/misc/stdbuf tests/misc/usage_vs_getopt tests/misc/tee coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.gitignore000066400000000000000000000003351504311601400227350ustar00rootroot00000000000000# spell-checker:ignore (misc) direnv target/ coverage/ /src/*/gen_table /build/ /tmp/ /busybox/ /.vscode/ /.vs/ /public/ *~ .*.swp .*.swo .idea lib*.a /docs/_build *.iml ### macOS ### .DS_Store ### direnv ### /.direnv/ coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.markdownlint.yaml000066400000000000000000000002761504311601400244240ustar00rootroot00000000000000# Disable 'Line length'. 
Doesn't provide much values MD013: false # Disable 'Fenced code blocks should have a language specified' # Doesn't provide much in src/ to enforce it MD040: false coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.pre-commit-config.yaml000066400000000000000000000034261504311601400252320ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-added-large-files - id: check-executables-have-shebangs - id: check-json - id: check-shebang-scripts-are-executable exclude: '.+\.rs' # would be triggered by #![some_attribute] - id: check-symlinks - id: check-toml - id: check-yaml args: [ --allow-multiple-documents ] - id: destroyed-symlinks - id: end-of-file-fixer - id: mixed-line-ending args: [ --fix=lf ] - id: trailing-whitespace - repo: https://github.com/mozilla-l10n/moz-fluent-linter rev: v0.4.8 hooks: - id: fluent_linter files: \.ftl$ args: [--config, .github/fluent_linter_config.yml, src/uu/] - repo: local hooks: - id: rust-linting name: Rust linting description: Run cargo fmt on files included in the commit. entry: cargo +stable fmt -- pass_filenames: true types: [file, rust] language: system - id: rust-clippy name: Rust clippy description: Run cargo clippy on files included in the commit. entry: cargo +stable clippy --workspace --all-targets --all-features -- -D warnings pass_filenames: false types: [file, rust] language: system - id: cspell name: Code spell checker (cspell) description: Run cspell to check for spelling errors (if available). 
entry: bash -c 'if command -v cspell >/dev/null 2>&1; then cspell --no-must-find-files -- "$@"; else echo "cspell not found, skipping spell check"; exit 0; fi' -- pass_filenames: true language: system coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.rustfmt.toml000066400000000000000000000000641504311601400234230ustar00rootroot00000000000000# * using all default `cargo fmt`/`rustfmt` options coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/000077500000000000000000000000001504311601400223055ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/.gitattributes000066400000000000000000000001501504311601400251740ustar00rootroot00000000000000# Configure GitHub to not mark comments in configuration files as errors *.json linguist-language=jsonc coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cSpell.json000066400000000000000000000021431504311601400244220ustar00rootroot00000000000000// `cspell` settings // spell-checker:ignore oranda { // version of the setting file "version": "0.2", // spelling language "language": "en", // custom dictionaries "dictionaries": ["acronyms+names", "jargon", "people", "shell", "workspace"], "dictionaryDefinitions": [ { "name": "acronyms+names", "path": "./cspell.dictionaries/acronyms+names.wordlist.txt" }, { "name": "jargon", "path": "./cspell.dictionaries/jargon.wordlist.txt" }, { "name": "people", "path": "./cspell.dictionaries/people.wordlist.txt" }, { "name": "shell", "path": "./cspell.dictionaries/shell.wordlist.txt" }, { "name": "workspace", "path": "./cspell.dictionaries/workspace.wordlist.txt" } ], // files to ignore (globs supported) "ignorePaths": [ ".git/**", "Cargo.lock", "oranda.json", "target/**", "tests/**/fixtures/**", "src/uu/dd/test-resources/**", "vendor/**", "**/*.svg", "src/uu/*/locales/*.ftl" ], "enableGlobDot": true, // words to ignore (even if they are in the flagWords) "ignoreWords": [], // words to always consider correct "words": [] } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries/000077500000000000000000000000001504311601400262435ustar00rootroot00000000000000acronyms+names.wordlist.txt000066400000000000000000000013021504311601400335210ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries# * abbreviations / acronyms AIX ASLR # address space layout randomization AST # abstract syntax tree CICD # continuous integration/deployment CPU CPUs DevOps Ext3 FIFO FIFOs FQDN # fully qualified domain name GID # group ID GIDs GNU GNUEABI GNUEABIhf JFS MSRV # minimum supported rust version MSVC NixOS POSIX POSIXLY RISC RISCV RNG # random number generator RNGs ReiserFS Solaris UID # user ID UIDs UUID # universally unique identifier WASI WASM XFS aarch flac impls lzma loongarch # * names BusyBox BusyTest Codacy Cygwin Deno EditorConfig EPEL FreeBSD Gmail GNU Illumos Irix libfuzzer MS-DOS MSDOS MacOS MinGW Minix NetBSD Novell Nushell OpenBSD POSIX PowerPC SELinux SkyPack Solaris SysV Xenix Yargs coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries/jargon.wordlist.txt000066400000000000000000000030201504311601400321250ustar00rootroot00000000000000AFAICT alloc arity autogenerate autogenerated autogenerates bitmask bitwise bytewise canonicalization canonicalize canonicalizing capget codepoint codepoints codeready codegen colorizable colorize coprime consts conv cyclomatic dedup deduplication demangle denoland deque dequeue dev devs discoverability duplicative dsync endianness enqueue errored executable executables exponentiate eval esac falsey fileio filesystem filesystems flamegraph fsxattr fullblock getfacl getfattr getopt gibi gibibytes glob globbing hardcode hardcoded hardcoding hardfloat hardlink hardlinks hasher hashsums infile iflag iflags kibi kibibytes libacl lcase listxattr llistxattr lossily lstat mebi mebibytes mergeable microbenchmark microbenchmarks microbenchmarking multibyte multicall 
nmerge noatime nocache nocreat noctty noerror nofollow nolinks nonblock nonportable nonprinting nonseekable notrunc noxfer ofile oflag oflags peekable performant precompiled precompute preload prepend prepended primality pseudoprime pseudoprimes quantiles readonly reparse seedable semver semiprime semiprimes setcap setfacl setfattr shortcode shortcodes siginfo sigusr subcommand subexpression submodule sync symlink symlinks syscall syscalls tokenize toolchain truthy ucase unbuffered udeps unescape unintuitive unprefixed unportable unsync urand whitespace wordlist wordlists xattrs xpass # * abbreviations consts deps dev fdlimit inacc maint proc procs # * constants xffff # * variables delim errno progname retval subdir val vals inval nofield # * clippy uninlined nonminimal coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries/people.wordlist.txt000066400000000000000000000044301504311601400321370ustar00rootroot00000000000000Akira Hayakawa Akira Hayakawa Alan Andrade Alan Andrade Aleksander Bielawski Aleksander Bielawski Alex Lyon Alex Lyon Alexander Batischev Alexander Batischev Alexander Fomin Alexander Fomin Anthony Deschamps Anthony Deschamps Ãrni Dagur Ãrni Dagur Ben Eills Ben Eills Ben Hirsch Ben Hirsch Benoit Benedetti Benoit Benedetti Boden Garman Boden Garman Chirag B Jadwani Chirag Jadwani Derek Chiang Derek Chiang Dorota Kapturkiewicz Dorota Kapturkiewicz Evgeniy Klyuchikov Evgeniy Klyuchikov Fangxu Hu Fangxu Hu Gil Cottle Gil Cottle Haitao Li Haitao Li Inokentiy Babushkin Inokentiy Babushkin Jan Scheer * jhscheer Jan Scheer jhscheer Jeremiah Peschka Jeremiah Peschka Jian Zeng Jian Zeng Jimmy Lu Jimmy Lu Joao Oliveira Joao Oliveira Jordi Boggiano Jordi Boggiano Jordy Dickinson Jordy Dickinson Joseph Crail Joseph Crail Joshua S Miller Joshua Miller Konstantin Pospelov Konstantin Pospelov Maciej Dziardziel Maciej Dziardziel Martin Kysel Martin Kysel Michael Debertol Michael Debertol Michael Gehring Michael Gehring Mitchell Mebane 
Mitchell Mebane Morten Olsen Lysgaard Morten Olsen Lysgaard Nicholas Juszczak Nicholas Juszczak Nick Platt Nick Platt Orvar Segerström Orvar Segerström Peter Atashian Peter Atashian Robert Swinford Robert Swinford Rolf Morel Rolf Morel Roman Gafiyatullin Roman Gafiyatullin Roy Ivy III * rivy Roy Ivy III rivy Sergey "Shnatsel" Davidoff Sergey Shnatsel Davidoff Sergey Shnatsel Davidoff Sokovikov Evgeniy Sokovikov Evgeniy Sunrin SHIMURA Sunrin SHIMURA Sylvestre Ledru Sylvestre Ledru T Jameson Little Jameson Little Thomas Queiroz Thomas Queiroz Tobias Bohumir Schottdorf Tobias Bohumir Schottdorf Virgile Andreani Virgile Andreani Vsevolod Velichko Vsevolod Velichko Wiktor Kuropatwa Wiktor Kuropatwa Yury Krivopalov Yury Krivopalov KokaKiwi Mahkoh Smigle00 Smigle00 Smigle anonymousknight kwantam nicoo gmnsii coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries/shell.wordlist.txt000066400000000000000000000015331504311601400317630ustar00rootroot00000000000000# * Mac clonefile # * POSIX TMPDIR adduser csh globstar inotify localtime mksh mountinfo mountpoint mtab nullglob passwd pipefail popd ptmx pushd setarch sh sudo sudoedit tcsh tzselect urandom VARNAME wtmp zsh # * Windows APPDATA COMSPEC HKCU HKLM HOMEDRIVE HOMEPATH LOCALAPPDATA PATHEXT PATHEXT SYSTEMROOT USERDOMAIN USERNAME USERPROFILE procmon # * `git` gitattributes gitignore # * `make` (`gmake`) CURDIR GNUMAKEFLAGS GNUMakefile LIBPATTERNS MAKECMDGOALS MAKEFILES MAKEFLAGS MAKELEVEL MAKESHELL SHELLSTATUS VPATH abspath addprefix addsuffix endef findstring firstword ifeq ifneq lastword notdir patsubst # * `npm` preversion # * utilities cachegrind chglog codespell commitlint dprint dtrace flamegraph flamegraphs gcov gmake grcov grep markdownlint rerast rollup samply sed selinuxenabled sestatus vdir wslpath xargs # * directories sbin libexec 
workspace.wordlist.txt000066400000000000000000000061351504311601400325760ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/cspell.dictionaries# * cargo cdylib rlib # * crates advapi advapi32-sys aho-corasick backtrace blake2b_simd bstr bytecount byteorder chacha chrono conv corasick crossterm exacl filetime formatteriteminfo fsext getopts getrandom globset indicatif itertools iuse langid lscolors mdbook memchr multifilereader onig ouroboros peekreader quickcheck rand_chacha ringbuffer rlimit rstest smallvec tempdir tempfile termion termios termsize termwidth textwrap thiserror unic ureq walkdir winapi xattr # * rust/rustc RUSTDOCFLAGS RUSTFLAGS clippy rustc rustfmt rustup rustdoc # bitor # BitOr trait function bitxor # BitXor trait function concat fract powi println repr rfind struct structs substr splitn trunc uninit # * uutils basenc chcon chgrp chmod chown chroot cksum csplit dircolors hashsum hostid logname mkdir mkfifo mknod mktemp nohup nproc numfmt pathchk printenv printf readlink realpath relpath rmdir runcon shuf sprintf stdbuf stty tsort uname unexpand whoami # * vars/errno errno EACCES EBADF EBUSY EEXIST EINVAL ENODATA ENOENT ENOSYS ENOTEMPTY EOPNOTSUPP EPERM EROFS # * vars/fcntl F_GETFL GETFL fcntl vmsplice # * vars/libc COMFOLLOW EXDEV FILENO FTSENT HOSTSIZE IDSIZE IFBLK IFCHR IFDIR IFIFO IFLNK IFMT IFREG IFSOCK IRGRP IROTH IRUSR ISDIR ISGID ISUID ISVTX IWGRP IWOTH IWUSR IXGRP IXOTH IXUSR LINESIZE NAMESIZE RTLD_NEXT RTLD SIGINT SIGKILL SIGSTOP SIGTERM SYS_fdatasync SYS_syncfs USERSIZE accpath addrinfo addrlen blocksize canonname chroot dlsym execvp fdatasync freeaddrinfo getaddrinfo getegid geteuid getgid getgrgid getgrnam getgrouplist getgroups getpwent getpwnam getpwuid getuid inode inodes isatty lchown pathlen setgid setgroups settime setuid socketpair socktype statfs statp statvfs strcmp strerror strlen syncfs umask waitpid wcslen # * vars/nix iovec unistd # * vars/signals SIGPIPE # * vars/std CString pathbuf # 
* vars/stat bavail bfree bsize ffree frsize fsid fstat fstype namelen # unix::fs::MetadataExt atime # access time blksize # blocksize for file system I/O blocks # number of blocks allocated to file ctime # creation time dev # ID of device containing the file gid # group ID of file owner ino # inode number mode # permissions mtime # modification time nlink # number of hard links to file rdev # device ID if file is a character/block special file size # total size of file in bytes uid # user ID of file owner nsec # nanosecond measurement scale # freebsd::MetadataExt iosize # * vars/time Timespec isdst nanos nsec nsecs strftime strptime subsec usec usecs utcoff # * vars/utmpx endutxent getutxent getutxid getutxline pututxline setutxent utmp utmpx utmpxname # * vars/winapi DWORD SYSTEMTIME LPVOID LPWSTR ULONG ULONGLONG UNLEN WCHAR WSADATA errhandlingapi fileapi handleapi lmcons minwinbase minwindef processthreadsapi synchapi sysinfoapi winbase winerror winnt winsock # * vars/selinux freecon getfilecon lgetfilecon lsetfilecon restorecon setfilecon # * vars/uucore optflag optflagmulti optflagopt optmulti optopt # * uutils ccmd coreopts coreutils keepenv libc libstdbuf musl tmpd uchild ucmd ucommand utmpx uucore uucore_procs uudoc uufuzz uumain uutil uutests uutils # * function names getcwd # * other algs coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/extensions.json000066400000000000000000000010061504311601400253740ustar00rootroot00000000000000// spell-checker:ignore (misc) matklad foxundermoon // see for the documentation about the extensions.json format // * // "foxundermoon.shell-format" ~ shell script formatting ; note: ENABLE "Use EditorConfig" // "matklad.rust-analyzer" ~ `rust` language support // "streetsidesoftware.code-spell-checker" ~ `cspell` spell-checker support { "recommendations": [ "rust-lang.rust-analyzer", "streetsidesoftware.code-spell-checker", "foxundermoon.shell-format" ] } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/.vscode/settings.json000066400000000000000000000000551504311601400250400ustar00rootroot00000000000000{ "cSpell.import": [".vscode/cspell.json"] } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/CODE_OF_CONDUCT.md000066400000000000000000000121531504311601400235450ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will 
take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at sylvestre@debian.org. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. 
This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at . Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at . Translations are available at . coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/CONTRIBUTING.md000066400000000000000000000333601504311601400232020ustar00rootroot00000000000000 # Contributing to coreutils Hi! Welcome to uutils/coreutils! Thanks for wanting to contribute to this project! This document explains everything you need to know to contribute. Before you start make sure to also check out these documents: - Our community's [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md). - [DEVELOPMENT.md](./DEVELOPMENT.md) for setting up your development environment. 
Now follows a very important warning: > [!WARNING] > uutils is original code and cannot contain any code from GNU or > other implementations. This means that **we cannot accept any changes based on > the GNU source code**. To make sure that cannot happen, **you cannot link to > the GNU source code** either. It is however possible to look at other implementations > under a BSD or MIT license like [Apple's implementation](https://github.com/apple-oss-distributions/file_cmds/) > or [OpenBSD](https://github.com/openbsd/src/tree/master/bin). Finally, feel free to join our [Discord](https://discord.gg/wQVJbvJ)! ## Getting Oriented uutils is a big project consisting of many parts. Here are the most important parts for getting started: - [`src/uu`](https://github.com/uutils/coreutils/tree/main/src/uu/): The code for all utilities - [`src/uucore`](https://github.com/uutils/coreutils/tree/main/src/uucore/): Crate containing all the shared code between the utilities. - [`tests/by-util`](https://github.com/uutils/coreutils/tree/main/tests/by-util/): The tests for all utilities. - [`src/bin/coreutils.rs`](https://github.com/uutils/coreutils/tree/main/src/bin/coreutils.rs): Code for the multicall binary. - [`docs`](https://github.com/uutils/coreutils/tree/main/docs/src): the documentation for the website - [`tests/uutests/`](https://github.com/uutils/coreutils/tree/main/tests/uutests/): Crate implementing the various functions to test uutils commands. Each utility is defined as a separate crate. 
The structure of each of these crates is as follows: - `Cargo.toml` - `src/main.rs`: contains only a single macro call - `src/.rs`: the actual code for the utility - `.md`: the documentation for the utility We have separated repositories for crates that we maintain but also publish for use by others: - [uutils-term-grid](https://github.com/uutils/uutils-term-grid) - [parse_datetime](https://github.com/uutils/parse_datetime) ## Design Goals We have the following goals with our development: - **Compatible**: The utilities should be a drop-in replacement for the GNU coreutils. - **Cross-platform**: All utilities should run on as many of the supported platforms as possible. - **Reliable**: The utilities should never unexpectedly fail. - **Performant**: Our utilities should be written in fast idiomatic Rust. We aim to match or exceed the performance of the GNU utilities. - **Well-tested**: We should have a lot of tests to be able to guarantee reliability and compatibility. ## How to Help There are several ways to help and writing code is just one of them. Reporting issues and writing documentation are just as important as writing code. ### Reporting Issues We can't fix bugs we don't know about, so good issues are super helpful! Here are some tips for writing good issues: - If you find a bug, make sure it's still a problem on the `main` branch. - Search through the existing issues to see whether it has already been reported. - Make sure to include all relevant information, such as: - Which version of uutils did you check? - Which version of GNU coreutils are you comparing with? - What platform are you on? - Provide a way to reliably reproduce the issue. - Be as specific as possible! ### Writing Documentation There's never enough documentation. If you come across any documentation that could be improved, feel free to submit a PR for it! ### Writing Code If you want to submit a PR, make sure that you've discussed the solution with the maintainers beforehand. 
We want to avoid situations where you put a lot of work into a fix that we can't merge! If there's no issue for what you're trying to fix yet, make one _before_ you start working on the PR. Generally, we try to follow what GNU is doing in terms of options and behavior. It is recommended to look at the GNU coreutils manual ([on the web](https://www.gnu.org/software/coreutils/manual/html_node/index.html), or locally using `info `). It is more in depth than the man pages and provides a good description of available features and their implementation details. But remember, you cannot look at the GNU source code! Also remember that we can only merge PRs which pass our test suite, follow rustfmt, and do not have any warnings from clippy. See [DEVELOPMENT.md](./DEVELOPMENT.md) for more information. Be sure to also read about our [Rust style](#our-rust-style). ## Our Rust Style We want uutils to be written in idiomatic Rust, so here are some guidelines to follow. Some of these are aspirational, meaning that we don't do them correctly everywhere in the code. If you find violations of the advice below, feel free to submit a patch! ### Don't `panic!` The coreutils should be very reliable. This means that we should never `panic!`. Therefore, you should avoid using `.unwrap()` and `panic!`. Sometimes the use of `unreachable!` can be justified with a comment explaining why that code is unreachable. ### Don't `exit` We want uutils to be embeddable in other programs. This means that no function in uutils should exit the program. Doing so would also lead to code with more confusing control flow. Avoid therefore `std::process::exit` and similar functions which exit the program early. ### `unsafe` uutils cannot be entirely safe, because we have to call out to `libc` and do syscalls. However, we still want to limit our use of `unsafe`. We generally only accept `unsafe` for FFI, with very few exceptions. Note that performance is very rarely a valid argument for using `unsafe`. 
If you still need to write code with `unsafe`, make sure to read the [Rustonomicon](https://doc.rust-lang.org/nomicon/intro.html) and annotate the calls with `// SAFETY:` comments explaining why the use of `unsafe` is sound. ### Macros Macros can be a great tool, but they are also usually hard to understand. They should be used sparingly. Make sure to explore simpler options before you reach for a solution involving macros. ### `str`, `OsStr` & `Path` Rust has many string-like types, and sometimes it's hard to choose the right one. It's tempting to use `str` (and `String`) for everything, but that is not always the right choice for uutils, because we need to support invalid UTF-8, just like the GNU coreutils. For example, paths on Linux might not be valid UTF-8! Whenever we are dealing with paths, we should therefore stick with `OsStr` and `Path`. Make sure that you only convert to `str`/`String` if you know that something is always valid UTF-8. If you need more operations on `OsStr`, you can use the [`bstr`](https://docs.rs/bstr/latest/bstr/) crate. ### Doc-comments We use rustdoc for our documentation, so it's best to follow [rustdoc's guidelines](https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#documenting-components). Make sure that your documentation is not just repeating the name of the function, but actually giving more useful information. Rustdoc recommends the following structure: ``` [short sentence explaining what it is] [more detailed explanation] [at least one code example that users can copy/paste to try it] [even more advanced explanations if necessary] ``` ### Other comments Comments should be written to _explain_ the code, not to _describe_ the code. Try to focus on explaining _why_ the code is the way it is. If you feel like you have to describe the code, that's usually a sign that you could improve the naming of variables and functions. If you edit a piece of code, make sure to update any comments that need to change as a result. 
The only thing worse than having no comments is having outdated comments! ## Git Etiquette To ensure easy collaboration, we have guidelines for using Git and GitHub. ### Commits - Make small and atomic commits. - Keep a clean history of commits. - Write informative commit messages. - Annotate your commit message with the component you're editing. For example: `cp: do not overwrite with -i` or `uucore: add support for FreeBSD`. - Do not unnecessarily move items around in the code. This makes the changes much harder to review. If you do need to move things around, do that in a separate commit. ### Commit messages You can read [this section in the Git book](https://git-scm.com/book/ms/v2/Distributed-Git-Contributing-to-a-Project) to learn how to write good commit messages. In addition, here are a few examples for a summary line when committing to uutils: - commit for a single utility ``` nohup: cleanup and refactor ``` - commit for a utility's tests ``` tests/rm: test new feature ``` Beyond changes to an individual utility or its tests, other summary lines for non-utility modules include: ``` README: add help uucore: add new modules uutils: add new utility gitignore: add temporary files ``` ### PRs - Make the titles of PRs descriptive. - This means describing the problem you solve. For example, do not write `Fix #1234`, but `ls: fix version sort order`. - You can prefix the title with the utility the PR concerns. - Keep PRs small and self-contained. A set of small PRs is much more likely to get merged quickly than one large PR. - Make sure the CI passes (up to intermittently failing tests). - You know your code best, that's why it's best if you can solve merge conflicts on your branch yourself. - It's up to you whether you want to use `git merge main` or `git rebase main`. - Feel free to ask for help with merge conflicts. - You do not need to ping maintainers to request a review, but it's fine to do so if you don't get a response within a few days. 
## Platforms We take pride in supporting many operating systems and architectures. Any code you contribute must at least compile without warnings for all platforms in the CI. However, you can use `#[cfg(...)]` attributes to create platform dependent features. **Tip:** For Windows, Microsoft provides some images (VMWare, Hyper-V, VirtualBox and Parallels) for development [on their official download page](https://developer.microsoft.com/windows/downloads/virtual-machines/). ## Improving the GNU compatibility Please make sure you have installed [GNU utils and prerequisites](DEVELOPMENT.md#gnu-utils-and-prerequisites) and can execute commands described in [Comparing with GNU](DEVELOPMENT.md#comparing-with-gnu) section of [DEVELOPMENT.md](DEVELOPMENT.md) The Python script `./util/remaining-gnu-error.py` shows the list of failing tests in the CI. To improve the GNU compatibility, the following process is recommended: 1. Identify a test (the smaller, the better) on a program that you understand or is easy to understand. You can use the `./util/remaining-gnu-error.py` script to help with this decision. 1. Build both the GNU and Rust coreutils using: `bash util/build-gnu.sh` 1. Run the test with `bash util/run-gnu-test.sh <your test>` 1. Start to modify `<your test>` to understand what is wrong. Examples: 1. Add `set -v` to have the bash verbose mode 1. Add `echo $?` where needed 1. When the variable `fail` is used in the test, `echo $fail` to see when the test started to fail 1. Dump the content of the output (ex: `cat err`) 1. ... 1. Or, if the test is simple, extract the relevant information to create a new test case running both GNU & Rust implementation 1. Start to modify the Rust implementation to match the expected behavior 1. 
Add a test to make sure that we don't regress (our test suite is super quick) ## Code coverage To generate code coverage report locally please follow [Code coverage report](DEVELOPMENT.md#code-coverage-report) section of [DEVELOPMENT.md](DEVELOPMENT.md) ## Other implementations The Coreutils have different implementations, with different levels of completions: - [GNU's](https://git.savannah.gnu.org/gitweb/?p=coreutils.git) - [OpenBSD](https://github.com/openbsd/src/tree/master/bin) - [Busybox](https://github.com/mirror/busybox/tree/master/coreutils) - [Toybox (Android)](https://github.com/landley/toybox/tree/master/toys/posix) - [Mac OS](https://github.com/apple-oss-distributions/file_cmds/) - [V lang](https://github.com/vlang/coreutils) - [SerenityOS](https://github.com/SerenityOS/serenity/tree/master/Userland/Utilities) - [Initial Unix](https://github.com/dspinellis/unix-history-repo) - [Perl Power Tools](https://metacpan.org/pod/PerlPowerTools) However, when reimplementing the tools/options in Rust, don't read their source codes when they are using reciprocal licenses (ex: GNU GPL, GNU LGPL, etc). ## Licensing uutils is distributed under the terms of the MIT License; see the `LICENSE` file for details. This is a permissive license, which allows the software to be used with few restrictions. Copyrights in the uutils project are retained by their contributors, and no copyright assignment is required to contribute. If you wish to add or change dependencies as part of a contribution to the project, a tool like `cargo-license` can be used to show their license details. 
The following types of license are acceptable: - MIT License - Dual- or tri-license with an MIT License option ("Apache-2.0 or MIT" is a popular combination) - "MIT equivalent" license (2-clause BSD, 3-clause BSD, ISC) - License less restrictive than the MIT License (CC0 1.0 Universal) - Apache License version 2.0 Licenses we will not use: - An ambiguous license, or no license - Strongly reciprocal licenses (GNU GPL, GNU LGPL) If you wish to add a reference but it doesn't meet these requirements, please raise an issue to describe the dependency. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/Cargo.lock000066400000000000000000003166051504311601400226640ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 4 [[package]] name = "adler2" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "ansi-width" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219e3ce6f2611d83b51ec2098a12702112c29e57203a6b0a0929b2cddb486608" dependencies = [ "unicode-width 0.1.14", ] [[package]] name = "anstream" version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", "windows-sys 0.59.0", ] [[package]] name = "arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] [[package]] name = "array-init" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" [[package]] name = 
"arrayref" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "bigdecimal" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" dependencies = [ "autocfg", "libm", "num-bigint", "num-integer", "num-traits", ] [[package]] name = "binary-heap-plus" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4551d8382e911ecc0d0f0ffb602777988669be09447d536ff4388d1def11296" dependencies = [ "compare", ] [[package]] name = "bincode" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740" dependencies = [ "bincode_derive", "serde", "unty", ] [[package]] name = "bincode_derive" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09" dependencies = [ "virtue", ] [[package]] name = "bindgen" version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.13.0", "log", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", "syn", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "bitvec" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", "tap", "wyz", ] [[package]] name = "blake2b_simd" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" dependencies = [ "arrayref", "arrayvec", "constant_time_eq", ] [[package]] name = "blake3" version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", ] [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", "regex-automata", "serde", ] [[package]] name = "bumpalo" version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "bytecount" version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "byteorder" version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "shlex", ] [[package]] name = "cexpr" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom 7.1.3", ] [[package]] name = "cfg-if" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "windows-link", ] [[package]] name = "ciborium" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", "serde", ] [[package]] name = "ciborium-io" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", ] [[package]] name = "clang-sys" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", "libloading", ] [[package]] name = "clap" version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", "terminal_size", ] [[package]] name = "clap_complete" version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5abde44486daf70c5be8b8f8f1b66c49f86236edf6fa2abadb4d961c4c6229a" dependencies = [ "clap", ] [[package]] name = "clap_lex" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "clap_mangen" version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27b4c3c54b30f0d9adcb47f25f61fcce35c4dd8916638c6b82fbd5f4fb4179e2" dependencies = [ "clap", "roff", ] [[package]] name = "colorchoice" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compare" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "120133d4db2ec47efe2e26502ee984747630c67f51974fca0b6c1340cf2368d3" [[package]] name = "console" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" dependencies = [ "encode_unicode", "libc", "once_cell", "unicode-width 0.2.1", "windows-sys 0.60.2", ] [[package]] name = "const-random" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] [[package]] name = "const-random-macro" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ "getrandom 0.2.16", "once_cell", "tiny-keccak", ] [[package]] name = "constant_time_eq" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", ] [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "coreutils" version = "0.1.0" dependencies = [ "bincode", "chrono", "clap", "clap_complete", "clap_mangen", "ctor", "filetime", "glob", "hex-literal", "libc", "nix", "num-prime", "phf", "phf_codegen", "pretty_assertions", "rand 0.9.2", "regex", "rlimit", "rstest", "selinux", "serde", "serde-big-array", "sha1", "tempfile", "textwrap", "time", "unindent", "uu_arch", "uu_base32", "uu_base64", "uu_basename", "uu_basenc", "uu_cat", "uu_chcon", "uu_chgrp", "uu_chmod", "uu_chown", "uu_chroot", "uu_cksum", "uu_comm", "uu_cp", "uu_csplit", "uu_cut", "uu_date", "uu_dd", "uu_df", "uu_dir", "uu_dircolors", "uu_dirname", "uu_du", "uu_echo", "uu_env", 
"uu_expand", "uu_expr", "uu_factor", "uu_false", "uu_fmt", "uu_fold", "uu_groups", "uu_hashsum", "uu_head", "uu_hostid", "uu_hostname", "uu_id", "uu_install", "uu_join", "uu_kill", "uu_link", "uu_ln", "uu_logname", "uu_ls", "uu_mkdir", "uu_mkfifo", "uu_mknod", "uu_mktemp", "uu_more", "uu_mv", "uu_nice", "uu_nl", "uu_nohup", "uu_nproc", "uu_numfmt", "uu_od", "uu_paste", "uu_pathchk", "uu_pinky", "uu_pr", "uu_printenv", "uu_printf", "uu_ptx", "uu_pwd", "uu_readlink", "uu_realpath", "uu_rm", "uu_rmdir", "uu_runcon", "uu_seq", "uu_shred", "uu_shuf", "uu_sleep", "uu_sort", "uu_split", "uu_stat", "uu_stdbuf", "uu_stty", "uu_sum", "uu_sync", "uu_tac", "uu_tail", "uu_tee", "uu_test", "uu_timeout", "uu_touch", "uu_tr", "uu_true", "uu_truncate", "uu_tsort", "uu_tty", "uu_uname", "uu_unexpand", "uu_uniq", "uu_unlink", "uu_uptime", "uu_users", "uu_vdir", "uu_wc", "uu_who", "uu_whoami", "uu_yes", "uucore", "uuhelp_parser", "uutests", "walkdir", "xattr", "zip", ] [[package]] name = "cpufeatures" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] [[package]] name = "criterion" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" dependencies = [ "anes", "cast", "ciborium", "clap", "criterion-plot", "itertools 0.13.0", "num-traits", "oorandom", "plotters", "rayon", "regex", "serde", "serde_json", "tinytemplate", "walkdir", ] [[package]] name = "criterion-plot" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" 
dependencies = [ "cast", "itertools 0.10.5", ] [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" dependencies = [ "bitflags 2.9.1", "crossterm_winapi", "derive_more", "document-features", "filedescriptor", "mio", "parking_lot", "rustix", "signal-hook", "signal-hook-mio", "winapi", ] [[package]] name = "crossterm_winapi" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] [[package]] name = "crunchy" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "ctor" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec09e802f5081de6157da9a75701d6c713d8dc3ba52571fd4bd25f412644e8a6" dependencies = [ 
"ctor-proc-macro", "dtor", ] [[package]] name = "ctor-proc-macro" version = "0.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2" [[package]] name = "ctrlc" version = "3.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46f93780a459b7d656ef7f071fe699c4d3d2cb201c4b24d085b6ddc505276e73" dependencies = [ "nix", "windows-sys 0.59.0", ] [[package]] name = "data-encoding" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "data-encoding-macro" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" dependencies = [ "data-encoding", "data-encoding-macro-internal", ] [[package]] name = "data-encoding-macro-internal" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", "syn", ] [[package]] name = "deranged" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] [[package]] name = "derive_arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "derive_more" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" version = "2.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "convert_case", "proc-macro2", "quote", "syn", ] [[package]] name = "diff" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "displaydoc" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "dlv-list" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" dependencies = [ "const-random", ] [[package]] name = "dns-lookup" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5766087c2235fec47fafa4cfecc81e494ee679d0fd4a59887ea0919bfb0e4fc" dependencies = [ "cfg-if", "libc", "socket2", "windows-sys 0.48.0", ] [[package]] name = "document-features" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" dependencies = [ "litrs", ] [[package]] name = "dtor" version = "0.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97cbdf2ad6846025e8e25df05171abfb30e3ababa12ee0a0e44b9bbe570633a8" dependencies = [ "dtor-proc-macro", ] [[package]] name = "dtor-proc-macro" version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055" [[package]] name = "dunce" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "encode_unicode" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", "windows-sys 0.59.0", ] [[package]] name = "exacl" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22be12de19decddab85d09f251ec8363f060ccb22ec9c81bc157c0c8433946d8" dependencies = [ "bitflags 2.9.1", "log", "scopeguard", "uuid", ] [[package]] name = "fastrand" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "file_diff" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31a7a908b8f32538a2143e59a6e4e2508988832d5d4d6f7c156b3cbc762643a5" [[package]] name = "filedescriptor" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d" dependencies = [ "libc", "thiserror 1.0.69", "winapi", ] [[package]] name = "filetime" 
version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if", "libc", "libredox", "windows-sys 0.59.0", ] [[package]] name = "fixed_decimal" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35943d22b2f19c0cb198ecf915910a8158e94541c89dcc63300d7799d46c2c5e" dependencies = [ "displaydoc", "smallvec", "writeable", ] [[package]] name = "flate2" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "libz-rs-sys", "miniz_oxide", ] [[package]] name = "fluent" version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8137a6d5a2c50d6b0ebfcb9aaa91a28154e0a70605f112d30cb0cd4a78670477" dependencies = [ "fluent-bundle", "unic-langid", ] [[package]] name = "fluent-bundle" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01203cb8918f5711e73891b347816d932046f95f54207710bda99beaeb423bf4" dependencies = [ "fluent-langneg", "fluent-syntax", "intl-memoizer", "intl_pluralrules", "rustc-hash", "self_cell", "smallvec", "unic-langid", ] [[package]] name = "fluent-langneg" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c4ad0989667548f06ccd0e306ed56b61bd4d35458d54df5ec7587c0e8ed5e94" dependencies = [ "unic-langid", ] [[package]] name = "fluent-syntax" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54f0d287c53ffd184d04d8677f590f4ac5379785529e5e08b1c8083acdd5c198" dependencies = [ "memchr", "thiserror 2.0.12", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] 
name = "foldhash" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "fs_extra" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "fsevent-sys" version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" dependencies = [ "libc", ] [[package]] name = "fts-sys" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43119ec0f2227f8505c8bb6c60606b5eefc328607bfe1a421e561c4decfa02ab" dependencies = [ "bindgen", "libc", ] [[package]] name = "funty" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-macro" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-core", "futures-macro", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "gcd" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a" [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] name = "getrandom" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", "r-efi", "wasi 0.14.2+wasi-0.2.4", ] [[package]] name = "glob" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "half" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ "cfg-if", "crunchy", ] [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", "foldhash", 
] [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" [[package]] name = "hostname" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ "cfg-if", "libc", "windows-link", ] [[package]] name = "iana-time-zone" version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "log", "wasm-bindgen", "windows-core", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ "cc", ] [[package]] name = "icu_collator" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ad4c6a556938dfd31f75a8c54141079e8821dc697ffb799cfe0f0fa11f2edc" dependencies = [ "displaydoc", "icu_collator_data", "icu_collections", "icu_locale", "icu_locale_core", "icu_normalizer", "icu_properties", "icu_provider", "smallvec", "utf16_iter", "utf8_iter", "zerovec", ] [[package]] name = "icu_collator_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d880b8e680799eabd90c054e1b95526cd48db16c95269f3c89fb3117e1ac92c5" [[package]] name = "icu_collections" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" 
dependencies = [ "displaydoc", "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_decimal" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fec61c43fdc4e368a9f450272833123a8ef0d7083a44597660ce94d791b8a2e2" dependencies = [ "displaydoc", "fixed_decimal", "icu_decimal_data", "icu_locale", "icu_locale_core", "icu_provider", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_decimal_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b70963bc35f9bdf1bc66a5c1f458f4991c1dc71760e00fa06016b2c76b2738d5" [[package]] name = "icu_locale" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ae5921528335e91da1b6c695dbf1ec37df5ac13faa3f91e5640be93aa2fbefd" dependencies = [ "displaydoc", "icu_collections", "icu_locale_core", "icu_locale_data", "icu_provider", "potential_utf", "tinystr", "zerovec", ] [[package]] name = "icu_locale_core" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_locale_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fdef0c124749d06a743c69e938350816554eb63ac979166590e2b4ee4252765" [[package]] name = "icu_normalizer" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", "zerovec", ] [[package]] name = "icu_normalizer_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = 
"icu_properties" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", "zerotrie", "zerovec", ] [[package]] name = "indexmap" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.4", ] [[package]] name = "indicatif" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd" dependencies = [ "console", "portable-atomic", "unicode-width 0.2.1", "unit-prefix", "web-time", ] [[package]] name = "inotify" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ "bitflags 2.9.1", "inotify-sys", "libc", ] [[package]] name = "inotify-sys" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" dependencies = [ "libc", ] [[package]] name = "intl-memoizer" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "310da2e345f5eb861e7a07ee182262e94975051db9e4223e909ba90f392f163f" dependencies = [ "type-map", "unic-langid", ] [[package]] name = "intl_pluralrules" version = "7.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078ea7b7c29a2b4df841a7f6ac8775ff6074020c6776d48491ce2268e068f972" dependencies = [ "unic-langid", ] [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itertools" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itertools" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jiff" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" dependencies = [ "jiff-static", "jiff-tzdb-platform", "log", "portable-atomic", "portable-atomic-util", "serde", "windows-sys 0.59.0", ] [[package]] name = "jiff-static" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] 
name = "jiff-tzdb" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1283705eb0a21404d2bfd6eef2a7593d240bc42a0bdb39db0ad6fa2ec026524" [[package]] name = "jiff-tzdb-platform" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "875a5a69ac2bab1a891711cf5eccbec1ce0341ea805560dcd90b7a2e925132e8" dependencies = [ "jiff-tzdb", ] [[package]] name = "js-sys" version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "keccak" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] [[package]] name = "kqueue" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" dependencies = [ "kqueue-sys", "libc", ] [[package]] name = "kqueue-sys" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" dependencies = [ "bitflags 1.3.2", "libc", ] [[package]] name = "libc" version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", "windows-targets 0.53.2", ] [[package]] name = "libm" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" 
version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.9.1", "libc", "redox_syscall", ] [[package]] name = "libz-rs-sys" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "172a788537a2221661b480fee8dc5f96c580eb34fa88764d3205dc356c7e4221" dependencies = [ "zlib-rs", ] [[package]] name = "linux-raw-sys" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "linux-raw-sys" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d6a630ed4f43c11056af8768c4773df2c43bc780b6d8a46de345c17236c562" [[package]] name = "litemap" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "litrs" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" [[package]] name = "lock_api" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "lru" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ "hashbrown 0.15.4", ] [[package]] name = "lscolors" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"61183da5de8ba09a58e330d55e5ea796539d8443bd00fdeb863eac39724aa4ab" dependencies = [ "aho-corasick", "nu-ansi-term", ] [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ "cfg-if", "digest", ] [[package]] name = "memchr" version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memmap2" version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" dependencies = [ "libc", ] [[package]] name = "memoffset" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] [[package]] name = "nix" version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ "bitflags 2.9.1", "cfg-if", "cfg_aliases", "libc", "memoffset", ] [[package]] name = "nom" version = "7.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "nom" version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" dependencies = [ "memchr", ] [[package]] name = "notify" version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3163f59cd3fa0e9ef8c32f242966a7b9994fd7378366099593e0e73077cd8c97" dependencies = [ "bitflags 2.9.1", "fsevent-sys", "inotify", "kqueue", "libc", "log", "mio", "notify-types", "walkdir", "windows-sys 0.60.2", ] [[package]] name = "notify-types" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d" [[package]] name = "nu-ansi-term" version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "num-bigint" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", "rand 0.8.5", ] [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ "num-traits", ] [[package]] name = "num-modular" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"64a5fe11d4135c3bcdf3a95b18b194afa9608a5f6ff034f5d857bc9a27fb0119" dependencies = [ "num-bigint", "num-integer", "num-traits", ] [[package]] name = "num-prime" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e238432a7881ec7164503ccc516c014bf009be7984cde1ba56837862543bdec3" dependencies = [ "bitvec", "either", "lru", "num-bigint", "num-integer", "num-modular", "num-traits", "rand 0.8.5", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "num_threads" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "onig" version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "336b9c63443aceef14bea841b899035ae3abe89b7c486aaf4c5bd8aafedac3f0" dependencies = [ "bitflags 2.9.1", "libc", "once_cell", "onig_sys", ] [[package]] name = "onig_sys" version = "69.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f86c6eef3d6df15f23bcfb6af487cbd2fed4e5581d58d5bf1f5f8b7f6727dc" dependencies = [ "cc", "pkg-config", ] [[package]] name = "oorandom" 
version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "ordered-multimap" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" dependencies = [ "dlv-list", "hashbrown 0.14.5", ] [[package]] name = "os_display" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad5fd71b79026fb918650dde6d125000a233764f1c2f1659a1c71118e33ea08f" dependencies = [ "unicode-width 0.2.1", ] [[package]] name = "parking_lot" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets 0.52.6", ] [[package]] name = "parse_datetime" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5b77d27257a460cefd73a54448e5f3fd4db224150baf6ca3e02eedf4eb2b3e9" dependencies = [ "chrono", "num-traits", "regex", "winnow", ] [[package]] name = "phf" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" dependencies = [ "phf_shared", "serde", ] [[package]] name = "phf_codegen" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbdcb6f01d193b17f0b9c3360fa7e0e620991b193ff08702f78b3ce365d7e61" dependencies = [ "phf_generator", "phf_shared", ] [[package]] name = "phf_generator" version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2cbb1126afed61dd6368748dae63b1ee7dc480191c6262a3b4ff1e29d86a6c5b" dependencies = [ "fastrand", "phf_shared", ] [[package]] name = "phf_shared" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" dependencies = [ "siphasher", ] [[package]] name = "pin-project-lite" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "platform-info" version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7539aeb3fdd8cb4f6a331307cf71a1039cee75e94e8a71725b9484f4a0d9451a" dependencies = [ "libc", "winapi", ] [[package]] name = "plotters" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys", ] [[package]] name = "plotters-backend" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "portable-atomic" version = "1.11.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "portable-atomic-util" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" dependencies = [ "portable-atomic", ] [[package]] name = "potential_utf" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" dependencies = [ "serde", "zerovec", ] [[package]] name = "powerfmt" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy 0.8.25", ] [[package]] name = "pretty_assertions" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", ] [[package]] name = "prettyplease" version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" dependencies = [ "proc-macro2", "syn", ] [[package]] name = "proc-macro-crate" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] [[package]] name = 
"quote" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" [[package]] name = "radium" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", ] [[package]] name = "rand" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core 0.6.4", ] [[package]] name = "rand_chacha" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core 0.9.3", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom 0.2.16", ] [[package]] name = "rand_core" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" 
dependencies = [ "getrandom 0.3.3", ] [[package]] name = "rayon" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "redox_syscall" version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ "bitflags 2.9.1", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "relative-path" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "rlimit" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ "libc", ] [[package]] name = "roff" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3" [[package]] name = "rstest" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5a3193c063baaa2a95a33f03035c8a72b83d97a54916055ba22d35ed3839d49" dependencies = [ "futures-timer", "futures-util", "rstest_macros", ] [[package]] name = "rstest_macros" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c845311f0ff7951c5506121a9ad75aec44d083c31583b2ea5a30bcb0b0abba0" dependencies = [ "cfg-if", "glob", "proc-macro-crate", "proc-macro2", "quote", "regex", "relative-path", "rustc_version", "syn", "unicode-ident", ] [[package]] name = "rust-ini" version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7295b7ce3bf4806b419dc3420745998b447178b7005e2011947b38fc5aa6791" dependencies = [ "cfg-if", "ordered-multimap", ] [[package]] name = "rustc-hash" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] [[package]] name = "rustversion" version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "self_cell" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749" [[package]] name = "selinux" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ef2ca58174235414aee5465f5d8ef9f5833023b31484eb52ca505f306f4573c" dependencies = [ "bitflags 2.9.1", "errno", "libc", "once_cell", "parking_lot", "selinux-sys", "thiserror 2.0.12", ] [[package]] name = "selinux-sys" version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "280da3df1236da180be5ac50a893b26a1d3c49e3a44acb2d10d1f082523ff916" dependencies = [ "bindgen", "cc", "dunce", "walkdir", ] [[package]] name = "semver" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde-big-array" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" dependencies = [ "serde", ] [[package]] name = "serde_derive" version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", "ryu", "serde", ] [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha3" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest", "keccak", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook" version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" dependencies = [ "libc", "signal-hook-registry", ] [[package]] name = "signal-hook-mio" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", "mio", "signal-hook", ] [[package]] name = "signal-hook-registry" version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] 
[[package]] name = "simd-adler32" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "siphasher" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "sm3" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebb9a3b702d0a7e33bc4d85a14456633d2b165c2ad839c5fd9a8417c1ab15860" dependencies = [ "digest", ] [[package]] name = "smallvec" version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "smawk" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "socket2" version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "synstructure" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tap" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "terminal_size" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ "rustix", "windows-sys 0.59.0", ] [[package]] name = "textwrap" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "smawk", "terminal_size", "unicode-linebreak", "unicode-width 0.2.1", ] [[package]] name = "thiserror" version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ "thiserror-impl 2.0.12", ] [[package]] name = "thiserror-impl" version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thiserror-impl" version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "time" version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", "libc", "num-conv", "num_threads", "powerfmt", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", ] [[package]] name = "tiny-keccak" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ "crunchy", ] [[package]] name = "tinystr" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", ] [[package]] name = "tinytemplate" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", ] [[package]] name = "toml_datetime" version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" 
[[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap", "toml_datetime", "winnow", ] [[package]] name = "type-map" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb30dbbd9036155e74adad6812e9898d03ec374946234fbcebd5dfc7b9187b90" dependencies = [ "rustc-hash", ] [[package]] name = "typenum" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unic-langid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28ba52c9b05311f4f6e62d5d9d46f094bd6e84cb8df7b3ef952748d752a7d05" dependencies = [ "unic-langid-impl", ] [[package]] name = "unic-langid-impl" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dce1bf08044d4b7a94028c93786f8566047edc11110595914de93362559bc658" dependencies = [ "tinystr", ] [[package]] name = "unicode-ident" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-linebreak" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-segmentation" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "unindent" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" [[package]] name = "unit-prefix" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" [[package]] name = "unty" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" [[package]] name = "utf16_iter" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "utmp-classic" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e24c654e19afaa6b8f3877ece5d3bed849c2719c56f6752b18ca7da4fcc6e85a" dependencies = [ "cfg-if", "libc", "thiserror 1.0.69", "time", "utmp-classic-raw", "zerocopy 0.7.35", ] [[package]] name = "utmp-classic-raw" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22c226537a3d6e01c440c1926ca0256dbee2d19b2229ede6fc4863a6493dd831" dependencies = [ "cfg-if", "zerocopy 0.7.35", ] [[package]] name = "uu_arch" version = "0.1.0" dependencies = [ "clap", "fluent", "platform-info", "uucore", ] [[package]] name = "uu_base32" version = "0.1.0" 
dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_base64" version = "0.1.0" dependencies = [ "clap", "fluent", "uu_base32", "uucore", ] [[package]] name = "uu_basename" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_basenc" version = "0.1.0" dependencies = [ "clap", "fluent", "uu_base32", "uucore", ] [[package]] name = "uu_cat" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "nix", "tempfile", "thiserror 2.0.12", "uucore", "winapi-util", "windows-sys 0.60.2", ] [[package]] name = "uu_chcon" version = "0.1.0" dependencies = [ "clap", "fluent", "fts-sys", "libc", "selinux", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_chgrp" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_chmod" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_chown" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_chroot" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_cksum" version = "0.1.0" dependencies = [ "clap", "fluent", "hex", "uucore", ] [[package]] name = "uu_comm" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_cp" version = "0.1.0" dependencies = [ "clap", "exacl", "filetime", "fluent", "indicatif", "libc", "linux-raw-sys 0.10.0", "selinux", "thiserror 2.0.12", "uucore", "walkdir", "xattr", ] [[package]] name = "uu_csplit" version = "0.1.0" dependencies = [ "clap", "fluent", "regex", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_cut" version = "0.1.0" dependencies = [ "bstr", "clap", "fluent", "memchr", "uucore", ] [[package]] name = "uu_date" version = "0.1.0" dependencies = [ "chrono", "clap", "fluent", "jiff", "libc", "parse_datetime", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_dd" version = "0.1.0" dependencies = [ "clap", "fluent", "gcd", 
"libc", "nix", "signal-hook", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_df" version = "0.1.0" dependencies = [ "clap", "fluent", "tempfile", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_dir" version = "0.1.0" dependencies = [ "clap", "uu_ls", "uucore", ] [[package]] name = "uu_dircolors" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_dirname" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_du" version = "0.1.0" dependencies = [ "clap", "fluent", "glob", "thiserror 2.0.12", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_echo" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_env" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "rust-ini", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_expand" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_expr" version = "0.1.0" dependencies = [ "clap", "fluent", "num-bigint", "num-traits", "onig", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_factor" version = "0.1.0" dependencies = [ "clap", "fluent", "num-bigint", "num-prime", "num-traits", "uucore", ] [[package]] name = "uu_factor_benches" version = "0.0.0" dependencies = [ "array-init", "criterion", "num-prime", "rand 0.9.2", "rand_chacha 0.9.0", ] [[package]] name = "uu_false" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_fmt" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_fold" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_groups" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_hashsum" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = 
"uu_head" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_hostid" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_hostname" version = "0.1.0" dependencies = [ "clap", "dns-lookup", "fluent", "hostname", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_id" version = "0.1.0" dependencies = [ "clap", "fluent", "selinux", "uucore", ] [[package]] name = "uu_install" version = "0.1.0" dependencies = [ "clap", "file_diff", "filetime", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_join" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_kill" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "uucore", ] [[package]] name = "uu_link" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_ln" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_logname" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_ls" version = "0.1.0" dependencies = [ "ansi-width", "clap", "fluent", "glob", "hostname", "lscolors", "selinux", "terminal_size", "thiserror 2.0.12", "uucore", "uutils_term_grid", ] [[package]] name = "uu_mkdir" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_mkfifo" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_mknod" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_mktemp" version = "0.1.0" dependencies = [ "clap", "fluent", "rand 0.9.2", "tempfile", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_more" version = "0.1.0" dependencies = [ "clap", "crossterm", "fluent", "nix", "tempfile", "uucore", ] [[package]] name = "uu_mv" version = "0.1.0" dependencies = [ "clap", "fluent", 
"fs_extra", "indicatif", "libc", "thiserror 2.0.12", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_nice" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "nix", "uucore", ] [[package]] name = "uu_nl" version = "0.1.0" dependencies = [ "clap", "fluent", "regex", "uucore", ] [[package]] name = "uu_nohup" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_nproc" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_numfmt" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_od" version = "0.1.0" dependencies = [ "byteorder", "clap", "fluent", "half", "uucore", ] [[package]] name = "uu_paste" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_pathchk" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_pinky" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_pr" version = "0.1.0" dependencies = [ "clap", "fluent", "itertools 0.14.0", "regex", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_printenv" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_printf" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_ptx" version = "0.1.0" dependencies = [ "clap", "fluent", "regex", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_pwd" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_readlink" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_realpath" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_rm" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "thiserror 2.0.12", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_rmdir" version = "0.1.0" dependencies = [ 
"clap", "fluent", "libc", "uucore", ] [[package]] name = "uu_runcon" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "selinux", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_seq" version = "0.1.0" dependencies = [ "bigdecimal", "clap", "fluent", "num-bigint", "num-traits", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_shred" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "rand 0.9.2", "uucore", ] [[package]] name = "uu_shuf" version = "0.1.0" dependencies = [ "clap", "fluent", "rand 0.9.2", "rand_core 0.9.3", "uucore", ] [[package]] name = "uu_sleep" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_sort" version = "0.1.0" dependencies = [ "bigdecimal", "binary-heap-plus", "clap", "compare", "ctrlc", "fluent", "fnv", "itertools 0.14.0", "memchr", "nix", "rand 0.9.2", "rayon", "self_cell", "tempfile", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_split" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_stat" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_stdbuf" version = "0.1.0" dependencies = [ "clap", "fluent", "tempfile", "thiserror 2.0.12", "uu_stdbuf_libstdbuf", "uucore", ] [[package]] name = "uu_stdbuf_libstdbuf" version = "0.1.0" dependencies = [ "ctor", "libc", ] [[package]] name = "uu_stty" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "uucore", ] [[package]] name = "uu_sum" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_sync" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_tac" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "memmap2", "regex", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_tail" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "memchr", 
"notify", "rstest", "same-file", "uucore", "winapi-util", "windows-sys 0.60.2", ] [[package]] name = "uu_tee" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "uucore", ] [[package]] name = "uu_test" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_timeout" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "nix", "uucore", ] [[package]] name = "uu_touch" version = "0.1.0" dependencies = [ "chrono", "clap", "filetime", "fluent", "parse_datetime", "thiserror 2.0.12", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_tr" version = "0.1.0" dependencies = [ "clap", "fluent", "nom 8.0.0", "uucore", ] [[package]] name = "uu_true" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_truncate" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_tsort" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "uucore", ] [[package]] name = "uu_tty" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "uucore", ] [[package]] name = "uu_uname" version = "0.1.0" dependencies = [ "clap", "fluent", "platform-info", "uucore", ] [[package]] name = "uu_unexpand" version = "0.1.0" dependencies = [ "clap", "fluent", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_uniq" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_unlink" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_uptime" version = "0.1.0" dependencies = [ "chrono", "clap", "fluent", "thiserror 2.0.12", "utmp-classic", "uucore", ] [[package]] name = "uu_users" version = "0.1.0" dependencies = [ "clap", "fluent", "utmp-classic", "uucore", ] [[package]] name = "uu_vdir" version = "0.1.0" dependencies = [ "clap", "uu_ls", "uucore", ] [[package]] name = "uu_wc" version = "0.1.0" dependencies = [ "bytecount", "clap", "fluent", "libc", 
"nix", "thiserror 2.0.12", "unicode-width 0.2.1", "uucore", ] [[package]] name = "uu_who" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_whoami" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_yes" version = "0.1.0" dependencies = [ "clap", "fluent", "itertools 0.14.0", "nix", "uucore", ] [[package]] name = "uucore" version = "0.1.0" dependencies = [ "bigdecimal", "blake2b_simd", "blake3", "bstr", "chrono", "clap", "crc32fast", "data-encoding", "data-encoding-macro", "digest", "dns-lookup", "dunce", "fluent", "fluent-syntax", "glob", "hex", "icu_collator", "icu_decimal", "icu_locale", "icu_provider", "itertools 0.14.0", "jiff", "libc", "md-5", "memchr", "nix", "num-traits", "number_prefix", "os_display", "selinux", "sha1", "sha2", "sha3", "sm3", "tempfile", "thiserror 2.0.12", "time", "unic-langid", "utmp-classic", "uucore_procs", "walkdir", "wild", "winapi-util", "windows-sys 0.60.2", "xattr", "z85", ] [[package]] name = "uucore_procs" version = "0.1.0" dependencies = [ "proc-macro2", "quote", "uuhelp_parser", ] [[package]] name = "uuhelp_parser" version = "0.1.0" [[package]] name = "uuid" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "uutests" version = "0.1.0" dependencies = [ "ctor", "libc", "nix", "pretty_assertions", "rand 0.9.2", "regex", "rlimit", "tempfile", "uucore", "xattr", ] [[package]] name = "uutils_term_grid" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcba141ce511bad08e80b43f02976571072e1ff4286f7d628943efbd277c6361" dependencies = [ "ansi-width", ] [[package]] name = "version_check" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "virtue" version = "0.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] [[package]] name = "wasm-bindgen" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = 
[ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "web-time" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "wild" version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3131afc8c575281e1e80f36ed6a092aa502c08b18ed7524e86fbbb12bb410e1" dependencies = [ "glob", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" version = "0.61.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", "windows-strings", ] [[package]] name = "windows-implement" version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-interface" version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-link" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.59.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ "windows-targets 0.53.2", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows-targets" version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", "windows_i686_gnullvm 0.53.0", "windows_i686_msvc 0.53.0", "windows_x86_64_gnu 0.53.0", "windows_x86_64_gnullvm 0.53.0", "windows_x86_64_msvc 0.53.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = 
"windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] [[package]] name = "wit-bindgen-rt" version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.1", ] [[package]] name = "writeable" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] [[package]] name = "xattr" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", "rustix", ] [[package]] name = "yansi" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" 
dependencies = [ "serde", "stable_deref_trait", "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "z85" version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b3a41ce106832b4da1c065baa4c31cf640cf965fa1483816402b7f6b96f0a64" [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive 0.7.35", ] [[package]] name = "zerocopy" version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ "zerocopy-derive 0.8.25", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerocopy-derive" version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerofrom" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", 
] [[package]] name = "zerotrie" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", "zerofrom", ] [[package]] name = "zerovec" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", "zerovec-derive", ] [[package]] name = "zerovec-derive" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zip" version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aed4ac33e8eb078c89e6cbb1d5c4c7703ec6d299fc3e7c3695af8f8b423468b" dependencies = [ "arbitrary", "crc32fast", "flate2", "indexmap", "memchr", "zopfli", ] [[package]] name = "zlib-rs" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "626bd9fa9734751fc50d6060752170984d7053f5a39061f524cda68023d4db8a" [[package]] name = "zopfli" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfc5ee405f504cd4984ecc6f14d02d55cfda60fa4b689434ef4102aae150cd7" dependencies = [ "bumpalo", "crc32fast", "log", "simd-adler32", ] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/Cargo.toml000066400000000000000000000606321504311601400227030ustar00rootroot00000000000000# coreutils (uutils) # * see the repository LICENSE, README, and CONTRIBUTING files for more information # spell-checker:ignore (libs) bigdecimal datetime serde bincode gethostid kqueue libselinux mangen memmap uuhelp startswith constness expl unnested [package] name = "coreutils" description = "coreutils ~ GNU coreutils (updated); implemented as universal (cross-platform) utils, written in 
Rust" default-run = "coreutils" repository = "https://github.com/uutils/coreutils" rust-version = "1.85.0" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true [package.metadata.docs.rs] all-features = true [features] default = ["feat_common_core"] ## OS feature shortcodes macos = ["feat_os_macos"] unix = ["feat_os_unix"] windows = ["feat_os_windows"] ## project-specific feature shortcodes nightly = [] test_unimplemented = [] expensive_tests = [] # "test_risky_names" == enable tests that create problematic file names (would make a network share inaccessible to Windows, breaks SVN on Mac OS, etc.) test_risky_names = [] # * only build `uudoc` when `--feature uudoc` is activated uudoc = ["zip", "dep:uuhelp_parser"] ## features ## Optional feature for stdbuf # "feat_external_libstdbuf" == use an external libstdbuf.so for stdbuf instead of embedding it feat_external_libstdbuf = ["stdbuf/feat_external_libstdbuf"] # "feat_acl" == enable support for ACLs (access control lists; by using`--features feat_acl`) # NOTE: # * On linux, the posix-acl/acl-sys crate requires `libacl` headers and shared library to be accessible in the C toolchain at compile time. # * On FreeBSD and macOS this is not required. feat_acl = ["cp/feat_acl"] # "feat_selinux" == enable support for SELinux Security Context (by using `--features feat_selinux`) # NOTE: # * The selinux(-sys) crate requires `libselinux` headers and shared library to be accessible in the C toolchain at compile time. # * Running a uutils compiled with `feat_selinux` requires an SELinux enabled Kernel at run time. 
feat_selinux = [ "cp/selinux", "feat_require_selinux", "id/selinux", "install/selinux", "ls/selinux", "mkdir/selinux", "mkfifo/selinux", "mknod/selinux", "selinux", "stat/selinux", ] ## ## feature sets ## (common/core and Tier1) feature sets # "feat_common_core" == baseline core set of utilities which can be built/run on most targets feat_common_core = [ "base32", "base64", "basename", "basenc", "cat", "cksum", "comm", "cp", "csplit", "cut", "date", "dd", "df", "dir", "dircolors", "dirname", "du", "echo", "env", "expand", "expr", "factor", "false", "fmt", "fold", "hashsum", "head", "join", "link", "ln", "ls", "mkdir", "mktemp", "more", "mv", "nl", "numfmt", "od", "paste", "pr", "printenv", "printf", "ptx", "pwd", "readlink", "realpath", "rm", "rmdir", "seq", "shred", "shuf", "sleep", "sort", "split", "sum", "tac", "tail", "tee", "test", "touch", "tr", "true", "truncate", "tsort", "unexpand", "uniq", "unlink", "vdir", "wc", "yes", ] # "feat_Tier1" == expanded set of utilities which can be built/run on the usual rust "Tier 1" target platforms (ref: ) feat_Tier1 = [ "feat_common_core", # "arch", "hostname", "nproc", "sync", "touch", "uname", "whoami", ] ## (primary platforms) feature sets # "feat_os_macos" == set of utilities which can be built/run on the MacOS platform feat_os_macos = [ "feat_os_unix", ## == a modern/usual *nix platform # "feat_require_unix_hostid", ] # "feat_os_unix" == set of utilities which can be built/run on modern/usual *nix platforms. 
# Also used for targets binding to the "musl" library (ref: ) feat_os_unix = [ "feat_Tier1", # "feat_require_unix", "feat_require_unix_hostid", "feat_require_unix_utmpx", ] # "feat_os_windows" == set of utilities which can be built/run on modern/usual windows platforms feat_os_windows = [ "feat_Tier1", ## == "feat_os_windows_legacy" + "hostname" ] ## (secondary platforms) feature sets # "feat_os_unix_gnueabihf" == set of utilities which can be built/run on the "arm-unknown-linux-gnueabihf" target (ARMv6 Linux [hardfloat]) feat_os_unix_gnueabihf = [ "feat_Tier1", # "feat_require_unix", "feat_require_unix_hostid", "feat_require_unix_utmpx", ] feat_os_unix_android = [ "feat_Tier1", # "feat_require_unix", ] ## feature sets with requirements (restricting cross-platform availability) # # ** NOTE: these `feat_require_...` sets should be minimized as much as possible to encourage cross-platform availability of utilities # # "feat_require_unix" == set of utilities requiring support which is only available on unix platforms feat_require_unix = [ "chgrp", "chmod", "chown", "chroot", "groups", "id", "install", "kill", "logname", "mkfifo", "mknod", "nice", "nohup", "pathchk", "stat", "stdbuf", "stty", "timeout", "tty", ] # "feat_require_unix_utmpx" == set of utilities requiring unix utmp/utmpx support # * ref: feat_require_unix_utmpx = ["pinky", "uptime", "users", "who"] # "feat_require_unix_hostid" == set of utilities requiring gethostid in libc (only some unixes provide) feat_require_unix_hostid = ["hostid"] # "feat_require_selinux" == set of utilities depending on SELinux. 
feat_require_selinux = ["chcon", "runcon"] ## (alternate/newer/smaller platforms) feature sets # "feat_os_unix_fuchsia" == set of utilities which can be built/run on the "Fuchsia" OS (refs: ; ) feat_os_unix_fuchsia = [ "feat_common_core", # "chgrp", "chmod", "chown", "du", "groups", "hostid", "install", "logname", "mkfifo", "mknod", "nice", "pathchk", "tty", "uname", "unlink", ] # "feat_os_unix_redox" == set of utilities which can be built/run on "Redox OS" (refs: ; ) feat_os_unix_redox = [ "feat_common_core", # "chmod", "stat", "uname", ] # "feat_os_windows_legacy" == slightly restricted set of utilities which can be built/run on early windows platforms (eg, "WinXP") feat_os_windows_legacy = [ "feat_common_core", # "arch", "nproc", "sync", "touch", "whoami", ] ## # * bypass/override ~ translate 'test' feature name to avoid dependency collision with rust core 'test' crate (o/w surfaces as compiler errors during testing) test = ["uu_test"] [workspace] resolver = "3" members = [ ".", "src/uu/*", "src/uu/stdbuf/src/libstdbuf", "src/uucore", "src/uucore_procs", "src/uuhelp_parser", "tests/benches/factor", "tests/uutests", # "fuzz", # TODO ] [workspace.package] authors = ["uutils developers"] categories = ["command-line-utilities"] edition = "2024" homepage = "https://github.com/uutils/coreutils" keywords = ["coreutils", "uutils", "cross-platform", "cli", "utility"] license = "MIT" readme = "README.package.md" version = "0.1.0" [workspace.dependencies] ansi-width = "0.1.0" bigdecimal = "0.4" binary-heap-plus = "0.5.0" bstr = "1.9.1" bytecount = "0.6.8" byteorder = "1.5.0" chrono = { version = "0.4.41", default-features = false, features = [ "std", "alloc", "clock", ] } clap = { version = "4.5", features = ["wrap_help", "cargo"] } clap_complete = "4.4" clap_mangen = "0.2" compare = "0.1.0" crossterm = "0.29.0" ctor = "0.4.1" ctrlc = { version = "3.4.7", features = ["termination"] } dns-lookup = { version = "2.0.4" } exacl = "0.12.0" file_diff = "1.0.0" filetime = 
"0.2.23" fnv = "1.0.7" fs_extra = "1.3.0" fts-sys = "0.2.16" gcd = "2.3" glob = "0.3.1" half = "2.4.1" hostname = "0.4" icu_collator = "2.0.0" icu_decimal = "2.0.0" icu_locale = "2.0.0" icu_provider = "2.0.0" indicatif = "0.18.0" itertools = "0.14.0" jiff = { version = "0.2.10", default-features = false, features = [ "std", "alloc", "tz-system", ] } libc = "0.2.172" linux-raw-sys = "0.10" lscolors = { version = "0.20.0", default-features = false, features = [ "gnu_legacy", ] } memchr = "2.7.2" memmap2 = "0.9.4" nix = { version = "0.30", default-features = false } nom = "8.0.0" notify = { version = "=8.1.0", features = ["macos_kqueue"] } num-bigint = "0.4.4" num-prime = "0.4.4" num-traits = "0.2.19" number_prefix = "0.4" onig = { version = "~6.5.1", default-features = false } parse_datetime = "0.11.0" phf = "0.12.1" phf_codegen = "0.12.1" platform-info = "2.0.3" rand = { version = "0.9.0", features = ["small_rng"] } rand_core = "0.9.0" rayon = "1.10" regex = "1.10.4" rstest = "0.26.0" rust-ini = "0.21.0" same-file = "1.0.6" self_cell = "1.0.4" selinux = "0.5.2" signal-hook = "0.3.17" tempfile = "3.15.0" terminal_size = "0.4.0" textwrap = { version = "0.16.1", features = ["terminal_size"] } thiserror = "2.0.3" time = { version = "0.3.36" } unicode-width = "0.2.0" utmp-classic = "0.1.6" uutils_term_grid = "0.7" walkdir = "2.5" winapi-util = "0.1.8" windows-sys = { version = "0.60.1", default-features = false } xattr = "1.3.1" zip = { version = "4.0.0", default-features = false, features = ["deflate"] } hex = "0.4.3" md-5 = "0.10.6" sha1 = "0.10.6" sha2 = "0.10.8" sha3 = "0.10.8" blake2b_simd = "1.0.2" blake3 = "1.5.1" sm3 = "0.4.2" crc32fast = "1.4.2" digest = "0.10.7" # Fluent dependencies fluent = "0.17.0" unic-langid = "0.9.6" fluent-syntax = "0.12.0" uucore = { version = "0.1.0", package = "uucore", path = "src/uucore" } uucore_procs = { version = "0.1.0", package = "uucore_procs", path = "src/uucore_procs" } uu_ls = { version = "0.1.0", path = "src/uu/ls" } 
uu_base32 = { version = "0.1.0", path = "src/uu/base32" } uutests = { version = "0.1.0", package = "uutests", path = "tests/uutests" } [dependencies] clap.workspace = true uucore.workspace = true clap_complete.workspace = true clap_mangen.workspace = true phf.workspace = true selinux = { workspace = true, optional = true } textwrap.workspace = true zip = { workspace = true, optional = true } uuhelp_parser = { optional = true, version = ">=0.0.19", path = "src/uuhelp_parser" } # * uutils uu_test = { optional = true, version = "0.1.0", package = "uu_test", path = "src/uu/test" } # arch = { optional = true, version = "0.1.0", package = "uu_arch", path = "src/uu/arch" } base32 = { optional = true, version = "0.1.0", package = "uu_base32", path = "src/uu/base32" } base64 = { optional = true, version = "0.1.0", package = "uu_base64", path = "src/uu/base64" } basename = { optional = true, version = "0.1.0", package = "uu_basename", path = "src/uu/basename" } basenc = { optional = true, version = "0.1.0", package = "uu_basenc", path = "src/uu/basenc" } cat = { optional = true, version = "0.1.0", package = "uu_cat", path = "src/uu/cat" } chcon = { optional = true, version = "0.1.0", package = "uu_chcon", path = "src/uu/chcon" } chgrp = { optional = true, version = "0.1.0", package = "uu_chgrp", path = "src/uu/chgrp" } chmod = { optional = true, version = "0.1.0", package = "uu_chmod", path = "src/uu/chmod" } chown = { optional = true, version = "0.1.0", package = "uu_chown", path = "src/uu/chown" } chroot = { optional = true, version = "0.1.0", package = "uu_chroot", path = "src/uu/chroot" } cksum = { optional = true, version = "0.1.0", package = "uu_cksum", path = "src/uu/cksum" } comm = { optional = true, version = "0.1.0", package = "uu_comm", path = "src/uu/comm" } cp = { optional = true, version = "0.1.0", package = "uu_cp", path = "src/uu/cp" } csplit = { optional = true, version = "0.1.0", package = "uu_csplit", path = "src/uu/csplit" } cut = { optional = true, 
version = "0.1.0", package = "uu_cut", path = "src/uu/cut" } date = { optional = true, version = "0.1.0", package = "uu_date", path = "src/uu/date" } dd = { optional = true, version = "0.1.0", package = "uu_dd", path = "src/uu/dd" } df = { optional = true, version = "0.1.0", package = "uu_df", path = "src/uu/df" } dir = { optional = true, version = "0.1.0", package = "uu_dir", path = "src/uu/dir" } dircolors = { optional = true, version = "0.1.0", package = "uu_dircolors", path = "src/uu/dircolors" } dirname = { optional = true, version = "0.1.0", package = "uu_dirname", path = "src/uu/dirname" } du = { optional = true, version = "0.1.0", package = "uu_du", path = "src/uu/du" } echo = { optional = true, version = "0.1.0", package = "uu_echo", path = "src/uu/echo" } env = { optional = true, version = "0.1.0", package = "uu_env", path = "src/uu/env" } expand = { optional = true, version = "0.1.0", package = "uu_expand", path = "src/uu/expand" } expr = { optional = true, version = "0.1.0", package = "uu_expr", path = "src/uu/expr" } factor = { optional = true, version = "0.1.0", package = "uu_factor", path = "src/uu/factor" } false = { optional = true, version = "0.1.0", package = "uu_false", path = "src/uu/false" } fmt = { optional = true, version = "0.1.0", package = "uu_fmt", path = "src/uu/fmt" } fold = { optional = true, version = "0.1.0", package = "uu_fold", path = "src/uu/fold" } groups = { optional = true, version = "0.1.0", package = "uu_groups", path = "src/uu/groups" } hashsum = { optional = true, version = "0.1.0", package = "uu_hashsum", path = "src/uu/hashsum" } head = { optional = true, version = "0.1.0", package = "uu_head", path = "src/uu/head" } hostid = { optional = true, version = "0.1.0", package = "uu_hostid", path = "src/uu/hostid" } hostname = { optional = true, version = "0.1.0", package = "uu_hostname", path = "src/uu/hostname" } id = { optional = true, version = "0.1.0", package = "uu_id", path = "src/uu/id" } install = { optional = true, 
version = "0.1.0", package = "uu_install", path = "src/uu/install" } join = { optional = true, version = "0.1.0", package = "uu_join", path = "src/uu/join" } kill = { optional = true, version = "0.1.0", package = "uu_kill", path = "src/uu/kill" } link = { optional = true, version = "0.1.0", package = "uu_link", path = "src/uu/link" } ln = { optional = true, version = "0.1.0", package = "uu_ln", path = "src/uu/ln" } ls = { optional = true, version = "0.1.0", package = "uu_ls", path = "src/uu/ls" } logname = { optional = true, version = "0.1.0", package = "uu_logname", path = "src/uu/logname" } mkdir = { optional = true, version = "0.1.0", package = "uu_mkdir", path = "src/uu/mkdir" } mkfifo = { optional = true, version = "0.1.0", package = "uu_mkfifo", path = "src/uu/mkfifo" } mknod = { optional = true, version = "0.1.0", package = "uu_mknod", path = "src/uu/mknod" } mktemp = { optional = true, version = "0.1.0", package = "uu_mktemp", path = "src/uu/mktemp" } more = { optional = true, version = "0.1.0", package = "uu_more", path = "src/uu/more" } mv = { optional = true, version = "0.1.0", package = "uu_mv", path = "src/uu/mv" } nice = { optional = true, version = "0.1.0", package = "uu_nice", path = "src/uu/nice" } nl = { optional = true, version = "0.1.0", package = "uu_nl", path = "src/uu/nl" } nohup = { optional = true, version = "0.1.0", package = "uu_nohup", path = "src/uu/nohup" } nproc = { optional = true, version = "0.1.0", package = "uu_nproc", path = "src/uu/nproc" } numfmt = { optional = true, version = "0.1.0", package = "uu_numfmt", path = "src/uu/numfmt" } od = { optional = true, version = "0.1.0", package = "uu_od", path = "src/uu/od" } paste = { optional = true, version = "0.1.0", package = "uu_paste", path = "src/uu/paste" } pathchk = { optional = true, version = "0.1.0", package = "uu_pathchk", path = "src/uu/pathchk" } pinky = { optional = true, version = "0.1.0", package = "uu_pinky", path = "src/uu/pinky" } pr = { optional = true, version = 
"0.1.0", package = "uu_pr", path = "src/uu/pr" } printenv = { optional = true, version = "0.1.0", package = "uu_printenv", path = "src/uu/printenv" } printf = { optional = true, version = "0.1.0", package = "uu_printf", path = "src/uu/printf" } ptx = { optional = true, version = "0.1.0", package = "uu_ptx", path = "src/uu/ptx" } pwd = { optional = true, version = "0.1.0", package = "uu_pwd", path = "src/uu/pwd" } readlink = { optional = true, version = "0.1.0", package = "uu_readlink", path = "src/uu/readlink" } realpath = { optional = true, version = "0.1.0", package = "uu_realpath", path = "src/uu/realpath" } rm = { optional = true, version = "0.1.0", package = "uu_rm", path = "src/uu/rm" } rmdir = { optional = true, version = "0.1.0", package = "uu_rmdir", path = "src/uu/rmdir" } runcon = { optional = true, version = "0.1.0", package = "uu_runcon", path = "src/uu/runcon" } seq = { optional = true, version = "0.1.0", package = "uu_seq", path = "src/uu/seq" } shred = { optional = true, version = "0.1.0", package = "uu_shred", path = "src/uu/shred" } shuf = { optional = true, version = "0.1.0", package = "uu_shuf", path = "src/uu/shuf" } sleep = { optional = true, version = "0.1.0", package = "uu_sleep", path = "src/uu/sleep" } sort = { optional = true, version = "0.1.0", package = "uu_sort", path = "src/uu/sort" } split = { optional = true, version = "0.1.0", package = "uu_split", path = "src/uu/split" } stat = { optional = true, version = "0.1.0", package = "uu_stat", path = "src/uu/stat" } stdbuf = { optional = true, version = "0.1.0", package = "uu_stdbuf", path = "src/uu/stdbuf" } stty = { optional = true, version = "0.1.0", package = "uu_stty", path = "src/uu/stty" } sum = { optional = true, version = "0.1.0", package = "uu_sum", path = "src/uu/sum" } sync = { optional = true, version = "0.1.0", package = "uu_sync", path = "src/uu/sync" } tac = { optional = true, version = "0.1.0", package = "uu_tac", path = "src/uu/tac" } tail = { optional = true, version = 
"0.1.0", package = "uu_tail", path = "src/uu/tail" } tee = { optional = true, version = "0.1.0", package = "uu_tee", path = "src/uu/tee" } timeout = { optional = true, version = "0.1.0", package = "uu_timeout", path = "src/uu/timeout" } touch = { optional = true, version = "0.1.0", package = "uu_touch", path = "src/uu/touch" } tr = { optional = true, version = "0.1.0", package = "uu_tr", path = "src/uu/tr" } true = { optional = true, version = "0.1.0", package = "uu_true", path = "src/uu/true" } truncate = { optional = true, version = "0.1.0", package = "uu_truncate", path = "src/uu/truncate" } tsort = { optional = true, version = "0.1.0", package = "uu_tsort", path = "src/uu/tsort" } tty = { optional = true, version = "0.1.0", package = "uu_tty", path = "src/uu/tty" } uname = { optional = true, version = "0.1.0", package = "uu_uname", path = "src/uu/uname" } unexpand = { optional = true, version = "0.1.0", package = "uu_unexpand", path = "src/uu/unexpand" } uniq = { optional = true, version = "0.1.0", package = "uu_uniq", path = "src/uu/uniq" } unlink = { optional = true, version = "0.1.0", package = "uu_unlink", path = "src/uu/unlink" } uptime = { optional = true, version = "0.1.0", package = "uu_uptime", path = "src/uu/uptime" } users = { optional = true, version = "0.1.0", package = "uu_users", path = "src/uu/users" } vdir = { optional = true, version = "0.1.0", package = "uu_vdir", path = "src/uu/vdir" } wc = { optional = true, version = "0.1.0", package = "uu_wc", path = "src/uu/wc" } who = { optional = true, version = "0.1.0", package = "uu_who", path = "src/uu/who" } whoami = { optional = true, version = "0.1.0", package = "uu_whoami", path = "src/uu/whoami" } yes = { optional = true, version = "0.1.0", package = "uu_yes", path = "src/uu/yes" } # this breaks clippy linting with: "tests/by-util/test_factor_benches.rs: No such file or directory (os error 2)" # factor_benches = { optional = true, version = "0.0.0", package = "uu_factor_benches", path = 
"tests/benches/factor" } # # * pinned transitive dependencies # Not needed for now. Keep as examples: #pin_cc = { version="1.0.61, < 1.0.62", package="cc" } ## cc v1.0.62 has compiler errors for MinRustV v1.32.0, requires 1.34 (for `std::str::split_ascii_whitespace()`) [dev-dependencies] chrono.workspace = true ctor.workspace = true filetime.workspace = true glob.workspace = true libc.workspace = true num-prime.workspace = true pretty_assertions = "1.4.0" rand.workspace = true regex.workspace = true sha1 = { workspace = true, features = ["std"] } tempfile.workspace = true time = { workspace = true, features = ["local-offset"] } unindent = "0.2.3" uutests.workspace = true uucore = { workspace = true, features = [ "mode", "entries", "process", "signals", "utmpx", ] } walkdir.workspace = true hex-literal = "1.0.0" rstest.workspace = true [target.'cfg(unix)'.dev-dependencies] nix = { workspace = true, features = [ "process", "signal", "socket", "user", "term", ] } rlimit = "0.10.1" xattr.workspace = true # Used in test_uptime::test_uptime_with_file_containing_valid_boot_time_utmpx_record # to deserialize an utmpx struct into a binary file [target.'cfg(all(target_family= "unix",not(target_os = "macos")))'.dev-dependencies] serde = { version = "1.0.202", features = ["derive"] } bincode = { version = "2.0.1", features = ["serde"] } serde-big-array = "0.5.1" [build-dependencies] phf_codegen.workspace = true [[bin]] name = "coreutils" path = "src/bin/coreutils.rs" [[bin]] name = "uudoc" path = "src/bin/uudoc.rs" required-features = ["uudoc"] # The default release profile. It contains all optimizations. # With this profile (like in the standard release profile), # the stack traces will still be available. [profile.release] lto = true # A release-like profile that is tuned to be fast, even when being fast # compromises on binary size. This includes aborting on panic. 
[profile.release-fast] inherits = "release" panic = "abort" # A release-like profile that is as small as possible. [profile.release-small] inherits = "release" opt-level = "z" panic = "abort" strip = true # A release-like profile with debug info, useful for profiling. # See https://github.com/mstange/samply . [profile.profiling] inherits = "release" debug = true [lints.clippy] multiple_crate_versions = "allow" cargo_common_metadata = "allow" missing_panics_doc = "allow" # TODO remove when https://github.com/rust-lang/rust-clippy/issues/13774 is fixed large_stack_arrays = "allow" use_self = "warn" needless_pass_by_value = "warn" semicolon_if_nothing_returned = "warn" single_char_pattern = "warn" explicit_iter_loop = "warn" if_not_else = "warn" manual_let_else = "warn" all = { level = "deny", priority = -1 } cargo = { level = "warn", priority = -1 } pedantic = { level = "deny", priority = -1 } # This is the linting configuration for all crates. # Eventually the clippy settings from the `[lints]` section should be moved here. # In order to use these, all crates have `[lints] workspace = true` section. [workspace.lints.rust] unused_qualifications = "warn" [workspace.lints.clippy] # The counts were generated with this command: # cargo clippy --all-targets --workspace --message-format=json --quiet \ # | jq -r '.message.code.code | select(. 
!= null and startswith("clippy::"))' \ # | sort | uniq -c | sort -h -r # # TODO: # remove large_stack_arrays when https://github.com/rust-lang/rust-clippy/issues/13774 is fixed # all = { level = "warn", priority = -1 } cargo = { level = "warn", priority = -1 } pedantic = { level = "warn", priority = -1 } cargo_common_metadata = "allow" # 3240 multiple_crate_versions = "allow" # 2882 missing_errors_doc = "allow" # 1572 missing_panics_doc = "allow" # 946 must_use_candidate = "allow" # 322 match_same_arms = "allow" # 204 redundant_closure_for_method_calls = "allow" # 125 cast_possible_truncation = "allow" # 122 too_many_lines = "allow" # 101 trivially_copy_pass_by_ref = "allow" # 84 single_match_else = "allow" # 82 cast_possible_wrap = "allow" # 78 cast_sign_loss = "allow" # 70 struct_excessive_bools = "allow" # 68 cast_precision_loss = "allow" # 52 cast_lossless = "allow" # 35 unnecessary_wraps = "allow" # 33 ignored_unit_patterns = "allow" # 21 similar_names = "allow" # 20 large_stack_arrays = "allow" # 20 wildcard_imports = "allow" # 18 used_underscore_binding = "allow" # 18 needless_pass_by_value = "allow" # 16 float_cmp = "allow" # 12 items_after_statements = "allow" # 11 return_self_not_must_use = "allow" # 8 needless_continue = "allow" # 6 inline_always = "allow" # 6 fn_params_excessive_bools = "allow" # 6 used_underscore_items = "allow" # 2 should_panic_without_expect = "allow" # 2 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/Cross.toml000066400000000000000000000003451504311601400227340ustar00rootroot00000000000000# spell-checker:ignore (misc) dpkg noninteractive tzdata [build] pre-build = [ "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install tzdata", ] [build.env] passthrough = ["CI", "RUST_BACKTRACE", "CARGO_TERM_COLOR"] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/DEVELOPMENT.md000066400000000000000000000264551504311601400230640ustar00rootroot00000000000000 # Setting up your local development environment For contributing rules and 
best practices please refer to [CONTRIBUTING.md](CONTRIBUTING.md) ## Before you start For this guide we assume that you already have a GitHub account and have `git` and your favorite code editor or IDE installed and configured. Before you start working on coreutils, please follow these steps: 1. Fork the [coreutils repository](https://github.com/uutils/coreutils) to your GitHub account. ***Tip:*** See [this GitHub guide](https://docs.github.com/en/get-started/quickstart/fork-a-repo) for more information on this step. 2. Clone that fork to your local development environment: ```shell git clone https://github.com/YOUR-GITHUB-ACCOUNT/coreutils cd coreutils ``` ## Tools You will need the tools mentioned in this section to build and test your code changes locally. This section will explain how to install and configure these tools. We also have an extensive CI that uses these tools and will check your code before it can be merged. The next section [Testing](#testing) will explain how to run those checks locally to avoid waiting for the CI. ### Rust toolchain [Install Rust](https://www.rust-lang.org/tools/install) If you're using rustup to install and manage your Rust toolchains, `clippy` and `rustfmt` are usually already installed. If you are using one of the alternative methods, please make sure to install them manually. See following sub-sections for their usage: [clippy](#clippy) [rustfmt](#rustfmt). ***Tip*** You might also need to add 'llvm-tools' component if you are going to [generate code coverage reports locally](#code-coverage-report): ```shell rustup component add llvm-tools-preview ``` ### GNU utils and prerequisites If you are developing on Linux, most likely you already have all/most GNU utilities and prerequisites installed. To make sure, please check GNU coreutils [README-prereq](https://github.com/coreutils/coreutils/blob/master/README-prereq). You will need these to [run uutils against the GNU test suite locally](#comparing-with-gnu). 
For MacOS and Windows platform specific setup please check [MacOS GNU utils](#macos-gnu-utils) and [Windows GNU utils](#windows-gnu-utils) sections respectfully. ### pre-commit hooks A configuration for `pre-commit` is provided in the repository. It allows automatically checking every git commit you make to ensure it compiles, and passes `clippy` and `rustfmt` without warnings. To use the provided hook: 1. [Install `pre-commit`](https://pre-commit.com/#install) 1. Run `pre-commit install` while in the repository directory Your git commits will then automatically be checked. If a check fails, an error message will explain why, and your commit will be canceled. You can then make the suggested changes, and run `git commit ...` again. **NOTE: On MacOS** the pre-commit hooks are currently broken. There are workarounds involving switching to unstable nightly Rust and components. ### clippy ```shell cargo clippy --all-targets --all-features ``` The `msrv` key in the clippy configuration file `clippy.toml` is used to disable lints pertaining to newer features by specifying the minimum supported Rust version (MSRV). ### rustfmt ```shell cargo fmt --all ``` ### cargo-deny This project uses [cargo-deny](https://github.com/EmbarkStudios/cargo-deny/) to detect duplicate dependencies, checks licenses, etc. To run it locally, first install it and then run with: ```shell cargo deny --all-features check all ``` ### Markdown linter We use [markdownlint](https://github.com/DavidAnson/markdownlint) to lint the Markdown files in the repository. ### Spell checker We use `cspell` as spell checker for all files in the project. If you are using VS Code, you can install the [code spell checker](https://marketplace.visualstudio.com/items?itemName=streetsidesoftware.code-spell-checker) extension to enable spell checking within your editor. Otherwise, you can install [cspell](https://cspell.org/) separately. 
If you want to make the spell checker ignore a word, you can add ```rust // spell-checker:ignore word_to_ignore ``` at the top of the file. ## Testing This section explains how to run our CI checks locally. Testing can be done using either Cargo or `make`. ### Testing with Cargo Just like with building, we follow the standard procedure for testing using Cargo: ```shell cargo test ``` By default, `cargo test` only runs the common programs. To run also platform specific tests, run: ```shell cargo test --features unix ``` If you would prefer to test a select few utilities: ```shell cargo test --features "chmod mv tail" --no-default-features ``` If you also want to test the core utilities: ```shell cargo test -p uucore -p coreutils # or cargo test --all-features -p uucore ``` Running the complete test suite might take a while. We use [nextest](https://nexte.st/index.html) in the CI and you might want to try it out locally. It can speed up the execution time of the whole test run significantly if the cpu has multiple cores. ```shell cargo nextest run --features unix --no-fail-fast ``` To debug: ```shell rust-gdb --args target/debug/coreutils ls (gdb) b ls.rs:79 (gdb) run ``` ### Testing with GNU Make To simply test all available utilities: ```shell make test ``` To test all but a few of the available utilities: ```shell make SKIP_UTILS='UTILITY_1 UTILITY_2' test ``` To test only a few of the available utilities: ```shell make UTILS='UTILITY_1 UTILITY_2' test ``` To include tests for unimplemented behavior: ```shell make UTILS='UTILITY_1 UTILITY_2' SPEC=y test ``` To run tests with `nextest` just use the nextest target. Note you'll need to [install](https://nexte.st/book/installation.html) `nextest` first. 
The `nextest` target accepts the same arguments like the default `test` target, so it's possible to pass arguments to `nextest run` via `CARGOFLAGS`: ```shell make CARGOFLAGS='--no-fail-fast' UTILS='UTILITY_1 UTILITY_2' nextest ``` ### Run Busybox Tests This testing functionality is only available on *nix operating systems and requires `make`. To run busybox tests for all utilities for which busybox has tests ```shell make busytest ``` To run busybox tests for a few of the available utilities ```shell make UTILS='UTILITY_1 UTILITY_2' busytest ``` To pass an argument like "-v" to the busybox test runtime ```shell make UTILS='UTILITY_1 UTILITY_2' RUNTEST_ARGS='-v' busytest ``` ### Comparing with GNU To run uutils against the GNU test suite locally, run the following commands: ```shell bash util/build-gnu.sh # Build uutils with release optimizations bash util/build-gnu.sh --release-build bash util/run-gnu-test.sh # To run a single test: bash util/run-gnu-test.sh tests/touch/not-owner.sh # for example # To run several tests: bash util/run-gnu-test.sh tests/touch/not-owner.sh tests/rm/no-give-up.sh # for example # If this is a perl (.pl) test, to run in debug: DEBUG=1 bash util/run-gnu-test.sh tests/misc/sm3sum.pl ``` ***Tip:*** First time you run `bash util/build-gnu.sh` command, it will provide instructions on how to checkout GNU coreutils repository at the correct release tag. Please follow those instructions and when done, run `bash util/build-gnu.sh` command again. Note that GNU test suite relies on individual utilities (not the multicall binary). You also need to install [quilt](https://savannah.nongnu.org/projects/quilt), a tool used to manage a stack of patches for modifying GNU tests. On FreeBSD, you need to install packages for GNU coreutils and sed (used in shell scripts instead of system commands): ```shell pkg install coreutils gsed ``` ## Code coverage report Code coverage report can be generated using [grcov](https://github.com/mozilla/grcov). 
To generate [gcov-based](https://github.com/mozilla/grcov#example-how-to-generate-gcda-files-for-a-rust-project) coverage report ```shell export CARGO_INCREMENTAL=0 export RUSTFLAGS="-Cinstrument-coverage -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" export RUSTDOCFLAGS="-Cpanic=abort" cargo build # e.g., --features feat_os_unix cargo test # e.g., --features feat_os_unix test_pathchk grcov . -s . --binary-path ./target/debug/ -t html --branch --ignore-not-existing --ignore build.rs --excl-br-line "^\s*((debug_)?assert(_eq|_ne)?\#\[derive\()" -o ./target/debug/coverage/ # open target/debug/coverage/index.html in browser ``` if changes are not reflected in the report then run `cargo clean` and run the above commands. ## Tips for setting up on Mac ### C Compiler and linker On MacOS you'll need to install C compiler & linker: ```shell xcode-select --install ``` ### MacOS GNU utils On MacOS you will need to install [Homebrew](https://docs.brew.sh/Installation) and use it to install the following Homebrew formulas: ```shell brew install \ coreutils \ autoconf \ gettext \ wget \ texinfo \ xz \ automake \ gnu-sed \ m4 \ bison \ pre-commit \ findutils ``` After installing these Homebrew formulas, please make sure to add the following lines to your `zsh` or `bash` rc file, i.e. `~/.profile` or `~/.zshrc` or `~/.bashrc` ... (assuming Homebrew is installed at default location `/opt/homebrew`): ```shell eval "$(/opt/homebrew/bin/brew shellenv)" export PATH="/opt/homebrew/opt/coreutils/libexec/gnubin:$PATH" export PATH="/opt/homebrew/opt/bison/bin:$PATH" export PATH="/opt/homebrew/opt/findutils/libexec/gnubin:$PATH" ``` Last step is to link Homebrew coreutils version of `timeout` to `/usr/local/bin` (as admin user): ```shell sudo ln -s /opt/homebrew/bin/timeout /usr/local/bin/timeout ``` Do not forget to either source updated rc file or restart you terminal session to update environment variables. 
## Tips for setting up on Windows ### MSVC build tools On Windows you'll need the MSVC build tools for Visual Studio 2013 or later. If you are using `rustup-init.exe` to install Rust toolchain, it will guide you through the process of downloading and installing these prerequisites. Otherwise please follow [this guide](https://learn.microsoft.com/en-us/windows/dev-environment/rust/setup). ### Windows GNU utils If you have used [Git for Windows](https://gitforwindows.org) to install `git` on you Windows system you might already have some GNU core utilities installed as part of "GNU Bash" included in Git for Windows package, but it is not a complete package. [This article](https://gist.github.com/evanwill/0207876c3243bbb6863e65ec5dc3f058) provides instruction on how to add more to it. Alternatively you can install [Cygwin](https://www.cygwin.com) and/or use [WSL2](https://learn.microsoft.com/en-us/windows/wsl/compare-versions#whats-new-in-wsl-2) to get access to all GNU core utilities on Windows. ## Preparing a new release 1. Modify `util/update-version.sh` (FROM & TO) and run it 1. Submit a new PR with these changes and wait for it to be merged 1. Tag the new release `git tag -a X.Y.Z` and `git push --tags` 1. Once the CI is green, a new release will be automatically created in draft mode. Reuse this release and make sure that assets have been added. 1. Write the release notes (it takes time) following previous examples 1. 
Run `util/publish.sh --do-it` to publish the new release to crates.io coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/GNUmakefile000066400000000000000000000262061504311601400230240ustar00rootroot00000000000000# spell-checker:ignore (misc) testsuite runtest findstring (targets) busytest toybox distclean pkgs nextest ; (vars/env) BINDIR BUILDDIR CARGOFLAGS DESTDIR DOCSDIR INSTALLDIR INSTALLEES MULTICALL DATAROOTDIR TESTDIR manpages # Config options PROFILE ?= debug MULTICALL ?= n COMPLETIONS ?= y MANPAGES ?= y LOCALES ?= y INSTALL ?= install ifneq (,$(filter install, $(MAKECMDGOALS))) override PROFILE:=release endif # Needed for the foreach loops to split each loop into a separate command define newline endef PROFILE_CMD := ifeq ($(PROFILE),release) PROFILE_CMD = --release endif RM := rm -rf # Binaries CARGO ?= cargo CARGOFLAGS ?= # Install directories PREFIX ?= /usr/local DESTDIR ?= BINDIR ?= $(PREFIX)/bin DATAROOTDIR ?= $(PREFIX)/share LIBSTDBUF_DIR ?= $(PREFIX)/libexec/coreutils # Export variable so that it is used during the build export LIBSTDBUF_DIR INSTALLDIR_BIN=$(DESTDIR)$(BINDIR) #prefix to apply to coreutils binary and all tool binaries PROG_PREFIX ?= # This won't support any directory with spaces in its name, but you can just # make a symlink without spaces that points to the directory. BASEDIR ?= $(shell pwd) ifdef CARGO_TARGET_DIR BUILDDIR := $(CARGO_TARGET_DIR)/${PROFILE} else BUILDDIR := $(BASEDIR)/target/${PROFILE} endif PKG_BUILDDIR := $(BUILDDIR)/deps DOCSDIR := $(BASEDIR)/docs BUSYBOX_ROOT := $(BASEDIR)/tmp BUSYBOX_VER := 1.36.1 BUSYBOX_SRC := $(BUSYBOX_ROOT)/busybox-$(BUSYBOX_VER) TOYBOX_ROOT := $(BASEDIR)/tmp TOYBOX_VER := 0.8.12 TOYBOX_SRC := $(TOYBOX_ROOT)/toybox-$(TOYBOX_VER) #------------------------------------------------------------------------ # Detect the host system. # On Windows the environment already sets OS = Windows_NT. # Otherwise let it default to the kernel name returned by uname -s # (Linux, Darwin, FreeBSD, …). 
#------------------------------------------------------------------------ OS ?= $(shell uname -s) ifdef SELINUX_ENABLED override SELINUX_ENABLED := 0 # Now check if we should enable it (only on non-Windows) ifneq ($(OS),Windows_NT) ifeq ($(shell if [ -x /sbin/selinuxenabled ] && /sbin/selinuxenabled 2>/dev/null; then echo 0; else echo 1; fi),0) override SELINUX_ENABLED := 1 $(info /sbin/selinuxenabled successful) else $(info SELINUX_ENABLED=1 but /sbin/selinuxenabled failed) endif endif endif # Possible programs PROGS := \ base32 \ base64 \ basenc \ basename \ cat \ cksum \ comm \ cp \ csplit \ cut \ date \ dd \ df \ dir \ dircolors \ dirname \ echo \ env \ expand \ expr \ factor \ false \ fmt \ fold \ hashsum \ head \ join \ link \ ln \ ls \ mkdir \ mktemp \ more \ mv \ nl \ numfmt \ nproc \ od \ paste \ pr \ printenv \ printf \ ptx \ pwd \ readlink \ realpath \ rm \ rmdir \ seq \ shred \ shuf \ sleep \ sort \ split \ stty \ sum \ sync \ tac \ tail \ tee \ test \ tr \ true \ truncate \ tsort \ unexpand \ uniq \ vdir \ wc \ whoami \ yes UNIX_PROGS := \ arch \ chgrp \ chmod \ chown \ chroot \ du \ groups \ hostid \ hostname \ id \ install \ kill \ logname \ mkfifo \ mknod \ nice \ nohup \ pathchk \ pinky \ stat \ stdbuf \ timeout \ touch \ tty \ uname \ unlink \ uptime \ users \ who SELINUX_PROGS := \ chcon \ runcon $(info Detected OS = $(OS)) # Don't build the SELinux programs on macOS (Darwin) ifeq ($(OS),Darwin) SELINUX_PROGS := endif ifneq ($(OS),Windows_NT) PROGS := $(PROGS) $(UNIX_PROGS) # Build the selinux command even if not on the system PROGS := $(PROGS) $(SELINUX_PROGS) # Always use external libstdbuf when building with make (Unix only) CARGOFLAGS += --features feat_external_libstdbuf endif UTILS ?= $(PROGS) # Programs with usable tests TEST_PROGS := \ base32 \ base64 \ basename \ cat \ chcon \ chgrp \ chmod \ chown \ cksum \ comm \ cp \ csplit \ cut \ date \ dircolors \ dirname \ echo \ env \ expr \ factor \ false \ fold \ hashsum \ head \ install \ link 
\ ln \ ls \ mkdir \ mktemp \ mv \ nl \ numfmt \ od \ paste \ pathchk \ pinky \ pr \ printf \ ptx \ pwd \ readlink \ realpath \ rm \ rmdir \ runcon \ seq \ sleep \ sort \ split \ stat \ stdbuf \ sum \ tac \ tail \ test \ touch \ tr \ true \ truncate \ tsort \ uname \ unexpand \ uniq \ unlink \ wc \ who TESTS := \ $(sort $(filter $(UTILS),$(filter-out $(SKIP_UTILS),$(TEST_PROGS)))) TEST_NO_FAIL_FAST := TEST_SPEC_FEATURE := ifneq ($(SPEC),) TEST_NO_FAIL_FAST :=--no-fail-fast TEST_SPEC_FEATURE := test_unimplemented else ifeq ($(SELINUX_ENABLED),1) TEST_NO_FAIL_FAST := TEST_SPEC_FEATURE := selinux BUILD_SPEC_FEATURE := selinux endif define TEST_BUSYBOX test_busybox_$(1): -(cd $(BUSYBOX_SRC)/testsuite && bindir=$(BUILDDIR) ./runtest $(RUNTEST_ARGS) $(1)) endef # Output names EXES := \ $(sort $(filter $(UTILS),$(filter-out $(SKIP_UTILS),$(PROGS)))) INSTALLEES := ${EXES} ifeq (${MULTICALL}, y) INSTALLEES := ${INSTALLEES} coreutils endif all: build do_install = $(INSTALL) ${1} use_default := 1 build-pkgs: ifneq (${MULTICALL}, y) ifdef BUILD_SPEC_FEATURE ${CARGO} build ${CARGOFLAGS} --features "$(BUILD_SPEC_FEATURE)" ${PROFILE_CMD} $(foreach pkg,$(EXES),-p uu_$(pkg)) else ${CARGO} build ${CARGOFLAGS} ${PROFILE_CMD} $(foreach pkg,$(EXES),-p uu_$(pkg)) endif endif build-coreutils: ${CARGO} build ${CARGOFLAGS} --features "${EXES} $(BUILD_SPEC_FEATURE)" ${PROFILE_CMD} --no-default-features build: build-coreutils build-pkgs locales $(foreach test,$(filter-out $(SKIP_UTILS),$(PROGS)),$(eval $(call TEST_BUSYBOX,$(test)))) test: ${CARGO} test ${CARGOFLAGS} --features "$(TESTS) $(TEST_SPEC_FEATURE)" --no-default-features $(TEST_NO_FAIL_FAST) nextest: ${CARGO} nextest run ${CARGOFLAGS} --features "$(TESTS) $(TEST_SPEC_FEATURE)" --no-default-features $(TEST_NO_FAIL_FAST) test_toybox: -(cd $(TOYBOX_SRC)/ && make tests) toybox-src: if [ ! 
-e "$(TOYBOX_SRC)" ] ; then \ mkdir -p "$(TOYBOX_ROOT)" ; \ wget "https://github.com/landley/toybox/archive/refs/tags/$(TOYBOX_VER).tar.gz" -P "$(TOYBOX_ROOT)" ; \ tar -C "$(TOYBOX_ROOT)" -xf "$(TOYBOX_ROOT)/$(TOYBOX_VER).tar.gz" ; \ sed -i -e "s|TESTDIR=\".*\"|TESTDIR=\"$(BUILDDIR)\"|g" $(TOYBOX_SRC)/scripts/test.sh; \ sed -i -e "s/ || exit 1//g" $(TOYBOX_SRC)/scripts/test.sh; \ fi ; busybox-src: if [ ! -e "$(BUSYBOX_SRC)" ] ; then \ mkdir -p "$(BUSYBOX_ROOT)" ; \ wget "https://busybox.net/downloads/busybox-$(BUSYBOX_VER).tar.bz2" -P "$(BUSYBOX_ROOT)" ; \ tar -C "$(BUSYBOX_ROOT)" -xf "$(BUSYBOX_ROOT)/busybox-$(BUSYBOX_VER).tar.bz2" ; \ fi ; # This is a busybox-specific config file their test suite wants to parse. $(BUILDDIR)/.config: $(BASEDIR)/.busybox-config cp $< $@ # Test under the busybox test suite $(BUILDDIR)/busybox: busybox-src build-coreutils $(BUILDDIR)/.config cp "$(BUILDDIR)/coreutils" "$(BUILDDIR)/busybox" chmod +x $@ prepare-busytest: $(BUILDDIR)/busybox # disable inapplicable tests -( cd "$(BUSYBOX_SRC)/testsuite" ; if [ -e "busybox.tests" ] ; then mv busybox.tests busybox.tests- ; fi ; ) ifeq ($(EXES),) busytest: else busytest: $(BUILDDIR)/busybox $(addprefix test_busybox_,$(filter-out $(SKIP_UTILS),$(EXES))) endif clean: cargo clean cd $(DOCSDIR) && $(MAKE) clean distclean: clean $(CARGO) clean $(CARGOFLAGS) && $(CARGO) update $(CARGOFLAGS) ifeq ($(MANPAGES),y) manpages: build-coreutils mkdir -p $(BUILDDIR)/man/ $(foreach prog, $(INSTALLEES), \ $(BUILDDIR)/coreutils manpage $(prog) > $(BUILDDIR)/man/$(PROG_PREFIX)$(prog).1 $(newline) \ ) install-manpages: manpages mkdir -p $(DESTDIR)$(DATAROOTDIR)/man/man1 $(foreach prog, $(INSTALLEES), \ $(INSTALL) $(BUILDDIR)/man/$(PROG_PREFIX)$(prog).1 $(DESTDIR)$(DATAROOTDIR)/man/man1/ $(newline) \ ) else install-manpages: endif ifeq ($(COMPLETIONS),y) completions: build-coreutils mkdir -p $(BUILDDIR)/completions/zsh $(BUILDDIR)/completions/bash $(BUILDDIR)/completions/fish $(foreach prog, $(INSTALLEES), \ 
$(BUILDDIR)/coreutils completion $(prog) zsh > $(BUILDDIR)/completions/zsh/_$(PROG_PREFIX)$(prog) $(newline) \ $(BUILDDIR)/coreutils completion $(prog) bash > $(BUILDDIR)/completions/bash/$(PROG_PREFIX)$(prog) $(newline) \ $(BUILDDIR)/coreutils completion $(prog) fish > $(BUILDDIR)/completions/fish/$(PROG_PREFIX)$(prog).fish $(newline) \ ) install-completions: completions mkdir -p $(DESTDIR)$(DATAROOTDIR)/zsh/site-functions mkdir -p $(DESTDIR)$(DATAROOTDIR)/bash-completion/completions mkdir -p $(DESTDIR)$(DATAROOTDIR)/fish/vendor_completions.d $(foreach prog, $(INSTALLEES), \ $(INSTALL) $(BUILDDIR)/completions/zsh/_$(PROG_PREFIX)$(prog) $(DESTDIR)$(DATAROOTDIR)/zsh/site-functions/ $(newline) \ $(INSTALL) $(BUILDDIR)/completions/bash/$(PROG_PREFIX)$(prog) $(DESTDIR)$(DATAROOTDIR)/bash-completion/completions/ $(newline) \ $(INSTALL) $(BUILDDIR)/completions/fish/$(PROG_PREFIX)$(prog).fish $(DESTDIR)$(DATAROOTDIR)/fish/vendor_completions.d/ $(newline) \ ) else install-completions: endif ifeq ($(LOCALES),y) locales: $(foreach prog, $(INSTALLEES), \ if [ -d "$(BASEDIR)/src/uu/$(prog)/locales" ]; then \ mkdir -p "$(BUILDDIR)/locales/$(prog)"; \ for locale_file in "$(BASEDIR)"/src/uu/$(prog)/locales/*.ftl; do \ $(INSTALL) -v "$$locale_file" "$(BUILDDIR)/locales/$(prog)/"; \ done; \ fi $(newline) \ ) install-locales: $(foreach prog, $(INSTALLEES), \ if [ -d "$(BASEDIR)/src/uu/$(prog)/locales" ]; then \ mkdir -p "$(DESTDIR)$(DATAROOTDIR)/locales/$(prog)"; \ for locale_file in "$(BASEDIR)"/src/uu/$(prog)/locales/*.ftl; do \ $(INSTALL) -v "$$locale_file" "$(DESTDIR)$(DATAROOTDIR)/locales/$(prog)/"; \ done; \ fi $(newline) \ ) else install-locales: endif install: build install-manpages install-completions install-locales mkdir -p $(INSTALLDIR_BIN) ifneq ($(OS),Windows_NT) mkdir -p $(DESTDIR)$(LIBSTDBUF_DIR) $(INSTALL) -m 755 $(BUILDDIR)/deps/libstdbuf* $(DESTDIR)$(LIBSTDBUF_DIR)/ endif ifeq (${MULTICALL}, y) $(INSTALL) $(BUILDDIR)/coreutils 
$(INSTALLDIR_BIN)/$(PROG_PREFIX)coreutils $(foreach prog, $(filter-out coreutils, $(INSTALLEES)), \ cd $(INSTALLDIR_BIN) && ln -fs $(PROG_PREFIX)coreutils $(PROG_PREFIX)$(prog) $(newline) \ ) $(if $(findstring test,$(INSTALLEES)), cd $(INSTALLDIR_BIN) && ln -fs $(PROG_PREFIX)coreutils $(PROG_PREFIX)[) else $(foreach prog, $(INSTALLEES), \ $(INSTALL) $(BUILDDIR)/$(prog) $(INSTALLDIR_BIN)/$(PROG_PREFIX)$(prog) $(newline) \ ) $(if $(findstring test,$(INSTALLEES)), $(INSTALL) $(BUILDDIR)/test $(INSTALLDIR_BIN)/$(PROG_PREFIX)[) endif uninstall: ifneq ($(OS),Windows_NT) rm -f $(DESTDIR)$(LIBSTDBUF_DIR)/libstdbuf* -rmdir $(DESTDIR)$(LIBSTDBUF_DIR) 2>/dev/null || true endif ifeq (${MULTICALL}, y) rm -f $(addprefix $(INSTALLDIR_BIN)/,$(PROG_PREFIX)coreutils) endif rm -f $(addprefix $(INSTALLDIR_BIN)/$(PROG_PREFIX),$(PROGS)) rm -f $(INSTALLDIR_BIN)/$(PROG_PREFIX)[ rm -f $(addprefix $(DESTDIR)$(DATAROOTDIR)/zsh/site-functions/_$(PROG_PREFIX),$(PROGS)) rm -f $(addprefix $(DESTDIR)$(DATAROOTDIR)/bash-completion/completions/$(PROG_PREFIX),$(PROGS)) rm -f $(addprefix $(DESTDIR)$(DATAROOTDIR)/fish/vendor_completions.d/$(PROG_PREFIX),$(addsuffix .fish,$(PROGS))) rm -f $(addprefix $(DESTDIR)$(DATAROOTDIR)/man/man1/$(PROG_PREFIX),$(addsuffix .1,$(PROGS))) .PHONY: all build build-coreutils build-pkgs test distclean clean busytest install uninstall coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/LICENSE000066400000000000000000000020401504311601400217450ustar00rootroot00000000000000Copyright (c) uutils developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission 
notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/Makefile000066400000000000000000000000671504311601400224070ustar00rootroot00000000000000UseGNU=gmake $* all: @$(UseGNU) .DEFAULT: @$(UseGNU) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/Makefile.toml000066400000000000000000000240301504311601400233550ustar00rootroot00000000000000# spell-checker:ignore (cargo-make) duckscript [config] min_version = "0.26.2" default_to_workspace = false init_task = "_init_task" [config.modify_core_tasks] namespace = "core" ### initialization ### * note: the task executed from 'init_task' ignores dependencies; workaround is to run a secondary task via 'run_task' [tasks._init_task] # dependencies are unavailable # * delegate (via 'run_task') to "real" initialization task ('_init') with full capabilities private = true run_task = "_init" [tasks._init] private = true dependencies = ["_init-vars"] [tasks._init-vars] private = true script_runner = "@duckscript" script = [''' # reset build/test flags set_env CARGO_MAKE_CARGO_BUILD_TEST_FLAGS "" # determine features env_features = get_env CARGO_FEATURES if is_empty "${env_features}" env_features = get_env FEATURES end_if if is_empty "${env_features}" if eq "${CARGO_MAKE_RUST_TARGET_OS}" "macos" features = set "unix" else if eq "${CARGO_MAKE_RUST_TARGET_OS}" "linux" features = set "unix" else if eq "${CARGO_MAKE_RUST_TARGET_OS}" "windows" features = set "windows" end_if end_if end_if end_if if is_empty 
"${features}" features = set "${env_features}" else if not is_empty "${env_features}" features = set "${features},${env_features}" end_if end_if # set build flags from features if not is_empty "${features}" set_env CARGO_MAKE_VAR_BUILD_TEST_FEATURES "${features}" set_env CARGO_MAKE_CARGO_BUILD_TEST_FLAGS "--features ${features}" end_if # determine show-utils helper script show_utils = set "util/show-utils.sh" if eq "${CARGO_MAKE_RUST_TARGET_OS}" "windows" show_utils = set "util/show-utils.BAT" end_if set_env CARGO_MAKE_VAR_SHOW_UTILS "${show_utils}" # rebuild CARGO_MAKE_TASK_ARGS for various targets args = set ${CARGO_MAKE_TASK_ARGS} # * rebuild for 'features' target args_features = replace ${args} ";" "," set_env CARGO_MAKE_TASK_BUILD_FEATURES_ARGS "${args_features}" # * rebuild for 'examples' target args_examples = replace ${args} ";" " --example " if is_empty "${args_examples}" args_examples = set "--examples" end_if set_env CARGO_MAKE_TASK_BUILD_EXAMPLES_ARGS "${args_examples}" # * rebuild for 'utils' target args_utils_list = split "${args}" ";" for arg in "${args_utils_list}" if not is_empty "${arg}" if not starts_with "${arg}" "uu_" arg = set "uu_${arg}" end_if args_utils = set "${args_utils} -p${arg}" end_if end args_utils = trim "${args_utils}" set_env CARGO_MAKE_TASK_BUILD_UTILS_ARGS "${args_utils}" '''] ### tasks [tasks.default] description = "## *DEFAULT* Build (debug-mode) and test project" category = "[project]" dependencies = ["action-build-debug", "test-terse"] ## [tasks.build] description = "## Build (release-mode) project" category = "[project]" dependencies = ["core::pre-build", "action-build-release", "core::post-build"] [tasks.build-debug] description = "## Build (debug-mode) project" category = "[project]" dependencies = ["action-build-debug"] [tasks.build-examples] description = "## Build (release-mode) project example(s); usage: `cargo make (build-examples | examples) [EXAMPLE]...`" category = "[project]" dependencies = ["core::pre-build", 
"action-build-examples", "core::post-build"] [tasks.build-features] description = "## Build (with features; release-mode) project; usage: `cargo make (build-features | features) FEATURE...`" category = "[project]" dependencies = ["core::pre-build", "action-build-features", "core::post-build"] [tasks.build-release] alias = "build" [tasks.debug] alias = "build-debug" [tasks.example] description = "hidden singular-form alias for 'examples'" category = "[project]" dependencies = ["examples"] [tasks.examples] alias = "build-examples" [tasks.features] alias = "build-features" [tasks.format] description = "## Format code files (with `cargo fmt`; includes tests)" category = "[project]" dependencies = ["action-format", "action-format-tests"] [tasks.help] description = "## Display help" category = "[project]" dependencies = ["action-display-help"] [tasks.install] description = "## Install project binary (to $HOME/.cargo/bin)" category = "[project]" command = "cargo" args = ["install", "--path", "."] [tasks.lint] description = "## Display lint report" category = "[project]" dependencies = ["action-clippy", "action-fmt_report"] [tasks.release] alias = "build" [tasks.test] description = "## Run project tests" category = "[project]" dependencies = ["core::pre-test", "core::test", "core::post-test"] [tasks.test-terse] description = "## Run project tests (with terse/summary output)" category = "[project]" dependencies = ["core::pre-test", "action-test_quiet", "core::post-test"] [tasks.test-util] description = "## Test (individual) utilities; usage: `cargo make (test-util | test-uutil) [UTIL_NAME...]`" category = "[project]" dependencies = ["action-test-utils"] [tasks.test-utils] description = "hidden plural-form alias for 'test-util'" category = "[project]" dependencies = ["test-util"] [tasks.test-uutil] description = "hidden alias for 'test-util'" category = "[project]" dependencies = ["test-util"] [tasks.test-uutils] description = "hidden alias for 'test-util'" category = 
"[project]" dependencies = ["test-util"] [tasks.uninstall] description = "## Remove project binary (from $HOME/.cargo/bin)" category = "[project]" command = "cargo" args = ["uninstall"] [tasks.util] description = "## Build (individual; release-mode) utilities; usage: `cargo make (util | uutil) [UTIL_NAME...]`" category = "[project]" dependencies = [ "core::pre-build", "action-determine-utils", "action-build-utils", "core::post-build", ] [tasks.utils] description = "hidden plural-form alias for 'util'" category = "[project]" dependencies = ["util"] [tasks.uutil] description = "hidden alias for 'util'" category = "[project]" dependencies = ["util"] [tasks.uutils] description = "hidden plural-form alias for 'util'" category = "[project]" dependencies = ["util"] ### actions [tasks.action-build-release] description = "`cargo build --release`" command = "cargo" args = ["build", "--release", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )"] [tasks.action-build-debug] description = "`cargo build`" command = "cargo" args = ["build", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )"] [tasks.action-build-examples] description = "`cargo build (--examples|(--example EXAMPLE)...)`" command = "cargo" args = [ "build", "--release", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )", "${CARGO_MAKE_TASK_BUILD_EXAMPLES_ARGS}", ] [tasks.action-build-features] description = "`cargo build --release --features FEATURES`" command = "cargo" args = [ "build", "--release", "--no-default-features", "--features", "${CARGO_MAKE_TASK_BUILD_FEATURES_ARGS}", ] [tasks.action-build-utils] description = "Build individual utilities" dependencies = ["action-determine-utils"] command = "cargo" # args = ["build", "@@remove-empty(CARGO_MAKE_TASK_BUILD_UTILS_ARGS)" ] args = ["build", "--release", "@@split(CARGO_MAKE_TASK_BUILD_UTILS_ARGS, )"] [tasks.action-clippy] description = "`cargo clippy` lint report" command = "cargo" args = ["clippy", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )"] 
[tasks.action-determine-utils] script_runner = "@duckscript" script = [''' package_options = get_env CARGO_MAKE_TASK_BUILD_UTILS_ARGS if is_empty "${package_options}" show_utils = get_env CARGO_MAKE_VAR_SHOW_UTILS features = get_env CARGO_MAKE_VAR_BUILD_TEST_FEATURES if not is_empty "${features}" result = exec "${show_utils}" --features "${features}" else result = exec "${show_utils}" endif set_env CARGO_MAKE_VAR_UTILS ${result.stdout} utils = array %{result.stdout} for util in ${utils} if not is_empty "${util}" if not starts_with "${util}" "uu_" util = set "uu_${util}" end_if package_options = set "${package_options} -p${util}" end_if end package_options = trim "${package_options}" end_if set_env CARGO_MAKE_TASK_BUILD_UTILS_ARGS "${package_options}" '''] [tasks.action-determine-tests] script_runner = "@duckscript" script = [''' test_files = glob_array tests/**/*.rs for file in ${test_files} file = replace "${file}" "\\" "/" if not is_empty ${file} if is_empty "${tests}" tests = set "${file}" else tests = set "${tests} ${file}" end_if end_if end set_env CARGO_MAKE_VAR_TESTS "${tests}" '''] [tasks.action-format] description = "`cargo fmt`" command = "cargo" args = ["fmt"] [tasks.action-format-tests] description = "`cargo fmt` tests" dependencies = ["action-determine-tests"] command = "cargo" args = ["fmt", "--", "@@split(CARGO_MAKE_VAR_TESTS, )"] [tasks.action-fmt] alias = "action-format" [tasks.action-fmt_report] description = "`cargo fmt` lint report" command = "cargo" args = ["fmt", "--", "--check"] [tasks.action-spellcheck-codespell] description = "`codespell` spellcheck repository" command = "codespell" # (from `pip install codespell`) args = [ ".", "--skip=*/.git,./target,./tests/fixtures", "--ignore-words-list=mut,od", ] [tasks.action-test-utils] description = "Build individual utilities" dependencies = ["action-determine-utils"] command = "cargo" # args = ["build", "@@remove-empty(CARGO_MAKE_TASK_BUILD_UTILS_ARGS)" ] args = ["test", 
"@@split(CARGO_MAKE_TASK_BUILD_UTILS_ARGS, )"] [tasks.action-test_quiet] description = "Test (in `--quiet` mode)" command = "cargo" args = ["test", "--quiet", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )"] [tasks.action-display-help] script_runner = "@duckscript" script = [''' echo "" echo "usage: `cargo make TARGET [ARGS...]`" echo "" echo "TARGETs:" echo "" result = exec "cargo" make --list-all-steps # set_env CARGO_MAKE_VAR_UTILS ${result.stdout} # echo ${result.stdout} lines = split ${result.stdout} "\n" # echo ${lines} for line in ${lines} if not is_empty ${line} if contains ${line} " - ##" line_segments = split ${line} " - ##" desc = array_pop ${line_segments} desc = trim ${desc} target = array_pop ${line_segments} target = trim ${target} l = length ${target} r = range 0 18 spacing = set "" for i in ${r} if greater_than ${i} ${l} spacing = set "${spacing} " end_if end echo ${target}${spacing}${desc} end_if end_if end echo "" '''] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/README.md000066400000000000000000000176211504311601400222320ustar00rootroot00000000000000
![uutils logo](docs/src/logo.svg) # uutils coreutils [![Crates.io](https://img.shields.io/crates/v/coreutils.svg)](https://crates.io/crates/coreutils) [![Discord](https://img.shields.io/badge/discord-join-7289DA.svg?logo=discord&longCache=true&style=flat)](https://discord.gg/wQVJbvJ) [![License](http://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/uutils/coreutils/blob/main/LICENSE) [![dependency status](https://deps.rs/repo/github/uutils/coreutils/status.svg)](https://deps.rs/repo/github/uutils/coreutils) [![CodeCov](https://codecov.io/gh/uutils/coreutils/branch/master/graph/badge.svg)](https://codecov.io/gh/uutils/coreutils) ![MSRV](https://img.shields.io/badge/MSRV-1.85.0-brightgreen)
---
uutils coreutils is a cross-platform reimplementation of the GNU coreutils in [Rust](http://www.rust-lang.org). While all programs have been implemented, some options might be missing or different behavior might be experienced.
To install it: ```shell cargo install coreutils ~/.cargo/bin/coreutils ```
## Goals uutils aims to be a drop-in replacement for the GNU utils. Differences with GNU are treated as bugs. uutils aims to work on as many platforms as possible, to be able to use the same utils on Linux, macOS, Windows and other platforms. This ensures, for example, that scripts can be easily transferred between platforms.
## Documentation uutils has both user and developer documentation available: - [User Manual](https://uutils.github.io/coreutils/docs/) - [Developer Documentation](https://docs.rs/crate/coreutils/) Both can also be generated locally, the instructions for that can be found in the [coreutils docs](https://github.com/uutils/uutils.github.io) repository. ## Requirements - Rust (`cargo`, `rustc`) - GNU Make (optional) ### Rust Version uutils follows Rust's release channels and is tested against stable, beta and nightly. The current Minimum Supported Rust Version (MSRV) is `1.85.0`. ## Building There are currently two methods to build the uutils binaries: either Cargo or GNU Make. > Building the full package, including all documentation, requires both Cargo > and GNU Make on a Unix platform. For either method, we first need to fetch the repository: ```shell git clone https://github.com/uutils/coreutils cd coreutils ``` ### Cargo Building uutils using Cargo is easy because the process is the same as for every other Rust program: ```shell cargo build --release ``` This command builds the most portable common core set of uutils into a multicall (BusyBox-type) binary, named 'coreutils', on most Rust-supported platforms. Additional platform-specific uutils are often available. Building these expanded sets of uutils for a platform (on that platform) is as simple as specifying it as a feature: ```shell cargo build --release --features macos # or ... cargo build --release --features windows # or ... cargo build --release --features unix ``` If you don't want to build every utility available on your platform into the final binary, you can also specify which ones you want to build manually. For example: ```shell cargo build --features "base32 cat echo rm" --no-default-features ``` If you don't want to build the multicall binary and would prefer to build the utilities as individual binaries, that is also possible. 
Each utility is contained in its own package within the main repository, named "uu_UTILNAME". To build individual utilities, use cargo to build just the specific packages (using the `--package` [aka `-p`] option). For example: ```shell cargo build -p uu_base32 -p uu_cat -p uu_echo -p uu_rm ``` ### GNU Make Building using `make` is a simple process as well. To simply build all available utilities: ```shell make ``` In release mode: ```shell make PROFILE=release ``` To build all but a few of the available utilities: ```shell make SKIP_UTILS='UTILITY_1 UTILITY_2' ``` To build only a few of the available utilities: ```shell make UTILS='UTILITY_1 UTILITY_2' ``` ## Installation ### Install with Cargo Likewise, installing can simply be done using: ```shell cargo install --path . --locked ``` This command will install uutils into Cargo's _bin_ folder (_e.g._ `$HOME/.cargo/bin`). This does not install files necessary for shell completion or manpages. For manpages or shell completion to work, use `GNU Make` or see `Manually install shell completions`/`Manually install manpages`. ### Install with GNU Make To install all available utilities: ```shell make install ``` To install using `sudo` switch `-E` must be used: ```shell sudo -E make install ``` To install all but a few of the available utilities: ```shell make SKIP_UTILS='UTILITY_1 UTILITY_2' install ``` To install only a few of the available utilities: ```shell make UTILS='UTILITY_1 UTILITY_2' install ``` To install every program with a prefix (e.g. uu-echo uu-cat): ```shell make PROG_PREFIX=PREFIX_GOES_HERE install ``` To install the multicall binary: ```shell make MULTICALL=y install ``` Set install parent directory (default value is /usr/local): ```shell # DESTDIR is also supported make PREFIX=/my/path install ``` Installing with `make` installs shell completions for all installed utilities for `bash`, `fish` and `zsh`. 
Completions for `elvish` and `powershell` can also be generated; See `Manually install shell completions`. To skip installation of completions and manpages: ```shell make COMPLETIONS=n MANPAGES=n install ``` ### Manually install shell completions The `coreutils` binary can generate completions for the `bash`, `elvish`, `fish`, `powershell` and `zsh` shells. It prints the result to stdout. The syntax is: ```shell cargo run completion ``` So, to install completions for `ls` on `bash` to `/usr/local/share/bash-completion/completions/ls`, run: ```shell cargo run completion ls bash > /usr/local/share/bash-completion/completions/ls ``` ### Manually install manpages To generate manpages, the syntax is: ```bash cargo run manpage ``` So, to install the manpage for `ls` to `/usr/local/share/man/man1/ls.1` run: ```bash cargo run manpage ls > /usr/local/share/man/man1/ls.1 ``` ## Un-installation Un-installation differs depending on how you have installed uutils. If you used Cargo to install, use Cargo to uninstall. If you used GNU Make to install, use Make to uninstall. ### Uninstall with Cargo To uninstall uutils: ```shell cargo uninstall coreutils ``` ### Uninstall with GNU Make To uninstall all utilities: ```shell make uninstall ``` To uninstall every program with a set prefix: ```shell make PROG_PREFIX=PREFIX_GOES_HERE uninstall ``` To uninstall the multicall binary: ```shell make MULTICALL=y uninstall ``` To uninstall from a custom parent directory: ```shell # DESTDIR is also supported make PREFIX=/my/path uninstall ``` ## GNU test suite compatibility Below is the evolution of how many GNU tests uutils passes. A more detailed breakdown of the GNU test results of the main branch can be found [in the user manual](https://uutils.github.io/coreutils/docs/test_coverage.html). See for the main meta bugs (many are missing). ![Evolution over time](https://github.com/uutils/coreutils-tracking/blob/main/gnu-results.svg?raw=true)
## Contributing To contribute to uutils, please see [CONTRIBUTING](CONTRIBUTING.md). ## License uutils is licensed under the MIT License - see the `LICENSE` file for details GNU Coreutils is licensed under the GPL 3.0 or later. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/README.package.md000066400000000000000000000023061504311601400236160ustar00rootroot00000000000000
![uutils logo](docs/src/logo.svg) # uutils coreutils [![Crates.io](https://img.shields.io/crates/v/coreutils.svg)](https://crates.io/crates/coreutils) [![Discord](https://img.shields.io/badge/discord-join-7289DA.svg?logo=discord&longCache=true&style=flat)](https://discord.gg/wQVJbvJ) [![License](http://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/uutils/coreutils/blob/main/LICENSE) [![dependency status](https://deps.rs/repo/github/uutils/coreutils/status.svg)](https://deps.rs/repo/github/uutils/coreutils) [![CodeCov](https://codecov.io/gh/uutils/coreutils/branch/master/graph/badge.svg)](https://codecov.io/gh/uutils/coreutils) ![MSRV](https://img.shields.io/badge/MSRV-1.70.0-brightgreen)
---
This package is part of uutils coreutils. uutils coreutils is a cross-platform reimplementation of the GNU coreutils in [Rust](http://www.rust-lang.org). This package does not have its specific `README.md`. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/build.rs000066400000000000000000000110761504311601400224160ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (vars) krate use std::env; use std::fs::File; use std::io::Write; use std::path::Path; pub fn main() { const ENV_FEATURE_PREFIX: &str = "CARGO_FEATURE_"; const FEATURE_PREFIX: &str = "feat_"; const OVERRIDE_PREFIX: &str = "uu_"; // Do not rebuild build script unless the script itself or the enabled features are modified // See println!("cargo:rerun-if-changed=build.rs"); if let Ok(profile) = env::var("PROFILE") { println!("cargo:rustc-cfg=build={profile:?}"); } let out_dir = env::var("OUT_DIR").unwrap(); let mut crates = Vec::new(); for (key, val) in env::vars() { if val == "1" && key.starts_with(ENV_FEATURE_PREFIX) { let krate = key[ENV_FEATURE_PREFIX.len()..].to_lowercase(); // Allow this as we have a bunch of info in the comments #[allow(clippy::match_same_arms)] match krate.as_ref() { "default" | "macos" | "unix" | "windows" | "selinux" | "zip" => continue, // common/standard feature names "nightly" | "test_unimplemented" | "expensive_tests" | "test_risky_names" => { continue; } // crate-local custom features "uudoc" => continue, // is not a utility "test" => continue, // over-ridden with 'uu_test' to avoid collision with rust core crate 'test' s if s.starts_with(FEATURE_PREFIX) => continue, // crate feature sets _ => {} // util feature name } crates.push(krate); } } crates.sort(); let mut mf = File::create(Path::new(&out_dir).join("uutils_map.rs")).unwrap(); mf.write_all( "type UtilityMap = phf::OrderedMap<&'static 
str, (fn(T) -> i32, fn() -> Command)>;\n\ \n\ #[allow(clippy::too_many_lines)] #[allow(clippy::unreadable_literal)] fn util_map() -> UtilityMap {\n" .as_bytes(), ) .unwrap(); let mut phf_map = phf_codegen::OrderedMap::<&str>::new(); for krate in &crates { let map_value = format!("({krate}::uumain, {krate}::uu_app)"); match krate.as_ref() { // 'test' is named uu_test to avoid collision with rust core crate 'test'. // It can also be invoked by name '[' for the '[ expr ] syntax'. "uu_test" => { phf_map.entry("test", map_value.clone()); phf_map.entry("[", map_value.clone()); } k if k.starts_with(OVERRIDE_PREFIX) => { phf_map.entry(&k[OVERRIDE_PREFIX.len()..], map_value.clone()); } "false" | "true" => { phf_map.entry(krate, format!("(r#{krate}::uumain, r#{krate}::uu_app)")); } "hashsum" => { phf_map.entry(krate, format!("({krate}::uumain, {krate}::uu_app_custom)")); let map_value = format!("({krate}::uumain, {krate}::uu_app_common)"); let map_value_bits = format!("({krate}::uumain, {krate}::uu_app_bits)"); let map_value_b3sum = format!("({krate}::uumain, {krate}::uu_app_b3sum)"); phf_map.entry("md5sum", map_value.clone()); phf_map.entry("sha1sum", map_value.clone()); phf_map.entry("sha224sum", map_value.clone()); phf_map.entry("sha256sum", map_value.clone()); phf_map.entry("sha384sum", map_value.clone()); phf_map.entry("sha512sum", map_value.clone()); phf_map.entry("sha3sum", map_value_bits.clone()); phf_map.entry("sha3-224sum", map_value.clone()); phf_map.entry("sha3-256sum", map_value.clone()); phf_map.entry("sha3-384sum", map_value.clone()); phf_map.entry("sha3-512sum", map_value.clone()); phf_map.entry("shake128sum", map_value_bits.clone()); phf_map.entry("shake256sum", map_value_bits.clone()); phf_map.entry("b2sum", map_value.clone()); phf_map.entry("b3sum", map_value_b3sum); } _ => { phf_map.entry(krate, map_value.clone()); } } } write!(mf, "{}", phf_map.build()).unwrap(); mf.write_all(b"\n}\n").unwrap(); mf.flush().unwrap(); } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/deny.toml000066400000000000000000000107231504311601400226030ustar00rootroot00000000000000# spell-checker:ignore SSLeay RUSTSEC # This section is considered when running `cargo deny check advisories` # More documentation for the advisories section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] db-path = "~/.cargo/advisory-db" db-urls = ["https://github.com/rustsec/advisory-db"] version = 2 yanked = "warn" ignore = [ #"RUSTSEC-0000-0000", ] # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] version = 2 allow = [ "MIT", "Apache-2.0", "ISC", "BSD-2-Clause", "BSD-3-Clause", "BSL-1.0", "CC0-1.0", "Unicode-3.0", "Zlib", ] confidence-threshold = 0.8 [[licenses.clarify]] name = "ring" # SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses # https://spdx.org/licenses/OpenSSL.html # ISC - Both BoringSSL and ring use this for their new files # MIT - "Files in third_party/ have their own licenses, as described therein. The MIT # license, for third_party/fiat, which, unlike other third_party directories, is # compiled into non-test libraries, is included below." # OpenSSL - Obviously expression = "ISC AND MIT AND OpenSSL" license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] # This section is considered when running `cargo deny check bans`. # More documentation about the 'bans' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html [bans] multiple-versions = "deny" wildcards = "allow" highlight = "all" # For each duplicate dependency, indicate the name of the dependency which # introduces it. 
# spell-checker: disable skip = [ # dns-lookup { name = "windows-sys", version = "0.48.0" }, # mio, nu-ansi-term, socket2 { name = "windows-sys", version = "0.52.0" }, # anstyle-query { name = "windows-sys", version = "0.59.0" }, # windows-sys { name = "windows-targets", version = "0.48.5" }, # parking_lot_core { name = "windows-targets", version = "0.52.6" }, # windows-targets { name = "windows_aarch64_gnullvm", version = "0.48.5" }, # windows-targets { name = "windows_aarch64_gnullvm", version = "0.52.6" }, # windows-targets { name = "windows_aarch64_msvc", version = "0.48.5" }, # windows-targets { name = "windows_aarch64_msvc", version = "0.52.6" }, # windows-targets { name = "windows_i686_gnu", version = "0.48.5" }, # windows-targets { name = "windows_i686_gnu", version = "0.52.6" }, # windows-targets { name = "windows_i686_gnullvm", version = "0.52.6" }, # windows-targets { name = "windows_i686_msvc", version = "0.48.5" }, # windows-targets { name = "windows_i686_msvc", version = "0.52.6" }, # windows-targets { name = "windows_x86_64_gnu", version = "0.48.5" }, # windows-targets { name = "windows_x86_64_gnu", version = "0.52.6" }, # windows-targets { name = "windows_x86_64_gnullvm", version = "0.48.5" }, # windows-targets { name = "windows_x86_64_gnullvm", version = "0.52.6" }, # windows-targets { name = "windows_x86_64_msvc", version = "0.48.5" }, # windows-targets { name = "windows_x86_64_msvc", version = "0.52.6" }, # kqueue-sys, onig { name = "bitflags", version = "1.3.2" }, # ansi-width { name = "unicode-width", version = "0.1.13" }, # filedescriptor, utmp-classic { name = "thiserror", version = "1.0.69" }, # thiserror { name = "thiserror-impl", version = "1.0.69" }, # bindgen { name = "itertools", version = "0.13.0" }, # ordered-multimap { name = "hashbrown", version = "0.14.5" }, # cexpr (via bindgen) { name = "nom", version = "7.1.3" }, # const-random-macro, rand_core { name = "getrandom", version = "0.2.15" }, # getrandom, mio { name = "wasi", version 
= "0.11.0+wasi-snapshot-preview1" }, # num-bigint, num-prime, phf_generator { name = "rand", version = "0.8.5" }, # rand { name = "rand_chacha", version = "0.3.1" }, # rand { name = "rand_core", version = "0.6.4" }, # utmp-classic { name = "zerocopy", version = "0.7.35" }, # rustix { name = "linux-raw-sys", version = "0.9.4" }, ] # spell-checker: enable # This section is considered when running `cargo deny check sources`. # More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html [sources] unknown-registry = "warn" unknown-git = "warn" allow-registry = ["https://github.com/rust-lang/crates.io-index"] allow-git = [] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/000077500000000000000000000000001504311601400216745ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/.gitignore000066400000000000000000000000741504311601400236650ustar00rootroot00000000000000book src/utils src/SUMMARY.md src/platform_table.md tldr.zipcoreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/Makefile000066400000000000000000000000751504311601400233360ustar00rootroot00000000000000clean: rm -rf book rm -f src/SUMMARY.md rm -f src/utils/* coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/book.toml000066400000000000000000000004251504311601400235240ustar00rootroot00000000000000[book] authors = ["uutils contributors"] language = "en" multilingual = false src = "src" title = "uutils Documentation" [output.html] git-repository-url = "https://github.com/uutils/coreutils/tree/main/docs/src" [preprocessor.toc] command = "mdbook-toc" renderer = ["html"] 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/compiles_table.csv000066400000000000000000000146661504311601400254100ustar00rootroot00000000000000target,arch,base32,base64,basename,cat,chgrp,chmod,chown,chroot,cksum,comm,cp,csplit,cut,date,df,dircolors,dirname,du,echo,env,expand,expr,factor,false,fmt,fold,groups,hashsum,head,hostid,hostname,id,install,join,kill,link,ln,logname,ls,mkdir,mkfifo,mknod,mktemp,more,mv,nice,nl,nohup,nproc,numfmt,od,paste,pathchk,pinky,printenv,printf,ptx,pwd,readlink,realpath,rm,rmdir,seq,shred,shuf,sleep,sort,split,stat,stdbuf,sum,sync,tac,tail,tee,test,timeout,touch,tr,true,truncate,tsort,tty,uname,unexpand,uniq,unlink,uptime,users,wc,who,whoami,yes,chcon,pr,dir,vdir,dd,basenc,runcon aarch64-unknown-linux-gnu,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 i686-unknown-linux-gnu,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 powerpc64-unknown-linux-gnu,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 riscv64gc-unknown-linux-gnu,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 
x86_64-unknown-linux-gnu,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 aarch64-pc-windows-msvc,0,0,0,0,0,101,101,101,101,0,0,0,0,0,0,0,0,0,101,0,0,0,101,0,0,0,0,101,0,0,0,0,101,101,0,101,0,0,0,101,0,101,101,0,0,0,101,0,101,0,0,0,0,101,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,101,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,101,0,101,0,101,101,0,0,0,0,0,0,0,0 i686-pc-windows-gnu,0,0,0,0,0,101,101,101,101,0,0,0,0,0,0,0,0,0,101,0,0,0,101,0,0,0,0,101,0,0,0,0,101,101,0,101,0,0,0,0,0,101,101,0,0,0,101,0,101,0,0,0,0,101,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,101,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,101,0,101,0,101,0,0,0,0,0,0,0,0,0 i686-pc-windows-msvc,0,0,0,0,0,101,101,101,101,0,0,0,0,0,0,0,0,0,101,0,0,0,101,0,0,0,0,101,0,0,0,0,101,101,0,101,0,0,0,0,0,101,101,0,0,0,101,0,101,0,0,0,0,101,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,101,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,101,0,101,0,101,0,0,0,0,0,0,0,0,0 x86_64-pc-windows-gnu,0,0,0,0,0,101,101,101,101,0,0,0,0,0,0,0,0,0,101,0,0,0,101,0,0,0,0,101,0,0,0,0,101,101,0,101,0,0,0,0,0,101,101,0,0,0,101,0,101,0,0,0,0,101,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,101,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,101,0,101,0,101,0,0,0,0,0,0,0,0,0 x86_64-pc-windows-msvc,0,0,0,0,0,101,101,101,101,0,0,0,0,0,0,0,0,0,101,0,0,0,101,0,0,0,0,101,0,0,0,0,101,101,0,101,0,0,0,0,0,101,101,0,0,0,101,0,101,0,0,0,0,101,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,101,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,101,0,101,0,101,0,0,0,0,0,0,0,0,0 x86_64-apple-darwin,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 
x86_64-unknown-freebsd,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 x86_64-unknown-netbsd,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,101,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,101,101,0,101,0,0,0,0,0,0,0,0,0 aarch64-linux-android,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,101,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,101,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,101,101,0,101,0,0,0,0,0,0,0,0,0 x86_64-linux-android,0,0,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,101,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,0,0,0,101,0,0,0,0,0,0,0,0,0,0,0,0,0,0,101,0,0,101,0,0,0,0,101,0,0,0,0,0,0,101,0,0,0,101,101,0,101,0,0,0,0,0,0,0,0,0 x86_64-sun-solaris,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 wasm32-wasi,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 
x86_64-unknown-redox,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 aarch64-fuchsia,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 x86_64-fuchsia,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101,101 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/compiles_table.py000066400000000000000000000204341504311601400252330ustar00rootroot00000000000000#!/usr/bin/env python3 import multiprocessing import subprocess import argparse import csv import sys from collections import defaultdict from pathlib import Path # third party dependencies from tqdm import tqdm # spell-checker:ignore (libs) tqdm imap ; (shell/mac) xcrun ; (vars) nargs retcode csvfile BINS_PATH = Path("../src/uu") CACHE_PATH = Path("compiles_table.csv") TARGETS = [ # Linux - GNU "aarch64-unknown-linux-gnu", "i686-unknown-linux-gnu", "powerpc64-unknown-linux-gnu", "riscv64gc-unknown-linux-gnu", "x86_64-unknown-linux-gnu", # Windows "aarch64-pc-windows-msvc", 
"i686-pc-windows-gnu", "i686-pc-windows-msvc", "x86_64-pc-windows-gnu", "x86_64-pc-windows-msvc", # Apple "aarch64-apple-darwin", "x86_64-apple-darwin", "aarch64-apple-ios", "x86_64-apple-ios", # BSDs "x86_64-unknown-freebsd", "x86_64-unknown-netbsd", # Android "aarch64-linux-android", "x86_64-linux-android", # Solaris "x86_64-sun-solaris", # Illumos "x86_64-unknown-illumos", # WASM "wasm32-wasi", # Redox "x86_64-unknown-redox", # Fuchsia "aarch64-fuchsia", "x86_64-fuchsia", ] class Target(str): def __new__(cls, content): obj = super().__new__(cls, content) obj.arch, obj.platform, obj.os = Target.parse(content) return obj @staticmethod def parse(s): elem = s.split("-") if len(elem) == 2: arch, platform, os = elem[0], "n/a", elem[1] else: arch, platform, os = elem[0], elem[1], "-".join(elem[2:]) if os == "ios": os = "apple IOS" if os == "darwin": os = "apple MacOS" return (arch, platform, os) @staticmethod def get_heading(): return ["OS", "ARCH"] def get_row_heading(self): return [self.os, self.arch] def requires_nightly(self): return "redox" in self # Perform the 'it-compiles' check def check(self, binary): if self.requires_nightly(): args = [ "cargo", "+nightly", "check", "-p", f"uu_{binary}", "--bin", binary, f"--target={self}", ] else: args = [ "cargo", "check", "-p", f"uu_{binary}", "--bin", binary, f"--target={self}", ] res = subprocess.run(args, capture_output=True) return res.returncode # Validate that the dependencies for running this target are met def is_installed(self): # check IOS sdk is installed, raise exception otherwise if "ios" in self: res = subprocess.run(["which", "xcrun"], capture_output=True) if len(res.stdout) == 0: raise Exception( "Error: IOS sdk does not seem to be installed. 
Please do that manually" ) if not self.requires_nightly(): # check std toolchains are installed toolchains = subprocess.run( ["rustup", "target", "list"], capture_output=True ) toolchains = toolchains.stdout.decode("utf-8").split("\n") if "installed" not in next(filter(lambda x: self in x, toolchains)): raise Exception( f"Error: the {self} target is not installed. Please do that manually" ) else: # check nightly toolchains are installed toolchains = subprocess.run( ["rustup", "+nightly", "target", "list"], capture_output=True ) toolchains = toolchains.stdout.decode("utf-8").split("\n") if "installed" not in next(filter(lambda x: self in x, toolchains)): raise Exception( f"Error: the {self} nightly target is not installed. Please do that manually" ) return True def install_targets(): cmd = ["rustup", "target", "add"] + TARGETS print(" ".join(cmd)) ret = subprocess.run(cmd) assert ret.returncode == 0 def get_all_bins(): bins = map(lambda x: x.name, BINS_PATH.iterdir()) return sorted(list(bins)) def get_targets(selection): if "all" in selection: return list(map(Target, TARGETS)) else: # preserve the same order as in TARGETS return list(map(Target, filter(lambda x: x in selection, TARGETS))) def test_helper(tup): bin, target = tup retcode = target.check(bin) return (target, bin, retcode) def test_all_targets(targets, bins): pool = multiprocessing.Pool() inputs = [(b, t) for b in bins for t in targets] outputs = list(tqdm(pool.imap(test_helper, inputs), total=len(inputs))) table = defaultdict(dict) for (t, b, r) in outputs: table[t][b] = r return table def save_csv(file, table): targets = get_targets(table.keys()) # preserve order in CSV bins = list(list(table.values())[0].keys()) with open(file, "w") as csvfile: header = ["target"] + bins writer = csv.DictWriter(csvfile, fieldnames=header) writer.writeheader() for t in targets: d = {"target": t} d.update(table[t]) writer.writerow(d) def load_csv(file): table = {} cols = [] rows = [] with open(file, "r") as csvfile: 
reader = csv.DictReader(csvfile) cols = list(filter(lambda x: x != "target", reader.fieldnames)) for row in reader: t = Target(row["target"]) rows += [t] del row["target"] table[t] = dict([k, int(v)] for k, v in row.items()) return (table, rows, cols) def merge_tables(old, new): from copy import deepcopy tmp = deepcopy(old) tmp.update(deepcopy(new)) return tmp def render_md(fd, table, headings: str, row_headings: Target): def print_row(lst, lens=[]): lens = lens + [0] * (len(lst) - len(lens)) for e, lmd in zip(lst, lens): fmt = "|{}" if lmd == 0 else "|{:>%s}" % len(header[0]) fd.write(fmt.format(e)) fd.write("|\n") def cell_render(target, bin): return "y" if table[target][bin] == 0 else " " # add some 'hard' padding to specific columns lens = [ max(map(lambda x: len(x.os), row_headings)) + 2, max(map(lambda x: len(x.arch), row_headings)) + 2, ] header = Target.get_heading() header[0] = ("{:#^%d}" % lens[0]).format(header[0]) header[1] = ("{:#^%d}" % lens[1]).format(header[1]) header += headings print_row(header) lines = list(map(lambda x: "-" * len(x), header)) print_row(lines) for t in row_headings: row = list(map(lambda b: cell_render(t, b), headings)) row = t.get_row_heading() + row print_row(row) if __name__ == "__main__": # create the top-level parser parser = argparse.ArgumentParser(prog="compiles_table.py") subparsers = parser.add_subparsers( help="sub-command to execute", required=True, dest="cmd" ) # create the parser for the "check" command parser_a = subparsers.add_parser( "check", help="run cargo check on specified targets and update csv cache" ) parser_a.add_argument( "targets", metavar="TARGET", type=str, nargs="+", choices=["all"] + TARGETS, help="target-triple to check, as shown by 'rustup target list'", ) # create the parser for the "render" command parser_b = subparsers.add_parser("render", help="print a markdown table to stdout") parser_b.add_argument( "--equidistant", action="store_true", help="NOT IMPLEMENTED: render each column with an equal 
width (in plaintext)", ) args = parser.parse_args() if args.cmd == "render": table, targets, bins = load_csv(CACHE_PATH) render_md(sys.stdout, table, bins, targets) if args.cmd == "check": targets = get_targets(args.targets) bins = get_all_bins() assert all(map(Target.is_installed, targets)) table = test_all_targets(targets, bins) prev_table, _, _ = load_csv(CACHE_PATH) new_table = merge_tables(prev_table, table) save_csv(CACHE_PATH, new_table) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/000077500000000000000000000000001504311601400224635ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/CODE_OF_CONDUCT.md000066400000000000000000000001151504311601400252570ustar00rootroot00000000000000 {{ #include ../../CODE_OF_CONDUCT.md }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/CONTRIBUTING.md000066400000000000000000000001121504311601400247060ustar00rootroot00000000000000 {{ #include ../../CONTRIBUTING.md }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/DEVELOPMENT.md000066400000000000000000000001111504311601400245600ustar00rootroot00000000000000 {{ #include ../../DEVELOPMENT.md }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/build.md000066400000000000000000000000711504311601400241020ustar00rootroot00000000000000# Build from source {{#include ../../README.md:build }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/extensions.md000066400000000000000000000202041504311601400252020ustar00rootroot00000000000000 # Extensions over GNU Though the main goal of the project is compatibility, uutils supports a few features that are not supported by GNU coreutils. We take care not to introduce features that are incompatible with the GNU coreutils. Below is a list of uutils extensions. 
## General GNU coreutils provides two ways to define short options taking an argument: ``` $ ls -w 80 $ ls -w80 ``` We support a third way: ``` $ ls -w=80 ``` ## `env` GNU `env` allows the empty string to be used as an environment variable name. This is unsupported by uutils, and it will show a warning on any such assignment. `env` has an additional `-f`/`--file` flag that can parse `.env` files and set variables accordingly. This feature is adopted from `dotenv` style packages. ## `cp` `cp` can display a progress bar when the `-g`/`--progress` flag is set. ## `mv` `mv` can display a progress bar when the `-g`/`--progress` flag is set. ## `hashsum` This utility does not exist in GNU coreutils. `hashsum` is a utility that supports computing the checksums with several algorithms. The flags and options are identical to the `*sum` family of utils (`sha1sum`, `sha256sum`, `b2sum`, etc.). ## `b3sum` This utility does not exist in GNU coreutils. The behavior is modeled after both the `b2sum` utility of GNU and the [`b3sum`](https://github.com/BLAKE3-team/BLAKE3) utility by the BLAKE3 team and supports the `--no-names` option that does not appear in the GNU util. ## `more` We provide a simple implementation of `more`, which is not part of GNU coreutils. We do not aim for full compatibility with the `more` utility from `util-linux`. Features from more modern pagers (like `less` and `bat`) are therefore welcomed. ## `cut` `cut` can separate fields by whitespace (Space and Tab) with `-w` flag. This feature is adopted from [FreeBSD](https://www.freebsd.org/cgi/man.cgi?cut). ## `fmt` `fmt` has additional flags for prefixes: `-P`/`--skip-prefix`, `-x`/`--exact-prefix`, and `-X`/`--exact-skip-prefix`. With `-m`/`--preserve-headers`, an attempt is made to detect and preserve mail headers in the input. `-q`/`--quick` breaks lines more quickly. And `-T`/`--tab-width` defines the number of spaces representing a tab when determining the line length. 
## `printf` `printf` uses arbitrary precision decimal numbers to parse and format floating point numbers. GNU coreutils uses `long double`, whose actual size may be [double precision 64-bit float](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) (e.g 32-bit arm), [extended precision 80-bit float](https://en.wikipedia.org/wiki/Extended_precision) (x86(-64)), or [quadruple precision 128-bit float](https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format) (e.g. arm64). Practically, this means that printing a number with a large precision will stay exact: ``` printf "%.48f\n" 0.1 0.100000000000000000000000000000000000000000000000 << uutils on all platforms 0.100000000000000000001355252715606880542509316001 << GNU coreutils on x86(-64) 0.100000000000000000000000000000000004814824860968 << GNU coreutils on arm64 0.100000000000000005551115123125782702118158340454 << GNU coreutils on armv7 (32-bit) ``` ### Hexadecimal floats For hexadecimal float format (`%a`), POSIX only states that one hexadecimal number should be present left of the decimal point (`0xh.hhhhp±d` [1]), but does not say how many _bits_ should be included (between 1 and 4). On x86(-64), the first digit always includes 4 bits, so its value is always between `0x8` and `0xf`, while on other architectures, only 1 bit is included, so the value is always `0x1`. However, the first digit will of course be `0x0` if the number is zero. Also, rounding numbers may cause the first digit to be `0x1` on x86(-64) (e.g. `0xf.fffffffp-5` rounds to `0x1.00p-1`), or `0x2` on other architectures. We chose to replicate x86-64 behavior on all platforms. Additionally, the default precision of the hexadecimal float format (`%a` without any specifier) is expected to be "sufficient for exact representation of the value" [1]. 
This is not possible in uutils as we store arbitrary precision numbers that may be periodic in hexadecimal form (`0.1 = 0xc.ccc...p-7`), so we revert to the number of digits that would be required to exactly print an [extended precision 80-bit float](https://en.wikipedia.org/wiki/Extended_precision), emulating GNU coreutils behavior on x86(-64). An 80-bit float has 64 bits in its integer and fractional part, so 16 hexadecimal digits are printed in total (1 digit before the decimal point, 15 after). Practically, this means that the default hexadecimal floating point output is identical to x86(-64) GNU coreutils: ``` printf "%a\n" 0.1 0xc.ccccccccccccccdp-7 << uutils on all platforms 0xc.ccccccccccccccdp-7 << GNU coreutils on x86-64 0x1.999999999999999999999999999ap-4 << GNU coreutils on arm64 0x1.999999999999ap-4 << GNU coreutils on armv7 (32-bit) ``` We _can_ print an arbitrary number of digits if a larger precision is requested, and the leading digit will still be in the `0x8`-`0xf` range: ``` printf "%.32a\n" 0.1 0xc.cccccccccccccccccccccccccccccccdp-7 << uutils on all platforms 0xc.ccccccccccccccd00000000000000000p-7 << GNU coreutils on x86-64 0x1.999999999999999999999999999a0000p-4 << GNU coreutils on arm64 0x1.999999999999a0000000000000000000p-4 << GNU coreutils on armv7 (32-bit) ``` ***Note: The architecture-specific behavior on non-x86(-64) platforms may change in the future.*** ## `seq` Unlike GNU coreutils, `seq` always uses arbitrary precision decimal numbers, no matter the parameters (integers, decimal numbers, positive or negative increments, format specified, etc.), so its output will be more correct than GNU coreutils for some inputs (e.g. small fractional increments where GNU coreutils uses `long double`). The only limitation is that the position of the decimal point is stored in a `i64`, so values smaller than 10**(-2**63) will underflow to 0, and some values larger than 10**(2**63) may overflow to infinity. 
See also comments under `printf` for formatting precision and differences. `seq` provides `-t`/`--terminator` to set the terminator character. ## `sort` When sorting with `-g`/`--general-numeric-sort`, arbitrary precision decimal numbers are parsed and compared, unlike GNU coreutils that uses platform-specific long double floating point numbers. Extremely large or small values can still overflow or underflow to infinity or zero, see note in `seq`. ## `ls` GNU `ls` provides two ways to use a long listing format: `-l` and `--format=long`. We support a third way: `--long`. GNU `ls --sort=VALUE` only supports special non-default sort orders. We support `--sort=name`, which makes it possible to override an earlier value. ## `du` `du` allows `birth` and `creation` as values for the `--time` argument to show the creation time. It also provides a `-v`/`--verbose` flag. ## `id` `id` has three additional flags: * `-P` displays the id as a password file entry * `-p` makes the output human-readable * `-A` displays the process audit user ID ## `uptime` Similar to the proc-ps implementation and unlike GNU/Coreutils, `uptime` provides `-s`/`--since` to show since when the system is up. ## `base32/base64/basenc` Just like on macOS, `base32/base64/basenc` provides `-D` to decode data. ## `shred` The number of random passes is deterministic in both GNU and uutils. However, uutils `shred` computes the number of random passes in a simplified way, specifically `max(3, x / 10)`, which is very close but not identical to the number of random passes that GNU would do. This also satisfies an expectation that reasonable users might have, namely that the number of random passes increases monotonically with the number of passes overall; GNU `shred` violates this assumption. ## `unexpand` GNU `unexpand` provides `--first-only` to convert only leading sequences of blanks. We support a second way: `-f` like busybox. 
Using `-U`/`--no-utf8`, you can interpret input files as 8-bit ASCII rather than UTF-8. ## `expand` `expand` also offers the `-U`/`--no-utf8` option to interpret input files as 8-bit ASCII instead of UTF-8. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/index.md000066400000000000000000000026311504311601400241160ustar00rootroot00000000000000 {{#include logo.svg}} # uutils Coreutils Documentation uutils is an attempt at writing universal (as in cross-platform) CLI utilities in [Rust](https://www.rust-lang.org). It is available for Linux, Windows, Mac and other platforms. The API reference for `uucore`, the library of functions shared between various utils, is hosted at [docs.rs](https://docs.rs/uucore/latest/uucore/). uutils is licensed under the [MIT License](https://github.com/uutils/coreutils/blob/main/LICENSE). ## Useful links - [Releases](https://github.com/uutils/coreutils/releases) - [Source Code](https://github.com/uutils/coreutils) - [Issues](https://github.com/uutils/coreutils/issues) - [Discord](https://discord.gg/wQVJbvJ) > Note: This manual is automatically generated from the source code and is a > work in progress. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/installation.md000066400000000000000000000130201504311601400255020ustar00rootroot00000000000000 # Installation This is a list of uutils packages in various distributions and package managers. Note that these are packaged by third-parties and the packages might contain patches. You can also [build uutils from source](build.md). 
## Cargo [![crates.io package](https://repology.org/badge/version-for-repo/crates_io/uutils-coreutils.svg)](https://crates.io/crates/coreutils) ```shell # Linux cargo install coreutils --features unix --locked # MacOs cargo install coreutils --features macos --locked # Windows cargo install coreutils --features windows --locked ``` ## Linux ### Alpine [![Alpine Linux Edge package](https://repology.org/badge/version-for-repo/alpine_edge/uutils-coreutils.svg)](https://pkgs.alpinelinux.org/packages?name=uutils-coreutils) ```shell apk update uutils-coreutils ``` > **Note**: Requires the `edge` repository. ### Arch [![Arch package](https://repology.org/badge/version-for-repo/arch/uutils-coreutils.svg)](https://archlinux.org/packages/extra/x86_64/uutils-coreutils/) ```shell pacman -S uutils-coreutils ``` ### Debian [![Debian package](https://repology.org/badge/version-for-repo/debian_unstable/uutils-coreutils.svg)](https://packages.debian.org/sid/source/rust-coreutils) ```shell apt install rust-coreutils # To use it: export PATH=/usr/lib/cargo/bin/coreutils:$PATH ``` ### Fedora [![Fedora package](https://repology.org/badge/version-for-repo/fedora_rawhide/uutils-coreutils.svg)](https://packages.fedoraproject.org/pkgs/rust-coreutils/uutils-coreutils) ```shell dnf install uutils-coreutils # To use it: export PATH=/usr/libexec/uutils-coreutils:$PATH ``` ### Gentoo [![Gentoo package](https://repology.org/badge/version-for-repo/gentoo/uutils-coreutils.svg)](https://packages.gentoo.org/packages/sys-apps/uutils-coreutils) ```shell emerge -pv sys-apps/uutils-coreutils ``` ### Manjaro [![Manjaro Stable package](https://repology.org/badge/version-for-repo/manjaro_stable/uutils-coreutils.svg)](https://packages.manjaro.org/?query=uutils-coreutils) [![Manjaro Testing package](https://repology.org/badge/version-for-repo/manjaro_testing/uutils-coreutils.svg)](https://packages.manjaro.org/?query=uutils-coreutils) [![Manjaro Unstable 
package](https://repology.org/badge/version-for-repo/manjaro_unstable/uutils-coreutils.svg)](https://packages.manjaro.org/?query=uutils-coreutils) ```shell pacman -S uutils-coreutils # or pamac install uutils-coreutils ``` ### NixOS [![nixpkgs unstable package](https://repology.org/badge/version-for-repo/nix_unstable/uutils-coreutils.svg)](https://search.nixos.org/packages?query=uutils-coreutils) ```shell nix-env -iA nixos.uutils-coreutils ``` ### OpenMandriva Lx [![openmandriva cooker package](https://repology.org/badge/version-for-repo/openmandriva_cooker/uutils-coreutils.svg)](https://repology.org/project/uutils-coreutils/versions) ```shell dnf install uutils-coreutils ``` ### RHEL/AlmaLinux/CENTOS Stream/Rocky Linux/EPEL 9 [![epel 9 package](https://repology.org/badge/version-for-repo/epel_9/uutils-coreutils.svg)](https://packages.fedoraproject.org/pkgs/rust-coreutils/uutils-coreutils/epel-9.html) ```shell # Install EPEL 9 - Specific For RHEL please check codeready-builder-for-rhel-9 First then install epel dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm -y # Install Core Utils dnf install uutils-coreutils # To use it: export PATH=/usr/libexec/uutils-coreutils:$PATH ``` ### Ubuntu [![Ubuntu package](https://repology.org/badge/version-for-repo/ubuntu_25_04/uutils-coreutils.svg)](https://packages.ubuntu.com/source/plucky/rust-coreutils) ```shell apt install rust-coreutils # To use it: export PATH=/usr/lib/cargo/bin/coreutils:$PATH ``` ## MacOS ### Homebrew [![Homebrew package](https://repology.org/badge/version-for-repo/homebrew/uutils-coreutils.svg)](https://formulae.brew.sh/formula/uutils-coreutils) ```shell brew install uutils-coreutils ``` ### MacPorts [![MacPorts package](https://repology.org/badge/version-for-repo/macports/uutils-coreutils.svg)](https://ports.macports.org/port/coreutils-uutils/) ``` port install coreutils-uutils ``` ## FreeBSD [![FreeBSD 
port](https://repology.org/badge/version-for-repo/freebsd/rust-coreutils.svg)](https://repology.org/project/rust-coreutils/versions) ```sh pkg install rust-coreutils ``` ## Windows ### Winget ```shell winget install uutils.coreutils ``` ### Scoop [Scoop package](https://scoop.sh/#/apps?q=uutils-coreutils&s=0&d=1&o=true) ```shell scoop install uutils-coreutils ``` ## Alternative installers ### Conda [Conda package](https://anaconda.org/conda-forge/uutils-coreutils) ``` conda install -c conda-forge uutils-coreutils ``` ### Yocto [Yocto recipe](https://github.com/openembedded/meta-openembedded/tree/master/meta-oe/recipes-core/uutils-coreutils) The uutils-coreutils recipe is provided as part of the meta-openembedded yocto layer. Clone [poky](https://github.com/yoctoproject/poky) and [meta-openembedded](https://github.com/openembedded/meta-openembedded/tree/master), add `meta-openembedded/meta-oe` as layer in your `build/conf/bblayers.conf` file, and then either call `bitbake uutils-coreutils`, or use `PREFERRED_PROVIDER_coreutils = "uutils-coreutils"` in your `build/conf/local.conf` file and then build your usual yocto image. ## Non-standard packages ### `coreutils-uutils` (AUR) [AUR package](https://aur.archlinux.org/packages/coreutils-uutils) Cross-platform Rust rewrite of the GNU coreutils being used as actual system coreutils. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/l10n.md000066400000000000000000000074301504311601400235630ustar00rootroot00000000000000# 🌠Localization (L10n) in uutils coreutils This guide explains how localization (L10n) is implemented in the **Rust-based coreutils project**, detailing the use of [Fluent](https://projectfluent.org/) files, runtime behavior, and developer integration. 
--- ## 📠Fluent File Layout Each utility has its own set of translation files under: ``` src/uu//locales/.ftl ``` Examples: ``` src/uu/ls/locales/en-US.ftl src/uu/ls/locales/fr-FR.ftl ``` These files follow Fluent syntax and contain localized message patterns. --- ## âš™ï¸ Initialization Localization must be explicitly initialized at runtime using: ``` setup_localization(path) ``` This is typically done: - In `src/bin/coreutils.rs` for **multi-call binaries** - In `src/uucore/src/lib.rs` for **single-call utilities** The string parameter determines the lookup path for Fluent files. --- ## 🌠Locale Detection Locale selection is automatic and performed via: ``` fn detect_system_locale() -> Result ``` It reads the `LANG` environment variable (e.g., `fr-FR.UTF-8`), strips encoding, and parses the identifier. If parsing fails or `LANG` is not set, it falls back to: ``` const DEFAULT_LOCALE: &str = "en-US"; ``` You can override the locale at runtime by running: ``` LANG=ja-JP ./target/debug/ls ``` --- ## 📥 Retrieving Messages We have a single macro to handle translations. It can be used in two ways: ### `translate!(id: &str) -> String` Returns the message from the current locale bundle. ``` let msg = translate!("id-greeting"); ``` If not found, falls back to `en-US`. If still missing, returns the ID itself. --- ### `translate!(id: &str, args: key-value pairs) -> String` Supports variable interpolation and pluralization. ``` let msg = translate!( "error-io", "error" => std::io::Error::last_os_error() ); ``` Fluent message example: ``` error-io = I/O error occurred: { $error } ``` Variables must match the Fluent placeholder keys (`$error`, `$name`, `$count`, etc.). --- ## 📦 Fluent Syntax Example ``` id-greeting = Hello, world! welcome = Welcome, { $name }! count-files = You have { $count -> [one] { $count } file *[other] { $count } files } ``` Use plural rules and inline variables to adapt messages dynamically. 
--- ## 🧪 Testing Localization Run all localization-related unit tests with: ``` cargo test --lib -p uucore ``` Tests include: - Loading bundles - Plural logic - Locale fallback - Fluent parse errors - Thread-local behavior - ... --- ## 🧵 Thread-local Storage Localization is stored per thread using a `OnceLock`. Each thread must call `setup_localization()` individually. Initialization is **one-time-only** per thread — re-initialization results in an error. --- ## 🧪 Development vs Release Mode During development (`cfg(debug_assertions)`), paths are resolved relative to the crate source: ``` $CARGO_MANIFEST_DIR/../uu//locales/ ``` In release mode, **paths are resolved relative to the executable**: ``` /locales// ``` If both fallback paths fail, an error is returned during `setup_localization()`. --- ## 🔤 Unicode Isolation Handling By default, the Fluent system wraps variables with Unicode directional isolate characters (`U+2068`, `U+2069`) to protect against visual reordering issues in bidirectional text (e.g., mixing Arabic and English). In this implementation, isolation is **disabled** via: ``` bundle.set_use_isolating(false); ``` This improves readability in CLI environments by preventing extraneous characters around interpolated values: Correct (as rendered): ``` "Welcome, Alice!" ``` Fluent default (disabled here): ``` "\u{2068}Alice\u{2069}" ``` coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/logo.svg000066400000000000000000000070401504311601400241450ustar00rootroot00000000000000 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/multicall.md000066400000000000000000000006731504311601400250010ustar00rootroot00000000000000# Multi-call binary uutils includes a multi-call binary from which the utils can be invoked. This reduces the binary size of the binary and can be useful for portability. The first argument of the multi-call binary is the util to run, after which the regular arguments to the util can be passed. 
```shell coreutils [util] [util options] ``` The `--help` flag will print a list of available utils. ## Example ```shell coreutils ls -l ``` coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/oranda.css000066400000000000000000000000551504311601400244410ustar00rootroot00000000000000.logo { display: block; height: 170px; } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/packaging.md000066400000000000000000000075401504311601400247370ustar00rootroot00000000000000# Packaging coreutils > **Note**: This page is intended as a guide for packaging the uutils coreutils > for package maintainers. Normal users probably do not need to read this. If you > just want to install the coreutils, look at the > [installation](installation.md) instructions. The maintainers of this project do not have the capacity to maintain packages for every distribution and package manager out there. Therefore, we encourage other people to package the uutils coreutils for their preferred distributions. You do not need to ask permission for this and you can do this however you want as long as you comply with the license. However, we do like to hear and advertise where the uutils coreutils are available, so please do let us know! ## License The uutils coreutils are licensed under the MIT license. See the [LICENSE](https://github.com/uutils/coreutils/blob/main/LICENSE) for the full license text. Make sure to add attribution and the license text to the package to comply with the license. ## Package We recommend to name the package `uutils-coreutils`. Just `uutils` is incorrect, because that is the name of the organization, which also includes other projects. ## Selecting the utils to include Not all utils are available on all platforms. To get the full set of utils for a particular platform, you must enable the feature flag with the platform name. For example, on Unix-like system, use `--features unix` and `--features windows` on Windows. 
For a more fine-grained selection, you can enable just the features with the name of the utils you want to include and disable the default feature set. Additionally, support for SELinux must explicitly enabled with the `feat_selinux` feature. We recommend including all the utilities that a platform supports. ## Compilation parameters There are several compile-time flags that allow you to tune the coreutils to your particular needs. Some distributions, for example, might choose to minimize the binary size as much as possible. This can be achieved by customizing the configuration passed to cargo. You can view the full documentation in the [cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html). We provide three release profiles out of the box, though you may want to tweak them: - `release`: This is the standard Rust release profile, but with link-time optimization enabled. It is a balance between compile time, performance and a reasonable amount of debug info. The main drawback of this profile is that the binary is quite large (roughly 2x the GNU coreutils). - `release-fast`: Every setting is tuned for the best performance, at the cost of compile time. This binary is still quite large. - `release-small`: Generates the smallest binary possible. This strips _all_ debug info from the binary and leads to worse backtraces. The performance of this profile is also really good as it is close to the `release-fast` profile, but with all debuginfo stripped. For the precise definition of these profiles, you can look at the root [`Cargo.toml`](https://github.com/uutils/coreutils/blob/main/Cargo.toml). The profiles above are just examples. We encourage package maintainers to decide for themselves what the best parameters for their distribution are. For example, a distribution focused on embedded systems would probably choose `release-small`, but another distribution focused on security might enable bounds checks. 
It is also possible to split the debuginfo into a separate package. See the [`split-debuginfo`](https://doc.rust-lang.org/cargo/reference/profiles.html#split-debuginfo) option in `cargo`. ## Additional artifacts This project supports automatically generating manpages and shell completion files which you may want to include in the package. See the page on [building from source](build.md) for how to generate these. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/performance.md000066400000000000000000000060011504311601400253030ustar00rootroot00000000000000 # Performance Profiling Tutorial ## Effective Benchmarking with Hyperfine [Hyperfine](https://github.com/sharkdp/hyperfine) is a powerful command-line benchmarking tool that allows you to measure and compare execution times of commands with statistical rigor. ### Benchmarking Best Practices When evaluating performance improvements, always set up your benchmarks to compare: 1. The GNU implementation as reference 2. The implementation without the change 3. The implementation with your change This three-way comparison provides clear insights into: - How your implementation compares to the standard (GNU) - The actual performance impact of your specific change ### Example Benchmark First, you will need to build the binary in release mode. Debug builds are significantly slower: ```bash cargo build --features unix --profile profiling ``` ```bash # Three-way comparison benchmark hyperfine \ --warmup 3 \ "/usr/bin/ls -R ." \ "./target/profiling/coreutils.prev ls -R ." \ "./target/profiling/coreutils ls -R ." # can be simplified with: hyperfine \ --warmup 3 \ -L ls /usr/bin/ls,"./target/profiling/coreutils.prev ls","./target/profiling/coreutils ls" \ "{ls} -R ." 
``` ``` # to improve the reproducibility of the results: taskset -c 0 ``` ### Interpreting Results Hyperfine provides summary statistics including: - Mean execution time - Standard deviation - Min/max times - Relative performance comparison Look for consistent patterns rather than focusing on individual runs, and be aware of system noise that might affect results. ## Using Samply for Profiling [Samply](https://github.com/mstange/samply) is a sampling profiler that helps you identify performance bottlenecks in your code. ### Basic Profiling ```bash # Generate a flame graph for your application samply record ./target/debug/coreutils ls -R # Profile with higher sampling frequency samply record --rate 1000 ./target/debug/coreutils seq 1 1000 ``` The output using the `debug` profile might be easier to understand, but the performance characteristics may be somewhat different from `release` profile that we _actually_ care about. Consider using the `profiling` profile, that compiles in `release` mode but with debug symbols. For example: ```bash cargo build --profile profiling -p uu_ls samply record -r 10000 target/profiling/ls -lR /var .git .git .git > /dev/null ``` ## Workflow: Measuring Performance Improvements 1. **Establish baselines**: ```bash hyperfine --warmup 3 \ "/usr/bin/sort large_file.txt" \ "our-sort-v1 large_file.txt" ``` 2. **Identify bottlenecks**: ```bash samply record ./our-sort-v1 large_file.txt ``` 3. **Make targeted improvements** based on profiling data 4. **Verify improvements**: ```bash hyperfine --warmup 3 \ "/usr/bin/sort large_file.txt" \ "our-sort-v1 large_file.txt" \ "our-sort-v2 large_file.txt" ``` 5. **Document performance changes** with concrete numbers ```bash hyperfine --export-markdown file.md [...] 
``` coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/platforms.md000066400000000000000000000035641504311601400250240ustar00rootroot00000000000000# Platform support uutils aims to be as "universal" as possible, meaning that we try to support many platforms. However, it is infeasible for us to guarantee that every platform works. Just like Rust itself, we therefore have multiple tiers of platform support, with different guarantees. We support two tiers of platforms: - **Tier 1**: All applicable utils are compiled and tested in CI for these platforms. - **Tier 2**: These platforms are supported but not actively tested. We do accept fixes for these platforms. > **Note**: The tiers are dictated by our CI. We would happily accept a job > in the CI for testing more platforms, bumping those platforms to tier 1. ## Platforms per tier The platforms in tier 1 and the platforms that we test in CI are listed below. | Operating system | Tested targets | | ---------------- | -------------- | | **Linux** | `x86_64-unknown-linux-gnu`
`x86_64-unknown-linux-musl`
`arm-unknown-linux-gnueabihf`
`i686-unknown-linux-gnu`
`aarch64-unknown-linux-gnu` | | **macOS** | `x86_64-apple-darwin` | | **Windows** | `i686-pc-windows-msvc`
`x86_64-pc-windows-gnu`
`x86_64-pc-windows-msvc` | | **FreeBSD** | `x86_64-unknown-freebsd` | | **Android** | `i686-linux-android` | The platforms in tier 2 are more vague, but include: - untested variations of the platforms above, - Redox OS, - and BSDs such as OpenBSD, NetBSD & DragonFlyBSD. ## Utility compatibility per platform Not all utils work on every platform. For instance, `chgrp` is not supported on Windows, because Windows does not have the concept of groups. Below is a full table detailing which utilities are supported for the tier 1 platforms. Note that for some utilities, not all functionality is supported on each platform. This is documented per utility. {{ #include platform_table.md }} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/test_coverage.css000066400000000000000000000012641504311601400260320ustar00rootroot00000000000000:root { --PASS: #44AF69; --ERROR: #F8333C; --FAIL: #F8333C; --SKIP: #d3c994; } .PASS { color: var(--PASS); } .ERROR { color: var(--ERROR); } .FAIL { color: var(--FAIL); } .SKIP { color: var(--SKIP); } .testSummary { display: inline-flex; align-items: center; justify-content: space-between; width: 90%; } .progress { width: 80%; display: flex; justify-content: right; align-items: center; } .progress-bar { height: 10px; width: calc(100% - 15ch); border-radius: 5px; } .result { font-weight: bold; width: 7ch; display: inline-block; } .result-line { margin: 8px; } .counts { margin-right: 10px; }coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/test_coverage.js000066400000000000000000000050421504311601400256540ustar00rootroot00000000000000// spell-checker:ignore hljs function progressBar(totals) { const bar = document.createElement("div"); bar.className = "progress-bar"; let totalTests = 0; for (const [key, value] of Object.entries(totals)) { totalTests += value; } const passPercentage = Math.round(100 * totals["PASS"] / totalTests); const skipPercentage = passPercentage + Math.round(100 * totals["SKIP"] / totalTests); // The ternary 
expressions are used for some edge-cases where there are no failing test, // but still a red (or beige) line shows up because of how CSS draws gradients. bar.style = `background: linear-gradient( to right, var(--PASS) ${passPercentage}%` + ( passPercentage === 100 ? ", var(--PASS)" : `, var(--SKIP) ${passPercentage}%, var(--SKIP) ${skipPercentage}%` ) + (skipPercentage === 100 ? ")" : ", var(--FAIL) 0)"); const progress = document.createElement("div"); progress.className = "progress" progress.innerHTML = ` ${totals["PASS"]} / ${totals["SKIP"]} / ${totals["FAIL"] + totals["ERROR"]} `; progress.appendChild(bar); return progress } function parse_result(parent, obj) { const totals = { PASS: 0, SKIP: 0, FAIL: 0, ERROR: 0, }; for (const [category, content] of Object.entries(obj)) { if (typeof content === "string") { const p = document.createElement("p"); p.className = "result-line"; totals[content]++; p.innerHTML = `${content} ${category}`; parent.appendChild(p); } else { const categoryName = document.createElement("code"); categoryName.innerHTML = category; categoryName.className = "hljs"; const details = document.createElement("details"); const subtotals = parse_result(details, content); for (const [subtotal, count] of Object.entries(subtotals)) { totals[subtotal] += count; } const summaryDiv = document.createElement("div"); summaryDiv.className = "testSummary"; summaryDiv.appendChild(categoryName); summaryDiv.appendChild(progressBar(subtotals)); const summary = document.createElement("summary"); summary.appendChild(summaryDiv); details.appendChild(summary); parent.appendChild(details); } } return totals; } fetch("https://raw.githubusercontent.com/uutils/coreutils-tracking/main/aggregated-result.json") .then((r) => r.json()) .then((obj) => { let parent = document.getElementById("test-cov"); parse_result(parent, obj); }); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/src/test_coverage.md000066400000000000000000000011621504311601400256370ustar00rootroot00000000000000# GNU Test Coverage uutils is actively tested against the GNU coreutils test suite. The results below are automatically updated every day. ## Coverage per category Click on the categories to see the names of the tests. Green indicates a passing test, yellow indicates a skipped test and red means that the test either failed or resulted in an error.
## Progress over time coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/theme/000077500000000000000000000000001504311601400227765ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/theme/favicon.svg000066400000000000000000000074721504311601400251560ustar00rootroot00000000000000 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/docs/theme/head.hbs000066400000000000000000000006011504311601400243720ustar00rootroot00000000000000 coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/flake.lock000066400000000000000000000017241504311601400227040ustar00rootroot00000000000000{ "nodes": { "nixpkgs": { "locked": { "lastModified": 1720633750, "narHash": "sha256-N8apMO2pP/upWeH+JY5eM8VDp2qBAAzE+OY5LRW6qpw=", "owner": "nixos", "repo": "nixpkgs", "rev": "54bc082f5a7219d122e74fe52c021cf59fed9d6f", "type": "github" }, "original": { "owner": "nixos", "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "nixpkgs": "nixpkgs", "systems": "systems" } }, "systems": { "locked": { "lastModified": 1681028828, "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", "owner": "nix-systems", "repo": "default", "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", "type": "github" }, "original": { "owner": "nix-systems", "repo": "default", "type": "github" } } }, "root": "root", "version": 7 } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/flake.nix000066400000000000000000000037611504311601400225550ustar00rootroot00000000000000# spell-checker:ignore bintools gnum gperf ldflags libclang nixpkgs numtide pkgs texinfo gettext { inputs = { nixpkgs.url = "github:nixos/nixpkgs"; # systems.url = "github:nix-systems/default"; }; outputs = inputs: let inherit (inputs.nixpkgs) lib legacyPackages; eachSystem = lib.genAttrs (import inputs.systems); pkgsFor = legacyPackages; in { devShells = eachSystem ( system: let libselinuxPath = with pkgsFor.${system}; lib.makeLibraryPath [ libselinux ]; libaclPath = with pkgsFor.${system}; lib.makeLibraryPath [ 
acl ]; build_deps = with pkgsFor.${system}; [ clang llvmPackages.bintools rustup pre-commit nodePackages.cspell # debugging gdb ]; gnu_testing_deps = with pkgsFor.${system}; [ autoconf automake bison gnum4 gperf gettext texinfo ]; in { default = pkgsFor.${system}.pkgs.mkShell { packages = build_deps ++ gnu_testing_deps; RUSTC_VERSION = "1.85"; LIBCLANG_PATH = pkgsFor.${system}.lib.makeLibraryPath [pkgsFor.${system}.llvmPackages_latest.libclang.lib]; shellHook = '' export PATH=$PATH:''${CARGO_HOME:-~/.cargo}/bin export PATH=$PATH:''${RUSTUP_HOME:-~/.rustup}/toolchains/$RUSTC_VERSION-x86_64-unknown-linux-gnu/bin/ ''; SELINUX_INCLUDE_DIR = ''${pkgsFor.${system}.libselinux.dev}/include''; SELINUX_LIB_DIR = libselinuxPath; SELINUX_STATIC = "0"; # Necessary to build GNU. LDFLAGS = ''-L ${libselinuxPath} -L ${libaclPath}''; # Add precompiled library to rustc search path RUSTFLAGS = [ ''-L ${libselinuxPath}'' ''-L ${libaclPath}'' ]; }; } ); }; } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/000077500000000000000000000000001504311601400217425ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/.gitignore000066400000000000000000000000301504311601400237230ustar00rootroot00000000000000target corpus artifacts coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/Cargo.lock000066400000000000000000001453401504311601400236560ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 4 [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "android-tzdata" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "anstream" version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", "windows-sys 0.59.0", ] [[package]] name = "arbitrary" version = "1.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" [[package]] name = "arrayref" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "bigdecimal" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" dependencies = [ "autocfg", "libm", "num-bigint", "num-integer", "num-traits", ] [[package]] name = "binary-heap-plus" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4551d8382e911ecc0d0f0ffb602777988669be09447d536ff4388d1def11296" dependencies = [ "compare", ] [[package]] name = "bitflags" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "blake2b_simd" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" dependencies = [ "arrayref", "arrayvec", "constant_time_eq", ] [[package]] name = "blake3" version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", ] [[package]] name = "block-buffer" version = "0.10.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", "regex-automata", "serde", ] [[package]] name = "bumpalo" version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "bytecount" version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "cc" version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "jobserver", "libc", "shlex", ] [[package]] name = "cfg-if" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "windows-link", ] [[package]] name = "clap" version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.5.40" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", "terminal_size", ] [[package]] name = "clap_lex" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "colorchoice" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compare" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "120133d4db2ec47efe2e26502ee984747630c67f51974fca0b6c1340cf2368d3" [[package]] name = "console" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" dependencies = [ "encode_unicode", "libc", "once_cell", "unicode-width", "windows-sys 0.60.2", ] [[package]] name = "const-random" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] [[package]] name = "const-random-macro" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ "getrandom 0.2.16", "once_cell", "tiny-keccak", ] [[package]] name = "constant_time_eq" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] 
name = "cpufeatures" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "ctrlc" version = "3.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46f93780a459b7d656ef7f071fe699c4d3d2cb201c4b24d085b6ddc505276e73" dependencies = [ "nix", "windows-sys 0.59.0", ] [[package]] name = "data-encoding" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] 
name = "data-encoding-macro" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" dependencies = [ "data-encoding", "data-encoding-macro-internal", ] [[package]] name = "data-encoding-macro-internal" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", "syn", ] [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "displaydoc" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "dlv-list" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" dependencies = [ "const-random", ] [[package]] name = "dunce" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "encode_unicode" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "errno" version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" 
dependencies = [ "libc", "windows-sys 0.59.0", ] [[package]] name = "fastrand" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fluent" version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8137a6d5a2c50d6b0ebfcb9aaa91a28154e0a70605f112d30cb0cd4a78670477" dependencies = [ "fluent-bundle", "unic-langid", ] [[package]] name = "fluent-bundle" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01203cb8918f5711e73891b347816d932046f95f54207710bda99beaeb423bf4" dependencies = [ "fluent-langneg", "fluent-syntax", "intl-memoizer", "intl_pluralrules", "rustc-hash", "self_cell", "smallvec", "unic-langid", ] [[package]] name = "fluent-langneg" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c4ad0989667548f06ccd0e306ed56b61bd4d35458d54df5ec7587c0e8ed5e94" dependencies = [ "unic-langid", ] [[package]] name = "fluent-syntax" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54f0d287c53ffd184d04d8677f590f4ac5379785529e5e08b1c8083acdd5c198" dependencies = [ "memchr", "thiserror", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", "wasi 0.11.1+wasi-snapshot-preview1", ] 
[[package]] name = "getrandom" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", "r-efi", "wasi 0.14.2+wasi-0.2.4", ] [[package]] name = "glob" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "iana-time-zone" version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "log", "wasm-bindgen", "windows-core", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ "cc", ] [[package]] name = "icu_collator" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ad4c6a556938dfd31f75a8c54141079e8821dc697ffb799cfe0f0fa11f2edc" dependencies = [ "displaydoc", "icu_collator_data", "icu_collections", "icu_locale", "icu_locale_core", "icu_normalizer", "icu_properties", "icu_provider", "smallvec", "utf16_iter", "utf8_iter", "zerovec", ] [[package]] name = "icu_collator_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d880b8e680799eabd90c054e1b95526cd48db16c95269f3c89fb3117e1ac92c5" [[package]] name = "icu_collections" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_locale" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ae5921528335e91da1b6c695dbf1ec37df5ac13faa3f91e5640be93aa2fbefd" dependencies = [ "displaydoc", "icu_collections", "icu_locale_core", "icu_locale_data", "icu_provider", "potential_utf", "tinystr", "zerovec", ] [[package]] name = "icu_locale_core" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_locale_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fdef0c124749d06a743c69e938350816554eb63ac979166590e2b4ee4252765" [[package]] name = "icu_normalizer" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", "zerovec", ] [[package]] name = "icu_normalizer_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", 
"potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", "zerotrie", "zerovec", ] [[package]] name = "intl-memoizer" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "310da2e345f5eb861e7a07ee182262e94975051db9e4223e909ba90f392f163f" dependencies = [ "type-map", "unic-langid", ] [[package]] name = "intl_pluralrules" version = "7.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078ea7b7c29a2b4df841a7f6ac8775ff6074020c6776d48491ce2268e068f972" dependencies = [ "unic-langid", ] [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "jiff" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" dependencies = [ "jiff-static", "jiff-tzdb-platform", "log", "portable-atomic", "portable-atomic-util", "serde", "windows-sys 0.59.0", ] [[package]] name = "jiff-static" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "jiff-tzdb" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1283705eb0a21404d2bfd6eef2a7593d240bc42a0bdb39db0ad6fa2ec026524" [[package]] name = "jiff-tzdb-platform" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "875a5a69ac2bab1a891711cf5eccbec1ce0341ea805560dcd90b7a2e925132e8" dependencies = [ "jiff-tzdb", ] [[package]] name = "jobserver" version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ "getrandom 0.3.3", "libc", ] [[package]] name = "js-sys" version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "keccak" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] [[package]] name = "libc" version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libfuzzer-sys" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5037190e1f70cbeef565bd267599242926f724d3b8a9f510fd7e0b540cfa4404" dependencies = [ "arbitrary", "cc", ] [[package]] name = "libm" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "linux-raw-sys" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "log" version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ "cfg-if", "digest", ] [[package]] name = "memchr" version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "nix" version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ "bitflags", "cfg-if", "cfg_aliases", "libc", ] [[package]] name = "nom" version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" dependencies = [ "memchr", ] [[package]] name = "num-bigint" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", ] [[package]] name = "num-integer" version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ "num-traits", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" 
dependencies = [ "autocfg", ] [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "onig" version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "336b9c63443aceef14bea841b899035ae3abe89b7c486aaf4c5bd8aafedac3f0" dependencies = [ "bitflags", "libc", "once_cell", "onig_sys", ] [[package]] name = "onig_sys" version = "69.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f86c6eef3d6df15f23bcfb6af487cbd2fed4e5581d58d5bf1f5f8b7f6727dc" dependencies = [ "cc", "pkg-config", ] [[package]] name = "ordered-multimap" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" dependencies = [ "dlv-list", "hashbrown", ] [[package]] name = "os_display" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad5fd71b79026fb918650dde6d125000a233764f1c2f1659a1c71118e33ea08f" dependencies = [ "unicode-width", ] [[package]] name = "parse_datetime" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5b77d27257a460cefd73a54448e5f3fd4db224150baf6ca3e02eedf4eb2b3e9" dependencies = [ "chrono", "num-traits", "regex", "winnow", ] [[package]] name = "pkg-config" version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "portable-atomic-util" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" dependencies = [ "portable-atomic", ] [[package]] name = "potential_utf" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" dependencies = [ "serde", "zerovec", ] [[package]] name = "ppv-lite86" version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] [[package]] name = "proc-macro2" version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" [[package]] name = "rand" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ "getrandom 0.3.3", ] [[package]] name = "rayon" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rust-ini" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e310ef0e1b6eeb79169a1171daf9abcb87a2e17c03bee2c4bb100b55c75409f" dependencies = [ "cfg-if", "ordered-multimap", "trim-in-place", ] [[package]] name = "rustc-hash" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" 
[[package]] name = "rustix" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.59.0", ] [[package]] name = "rustversion" version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "self_cell" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749" [[package]] name = "serde" version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha3" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest", "keccak", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "similar" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] name = "sm3" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebb9a3b702d0a7e33bc4d85a14456633d2b165c2ad839c5fd9a8417c1ab15860" dependencies = [ "digest", ] [[package]] name = "smallvec" version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "synstructure" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tempfile" version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "terminal_size" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ "rustix", "windows-sys 0.59.0", ] [[package]] name = "thiserror" version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tiny-keccak" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ "crunchy", ] [[package]] name = "tinystr" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", ] [[package]] name = "trim-in-place" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" [[package]] name = "type-map" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb30dbbd9036155e74adad6812e9898d03ec374946234fbcebd5dfc7b9187b90" dependencies = [ "rustc-hash", ] [[package]] name = "typenum" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unic-langid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28ba52c9b05311f4f6e62d5d9d46f094bd6e84cb8df7b3ef952748d752a7d05" dependencies = [ "unic-langid-impl", ] [[package]] name = "unic-langid-impl" version = "0.9.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dce1bf08044d4b7a94028c93786f8566047edc11110595914de93362559bc658" dependencies = [ "tinystr", ] [[package]] name = "unicode-ident" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-width" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "utf16_iter" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uu_cksum" version = "0.1.0" dependencies = [ "clap", "fluent", "hex", "uucore", ] [[package]] name = "uu_cut" version = "0.1.0" dependencies = [ "bstr", "clap", "fluent", "memchr", "uucore", ] [[package]] name = "uu_date" version = "0.1.0" dependencies = [ "chrono", "clap", "fluent", "jiff", "libc", "parse_datetime", "uucore", "windows-sys 0.60.2", ] [[package]] name = "uu_echo" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_env" version = "0.1.0" dependencies = [ "clap", "fluent", "nix", "rust-ini", "thiserror", "uucore", ] [[package]] name = "uu_expr" version = "0.1.0" dependencies = [ "clap", "fluent", "num-bigint", "num-traits", "onig", "thiserror", "uucore", ] [[package]] name = "uu_printf" version = "0.1.0" dependencies = [ "clap", "fluent", "uucore", ] [[package]] name = "uu_seq" 
version = "0.1.0" dependencies = [ "bigdecimal", "clap", "fluent", "num-bigint", "num-traits", "thiserror", "uucore", ] [[package]] name = "uu_sort" version = "0.1.0" dependencies = [ "bigdecimal", "binary-heap-plus", "clap", "compare", "ctrlc", "fluent", "fnv", "itertools", "memchr", "nix", "rand", "rayon", "self_cell", "tempfile", "thiserror", "unicode-width", "uucore", ] [[package]] name = "uu_split" version = "0.1.0" dependencies = [ "clap", "fluent", "memchr", "thiserror", "uucore", ] [[package]] name = "uu_test" version = "0.1.0" dependencies = [ "clap", "fluent", "libc", "thiserror", "uucore", ] [[package]] name = "uu_tr" version = "0.1.0" dependencies = [ "clap", "fluent", "nom", "uucore", ] [[package]] name = "uu_wc" version = "0.1.0" dependencies = [ "bytecount", "clap", "fluent", "libc", "nix", "thiserror", "unicode-width", "uucore", ] [[package]] name = "uucore" version = "0.1.0" dependencies = [ "bigdecimal", "blake2b_simd", "blake3", "bstr", "clap", "crc32fast", "data-encoding", "data-encoding-macro", "digest", "dunce", "fluent", "fluent-syntax", "glob", "hex", "icu_collator", "icu_locale", "itertools", "libc", "md-5", "memchr", "nix", "num-traits", "number_prefix", "os_display", "sha1", "sha2", "sha3", "sm3", "thiserror", "unic-langid", "uucore_procs", "wild", "winapi-util", "windows-sys 0.60.2", "z85", ] [[package]] name = "uucore-fuzz" version = "0.0.0" dependencies = [ "libfuzzer-sys", "rand", "uu_cksum", "uu_cut", "uu_date", "uu_echo", "uu_env", "uu_expr", "uu_printf", "uu_seq", "uu_sort", "uu_split", "uu_test", "uu_tr", "uu_wc", "uucore", "uufuzz", ] [[package]] name = "uucore_procs" version = "0.1.0" dependencies = [ "proc-macro2", "quote", "uuhelp_parser", ] [[package]] name = "uufuzz" version = "0.1.0" dependencies = [ "console", "libc", "rand", "similar", "tempfile", "uucore", ] [[package]] name = "uuhelp_parser" version = "0.1.0" [[package]] name = "version_check" version = "0.9.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] [[package]] name = "wasm-bindgen" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" dependencies = [ "unicode-ident", ] [[package]] name = "wild" version = 
"2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3131afc8c575281e1e80f36ed6a092aa502c08b18ed7524e86fbbb12bb410e1" dependencies = [ "glob", ] [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "windows-core" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", "windows-strings", ] [[package]] name = "windows-implement" version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-interface" version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-link" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.59.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ "windows-targets 0.53.2", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows-targets" version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", "windows_i686_gnullvm 0.53.0", "windows_i686_msvc 0.53.0", "windows_x86_64_gnu 0.53.0", "windows_x86_64_gnullvm 0.53.0", "windows_x86_64_msvc 0.53.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = 
"windows_aarch64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] [[package]] name = "wit-bindgen-rt" version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags", ] [[package]] name = "writeable" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "yoke" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "z85" version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9b3a41ce106832b4da1c065baa4c31cf640cf965fa1483816402b7f6b96f0a64" [[package]] name = "zerocopy" version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerofrom" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zerotrie" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", "zerofrom", ] [[package]] name = "zerovec" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", "zerovec-derive", ] [[package]] name = "zerovec-derive" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", "syn", ] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/Cargo.toml000066400000000000000000000050731504311601400236770ustar00rootroot00000000000000[package] name = "uucore-fuzz" version = "0.0.0" description = "uutils ~ 
'core' uutils fuzzers" repository = "https://github.com/uutils/coreutils/tree/main/fuzz/" edition.workspace = true license.workspace = true publish = false # Prevent this from interfering with workspaces [workspace] members = ["."] [workspace.package] edition = "2024" license = "MIT" [package.metadata] cargo-fuzz = true [dependencies] libfuzzer-sys = "0.4.7" rand = { version = "0.9.0", features = ["small_rng"] } uufuzz = { path = "uufuzz" } uucore = { path = "../src/uucore", features = ["parser"] } uu_date = { path = "../src/uu/date" } uu_test = { path = "../src/uu/test" } uu_expr = { path = "../src/uu/expr" } uu_printf = { path = "../src/uu/printf" } uu_echo = { path = "../src/uu/echo" } uu_seq = { path = "../src/uu/seq" } uu_sort = { path = "../src/uu/sort" } uu_wc = { path = "../src/uu/wc" } uu_cut = { path = "../src/uu/cut" } uu_split = { path = "../src/uu/split" } uu_tr = { path = "../src/uu/tr" } uu_env = { path = "../src/uu/env" } uu_cksum = { path = "../src/uu/cksum" } [[bin]] name = "fuzz_date" path = "fuzz_targets/fuzz_date.rs" test = false doc = false [[bin]] name = "fuzz_printf" path = "fuzz_targets/fuzz_printf.rs" test = false doc = false [[bin]] name = "fuzz_echo" path = "fuzz_targets/fuzz_echo.rs" test = false doc = false [[bin]] name = "fuzz_seq" path = "fuzz_targets/fuzz_seq.rs" test = false doc = false [[bin]] name = "fuzz_sort" path = "fuzz_targets/fuzz_sort.rs" test = false doc = false [[bin]] name = "fuzz_split" path = "fuzz_targets/fuzz_split.rs" test = false doc = false [[bin]] name = "fuzz_cut" path = "fuzz_targets/fuzz_cut.rs" test = false doc = false [[bin]] name = "fuzz_wc" path = "fuzz_targets/fuzz_wc.rs" test = false doc = false [[bin]] name = "fuzz_expr" path = "fuzz_targets/fuzz_expr.rs" test = false doc = false [[bin]] name = "fuzz_test" path = "fuzz_targets/fuzz_test.rs" test = false doc = false [[bin]] name = "fuzz_seq_parse_number" path = "fuzz_targets/fuzz_seq_parse_number.rs" test = false doc = false [[bin]] name = 
"fuzz_parse_glob" path = "fuzz_targets/fuzz_parse_glob.rs" test = false doc = false [[bin]] name = "fuzz_parse_size" path = "fuzz_targets/fuzz_parse_size.rs" test = false doc = false [[bin]] name = "fuzz_parse_time" path = "fuzz_targets/fuzz_parse_time.rs" test = false doc = false [[bin]] name = "fuzz_tr" path = "fuzz_targets/fuzz_tr.rs" test = false doc = false [[bin]] name = "fuzz_env" path = "fuzz_targets/fuzz_env.rs" test = false doc = false [[bin]] name = "fuzz_cksum" path = "fuzz_targets/fuzz_cksum.rs" test = false doc = false coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/000077500000000000000000000000001504311601400244715ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_cksum.rs000066400000000000000000000116551504311601400272470ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore chdir #![no_main] use libfuzzer_sys::fuzz_target; use rand::Rng; use std::env::temp_dir; use std::ffi::OsString; use std::fs::{self, File}; use std::io::Write; use std::process::Command; use uu_cksum::uumain; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_file, generate_random_string, pretty_print::{print_or_empty, print_test_begin}, replace_fuzz_binary_name, run_gnu_cmd, }; static CMD_PATH: &str = "cksum"; fn generate_cksum_args() -> Vec { let mut rng = rand::rng(); let mut args = Vec::new(); let digests = [ "sysv", "bsd", "crc", "md5", "sha1", "sha224", "sha256", "sha384", "sha512", "blake2b", "sm3", ]; let digest_opts = [ "--base64", "--raw", "--tag", "--untagged", "--text", "--binary", ]; if rng.random_bool(0.3) { args.push("-a".to_string()); args.push(digests[rng.random_range(0..digests.len())].to_string()); } if rng.random_bool(0.2) { args.push(digest_opts[rng.random_range(0..digest_opts.len())].to_string()); } if rng.random_bool(0.15) { args.push("-l".to_string()); args.push(rng.random_range(8..513).to_string()); } if rng.random_bool(0.05) { for _ in 0..rng.random_range(0..3) { args.push(format!("file_{}", generate_random_string(5))); } } else { args.push("-c".to_string()); } if rng.random_bool(0.25) { if let Ok(file_path) = generate_random_file() { args.push(file_path); } } if args.is_empty() || !args.iter().any(|arg| arg.starts_with("file_")) { args.push("-a".to_string()); args.push(digests[rng.random_range(0..digests.len())].to_string()); if let Ok(file_path) = generate_random_file() { args.push(file_path); } } args } fn generate_checksum_file( algo: &str, file_path: &str, digest_opts: &[&str], ) -> Result { let checksum_file_path = temp_dir().join("checksum_file"); let mut cmd = Command::new(CMD_PATH); cmd.arg("-a").arg(algo); for opt in digest_opts { cmd.arg(opt); } cmd.arg(file_path); let output = cmd.output()?; let mut checksum_file = File::create(&checksum_file_path)?; 
checksum_file.write_all(&output.stdout)?; Ok(checksum_file_path.to_str().unwrap().to_string()) } fn select_random_digest_opts<'a>( rng: &mut rand::rngs::ThreadRng, digest_opts: &'a [&'a str], ) -> Vec<&'a str> { digest_opts .iter() .filter(|_| rng.random_bool(0.5)) .copied() .collect() } fuzz_target!(|_data: &[u8]| { let cksum_args = generate_cksum_args(); let mut args = vec![OsString::from("cksum")]; args.extend(cksum_args.iter().map(OsString::from)); if let Ok(file_path) = generate_random_file() { let algo = cksum_args .iter() .position(|arg| arg == "-a") .map_or("md5", |index| &cksum_args[index + 1]); let all_digest_opts = ["--base64", "--raw", "--tag", "--untagged"]; let mut rng = rand::rng(); let selected_digest_opts = select_random_digest_opts(&mut rng, &all_digest_opts); if let Ok(checksum_file_path) = generate_checksum_file(algo, &file_path, &selected_digest_opts) { print_test_begin(format!("cksum {args:?}")); if let Ok(content) = fs::read_to_string(&checksum_file_path) { println!("File content ({checksum_file_path})"); print_or_empty(&content); } else { eprintln!("Error reading the checksum file."); } let mut rust_result = generate_and_run_uumain(&args, uumain, None); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; // Lower the number of false positives caused by binary names replace_fuzz_binary_name("cksum", &mut rust_result); compare_result( "cksum", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, ); } } }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_cut.rs000066400000000000000000000052371504311601400267170ustar00rootroot00000000000000// This file is part of the uutils coreutils package. 
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_cut::uumain; use rand::Rng; use std::ffi::OsString; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd, }; static CMD_PATH: &str = "cut"; fn generate_cut_args() -> String { let mut rng = rand::rng(); let arg_count = rng.random_range(1..=6); let mut args = Vec::new(); for _ in 0..arg_count { if rng.random_bool(0.1) { args.push(generate_random_string(rng.random_range(1..=20))); } else { match rng.random_range(0..=4) { 0 => args.push(String::from("-b") + &rng.random_range(1..=10).to_string()), 1 => args.push(String::from("-c") + &rng.random_range(1..=10).to_string()), 2 => args.push(String::from("-d,") + &generate_random_string(1)), // Using a comma as a default delimiter 3 => args.push(String::from("-f") + &rng.random_range(1..=5).to_string()), _ => (), } } } args.join(" ") } fn generate_delimited_data(count: usize) -> String { let mut rng = rand::rng(); let mut lines = Vec::new(); for _ in 0..count { let fields = (0..rng.random_range(1..=5)) .map(|_| generate_random_string(rng.random_range(1..=10))) .collect::>() .join(","); lines.push(fields); } lines.join("\n") } fuzz_target!(|_data: &[u8]| { let cut_args = generate_cut_args(); let mut args = vec![OsString::from("cut")]; args.extend(cut_args.split_whitespace().map(OsString::from)); let input_lines = generate_delimited_data(10); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_lines)); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, Some(&input_lines)) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: 
error_result.exit_code, } } }; compare_result( "cut", &format!("{:?}", &args[1..]), Some(&input_lines), &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_date.rs000066400000000000000000000004711504311601400270340ustar00rootroot00000000000000#![no_main] use libfuzzer_sys::fuzz_target; use std::ffi::OsString; use uu_date::uumain; fuzz_target!(|data: &[u8]| { let delim: u8 = 0; // Null byte let args = data .split(|b| *b == delim) .filter_map(|e| std::str::from_utf8(e).ok()) .map(OsString::from); uumain(args); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_echo.rs000066400000000000000000000045261504311601400270420ustar00rootroot00000000000000#![no_main] use libfuzzer_sys::fuzz_target; use uu_echo::uumain; use rand::Rng; use rand::prelude::IndexedRandom; use std::ffi::OsString; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; static CMD_PATH: &str = "echo"; fn generate_echo() -> String { let mut rng = rand::rng(); let mut echo_str = String::new(); // Randomly decide whether to include options let include_n = rng.random_bool(0.1); // 10% chance let include_e = rng.random_bool(0.1); // 10% chance #[allow(non_snake_case)] let include_E = rng.random_bool(0.1); // 10% chance if include_n { echo_str.push_str("-n "); } if include_e { echo_str.push_str("-e "); } if include_E { echo_str.push_str("-E "); } // Add a random string echo_str.push_str(&generate_random_string(rng.random_range(1..=10))); // Include escape sequences if -e is enabled if include_e { // Add a 10% chance of including an escape sequence if rng.random_bool(0.1) { echo_str.push_str(&generate_escape_sequence(&mut rng)); } } echo_str } fn generate_escape_sequence(rng: &mut impl Rng) -> String { let escape_sequences = [ "\\\\", "\\a", "\\b", "\\c", "\\e", "\\f", "\\n", "\\r", "\\t", "\\v", "\\0NNN", 
"\\xHH", ]; // \0NNN and \xHH need more work escape_sequences.choose(rng).unwrap().to_string() } fuzz_target!(|_data: &[u8]| { let echo_input = generate_echo(); let mut args = vec![OsString::from("echo")]; args.extend(echo_input.split_whitespace().map(OsString::from)); let rust_result = generate_and_run_uumain(&args, uumain, None); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "echo", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, true, ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_env.rs000066400000000000000000000051711504311601400267110ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore chdir #![no_main] use libfuzzer_sys::fuzz_target; use uu_env::uumain; use std::ffi::OsString; use rand::Rng; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd, }; static CMD_PATH: &str = "env"; fn generate_env_args() -> Vec { let mut rng = rand::rng(); let mut args = Vec::new(); let opts = ["-i", "-0", "-v", "-vv"]; for opt in &opts { if rng.random_bool(0.2) { args.push(opt.to_string()); } } if rng.random_bool(0.3) { args.push(format!( "-u={}", generate_random_string(rng.random_range(3..10)) )); } if rng.random_bool(0.2) { args.push(format!("--chdir={}", "/tmp")); // Simplified example } /* Options not implemented for now if rng.random_bool(0.15) { let sig_opts = ["--block-signal"];//, /*"--default-signal",*/ "--ignore-signal"]; let chosen_sig_opt = sig_opts[rng.random_range(0..sig_opts.len())]; args.push(chosen_sig_opt.to_string()); // Simplify by assuming SIGPIPE for demonstration if !chosen_sig_opt.ends_with("list-signal-handling") { args.push(String::from("SIGPIPE")); } }*/ // Adding a few random NAME=VALUE pairs for _ in 0..rng.random_range(0..3) { args.push(format!( "{}={}", generate_random_string(5), generate_random_string(5) )); } args } fuzz_target!(|_data: &[u8]| { let env_args = generate_env_args(); let mut args = vec![OsString::from("env")]; args.extend(env_args.iter().map(OsString::from)); let input_lines = generate_random_string(10); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_lines)); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "env", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, ); }); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_expr.rs000066400000000000000000000060621504311601400270770ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_expr::uumain; use rand::Rng; use rand::prelude::IndexedRandom; use std::{env, ffi::OsString}; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; static CMD_PATH: &str = "expr"; fn generate_expr(max_depth: u32) -> String { let mut rng = rand::rng(); let ops = [ "+", "-", "*", "/", "%", "<", ">", "=", "&", "|", "!=", "<=", ">=", ":", "index", "length", "substr", ]; let mut expr = String::new(); let mut depth = 0; let mut last_was_operator = false; while depth <= max_depth { if last_was_operator || depth == 0 { // Add a number expr.push_str(&rng.random_range(1..=100).to_string()); last_was_operator = false; } else { // 90% chance to add an operator followed by a number if rng.random_bool(0.9) { let op = *ops.choose(&mut rng).unwrap(); expr.push_str(&format!(" {op} ")); last_was_operator = true; } // 10% chance to add a random string (potentially invalid syntax) else { let random_str = generate_random_string(rng.random_range(1..=10)); expr.push_str(&random_str); last_was_operator = false; } } depth += 1; } // Ensure the expression ends with a number if it ended with an operator if last_was_operator { expr.push_str(&rng.random_range(1..=100).to_string()); } expr } fuzz_target!(|_data: &[u8]| { let mut rng = rand::rng(); let expr = generate_expr(rng.random_range(0..=20)); let mut args = vec![OsString::from("expr")]; args.extend(expr.split_whitespace().map(OsString::from)); // Use C locale to avoid false positives, like in https://github.com/uutils/coreutils/issues/5378, // because 
uutils expr doesn't support localization yet // TODO remove once uutils expr supports localization unsafe { env::set_var("LC_COLLATE", "C"); } let rust_result = generate_and_run_uumain(&args, uumain, None); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "expr", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_parse_glob.rs000066400000000000000000000003111504311601400302250ustar00rootroot00000000000000#![no_main] use libfuzzer_sys::fuzz_target; use uucore::parser::parse_glob; fuzz_target!(|data: &[u8]| { if let Ok(s) = std::str::from_utf8(data) { _ = parse_glob::from_str(s); } }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_parse_size.rs000066400000000000000000000003231504311601400302570ustar00rootroot00000000000000#![no_main] use libfuzzer_sys::fuzz_target; use uucore::parser::parse_size::parse_size_u64; fuzz_target!(|data: &[u8]| { if let Ok(s) = std::str::from_utf8(data) { _ = parse_size_u64(s); } }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_parse_time.rs000066400000000000000000000003731504311601400302500ustar00rootroot00000000000000#![no_main] use libfuzzer_sys::fuzz_target; use uucore::parser::parse_time; fuzz_target!(|data: &[u8]| { if let Ok(s) = std::str::from_utf8(data) { _ = parse_time::from_str(s, true); _ = parse_time::from_str(s, false); } }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_printf.rs000066400000000000000000000062741504311601400274300ustar00rootroot00000000000000// This 
file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_printf::uumain; use rand::Rng; use rand::seq::IndexedRandom; use std::env; use std::ffi::OsString; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; static CMD_PATH: &str = "printf"; fn generate_escape_sequence(rng: &mut impl Rng) -> String { let escape_sequences = [ "\\\"", "\\\\", "\\a", "\\b", "\\c", "\\e", "\\f", "\\n", "\\r", "\\t", "\\v", "\\000", "\\x00", "\\u0000", "\\U00000000", "%%", ]; escape_sequences.choose(rng).unwrap().to_string() } fn generate_printf() -> String { let mut rng = rand::rng(); let format_specifiers = ["%s", "%d", "%f", "%x", "%o", "%c", "%b", "%q"]; let mut printf_str = String::new(); // Add a 20% chance of generating an invalid format specifier if rng.random_bool(0.2) { printf_str.push_str("%z"); // Invalid format specifier } else { let specifier = *format_specifiers.choose(&mut rng).unwrap(); printf_str.push_str(specifier); // Add a 20% chance of introducing complex format strings if rng.random_bool(0.2) { printf_str.push_str(&format!(" %{}", rng.random_range(1..=1000))); } else { // Add a random string or number after the specifier if specifier == "%s" { printf_str.push_str(&format!( " {}", generate_random_string(rng.random_range(1..=10)) )); } else { printf_str.push_str(&format!(" {}", rng.random_range(1..=1000))); } } } // Add a 10% chance of including an escape sequence if rng.random_bool(0.1) { printf_str.push_str(&generate_escape_sequence(&mut rng)); } printf_str } fuzz_target!(|_data: &[u8]| { let printf_input = generate_printf(); let mut args = vec![OsString::from("printf")]; args.extend(printf_input.split_whitespace().map(OsString::from)); let rust_result = generate_and_run_uumain(&args, uumain, None); 
// TODO remove once uutils printf supports localization unsafe { env::set_var("LC_ALL", "C"); } let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "printf", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_seq.rs000066400000000000000000000044751504311601400267170ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_seq::uumain; use rand::Rng; use std::ffi::OsString; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; static CMD_PATH: &str = "seq"; fn generate_seq() -> String { let mut rng = rand::rng(); // Generate 1 to 3 numbers for seq arguments let arg_count = rng.random_range(1..=3); let mut args = Vec::new(); for _ in 0..arg_count { if rng.random_ratio(1, 100) { // 1% chance to add a random string args.push(generate_random_string(rng.random_range(1..=10))); } else { // 99% chance to add a numeric value match rng.random_range(0..=3) { 0 => args.push(rng.random_range(-10000..=10000).to_string()), // Large or small integers 1 => args.push(rng.random_range(-100.0..100.0).to_string()), // Floating-point numbers 2 => args.push(rng.random_range(-100..0).to_string()), // Negative integers _ => args.push(rng.random_range(1..=100).to_string()), // Regular integers } } } args.join(" ") } 
fuzz_target!(|_data: &[u8]| { let seq = generate_seq(); let mut args = vec![OsString::from("seq")]; args.extend(seq.split_whitespace().map(OsString::from)); let rust_result = generate_and_run_uumain(&args, uumain, None); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "seq", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_seq_parse_number.rs000066400000000000000000000006421504311601400314510ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. #![no_main] use libfuzzer_sys::fuzz_target; use std::str::FromStr; use uu_seq::number::PreciseNumber; fuzz_target!(|data: &[u8]| { if let Ok(s) = std::str::from_utf8(data) { let _ = PreciseNumber::from_str(s); } }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_sort.rs000066400000000000000000000052021504311601400271030ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_sort::uumain; use rand::Rng; use std::env; use std::ffi::OsString; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; static CMD_PATH: &str = "sort"; fn generate_sort_args() -> String { let mut rng = rand::rng(); let arg_count = rng.random_range(1..=5); let mut args = Vec::new(); for _ in 0..arg_count { match rng.random_range(0..=4) { 0 => args.push(String::from("-r")), // Reverse the result of comparisons 1 => args.push(String::from("-n")), // Compare according to string numerical value 2 => args.push(String::from("-f")), // Fold lower case to upper case characters 3 => args.push(generate_random_string(rng.random_range(1..=10))), // Random string (to simulate file names) _ => args.push(String::from("-k") + &rng.random_range(1..=5).to_string()), // Sort via a specified field } } args.join(" ") } fn generate_random_lines(count: usize) -> String { let mut rng = rand::rng(); let mut lines = Vec::new(); for _ in 0..count { lines.push(generate_random_string(rng.random_range(1..=20))); } lines.join("\n") } fuzz_target!(|_data: &[u8]| { let sort_args = generate_sort_args(); let mut args = vec![OsString::from("sort")]; args.extend(sort_args.split_whitespace().map(OsString::from)); // Generate random lines to sort let input_lines = generate_random_lines(10); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_lines)); // TODO remove once uutils sort supports localization unsafe { env::set_var("LC_ALL", "C"); } let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, Some(&input_lines)) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "sort", 
&format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_split.rs000066400000000000000000000063021504311601400272510ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_split::uumain; use rand::Rng; use std::ffi::OsString; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd, }; static CMD_PATH: &str = "split"; fn generate_split_args() -> String { let mut rng = rand::rng(); let mut args = Vec::new(); match rng.random_range(0..=9) { 0 => { args.push(String::from("-a")); // Suffix length args.push(rng.random_range(1..=8).to_string()); } 1 => { args.push(String::from("--additional-suffix")); args.push(generate_random_string(5)); // Random suffix } 2 => { args.push(String::from("-b")); // Bytes per output file args.push(rng.random_range(1..=1024).to_string() + "K"); } 3 => { args.push(String::from("-C")); // Line bytes args.push(rng.random_range(1..=1024).to_string()); } 4 => args.push(String::from("-d")), // Use numeric suffixes 5 => args.push(String::from("-x")), // Use hex suffixes 6 => { args.push(String::from("-l")); // Number of lines per output file args.push(rng.random_range(1..=1000).to_string()); } 7 => { args.push(String::from("--filter")); args.push(String::from("cat > /dev/null")); // Example filter command } 8 => { args.push(String::from("-t")); // Separator args.push(String::from("\n")); // Newline as separator } 9 => args.push(String::from("--verbose")), // Verbose _ => (), } args.join(" ") } /// Function to generate a random string of lines fn generate_random_lines(count: usize) -> String { let mut rng = rand::rng(); let 
mut lines = Vec::new(); for _ in 0..count { lines.push(generate_random_string(rng.random_range(1..=20))); } lines.join("\n") } fuzz_target!(|_data: &[u8]| { let split_args = generate_split_args(); let mut args = vec![OsString::from("split")]; args.extend(split_args.split_whitespace().map(OsString::from)); let input_lines = generate_random_lines(10); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_lines)); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, Some(&input_lines)) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "split", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_test.rs000066400000000000000000000142651504311601400271040ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore STRINGSTRING INTEGERINTEGER FILEFILE #![no_main] use libfuzzer_sys::fuzz_target; use uu_test::uumain; use rand::Rng; use rand::prelude::IndexedRandom; use std::ffi::OsString; use uufuzz::CommandResult; use uufuzz::{compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd}; #[allow(clippy::upper_case_acronyms)] #[derive(PartialEq, Debug, Clone)] enum ArgType { STRING, STRINGSTRING, INTEGER, INTEGERINTEGER, FILE, FILEFILE, // Add any other types as needed } static CMD_PATH: &str = "test"; #[derive(Debug, Clone)] struct TestArg { arg: String, arg_type: ArgType, } fn generate_random_path(rng: &mut dyn rand::RngCore) -> &'static str { match rng.random_range(0..=3) { 0 => "/dev/null", 1 => "/dev/random", 2 => "/tmp", _ => "/dev/urandom", } } fn generate_test_args() -> Vec { vec![ TestArg { arg: "-z".to_string(), arg_type: ArgType::STRING, }, TestArg { arg: "-n".to_string(), arg_type: ArgType::STRING, }, TestArg { arg: "=".to_string(), arg_type: ArgType::STRINGSTRING, }, TestArg { arg: "!=".to_string(), arg_type: ArgType::STRINGSTRING, }, TestArg { arg: ">".to_string(), arg_type: ArgType::STRINGSTRING, }, TestArg { arg: "<".to_string(), arg_type: ArgType::STRINGSTRING, }, TestArg { arg: "-eq".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-ne".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-gt".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-ge".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-lt".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-le".to_string(), arg_type: ArgType::INTEGERINTEGER, }, TestArg { arg: "-f".to_string(), arg_type: ArgType::FILE, }, TestArg { arg: "-d".to_string(), arg_type: ArgType::FILE, }, TestArg { arg: "-e".to_string(), arg_type: ArgType::FILE, }, TestArg { arg: "-ef".to_string(), arg_type: ArgType::FILEFILE, }, TestArg { arg: "-nt".to_string(), arg_type: ArgType::FILEFILE, }, ] } fn 
generate_test_arg() -> String { let mut rng = rand::rng(); let test_args = generate_test_args(); let mut arg = String::new(); let choice = rng.random_range(0..=5); match choice { 0 => { arg.push_str(&rng.random_range(-100..=100).to_string()); } 1..=3 => { let test_arg = test_args .choose(&mut rng) .expect("Failed to choose a random test argument"); if test_arg.arg_type == ArgType::INTEGER { arg.push_str(&format!( "{} {} {}", rng.random_range(-100..=100).to_string(), test_arg.arg, rng.random_range(-100..=100).to_string() )); } else if test_arg.arg_type == ArgType::STRINGSTRING { let random_str = generate_random_string(rng.random_range(1..=10)); let random_str2 = generate_random_string(rng.random_range(1..=10)); arg.push_str(&format!("{random_str} {} {random_str2}", test_arg.arg,)); } else if test_arg.arg_type == ArgType::STRING { let random_str = generate_random_string(rng.random_range(1..=10)); arg.push_str(&format!("{} {random_str}", test_arg.arg)); } else if test_arg.arg_type == ArgType::FILEFILE { let path = generate_random_path(&mut rng); let path2 = generate_random_path(&mut rng); arg.push_str(&format!("{path} {} {path2}", test_arg.arg)); } else if test_arg.arg_type == ArgType::FILE { let path = generate_random_path(&mut rng); arg.push_str(&format!("{} {path}", test_arg.arg)); } } 4 => { let random_str = generate_random_string(rng.random_range(1..=10)); arg.push_str(&random_str); } _ => { let path = generate_random_path(&mut rng); let file_test_args: Vec = test_args .iter() .filter(|ta| ta.arg_type == ArgType::FILE) .cloned() .collect(); if let Some(test_arg) = file_test_args.choose(&mut rng) { arg.push_str(&format!("{}{path}", test_arg.arg)); } } } arg } fuzz_target!(|_data: &[u8]| { let mut rng = rand::rng(); let max_args = rng.random_range(1..=6); let mut args = vec![OsString::from("test")]; for _ in 0..max_args { args.push(OsString::from(generate_test_arg())); } let rust_result = generate_and_run_uumain(&args, uumain, None); let gnu_result = match 
run_gnu_cmd(CMD_PATH, &args[1..], false, None) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "test", &format!("{:?}", &args[1..]), None, &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_tr.rs000066400000000000000000000040431504311601400265430ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. #![no_main] use libfuzzer_sys::fuzz_target; use std::ffi::OsString; use uu_tr::uumain; use rand::Rng; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd, }; static CMD_PATH: &str = "tr"; fn generate_tr_args() -> Vec { let mut rng = rand::rng(); let mut args = Vec::new(); // Translate, squeeze, and/or delete characters let opts = ["-c", "-d", "-s", "-t"]; for opt in &opts { if rng.random_bool(0.25) { args.push(opt.to_string()); } } // Generating STRING1 and optionally STRING2 let string1 = generate_random_string(rng.random_range(1..=20)); args.push(string1); if rng.random_bool(0.7) { // Higher chance to add STRING2 for translation let string2 = generate_random_string(rng.random_range(1..=20)); args.push(string2); } args } fuzz_target!(|_data: &[u8]| { let tr_args = generate_tr_args(); let mut args = vec![OsString::from("tr")]; args.extend(tr_args.iter().map(OsString::from)); let input_chars = generate_random_string(100); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_chars)); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, Some(&input_chars)) { 
Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "tr", &format!("{:?}", &args[1..]), Some(&input_chars), &rust_result, &gnu_result, false, ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/fuzz_targets/fuzz_wc.rs000066400000000000000000000060461504311601400265340ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore parens #![no_main] use libfuzzer_sys::fuzz_target; use uu_wc::uumain; use rand::Rng; use std::ffi::OsString; use uufuzz::{ CommandResult, compare_result, generate_and_run_uumain, generate_random_string, run_gnu_cmd, }; static CMD_PATH: &str = "wc"; fn generate_wc_args() -> String { let mut rng = rand::rng(); let arg_count = rng.random_range(1..=6); let mut args = Vec::new(); for _ in 0..arg_count { // Introduce a chance to add invalid arguments if rng.random_bool(0.1) { args.push(generate_random_string(rng.random_range(1..=20))); } else { match rng.random_range(0..=5) { 0 => args.push(String::from("-c")), 1 => args.push(String::from("-m")), 2 => args.push(String::from("-l")), 3 => args.push(String::from("-L")), 4 => args.push(String::from("-w")), // TODO 5 => { args.push(String::from("--files0-from")); if rng.random_bool(0.5) { args.push(generate_random_string(50)); // Longer invalid file name } else { args.push(generate_random_string(5)); } } _ => (), } } } args.join(" ") } /// Function to generate a random string of lines, including invalid ones fn generate_random_lines(count: usize) -> String { let mut rng = rand::rng(); let mut lines = Vec::new(); for _ in 0..count { if rng.random_bool(0.1) { 
lines.push(generate_random_string(rng.random_range(1000..=5000))); // Very long invalid line } else { lines.push(generate_random_string(rng.random_range(1..=20))); } } lines.join("\n") } fuzz_target!(|_data: &[u8]| { let wc_args = generate_wc_args(); let mut args = vec![OsString::from("wc")]; args.extend(wc_args.split_whitespace().map(OsString::from)); let input_lines = generate_random_lines(10); let rust_result = generate_and_run_uumain(&args, uumain, Some(&input_lines)); let gnu_result = match run_gnu_cmd(CMD_PATH, &args[1..], false, Some(&input_lines)) { Ok(result) => result, Err(error_result) => { eprintln!("Failed to run GNU command:"); eprintln!("Stderr: {}", error_result.stderr); eprintln!("Exit Code: {}", error_result.exit_code); CommandResult { stdout: String::new(), stderr: error_result.stderr, exit_code: error_result.exit_code, } } }; compare_result( "wc", &format!("{:?}", &args[1..]), Some(&input_lines), &rust_result, &gnu_result, false, // Set to true if you want to fail on stderr diff ); }); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/uufuzz/000077500000000000000000000000001504311601400233125ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/uufuzz/Cargo.toml000066400000000000000000000007151504311601400252450ustar00rootroot00000000000000[package] name = "uufuzz" authors = ["uutils developers"] description = "uutils ~ 'core' uutils fuzzing library" repository = "https://github.com/uutils/coreutils/tree/main/fuzz/uufuzz" version = "0.1.0" edition.workspace = true license.workspace = true [dependencies] console = "0.16.0" libc = "0.2.153" rand = { version = "0.9.0", features = ["small_rng"] } similar = "2.5.0" uucore = { path = "../../src/uucore", features = ["parser"] } tempfile = "3.15.0" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/uufuzz/src/000077500000000000000000000000001504311601400241015ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/uufuzz/src/lib.rs000066400000000000000000000331351504311601400252220ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use console::Style; use libc::STDIN_FILENO; use libc::{STDERR_FILENO, STDOUT_FILENO, close, dup, dup2, pipe}; use pretty_print::{ print_diff, print_end_with_status, print_or_empty, print_section, print_with_style, }; use rand::Rng; use rand::prelude::IndexedRandom; use std::env::temp_dir; use std::ffi::OsString; use std::fs::File; use std::io::{Seek, SeekFrom, Write}; use std::os::fd::{AsRawFd, RawFd}; use std::process::{Command, Stdio}; use std::sync::atomic::Ordering; use std::sync::{Once, atomic::AtomicBool}; use std::{io, thread}; pub mod pretty_print; /// Represents the result of running a command, including its standard output, /// standard error, and exit code. pub struct CommandResult { /// The standard output (stdout) of the command as a string. pub stdout: String, /// The standard error (stderr) of the command as a string. pub stderr: String, /// The exit code of the command. 
pub exit_code: i32, } static CHECK_GNU: Once = Once::new(); static IS_GNU: AtomicBool = AtomicBool::new(false); pub fn is_gnu_cmd(cmd_path: &str) -> Result<(), std::io::Error> { CHECK_GNU.call_once(|| { let version_output = Command::new(cmd_path).arg("--version").output().unwrap(); println!("version_output {version_output:#?}"); let version_str = String::from_utf8_lossy(&version_output.stdout).to_string(); if version_str.contains("GNU coreutils") { IS_GNU.store(true, Ordering::Relaxed); } }); if IS_GNU.load(Ordering::Relaxed) { Ok(()) } else { panic!("Not the GNU implementation"); } } pub fn generate_and_run_uumain( args: &[OsString], uumain_function: F, pipe_input: Option<&str>, ) -> CommandResult where F: FnOnce(std::vec::IntoIter) -> i32 + Send + 'static, { // Duplicate the stdout and stderr file descriptors let original_stdout_fd = unsafe { dup(STDOUT_FILENO) }; let original_stderr_fd = unsafe { dup(STDERR_FILENO) }; if original_stdout_fd == -1 || original_stderr_fd == -1 { return CommandResult { stdout: "".to_string(), stderr: "Failed to duplicate STDOUT_FILENO or STDERR_FILENO".to_string(), exit_code: -1, }; } println!("Running test {:?}", &args[0..]); let mut pipe_stdout_fds = [-1; 2]; let mut pipe_stderr_fds = [-1; 2]; // Create pipes for stdout and stderr if unsafe { pipe(pipe_stdout_fds.as_mut_ptr()) } == -1 || unsafe { pipe(pipe_stderr_fds.as_mut_ptr()) } == -1 { return CommandResult { stdout: "".to_string(), stderr: "Failed to create pipes".to_string(), exit_code: -1, }; } // Redirect stdout and stderr to their respective pipes if unsafe { dup2(pipe_stdout_fds[1], STDOUT_FILENO) } == -1 || unsafe { dup2(pipe_stderr_fds[1], STDERR_FILENO) } == -1 { unsafe { close(pipe_stdout_fds[0]); close(pipe_stdout_fds[1]); close(pipe_stderr_fds[0]); close(pipe_stderr_fds[1]); } return CommandResult { stdout: "".to_string(), stderr: "Failed to redirect STDOUT_FILENO or STDERR_FILENO".to_string(), exit_code: -1, }; } let original_stdin_fd = if let Some(input_str) = 
pipe_input { // we have pipe input let mut input_file = tempfile::tempfile().unwrap(); write!(input_file, "{input_str}").unwrap(); input_file.seek(SeekFrom::Start(0)).unwrap(); // Redirect stdin to read from the in-memory file let original_stdin_fd = unsafe { dup(STDIN_FILENO) }; if original_stdin_fd == -1 || unsafe { dup2(input_file.as_raw_fd(), STDIN_FILENO) } == -1 { return CommandResult { stdout: "".to_string(), stderr: "Failed to set up stdin redirection".to_string(), exit_code: -1, }; } Some(original_stdin_fd) } else { None }; let (uumain_exit_status, captured_stdout, captured_stderr) = thread::scope(|s| { let out = s.spawn(|| read_from_fd(pipe_stdout_fds[0])); let err = s.spawn(|| read_from_fd(pipe_stderr_fds[0])); #[allow(clippy::unnecessary_to_owned)] // TODO: clippy wants us to use args.iter().cloned() ? let status = uumain_function(args.to_owned().into_iter()); // Reset the exit code global variable in case we run another test after this one // See https://github.com/uutils/coreutils/issues/5777 uucore::error::set_exit_code(0); io::stdout().flush().unwrap(); io::stderr().flush().unwrap(); unsafe { close(pipe_stdout_fds[1]); close(pipe_stderr_fds[1]); close(STDOUT_FILENO); close(STDERR_FILENO); } (status, out.join().unwrap(), err.join().unwrap()) }); // Restore the original stdout and stderr if unsafe { dup2(original_stdout_fd, STDOUT_FILENO) } == -1 || unsafe { dup2(original_stderr_fd, STDERR_FILENO) } == -1 { return CommandResult { stdout: "".to_string(), stderr: "Failed to restore the original STDOUT_FILENO or STDERR_FILENO".to_string(), exit_code: -1, }; } unsafe { close(original_stdout_fd); close(original_stderr_fd); } // Restore the original stdin if it was modified if let Some(fd) = original_stdin_fd { if unsafe { dup2(fd, STDIN_FILENO) } == -1 { return CommandResult { stdout: "".to_string(), stderr: "Failed to restore the original STDIN".to_string(), exit_code: -1, }; } unsafe { close(fd) }; } CommandResult { stdout: captured_stdout, stderr: 
captured_stderr .split_once(':') .map(|x| x.1) .unwrap_or("") .trim() .to_string(), exit_code: uumain_exit_status, } } fn read_from_fd(fd: RawFd) -> String { let mut captured_output = Vec::new(); let mut read_buffer = [0; 1024]; loop { let bytes_read = unsafe { libc::read( fd, read_buffer.as_mut_ptr() as *mut libc::c_void, read_buffer.len(), ) }; if bytes_read == -1 { eprintln!("Failed to read from the pipe"); break; } if bytes_read == 0 { break; } captured_output.extend_from_slice(&read_buffer[..bytes_read as usize]); } unsafe { libc::close(fd) }; String::from_utf8_lossy(&captured_output).into_owned() } pub fn run_gnu_cmd( cmd_path: &str, args: &[OsString], check_gnu: bool, pipe_input: Option<&str>, ) -> Result { if check_gnu { match is_gnu_cmd(cmd_path) { Ok(_) => {} // if the check passes, do nothing Err(e) => { // Convert the io::Error into the function's error type return Err(CommandResult { stdout: String::new(), stderr: e.to_string(), exit_code: -1, }); } } } let mut command = Command::new(cmd_path); for arg in args { command.arg(arg); } // See https://github.com/uutils/coreutils/issues/6794 // uutils' coreutils is not locale-aware, and aims to mirror/be compatible with GNU Core Utilities's LC_ALL=C behavior command.env("LC_ALL", "C"); let output = if let Some(input_str) = pipe_input { // We have an pipe input command .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); let mut child = command.spawn().expect("Failed to execute command"); let child_stdin = child.stdin.as_mut().unwrap(); child_stdin .write_all(input_str.as_bytes()) .expect("Failed to write to stdin"); match child.wait_with_output() { Ok(output) => output, Err(e) => { return Err(CommandResult { stdout: String::new(), stderr: e.to_string(), exit_code: -1, }); } } } else { // Just run with args match command.output() { Ok(output) => output, Err(e) => { return Err(CommandResult { stdout: String::new(), stderr: e.to_string(), exit_code: -1, }); } } }; let exit_code = 
output.status.code().unwrap_or(-1); // Here we get stdout and stderr as Strings let stdout = String::from_utf8_lossy(&output.stdout).to_string(); let stderr = String::from_utf8_lossy(&output.stderr).to_string(); let stderr = stderr .split_once(':') .map(|x| x.1) .unwrap_or("") .trim() .to_string(); if output.status.success() || !check_gnu { Ok(CommandResult { stdout, stderr, exit_code, }) } else { Err(CommandResult { stdout, stderr, exit_code, }) } } /// Compare results from two different implementations of a command. /// /// # Arguments /// * `test_type` - The command. /// * `input` - The input provided to the command. /// * `rust_result` - The result of running the command with the Rust implementation. /// * `gnu_result` - The result of running the command with the GNU implementation. /// * `fail_on_stderr_diff` - Whether to fail the test if there is a difference in stderr output. pub fn compare_result( test_type: &str, input: &str, pipe_input: Option<&str>, rust_result: &CommandResult, gnu_result: &CommandResult, fail_on_stderr_diff: bool, ) { print_section(format!("Compare result for: {test_type} {input}")); if let Some(pipe) = pipe_input { println!("Pipe: {pipe}"); } let mut discrepancies = Vec::new(); let mut should_panic = false; if rust_result.stdout.trim() != gnu_result.stdout.trim() { discrepancies.push("stdout differs"); println!("Rust stdout:"); print_or_empty(rust_result.stdout.as_str()); println!("GNU stdout:"); print_or_empty(gnu_result.stdout.as_ref()); print_diff(&rust_result.stdout, &gnu_result.stdout); should_panic = true; } if rust_result.stderr.trim() != gnu_result.stderr.trim() { discrepancies.push("stderr differs"); println!("Rust stderr:"); print_or_empty(rust_result.stderr.as_str()); println!("GNU stderr:"); print_or_empty(gnu_result.stderr.as_str()); print_diff(&rust_result.stderr, &gnu_result.stderr); if fail_on_stderr_diff { should_panic = true; } } if rust_result.exit_code != gnu_result.exit_code { discrepancies.push("exit code 
differs"); println!( "Different exit code: (Rust: {}, GNU: {})", rust_result.exit_code, gnu_result.exit_code ); should_panic = true; } if discrepancies.is_empty() { print_end_with_status("Same behavior", true); } else { print_with_style( format!("Discrepancies detected: {}", discrepancies.join(", ")), Style::new().red(), ); if should_panic { print_end_with_status( format!("Test failed and will panic for: {test_type} {input}"), false, ); panic!("Test failed for: {test_type} {input}"); } else { print_end_with_status( format!("Test completed with discrepancies for: {test_type} {input}"), false, ); } } println!(); } pub fn generate_random_string(max_length: usize) -> String { let mut rng = rand::rng(); let valid_utf8: Vec = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" .chars() .collect(); let invalid_utf8 = [0xC3, 0x28]; // Invalid UTF-8 sequence let mut result = String::new(); for _ in 0..rng.random_range(0..=max_length) { if rng.random_bool(0.9) { let ch = valid_utf8.choose(&mut rng).unwrap(); result.push(*ch); } else { let ch = invalid_utf8.choose(&mut rng).unwrap(); if let Some(c) = char::from_u32(*ch as u32) { result.push(c); } } } result } #[allow(dead_code)] pub fn generate_random_file() -> Result { let mut rng = rand::rng(); let file_name: String = (0..10) .map(|_| rng.random_range(b'a'..=b'z') as char) .collect(); let mut file_path = temp_dir(); file_path.push(file_name); let mut file = File::create(&file_path)?; let content_length = rng.random_range(10..1000); let content: String = (0..content_length) .map(|_| (rng.random_range(b' '..=b'~') as char)) .collect(); file.write_all(content.as_bytes())?; Ok(file_path.to_str().unwrap().to_string()) } #[allow(dead_code)] pub fn replace_fuzz_binary_name(cmd: &str, result: &mut CommandResult) { let fuzz_bin_name = format!("fuzz/target/x86_64-unknown-linux-gnu/release/fuzz_{cmd}"); result.stdout = result.stdout.replace(&fuzz_bin_name, cmd); result.stderr = result.stderr.replace(&fuzz_bin_name, cmd); 
} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/fuzz/uufuzz/src/pretty_print.rs000066400000000000000000000034761504311601400272240ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use std::fmt; use console::{Style, style}; use similar::TextDiff; pub fn print_section(s: S) { println!("{}", style(format!("=== {s}")).bold()); } pub fn print_subsection(s: S) { println!("{}", style(format!("--- {s}")).bright()); } #[allow(dead_code)] pub fn print_test_begin(msg: S) { println!( "{} {} {}", style("===").bold(), // Kind of gray style("TEST").black().on_yellow().bold(), style(msg).bold() ); } pub fn print_end_with_status(msg: S, ok: bool) { let ok = if ok { style(" OK ").black().on_green().bold() } else { style(" KO ").black().on_red().bold() }; println!( "{} {ok} {}", style("===").bold(), // Kind of gray style(msg).bold() ); } pub fn print_or_empty(s: &str) { let to_print = if s.is_empty() { "(empty)" } else { s }; println!("{}", style(to_print).dim()); } pub fn print_with_style(msg: S, style: Style) { println!("{}", style.apply_to(msg)); } pub fn print_diff(got: &str, expected: &str) { let diff = TextDiff::from_lines(got, expected); print_subsection("START diff"); for change in diff.iter_all_changes() { let (sign, style) = match change.tag() { similar::ChangeTag::Equal => (" ", Style::new().dim()), similar::ChangeTag::Delete => ("-", Style::new().red()), similar::ChangeTag::Insert => ("+", Style::new().green()), }; print!("{}{}", style.apply_to(sign).bold(), style.apply_to(change)); } print_subsection("END diff"); println!(); } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/oranda.json000066400000000000000000000004621504311601400231050ustar00rootroot00000000000000{ "project": { "name": "uutils coreutils" }, "build": { "path_prefix": "coreutils" }, "components": { "changelog": { "read_changelog_file": 
false } }, "styles": { "theme": "light", "logo": "docs/src/logo.svg", "additional_css": ["docs/src/oranda.css"] } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/renovate.json000066400000000000000000000000541504311601400234610ustar00rootroot00000000000000{ "extends": [ "config:recommended" ] } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/000077500000000000000000000000001504311601400215335ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/bin/000077500000000000000000000000001504311601400223035ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/bin/coreutils.rs000066400000000000000000000303671504311601400246730ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore manpages mangen prefixcat testcat use clap::{Arg, Command}; use clap_complete::Shell; use std::cmp; use std::ffi::OsStr; use std::ffi::OsString; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process; use uucore::display::Quotable; use uucore::locale; const VERSION: &str = env!("CARGO_PKG_VERSION"); include!(concat!(env!("OUT_DIR"), "/uutils_map.rs")); fn usage(utils: &UtilityMap, name: &str) { println!("{name} {VERSION} (multi-call binary)\n"); println!("Usage: {name} [function [arguments...]]"); println!(" {name} --list\n"); println!("Options:"); println!(" --list lists all defined functions, one per row\n"); println!("Currently defined functions:\n"); #[allow(clippy::map_clone)] let mut utils: Vec<&str> = utils.keys().map(|&s| s).collect(); utils.sort_unstable(); let display_list = utils.join(", "); let width = cmp::min(textwrap::termwidth(), 100) - 4 * 2; // (opinion/heuristic) max 100 chars wide with 4 character side indentions println!( "{}", textwrap::indent(&textwrap::fill(&display_list, width), " ") ); } /// # Panics 
/// Panics if the binary path cannot be determined fn binary_path(args: &mut impl Iterator) -> PathBuf { match args.next() { Some(ref s) if !s.is_empty() => PathBuf::from(s), _ => std::env::current_exe().unwrap(), } } fn name(binary_path: &Path) -> Option<&str> { binary_path.file_stem()?.to_str() } fn get_canonical_util_name(util_name: &str) -> &str { match util_name { // uu_test aliases - '[' is an alias for test "[" => "test", // hashsum aliases - all these hash commands are aliases for hashsum "md5sum" | "sha1sum" | "sha224sum" | "sha256sum" | "sha384sum" | "sha512sum" | "sha3sum" | "sha3-224sum" | "sha3-256sum" | "sha3-384sum" | "sha3-512sum" | "shake128sum" | "shake256sum" | "b2sum" | "b3sum" => "hashsum", "dir" => "ls", // dir is an alias for ls // Default case - return the util name as is _ => util_name, } } fn find_prefixed_util<'a>( binary_name: &str, mut util_keys: impl Iterator, ) -> Option<&'a str> { util_keys.find(|util| { binary_name.ends_with(*util) && binary_name.len() > util.len() // Ensure there's actually a prefix && !binary_name[..binary_name.len() - (*util).len()] .ends_with(char::is_alphanumeric) }) } fn setup_localization_or_exit(util_name: &str) { locale::setup_localization(get_canonical_util_name(util_name)).unwrap_or_else(|err| { match err { uucore::locale::LocalizationError::ParseResource { error: err_msg, snippet, } => eprintln!("Localization parse error at {snippet}: {err_msg}"), other => eprintln!("Could not init the localization system: {other}"), } process::exit(99) }); } #[allow(clippy::cognitive_complexity)] fn main() { uucore::panic::mute_sigpipe_panic(); let utils = util_map(); let mut args = uucore::args_os(); let binary = binary_path(&mut args); let binary_as_util = name(&binary).unwrap_or_else(|| { usage(&utils, ""); process::exit(0); }); // binary name equals util name? 
if let Some(&(uumain, _)) = utils.get(binary_as_util) { setup_localization_or_exit(binary_as_util); process::exit(uumain(vec![binary.into()].into_iter().chain(args))); } // binary name equals prefixed util name? // * prefix/stem may be any string ending in a non-alphanumeric character // For example, if the binary is named `uu_test`, it will match `test` as a utility. let util_name = if let Some(util) = find_prefixed_util(binary_as_util, utils.keys().copied()) { // prefixed util => replace 0th (aka, executable name) argument Some(OsString::from(util)) } else { // unmatched binary name => regard as multi-binary container and advance argument list uucore::set_utility_is_second_arg(); args.next() }; // 0th argument equals util name? if let Some(util_os) = util_name { fn not_found(util: &OsStr) -> ! { println!("{}: function/utility not found", util.maybe_quote()); process::exit(1); } let Some(util) = util_os.to_str() else { not_found(&util_os) }; match util { "completion" => gen_completions(args, &utils), "manpage" => gen_manpage(args, &utils), "--list" => { let mut utils: Vec<_> = utils.keys().collect(); utils.sort(); for util in utils { println!("{util}"); } process::exit(0); } "--version" | "-V" => { println!("{binary_as_util} {VERSION} (multi-call binary)"); process::exit(0); } // Not a special command: fallthrough to calling a util _ => {} } match utils.get(util) { Some(&(uumain, _)) => { // TODO: plug the deactivation of the translation // and load the English strings directly at compilation time in the // binary to avoid the load of the flt // Could be something like: // #[cfg(not(feature = "only_english"))] setup_localization_or_exit(util); process::exit(uumain(vec![util_os].into_iter().chain(args))); } None => { if util == "--help" || util == "-h" { // see if they want help on a specific util if let Some(util_os) = args.next() { let Some(util) = util_os.to_str() else { not_found(&util_os) }; match utils.get(util) { Some(&(uumain, _)) => { let code = uumain( 
vec![util_os, OsString::from("--help")] .into_iter() .chain(args), ); io::stdout().flush().expect("could not flush stdout"); process::exit(code); } None => not_found(&util_os), } } usage(&utils, binary_as_util); process::exit(0); } else { not_found(&util_os); } } } } else { // no arguments provided usage(&utils, binary_as_util); process::exit(0); } } /// Prints completions for the utility in the first parameter for the shell in the second parameter to stdout /// # Panics /// Panics if the utility map is empty fn gen_completions( args: impl Iterator, util_map: &UtilityMap, ) -> ! { let all_utilities: Vec<_> = std::iter::once("coreutils") .chain(util_map.keys().copied()) .collect(); let matches = Command::new("completion") .about("Prints completions to stdout") .arg( Arg::new("utility") .value_parser(clap::builder::PossibleValuesParser::new(all_utilities)) .required(true), ) .arg( Arg::new("shell") .value_parser(clap::builder::EnumValueParser::::new()) .required(true), ) .get_matches_from(std::iter::once(OsString::from("completion")).chain(args)); let utility = matches.get_one::("utility").unwrap(); let shell = *matches.get_one::("shell").unwrap(); let mut command = if utility == "coreutils" { gen_coreutils_app(util_map) } else { util_map.get(utility).unwrap().1() }; let bin_name = std::env::var("PROG_PREFIX").unwrap_or_default() + utility; clap_complete::generate(shell, &mut command, bin_name, &mut io::stdout()); io::stdout().flush().unwrap(); process::exit(0); } /// Generate the manpage for the utility in the first parameter /// # Panics /// Panics if the utility map is empty fn gen_manpage( args: impl Iterator, util_map: &UtilityMap, ) -> ! 
{ let all_utilities: Vec<_> = std::iter::once("coreutils") .chain(util_map.keys().copied()) .collect(); let matches = Command::new("manpage") .about("Prints manpage to stdout") .arg( Arg::new("utility") .value_parser(clap::builder::PossibleValuesParser::new(all_utilities)) .required(true), ) .get_matches_from(std::iter::once(OsString::from("manpage")).chain(args)); let utility = matches.get_one::("utility").unwrap(); let command = if utility == "coreutils" { gen_coreutils_app(util_map) } else { setup_localization_or_exit(utility); util_map.get(utility).unwrap().1() }; let man = clap_mangen::Man::new(command); man.render(&mut io::stdout()) .expect("Man page generation failed"); io::stdout().flush().unwrap(); process::exit(0); } /// # Panics /// Panics if the utility map is empty fn gen_coreutils_app(util_map: &UtilityMap) -> Command { let mut command = Command::new("coreutils"); for (name, (_, sub_app)) in util_map { // Recreate a small subcommand with only the relevant info // (name & short description) let about = sub_app() .get_about() .expect("Could not get the 'about'") .to_string(); let sub_app = Command::new(name).about(about); command = command.subcommand(sub_app); } command } #[cfg(test)] mod tests { use super::*; use std::path::Path; #[test] fn test_get_canonical_util_name() { // Test a few key aliases assert_eq!(get_canonical_util_name("["), "test"); assert_eq!(get_canonical_util_name("md5sum"), "hashsum"); assert_eq!(get_canonical_util_name("dir"), "ls"); // Test passthrough case assert_eq!(get_canonical_util_name("cat"), "cat"); } #[test] fn test_name() { // Test normal executable name assert_eq!(name(Path::new("/usr/bin/ls")), Some("ls")); assert_eq!(name(Path::new("cat")), Some("cat")); assert_eq!( name(Path::new("./target/debug/coreutils")), Some("coreutils") ); // Test with extensions assert_eq!(name(Path::new("program.exe")), Some("program")); assert_eq!(name(Path::new("/path/to/utility.bin")), Some("utility")); // Test edge cases 
assert_eq!(name(Path::new("")), None); assert_eq!(name(Path::new("/")), None); } #[test] fn test_find_prefixed_util() { let utils = ["test", "cat", "ls", "cp"]; // Test exact prefixed matches assert_eq!( find_prefixed_util("uu_test", utils.iter().copied()), Some("test") ); assert_eq!( find_prefixed_util("my-cat", utils.iter().copied()), Some("cat") ); assert_eq!( find_prefixed_util("prefix_ls", utils.iter().copied()), Some("ls") ); // Test non-alphanumeric separator requirement assert_eq!(find_prefixed_util("prefixcat", utils.iter().copied()), None); // no separator assert_eq!(find_prefixed_util("testcat", utils.iter().copied()), None); // no separator // Test no match assert_eq!(find_prefixed_util("unknown", utils.iter().copied()), None); assert_eq!(find_prefixed_util("", utils.iter().copied()), None); // Test exact util name (should not match as prefixed) assert_eq!(find_prefixed_util("test", utils.iter().copied()), None); assert_eq!(find_prefixed_util("cat", utils.iter().copied()), None); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/bin/uudoc.rs000066400000000000000000000311201504311601400237650ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore tldr uuhelp use clap::Command; use std::collections::HashMap; use std::ffi::OsString; use std::fs::File; use std::io::{self, Read, Seek, Write}; use zip::ZipArchive; include!(concat!(env!("OUT_DIR"), "/uutils_map.rs")); /// # Errors /// Returns an error if the writer fails. 
#[allow(clippy::too_many_lines)] fn main() -> io::Result<()> { let mut tldr_zip = File::open("docs/tldr.zip") .ok() .and_then(|f| ZipArchive::new(f).ok()); if tldr_zip.is_none() { println!("Warning: No tldr archive found, so the documentation will not include examples."); println!( "To include examples in the documentation, download the tldr archive and put it in the docs/ folder." ); println!(); println!(" curl https://tldr.sh/assets/tldr.zip -o docs/tldr.zip"); println!(); } let utils = util_map::>>(); match std::fs::create_dir("docs/src/utils/") { Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => Ok(()), x => x, }?; println!("Writing initial info to SUMMARY.md"); let mut summary = File::create("docs/src/SUMMARY.md")?; let _ = write!( summary, "# Summary\n\ \n\ [Introduction](index.md)\n\ * [Installation](installation.md)\n\ * [Build from source](build.md)\n\ * [Platform support](platforms.md)\n\ * [Contributing](CONTRIBUTING.md)\n\ \t* [Development](DEVELOPMENT.md)\n\ \t* [Code of Conduct](CODE_OF_CONDUCT.md)\n\ * [GNU test coverage](test_coverage.md)\n\ * [Extensions](extensions.md)\n\ \n\ # Reference\n\ * [Multi-call binary](multicall.md)\n", ); println!("Gathering utils per platform"); let utils_per_platform = { let mut map = HashMap::new(); for platform in ["unix", "macos", "windows", "unix_android"] { let platform_utils: Vec = String::from_utf8( std::process::Command::new("./util/show-utils.sh") .arg(format!("--features=feat_os_{platform}")) .output()? .stdout, ) .unwrap() .trim() .split(' ') .map(ToString::to_string) .collect(); map.insert(platform, platform_utils); } // Linux is a special case because it can support selinux let platform_utils: Vec = String::from_utf8( std::process::Command::new("./util/show-utils.sh") .arg("--features=feat_os_unix feat_selinux") .output()? 
.stdout, ) .unwrap() .trim() .split(' ') .map(ToString::to_string) .collect(); map.insert("linux", platform_utils); map }; let mut utils = utils.entries().collect::>(); utils.sort(); println!("Writing util per platform table"); { let mut platform_table_file = File::create("docs/src/platform_table.md").unwrap(); // sum, cksum, b2sum, etc. are all available on all platforms, but not in the data structure // otherwise, we check the map for the util name. let check_supported = |name: &str, platform: &str| { if name.ends_with("sum") || utils_per_platform[platform].iter().any(|u| u == name) { "✓" } else { " " } }; writeln!( platform_table_file, "| util | Linux | macOS | Windows | FreeBSD | Android |\n\ | ---------------- | ----- | ----- | ------- | ------- | ------- |" )?; for &(&name, _) in &utils { if name == "[" { continue; } // The alignment is not necessary, but makes the output a bit more // pretty when viewed as plain markdown. writeln!( platform_table_file, "| {:<16} | {:<5} | {:<5} | {:<7} | {:<7} | {:<7} |", format!("**{name}**"), check_supported(name, "linux"), check_supported(name, "macos"), check_supported(name, "windows"), check_supported(name, "unix"), check_supported(name, "unix_android"), )?; } } println!("Writing to utils"); for (&name, (_, command)) in utils { if name == "[" { continue; } let p = format!("docs/src/utils/{name}.md"); let markdown = File::open(format!("src/uu/{name}/{name}.md")) .and_then(|mut f: File| { let mut s = String::new(); f.read_to_string(&mut s)?; Ok(s) }) .ok(); if let Ok(f) = File::create(&p) { MDWriter { w: Box::new(f), command: command(), name, tldr_zip: &mut tldr_zip, utils_per_platform: &utils_per_platform, markdown, } .markdown()?; println!("Wrote to '{p}'"); } else { println!("Error writing to {p}"); } writeln!(summary, "* [{name}](utils/{name}.md)")?; } Ok(()) } struct MDWriter<'a, 'b> { w: Box, command: Command, name: &'a str, tldr_zip: &'b mut Option>, utils_per_platform: &'b HashMap<&'b str, Vec>, markdown: Option, 
} impl MDWriter<'_, '_> { /// # Errors /// Returns an error if the writer fails. fn markdown(&mut self) -> io::Result<()> { write!(self.w, "# {}\n\n", self.name)?; self.additional()?; self.usage()?; self.about()?; self.options()?; self.after_help()?; self.examples() } /// # Errors /// Returns an error if the writer fails. fn additional(&mut self) -> io::Result<()> { writeln!(self.w, "
")?; self.platforms()?; self.version()?; writeln!(self.w, "
") } /// # Errors /// Returns an error if the writer fails. fn platforms(&mut self) -> io::Result<()> { writeln!(self.w, "
")?; for (feature, icon) in [ ("linux", "linux"), // freebsd is disabled for now because mdbook does not use font-awesome 5 yet. // ("unix", "freebsd"), ("macos", "apple"), ("windows", "windows"), ] { if self.name.contains("sum") || self.utils_per_platform[feature] .iter() .any(|u| u == self.name) { writeln!(self.w, "")?; } } writeln!(self.w, "
")?; Ok(()) } /// # Errors /// Returns an error if the writer fails. /// # Panics /// Panics if the version is not found. fn version(&mut self) -> io::Result<()> { writeln!( self.w, "
v{}
", self.command.render_version().split_once(' ').unwrap().1 ) } /// # Errors /// Returns an error if the writer fails. fn usage(&mut self) -> io::Result<()> { if let Some(markdown) = &self.markdown { let usage = uuhelp_parser::parse_usage(markdown); let usage = usage.replace("{}", self.name); writeln!(self.w, "\n```")?; writeln!(self.w, "{usage}")?; writeln!(self.w, "```") } else { Ok(()) } } /// # Errors /// Returns an error if the writer fails. fn about(&mut self) -> io::Result<()> { if let Some(markdown) = &self.markdown { writeln!(self.w, "{}", uuhelp_parser::parse_about(markdown)) } else { Ok(()) } } /// # Errors /// Returns an error if the writer fails. fn after_help(&mut self) -> io::Result<()> { if let Some(markdown) = &self.markdown { if let Some(after_help) = uuhelp_parser::parse_section("after help", markdown) { return writeln!(self.w, "\n\n{after_help}"); } } Ok(()) } /// # Errors /// Returns an error if the writer fails. fn examples(&mut self) -> io::Result<()> { if let Some(zip) = self.tldr_zip { let content = if let Some(f) = get_zip_content(zip, &format!("pages/common/{}.md", self.name)) { f } else if let Some(f) = get_zip_content(zip, &format!("pages/linux/{}.md", self.name)) { f } else { println!( "Warning: Could not find tldr examples for page '{}'", self.name ); return Ok(()); }; writeln!(self.w, "## Examples")?; writeln!(self.w)?; for line in content.lines().skip_while(|l| !l.starts_with('-')) { if let Some(l) = line.strip_prefix("- ") { writeln!(self.w, "{l}")?; } else if line.starts_with('`') { writeln!(self.w, "```shell\n{}\n```", line.trim_matches('`'))?; } else if line.is_empty() { writeln!(self.w)?; } else { println!("Not sure what to do with this line:"); println!("{line}"); } } writeln!(self.w)?; writeln!( self.w, "> The examples are provided by the [tldr-pages project](https://tldr.sh) under the [CC BY 4.0 License](https://github.com/tldr-pages/tldr/blob/main/LICENSE.md)." 
)?; writeln!(self.w, ">")?; writeln!( self.w, "> Please note that, as uutils is a work in progress, some examples might fail." )?; } Ok(()) } /// # Errors /// Returns an error if the writer fails. fn options(&mut self) -> io::Result<()> { writeln!(self.w, "

Options

")?; write!(self.w, "
")?; for arg in self.command.get_arguments() { write!(self.w, "
")?; let mut first = true; for l in arg.get_long_and_visible_aliases().unwrap_or_default() { if first { first = false; } else { write!(self.w, ", ")?; } write!(self.w, "")?; write!(self.w, "--{l}")?; if let Some(names) = arg.get_value_names() { write!( self.w, "={}", names .iter() .map(|x| format!("<{x}>")) .collect::>() .join(" ") )?; } write!(self.w, "")?; } for s in arg.get_short_and_visible_aliases().unwrap_or_default() { if first { first = false; } else { write!(self.w, ", ")?; } write!(self.w, "")?; write!(self.w, "-{s}")?; if let Some(names) = arg.get_value_names() { write!( self.w, " {}", names .iter() .map(|x| format!("<{x}>")) .collect::>() .join(" ") )?; } write!(self.w, "")?; } writeln!(self.w, "
")?; writeln!( self.w, "
\n\n{}\n\n
", arg.get_help() .unwrap_or_default() .to_string() .replace('\n', "
") )?; } writeln!(self.w, "
\n") } } /// # Panics /// Panics if the archive is not ok fn get_zip_content(archive: &mut ZipArchive, name: &str) -> Option { let mut s = String::new(); archive.by_name(name).ok()?.read_to_string(&mut s).unwrap(); Some(s) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/000077500000000000000000000000001504311601400221645ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/000077500000000000000000000000001504311601400231015ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/Cargo.toml000066400000000000000000000011341504311601400250300ustar00rootroot00000000000000[package] name = "uu_arch" description = "arch ~ (uutils) display machine architecture" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/arch" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/arch.rs" [dependencies] platform-info = { workspace = true } clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "arch" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/LICENSE000077700000000000000000000000001504311601400257472../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/locales/000077500000000000000000000000001504311601400245235ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/locales/en-US.ftl000066400000000000000000000003421504311601400261600ustar00rootroot00000000000000# Error message when system architecture information cannot be retrieved cannot-get-system = cannot get system name arch-about = Display machine architecture arch-after-help = Determine architecture name for current machine. 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/locales/fr-FR.ftl000066400000000000000000000004121504311601400261430ustar00rootroot00000000000000# Error message when system architecture information cannot be retrieved cannot-get-system = impossible d'obtenir le nom du système arch-about = Afficher l'architecture de la machine arch-after-help = Déterminer le nom de l'architecture pour la machine actuelle. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/src/000077500000000000000000000000001504311601400236705ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/src/arch.rs000066400000000000000000000015001504311601400251470ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use platform_info::*; use clap::Command; use uucore::error::{UResult, USimpleError}; use uucore::translate; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { uu_app().try_get_matches_from(args)?; let uts = PlatformInfo::new().map_err(|_e| USimpleError::new(1, translate!("cannot-get-system")))?; println!("{}", uts.machine().to_string_lossy().trim()); Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("arch-about")) .after_help(translate!("arch-after-help")) .infer_long_args(true) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/arch/src/main.rs000066400000000000000000000000271504311601400251610ustar00rootroot00000000000000uucore::bin!(uu_arch); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/000077500000000000000000000000001504311601400232435ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/Cargo.toml000066400000000000000000000011431504311601400251720ustar00rootroot00000000000000[package] name = "uu_base32" 
description = "base32 ~ (uutils) decode/encode input (base32-encoding)" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/base32" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/base32.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["encoding"] } fluent = { workspace = true } [[bin]] name = "base32" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/LICENSE000077700000000000000000000000001504311601400261112../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/locales/000077500000000000000000000000001504311601400246655ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/locales/en-US.ftl000066400000000000000000000051741504311601400263320ustar00rootroot00000000000000# This file contains base32, base64 and basenc strings # This is because we have some common strings for all these tools # and it is easier to have a single file than one file for program # and loading several bundles at the same time. base32-about = encode/decode data and print to standard output With no FILE, or when FILE is -, read standard input. The data are encoded as described for the base32 alphabet in RFC 4648. When decoding, the input may contain newlines in addition to the bytes of the formal base32 alphabet. Use --ignore-garbage to attempt to recover from any other non-alphabet bytes in the encoded stream. base32-usage = base32 [OPTION]... [FILE] base64-about = encode/decode data and print to standard output With no FILE, or when FILE is -, read standard input. The data are encoded as described for the base64 alphabet in RFC 3548. 
When decoding, the input may contain newlines in addition to the bytes of the formal base64 alphabet. Use --ignore-garbage to attempt to recover from any other non-alphabet bytes in the encoded stream. base64-usage = base64 [OPTION]... [FILE] basenc-about = Encode/decode data and print to standard output With no FILE, or when FILE is -, read standard input. When decoding, the input may contain newlines in addition to the bytes of the formal alphabet. Use --ignore-garbage to attempt to recover from any other non-alphabet bytes in the encoded stream. basenc-usage = basenc [OPTION]... [FILE] # Help messages for encoding formats basenc-help-base64 = same as 'base64' program basenc-help-base64url = file- and url-safe base64 basenc-help-base32 = same as 'base32' program basenc-help-base32hex = extended hex alphabet base32 basenc-help-base16 = hex encoding basenc-help-base2lsbf = bit string with least significant bit (lsb) first basenc-help-base2msbf = bit string with most significant bit (msb) first basenc-help-z85 = ascii85-like encoding; when encoding, input length must be a multiple of 4; when decoding, input length must be a multiple of 5 # Error messages basenc-error-missing-encoding-type = missing encoding type # Shared base_common error messages (used by base32, base64, basenc) base-common-extra-operand = extra operand {$operand} base-common-no-such-file = {$file}: No such file or directory base-common-invalid-wrap-size = invalid wrap size: {$size} base-common-read-error = read error: {$error} # Shared base_common help messages base-common-help-decode = decode data base-common-help-ignore-garbage = when decoding, ignore non-alphabetic characters base-common-help-wrap = wrap encoded lines after COLS character (default {$default}, 0 to disable wrapping) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/locales/fr-FR.ftl000066400000000000000000000055441504311601400263200ustar00rootroot00000000000000base32-about = encoder/décoder les données et les 
imprimer sur la sortie standard Sans FICHIER, ou quand FICHIER est -, lire l'entrée standard. Les données sont encodées comme décrit pour l'alphabet base32 dans RFC 4648. Lors du décodage, l'entrée peut contenir des retours à la ligne en plus des octets de l'alphabet base32 formel. Utilisez --ignore-garbage pour tenter de récupérer des autres octets non-alphabétiques dans le flux encodé. base32-usage = base32 [OPTION]... [FICHIER] base64-about = encoder/décoder les données et les imprimer sur la sortie standard Sans FICHIER, ou quand FICHIER est -, lire l'entrée standard. Les données sont encodées comme décrit pour l'alphabet base64 dans RFC 3548. Lors du décodage, l'entrée peut contenir des retours à la ligne en plus des octets de l'alphabet base64 formel. Utilisez --ignore-garbage pour tenter de récupérer des autres octets non-alphabétiques dans le flux encodé. base64-usage = base64 [OPTION]... [FICHIER] basenc-about = Encoder/décoder des données et afficher vers la sortie standard Sans FICHIER, ou lorsque FICHIER est -, lire l'entrée standard. Lors du décodage, l'entrée peut contenir des nouvelles lignes en plus des octets de l'alphabet formel. Utilisez --ignore-garbage pour tenter de récupérer depuis tout autre octet non-alphabétique dans le flux encodé. basenc-usage = basenc [OPTION]... 
[FICHIER] # Messages d'aide pour les formats d'encodage basenc-help-base64 = identique au programme 'base64' basenc-help-base64url = base64 sécurisé pour fichiers et URLs basenc-help-base32 = identique au programme 'base32' basenc-help-base32hex = base32 avec alphabet hexadécimal étendu basenc-help-base16 = encodage hexadécimal basenc-help-base2lsbf = chaîne de bits avec le bit de poids faible (lsb) en premier basenc-help-base2msbf = chaîne de bits avec le bit de poids fort (msb) en premier basenc-help-z85 = encodage de type ascii85 ; lors de l'encodage, la longueur d'entrée doit être un multiple de 4 ; lors du décodage, la longueur d'entrée doit être un multiple de 5 # Messages d'erreur basenc-error-missing-encoding-type = type d'encodage manquant # Messages d'erreur partagés de base_common (utilisés par base32, base64, basenc) base-common-extra-operand = opérande supplémentaire {$operand} base-common-no-such-file = {$file} : Aucun fichier ou répertoire de ce type base-common-invalid-wrap-size = taille de retour à la ligne invalide : {$size} base-common-read-error = erreur de lecture : {$error} # Messages d'aide partagés de base_common base-common-help-decode = décoder les données base-common-help-ignore-garbage = lors du décodage, ignorer les caractères non-alphabétiques base-common-help-wrap = retour à la ligne des lignes encodées après COLS caractères (par défaut {$default}, 0 pour désactiver le retour à la ligne) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/src/000077500000000000000000000000001504311601400240325ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/src/base32.rs000066400000000000000000000017051504311601400254620ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
pub mod base_common; use clap::Command; use uucore::{encoding::Format, error::UResult, translate}; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let format = Format::Base32; let (about, usage) = get_info(); let config = base_common::parse_base_cmd_args(args, about, usage)?; let mut input = base_common::get_input(&config)?; base_common::handle_input(&mut input, format, config) } pub fn uu_app() -> Command { let (about, usage) = get_info(); base_common::base_app(about, usage) } fn get_info() -> (&'static str, &'static str) { let about: &'static str = Box::leak(translate!("base32-about").into_boxed_str()); let usage: &'static str = Box::leak(translate!("base32-usage").into_boxed_str()); (about, usage) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/src/base_common.rs000066400000000000000000000716401504311601400266720ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore hexupper lsbf msbf unpadded nopad aGVsbG8sIHdvcmxkIQ use clap::{Arg, ArgAction, Command}; use std::fs::File; use std::io::{self, ErrorKind, Read, Seek, SeekFrom}; use std::path::{Path, PathBuf}; use uucore::display::Quotable; use uucore::encoding::{ BASE2LSBF, BASE2MSBF, Format, Z85Wrapper, for_base_common::{BASE32, BASE32HEX, BASE64, BASE64_NOPAD, BASE64URL, HEXUPPER_PERMISSIVE}, }; use uucore::encoding::{EncodingWrapper, SupportsFastDecodeAndEncode}; use uucore::error::{FromIo, UResult, USimpleError, UUsageError}; use uucore::format_usage; use uucore::translate; pub const BASE_CMD_PARSE_ERROR: i32 = 1; /// Encoded output will be formatted in lines of this length (the last line can be shorter) /// /// Other implementations default to 76 /// /// This default is only used if no "-w"/"--wrap" argument is passed pub const WRAP_DEFAULT: usize = 76; pub struct Config { pub decode: bool, pub ignore_garbage: bool, pub wrap_cols: Option, pub to_read: Option, } pub mod options { pub static DECODE: &str = "decode"; pub static WRAP: &str = "wrap"; pub static IGNORE_GARBAGE: &str = "ignore-garbage"; pub static FILE: &str = "file"; } impl Config { pub fn from(options: &clap::ArgMatches) -> UResult { let to_read = match options.get_many::(options::FILE) { Some(mut values) => { let name = values.next().unwrap(); if let Some(extra_op) = values.next() { return Err(UUsageError::new( BASE_CMD_PARSE_ERROR, translate!("base-common-extra-operand", "operand" => extra_op.quote()), )); } if name == "-" { None } else { let path = Path::new(name); if !path.exists() { return Err(USimpleError::new( BASE_CMD_PARSE_ERROR, translate!("base-common-no-such-file", "file" => path.maybe_quote()), )); } Some(path.to_owned()) } } None => None, }; let wrap_cols = options .get_one::(options::WRAP) .map(|num| { num.parse::().map_err(|_| { USimpleError::new( BASE_CMD_PARSE_ERROR, translate!("base-common-invalid-wrap-size", "size" => num.quote()), ) }) }) .transpose()?; Ok(Self { 
decode: options.get_flag(options::DECODE), ignore_garbage: options.get_flag(options::IGNORE_GARBAGE), wrap_cols, to_read, }) } } pub fn parse_base_cmd_args( args: impl uucore::Args, about: &'static str, usage: &str, ) -> UResult { let command = base_app(about, usage); Config::from(&command.try_get_matches_from(args)?) } pub fn base_app(about: &'static str, usage: &str) -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(about) .override_usage(format_usage(usage)) .infer_long_args(true) // Format arguments. .arg( Arg::new(options::DECODE) .short('d') .visible_short_alias('D') .long(options::DECODE) .help(translate!("base-common-help-decode")) .action(ArgAction::SetTrue) .overrides_with(options::DECODE), ) .arg( Arg::new(options::IGNORE_GARBAGE) .short('i') .long(options::IGNORE_GARBAGE) .help(translate!("base-common-help-ignore-garbage")) .action(ArgAction::SetTrue) .overrides_with(options::IGNORE_GARBAGE), ) .arg( Arg::new(options::WRAP) .short('w') .long(options::WRAP) .value_name("COLS") .help(translate!("base-common-help-wrap", "default" => WRAP_DEFAULT)) .overrides_with(options::WRAP), ) // "multiple" arguments are used to check whether there is more than one // file passed in. .arg( Arg::new(options::FILE) .index(1) .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath), ) } /// A trait alias for types that implement both `Read` and `Seek`. pub trait ReadSeek: Read + Seek {} /// Automatically implement the `ReadSeek` trait for any type that implements both `Read` and `Seek`. 
impl ReadSeek for T {} pub fn get_input(config: &Config) -> UResult> { match &config.to_read { Some(path_buf) => { // Do not buffer input, because buffering is handled by `fast_decode` and `fast_encode` let file = File::open(path_buf).map_err_context(|| path_buf.maybe_quote().to_string())?; Ok(Box::new(file)) } None => { let mut buffer = Vec::new(); io::stdin().read_to_end(&mut buffer)?; Ok(Box::new(io::Cursor::new(buffer))) } } } /// Determines if the input buffer ends with padding ('=') after trimming trailing whitespace. fn has_padding(input: &mut R) -> UResult { let mut buf = Vec::new(); input .read_to_end(&mut buf) .map_err(|err| USimpleError::new(1, format_read_error(err.kind())))?; // Reverse iterator and skip trailing whitespace without extra collections let has_padding = buf .iter() .rfind(|&&byte| !byte.is_ascii_whitespace()) .is_some_and(|&byte| byte == b'='); input.seek(SeekFrom::Start(0))?; Ok(has_padding) } pub fn handle_input(input: &mut R, format: Format, config: Config) -> UResult<()> { let has_padding = has_padding(input)?; let supports_fast_decode_and_encode = get_supports_fast_decode_and_encode(format, config.decode, has_padding); let supports_fast_decode_and_encode_ref = supports_fast_decode_and_encode.as_ref(); let mut stdout_lock = io::stdout().lock(); if config.decode { fast_decode::fast_decode( input, &mut stdout_lock, supports_fast_decode_and_encode_ref, config.ignore_garbage, ) } else { fast_encode::fast_encode( input, &mut stdout_lock, supports_fast_decode_and_encode_ref, config.wrap_cols, ) } } pub fn get_supports_fast_decode_and_encode( format: Format, decode: bool, has_padding: bool, ) -> Box { const BASE16_VALID_DECODING_MULTIPLE: usize = 2; const BASE2_VALID_DECODING_MULTIPLE: usize = 8; const BASE32_VALID_DECODING_MULTIPLE: usize = 8; const BASE64_VALID_DECODING_MULTIPLE: usize = 4; const BASE16_UNPADDED_MULTIPLE: usize = 1; const BASE2_UNPADDED_MULTIPLE: usize = 1; const BASE32_UNPADDED_MULTIPLE: usize = 5; const 
BASE64_UNPADDED_MULTIPLE: usize = 3; match format { Format::Base16 => Box::from(EncodingWrapper::new( HEXUPPER_PERMISSIVE, BASE16_VALID_DECODING_MULTIPLE, BASE16_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"0123456789ABCDEFabcdef", )), Format::Base2Lsbf => Box::from(EncodingWrapper::new( BASE2LSBF, BASE2_VALID_DECODING_MULTIPLE, BASE2_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"01", )), Format::Base2Msbf => Box::from(EncodingWrapper::new( BASE2MSBF, BASE2_VALID_DECODING_MULTIPLE, BASE2_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"01", )), Format::Base32 => Box::from(EncodingWrapper::new( BASE32, BASE32_VALID_DECODING_MULTIPLE, BASE32_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567=", )), Format::Base32Hex => Box::from(EncodingWrapper::new( BASE32HEX, BASE32_VALID_DECODING_MULTIPLE, BASE32_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"0123456789ABCDEFGHIJKLMNOPQRSTUV=", )), Format::Base64 => { let alphabet: &[u8] = if has_padding { &b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/="[..] } else { &b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"[..] 
}; let wrapper = if decode && !has_padding { BASE64_NOPAD } else { BASE64 }; Box::from(EncodingWrapper::new( wrapper, BASE64_VALID_DECODING_MULTIPLE, BASE64_UNPADDED_MULTIPLE, alphabet, )) } Format::Base64Url => Box::from(EncodingWrapper::new( BASE64URL, BASE64_VALID_DECODING_MULTIPLE, BASE64_UNPADDED_MULTIPLE, // spell-checker:disable-next-line b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=_-", )), Format::Z85 => Box::from(Z85Wrapper {}), } } pub mod fast_encode { use crate::base_common::{WRAP_DEFAULT, format_read_error}; use std::{ collections::VecDeque, io::{self, ErrorKind, Read, Write}, num::NonZeroUsize, }; use uucore::{ encoding::SupportsFastDecodeAndEncode, error::{UResult, USimpleError}, }; struct LineWrapping { line_length: NonZeroUsize, print_buffer: Vec, } // Start of helper functions fn encode_in_chunks_to_buffer( supports_fast_decode_and_encode: &dyn SupportsFastDecodeAndEncode, encode_in_chunks_of_size: usize, bytes_to_steal: usize, read_buffer: &[u8], encoded_buffer: &mut VecDeque, leftover_buffer: &mut VecDeque, ) -> UResult<()> { let bytes_to_chunk = if bytes_to_steal > 0 { let (stolen_bytes, rest_of_read_buffer) = read_buffer.split_at(bytes_to_steal); leftover_buffer.extend(stolen_bytes); // After appending the stolen bytes to `leftover_buffer`, it should be the right size assert_eq!(leftover_buffer.len(), encode_in_chunks_of_size); // Encode the old unencoded data and the stolen bytes, and add the result to // `encoded_buffer` supports_fast_decode_and_encode .encode_to_vec_deque(leftover_buffer.make_contiguous(), encoded_buffer)?; // Reset `leftover_buffer` leftover_buffer.clear(); rest_of_read_buffer } else { // Do not need to steal bytes from `read_buffer` read_buffer }; let chunks_exact = bytes_to_chunk.chunks_exact(encode_in_chunks_of_size); let remainder = chunks_exact.remainder(); for sl in chunks_exact { assert_eq!(sl.len(), encode_in_chunks_of_size); supports_fast_decode_and_encode.encode_to_vec_deque(sl, 
encoded_buffer)?; } leftover_buffer.extend(remainder); Ok(()) } fn write_without_line_breaks( encoded_buffer: &mut VecDeque, output: &mut dyn Write, is_cleanup: bool, empty_wrap: bool, ) -> io::Result<()> { // TODO // `encoded_buffer` only has to be a VecDeque if line wrapping is enabled // (`make_contiguous` should be a no-op here) // Refactoring could avoid this call output.write_all(encoded_buffer.make_contiguous())?; if is_cleanup { if !empty_wrap { output.write_all(b"\n")?; } } else { encoded_buffer.clear(); } Ok(()) } fn write_with_line_breaks( &mut LineWrapping { ref line_length, ref mut print_buffer, }: &mut LineWrapping, encoded_buffer: &mut VecDeque, output: &mut dyn Write, is_cleanup: bool, ) -> io::Result<()> { let line_length = line_length.get(); let make_contiguous_result = encoded_buffer.make_contiguous(); let chunks_exact = make_contiguous_result.chunks_exact(line_length); let mut bytes_added_to_print_buffer = 0; for sl in chunks_exact { bytes_added_to_print_buffer += sl.len(); print_buffer.extend_from_slice(sl); print_buffer.push(b'\n'); } output.write_all(print_buffer)?; // Remove the bytes that were just printed from `encoded_buffer` drop(encoded_buffer.drain(..bytes_added_to_print_buffer)); if is_cleanup { if encoded_buffer.is_empty() { // Do not write a newline in this case, because two trailing newlines should never be printed } else { // Print the partial line, since this is cleanup and no more data is coming output.write_all(encoded_buffer.make_contiguous())?; output.write_all(b"\n")?; } } else { print_buffer.clear(); } Ok(()) } fn write_to_output( line_wrapping: &mut Option, encoded_buffer: &mut VecDeque, output: &mut dyn Write, is_cleanup: bool, empty_wrap: bool, ) -> io::Result<()> { // Write all data in `encoded_buffer` to `output` if let &mut Some(ref mut li) = line_wrapping { write_with_line_breaks(li, encoded_buffer, output, is_cleanup)?; } else { write_without_line_breaks(encoded_buffer, output, is_cleanup, empty_wrap)?; } Ok(()) } 
// End of helper functions pub fn fast_encode( input: &mut dyn Read, output: &mut dyn Write, supports_fast_decode_and_encode: &dyn SupportsFastDecodeAndEncode, wrap: Option, ) -> UResult<()> { // Based on performance testing const INPUT_BUFFER_SIZE: usize = 32 * 1_024; const ENCODE_IN_CHUNKS_OF_SIZE_MULTIPLE: usize = 1_024; let encode_in_chunks_of_size = supports_fast_decode_and_encode.unpadded_multiple() * ENCODE_IN_CHUNKS_OF_SIZE_MULTIPLE; assert!(encode_in_chunks_of_size > 0); // The "data-encoding" crate supports line wrapping, but not arbitrary line wrapping, only certain widths, so // line wrapping must be handled here. // https://github.com/ia0/data-encoding/blob/4f42ad7ef242f6d243e4de90cd1b46a57690d00e/lib/src/lib.rs#L1710 let mut line_wrapping = match wrap { // Line wrapping is disabled because "-w"/"--wrap" was passed with "0" Some(0) => None, // A custom line wrapping value was passed Some(an) => Some(LineWrapping { line_length: NonZeroUsize::new(an).unwrap(), print_buffer: Vec::::new(), }), // Line wrapping was not set, so the default is used None => Some(LineWrapping { line_length: NonZeroUsize::new(WRAP_DEFAULT).unwrap(), print_buffer: Vec::::new(), }), }; // Start of buffers // Data that was read from `input` let mut input_buffer = vec![0; INPUT_BUFFER_SIZE]; assert!(!input_buffer.is_empty()); // Data that was read from `input` but has not been encoded yet let mut leftover_buffer = VecDeque::::new(); // Encoded data that needs to be written to `output` let mut encoded_buffer = VecDeque::::new(); // End of buffers loop { match input.read(&mut input_buffer) { Ok(bytes_read_from_input) => { if bytes_read_from_input == 0 { break; } // The part of `input_buffer` that was actually filled by the call to `read` let read_buffer = &input_buffer[..bytes_read_from_input]; // How many bytes to steal from `read_buffer` to get `leftover_buffer` to the right size let bytes_to_steal = encode_in_chunks_of_size - leftover_buffer.len(); if bytes_to_steal > 
bytes_read_from_input { // Do not have enough data to encode a chunk, so copy data to `leftover_buffer` and read more leftover_buffer.extend(read_buffer); assert!(leftover_buffer.len() < encode_in_chunks_of_size); continue; } // Encode data in chunks, then place it in `encoded_buffer` encode_in_chunks_to_buffer( supports_fast_decode_and_encode, encode_in_chunks_of_size, bytes_to_steal, read_buffer, &mut encoded_buffer, &mut leftover_buffer, )?; assert!(leftover_buffer.len() < encode_in_chunks_of_size); // Write all data in `encoded_buffer` to `output` write_to_output( &mut line_wrapping, &mut encoded_buffer, output, false, wrap == Some(0), )?; } Err(er) => { let kind = er.kind(); if kind == ErrorKind::Interrupted { // Retry reading continue; } return Err(USimpleError::new(1, format_read_error(kind))); } } } // Cleanup // `input` has finished producing data, so the data remaining in the buffers needs to be encoded and printed { // Encode all remaining unencoded bytes, placing them in `encoded_buffer` supports_fast_decode_and_encode .encode_to_vec_deque(leftover_buffer.make_contiguous(), &mut encoded_buffer)?; // Write all data in `encoded_buffer` to output // `is_cleanup` triggers special cleanup-only logic write_to_output( &mut line_wrapping, &mut encoded_buffer, output, true, wrap == Some(0), )?; } Ok(()) } } pub mod fast_decode { use crate::base_common::format_read_error; use std::io::{self, ErrorKind, Read, Write}; use uucore::{ encoding::SupportsFastDecodeAndEncode, error::{UResult, USimpleError}, }; // Start of helper functions fn alphabet_to_table(alphabet: &[u8], ignore_garbage: bool) -> [bool; 256] { // If `ignore_garbage` is enabled, all characters outside the alphabet are ignored // If it is not enabled, only '\n' and '\r' are ignored if ignore_garbage { // Note: "false" here let mut table = [false; 256]; // Pass through no characters except those in the alphabet for ue in alphabet { let us = usize::from(*ue); // Should not have been set yet 
assert!(!table[us]); table[us] = true; } table } else { // Note: "true" here let mut table = [true; 256]; // Pass through all characters except '\n' and '\r' for ue in [b'\n', b'\r'] { let us = usize::from(ue); // Should not have been set yet assert!(table[us]); table[us] = false; } table } } fn decode_in_chunks_to_buffer( supports_fast_decode_and_encode: &dyn SupportsFastDecodeAndEncode, decode_in_chunks_of_size: usize, bytes_to_steal: usize, read_buffer_filtered: &[u8], decoded_buffer: &mut Vec, leftover_buffer: &mut Vec, ) -> UResult<()> { let bytes_to_chunk = if bytes_to_steal > 0 { let (stolen_bytes, rest_of_read_buffer_filtered) = read_buffer_filtered.split_at(bytes_to_steal); leftover_buffer.extend(stolen_bytes); // After appending the stolen bytes to `leftover_buffer`, it should be the right size assert_eq!(leftover_buffer.len(), decode_in_chunks_of_size); // Decode the old un-decoded data and the stolen bytes, and add the result to // `decoded_buffer` supports_fast_decode_and_encode.decode_into_vec(leftover_buffer, decoded_buffer)?; // Reset `leftover_buffer` leftover_buffer.clear(); rest_of_read_buffer_filtered } else { // Do not need to steal bytes from `read_buffer` read_buffer_filtered }; let chunks_exact = bytes_to_chunk.chunks_exact(decode_in_chunks_of_size); let remainder = chunks_exact.remainder(); for sl in chunks_exact { assert_eq!(sl.len(), decode_in_chunks_of_size); supports_fast_decode_and_encode.decode_into_vec(sl, decoded_buffer)?; } leftover_buffer.extend(remainder); Ok(()) } fn write_to_output(decoded_buffer: &mut Vec, output: &mut dyn Write) -> io::Result<()> { // Write all data in `decoded_buffer` to `output` output.write_all(decoded_buffer.as_slice())?; decoded_buffer.clear(); Ok(()) } // End of helper functions pub fn fast_decode( input: &mut dyn Read, output: &mut dyn Write, supports_fast_decode_and_encode: &dyn SupportsFastDecodeAndEncode, ignore_garbage: bool, ) -> UResult<()> { // Based on performance testing const 
INPUT_BUFFER_SIZE: usize = 32 * 1_024; const DECODE_IN_CHUNKS_OF_SIZE_MULTIPLE: usize = 1_024; let alphabet = supports_fast_decode_and_encode.alphabet(); let decode_in_chunks_of_size = supports_fast_decode_and_encode.valid_decoding_multiple() * DECODE_IN_CHUNKS_OF_SIZE_MULTIPLE; assert!(decode_in_chunks_of_size > 0); // Note that it's not worth using "data-encoding"'s ignore functionality if `ignore_garbage` is true, because // "data-encoding"'s ignore functionality cannot discard non-ASCII bytes. The data has to be filtered before // passing it to "data-encoding", so there is no point in doing any filtering in "data-encoding". This also // allows execution to stay on the happy path in "data-encoding": // https://github.com/ia0/data-encoding/blob/4f42ad7ef242f6d243e4de90cd1b46a57690d00e/lib/src/lib.rs#L754-L756 // It is also not worth using "data-encoding"'s ignore functionality when `ignore_garbage` is // false. // Note that the alphabet constants above already include the padding characters // TODO // Precompute this let table = alphabet_to_table(alphabet, ignore_garbage); // Start of buffers // Data that was read from `input` let mut input_buffer = vec![0; INPUT_BUFFER_SIZE]; assert!(!input_buffer.is_empty()); // Data that was read from `input` but has not been decoded yet let mut leftover_buffer = Vec::::new(); // Decoded data that needs to be written to `output` let mut decoded_buffer = Vec::::new(); // Buffer that will be used when `ignore_garbage` is true, and the chunk read from `input` contains garbage // data let mut non_garbage_buffer = Vec::::new(); // End of buffers loop { match input.read(&mut input_buffer) { Ok(bytes_read_from_input) => { if bytes_read_from_input == 0 { break; } let read_buffer_filtered = { // The part of `input_buffer` that was actually filled by the call to `read` let read_buffer = &input_buffer[..bytes_read_from_input]; // First just scan the data for the happy path // Yields significant speedup when the input does not contain 
line endings let found_garbage = read_buffer.iter().any(|ue| { // Garbage, since it was not found in the table !table[usize::from(*ue)] }); if found_garbage { non_garbage_buffer.clear(); for ue in read_buffer { if table[usize::from(*ue)] { // Not garbage, since it was found in the table non_garbage_buffer.push(*ue); } } non_garbage_buffer.as_slice() } else { read_buffer } }; // How many bytes to steal from `read_buffer` to get `leftover_buffer` to the right size let bytes_to_steal = decode_in_chunks_of_size - leftover_buffer.len(); if bytes_to_steal > read_buffer_filtered.len() { // Do not have enough data to decode a chunk, so copy data to `leftover_buffer` and read more leftover_buffer.extend(read_buffer_filtered); assert!(leftover_buffer.len() < decode_in_chunks_of_size); continue; } // Decode data in chunks, then place it in `decoded_buffer` decode_in_chunks_to_buffer( supports_fast_decode_and_encode, decode_in_chunks_of_size, bytes_to_steal, read_buffer_filtered, &mut decoded_buffer, &mut leftover_buffer, )?; assert!(leftover_buffer.len() < decode_in_chunks_of_size); // Write all data in `decoded_buffer` to `output` write_to_output(&mut decoded_buffer, output)?; } Err(er) => { let kind = er.kind(); if kind == ErrorKind::Interrupted { // Retry reading continue; } return Err(USimpleError::new(1, format_read_error(kind))); } } } // Cleanup // `input` has finished producing data, so the data remaining in the buffers needs to be decoded and printed { // Decode all remaining encoded bytes, placing them in `decoded_buffer` supports_fast_decode_and_encode .decode_into_vec(&leftover_buffer, &mut decoded_buffer)?; // Write all data in `decoded_buffer` to `output` write_to_output(&mut decoded_buffer, output)?; } Ok(()) } } fn format_read_error(kind: ErrorKind) -> String { let kind_string = kind.to_string(); // e.g. 
"is a directory" -> "Is a directory" let mut kind_string_capitalized = String::with_capacity(kind_string.len()); for (index, ch) in kind_string.char_indices() { if index == 0 { for cha in ch.to_uppercase() { kind_string_capitalized.push(cha); } } else { kind_string_capitalized.push(ch); } } translate!("base-common-read-error", "error" => kind_string_capitalized) } #[cfg(test)] mod tests { use super::*; use std::io::Cursor; #[test] fn test_has_padding() { let test_cases = vec![ ("aGVsbG8sIHdvcmxkIQ==", true), ("aGVsbG8sIHdvcmxkIQ== ", true), ("aGVsbG8sIHdvcmxkIQ==\n", true), ("aGVsbG8sIHdvcmxkIQ== \n", true), ("aGVsbG8sIHdvcmxkIQ=", true), ("aGVsbG8sIHdvcmxkIQ= ", true), ("aGVsbG8sIHdvcmxkIQ \n", false), ("aGVsbG8sIHdvcmxkIQ", false), ]; for (input, expected) in test_cases { let mut cursor = Cursor::new(input.as_bytes()); assert_eq!( has_padding(&mut cursor).unwrap(), expected, "Failed for input: '{input}'" ); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base32/src/main.rs000066400000000000000000000000311504311601400253160ustar00rootroot00000000000000uucore::bin!(uu_base32); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/000077500000000000000000000000001504311601400232505ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/Cargo.toml000066400000000000000000000012041504311601400251750ustar00rootroot00000000000000[package] name = "uu_base64" description = "base64 ~ (uutils) decode/encode input (base64-encoding)" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/base64" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/base64.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["encoding"] } uu_base32 = { workspace = true } fluent = { workspace = 
true } [[bin]] name = "base64" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/LICENSE000077700000000000000000000000001504311601400261162../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/locales000077700000000000000000000000001504311601400274472../base32/localesustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/src/000077500000000000000000000000001504311601400240375ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/src/base64.rs000066400000000000000000000017271504311601400255000ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::Command; use uu_base32::base_common; use uucore::translate; use uucore::{encoding::Format, error::UResult}; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let format = Format::Base64; let (about, usage) = get_info(); let config = base_common::parse_base_cmd_args(args, about, usage)?; let mut input = base_common::get_input(&config)?; base_common::handle_input(&mut input, format, config) } pub fn uu_app() -> Command { let (about, usage) = get_info(); base_common::base_app(about, usage) } fn get_info() -> (&'static str, &'static str) { let about: &'static str = Box::leak(translate!("base64-about").into_boxed_str()); let usage: &'static str = Box::leak(translate!("base64-usage").into_boxed_str()); (about, usage) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/base64/src/main.rs000066400000000000000000000000311504311601400253230ustar00rootroot00000000000000uucore::bin!(uu_base64); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/000077500000000000000000000000001504311601400237375ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/Cargo.toml000066400000000000000000000011511504311601400256650ustar00rootroot00000000000000[package] name = "uu_basename" description = "basename ~ (uutils) display PATHNAME with leading directory components removed" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/basename" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/basename.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "basename" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/LICENSE000077700000000000000000000000001504311601400266052../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/locales/000077500000000000000000000000001504311601400253615ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/locales/en-US.ftl000066400000000000000000000010601504311601400270140ustar00rootroot00000000000000basename-about = Print NAME with any leading directory components removed If specified, also remove a trailing SUFFIX basename-usage = basename [-z] NAME [SUFFIX] basename OPTION... NAME... 
# Error messages basename-error-missing-operand = missing operand basename-error-extra-operand = extra operand { $operand } # Help text for command-line arguments basename-help-multiple = support multiple arguments and treat each as a NAME basename-help-suffix = remove a trailing SUFFIX; implies -a basename-help-zero = end each output line with NUL, not newline coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/locales/fr-FR.ftl000066400000000000000000000011761504311601400270110ustar00rootroot00000000000000basename-about = Affiche NOM sans les composants de répertoire précédents Si spécifié, supprime également un SUFFIXE final basename-usage = basename [-z] NOM [SUFFIXE] basename OPTION... NOM... # Messages d'erreur basename-error-missing-operand = opérande manquant basename-error-extra-operand = opérande supplémentaire { $operand } # Texte d'aide pour les arguments de ligne de commande basename-help-multiple = prend en charge plusieurs arguments et traite chacun comme un NOM basename-help-suffix = supprime un SUFFIXE final ; implique -a basename-help-zero = termine chaque ligne de sortie avec NUL, pas nouvelle ligne coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/src/000077500000000000000000000000001504311601400245265ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/src/basename.rs000066400000000000000000000107671504311601400266620ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) fullname use clap::builder::ValueParser; use clap::{Arg, ArgAction, Command}; use std::ffi::OsString; use std::io::{Write, stdout}; use std::path::PathBuf; use uucore::display::Quotable; use uucore::error::{UResult, UUsageError}; use uucore::format_usage; use uucore::line_ending::LineEnding; use uucore::translate; pub mod options { pub static MULTIPLE: &str = "multiple"; pub static NAME: &str = "name"; pub static SUFFIX: &str = "suffix"; pub static ZERO: &str = "zero"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { // // Argument parsing // let matches = uu_app().try_get_matches_from(args)?; let line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO)); let mut name_args = matches .get_many::(options::NAME) .unwrap_or_default() .collect::>(); if name_args.is_empty() { return Err(UUsageError::new( 1, translate!("basename-error-missing-operand"), )); } let multiple_paths = matches.get_one::(options::SUFFIX).is_some() || matches.get_flag(options::MULTIPLE); let suffix = if multiple_paths { matches .get_one::(options::SUFFIX) .cloned() .unwrap_or_default() } else { // "simple format" match name_args.len() { 0 => panic!("already checked"), 1 => OsString::default(), 2 => name_args.pop().unwrap().clone(), _ => { return Err(UUsageError::new( 1, translate!("basename-error-extra-operand", "operand" => name_args[2].quote()), )); } } }; // // Main Program Processing // for path in name_args { stdout().write_all(&basename(path, &suffix)?)?; print!("{line_ending}"); } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("basename-about")) .override_usage(format_usage(&translate!("basename-usage"))) .infer_long_args(true) .arg( Arg::new(options::MULTIPLE) .short('a') .long(options::MULTIPLE) .help(translate!("basename-help-multiple")) .action(ArgAction::SetTrue) .overrides_with(options::MULTIPLE), ) .arg( Arg::new(options::NAME) 
.action(ArgAction::Append) .value_parser(ValueParser::os_string()) .value_hint(clap::ValueHint::AnyPath) .hide(true) .trailing_var_arg(true), ) .arg( Arg::new(options::SUFFIX) .short('s') .long(options::SUFFIX) .value_name("SUFFIX") .value_parser(ValueParser::os_string()) .help(translate!("basename-help-suffix")) .overrides_with(options::SUFFIX), ) .arg( Arg::new(options::ZERO) .short('z') .long(options::ZERO) .help(translate!("basename-help-zero")) .action(ArgAction::SetTrue) .overrides_with(options::ZERO), ) } // We return a Vec. Returning a seemingly more proper `OsString` would // require back and forth conversions as we need a &[u8] for printing anyway. fn basename(fullname: &OsString, suffix: &OsString) -> UResult> { let fullname_bytes = uucore::os_str_as_bytes(fullname)?; // Handle special case where path ends with /. if fullname_bytes.ends_with(b"/.") { return Ok(b".".into()); } // Convert to path buffer and get last path component let pb = PathBuf::from(fullname); pb.components().next_back().map_or(Ok([].into()), |c| { let name = c.as_os_str(); let name_bytes = uucore::os_str_as_bytes(name)?; if name == suffix { Ok(name_bytes.into()) } else { let suffix_bytes = uucore::os_str_as_bytes(suffix)?; Ok(name_bytes .strip_suffix(suffix_bytes) .unwrap_or(name_bytes) .into()) } }) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basename/src/main.rs000066400000000000000000000000331504311601400260140ustar00rootroot00000000000000uucore::bin!(uu_basename); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/000077500000000000000000000000001504311601400234175ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/BENCHMARKING.md000066400000000000000000000207051504311601400255750ustar00rootroot00000000000000 # Benchmarking base32, base64, and basenc Note that the functionality of the `base32` and `base64` programs is identical to that of the `basenc` program, using the "--base32" and "--base64" options, 
respectively. For that reason, it is only necessary to benchmark `basenc`. To compare the runtime performance of the uutils implementation with the GNU Core Utilities implementation, you can use a benchmarking tool like [hyperfine][0]. hyperfine currently does not measure maximum memory usage. Memory usage can be benchmarked using [poop][2], or [toybox][3]'s "time" subcommand (both are Linux only). Build the `basenc` binary using the release profile: ```Shell cargo build --package uu_basenc --profile release ``` ## Expected performance uutils' `basenc` performs streaming decoding and encoding, and therefore should perform all operations with a constant maximum memory usage, regardless of the size of the input. Release builds currently use less than 3 mebibytes of memory, and memory usage greater than 10 mebibytes should be considered a bug. As of September 2024, uutils' `basenc` has runtime performance equal to or superior to GNU Core Utilities' `basenc` in in most scenarios. uutils' `basenc` uses slightly more memory, but given how small these quantities are in absolute terms (see above), this is highly unlikely to be practically relevant to users. ## Benchmark results (2024-09-27) ### Setup ```Shell # Use uutils' dd to create a 1 gibibyte in-memory file filled with random bytes (Linux only). # On other platforms, you can use /tmp instead of /dev/shm, but note that /tmp is not guaranteed to be in-memory. 
coreutils dd if=/dev/urandom of=/dev/shm/one-random-gibibyte bs=1024 count=1048576 # Encode this file for use in decoding performance testing /usr/bin/basenc --base32hex -- /dev/shm/one-random-gibibyte 1>/dev/shm/one-random-gibibyte-base32hex-encoded /usr/bin/basenc --z85 -- /dev/shm/one-random-gibibyte 1>/dev/shm/one-random-gibibyte-z85-encoded ``` ### Programs being tested uutils' `basenc`: ``` ⯠git rev-list HEAD | coreutils head -n 1 -- - a0718ef0ffd50539a2e2bc0095c9fadcd70ab857 ``` GNU Core Utilities' `basenc`: ``` ⯠/usr/bin/basenc --version | coreutils head -n 1 -- - basenc (GNU coreutils) 9.4 ``` ### Encoding performance #### "--base64", default line wrapping (76 characters) âž• Faster than GNU Core Utilities ``` ⯠hyperfine \ --sort \ command \ -- \ '/usr/bin/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null' \ './target/release/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null' Benchmark 1: /usr/bin/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null Time (mean ± σ): 965.1 ms ± 7.9 ms [User: 766.2 ms, System: 193.4 ms] Range (min … max): 950.2 ms … 976.9 ms 10 runs Benchmark 2: ./target/release/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null Time (mean ± σ): 696.6 ms ± 9.1 ms [User: 574.9 ms, System: 117.3 ms] Range (min … max): 683.1 ms … 713.5 ms 10 runs Relative speed comparison 1.39 ± 0.02 /usr/bin/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null 1.00 ./target/release/basenc --base64 -- /dev/shm/one-random-gibibyte 1>/dev/null ``` #### "--base16", no line wrapping âž– Slower than GNU Core Utilities ``` ⯠poop \ '/usr/bin/basenc --base16 --wrap 0 -- /dev/shm/one-random-gibibyte' \ './target/release/basenc --base16 --wrap 0 -- /dev/shm/one-random-gibibyte' Benchmark 1 (6 runs): /usr/bin/basenc --base16 --wrap 0 -- /dev/shm/one-random-gibibyte measurement mean ± σ min … max outliers delta wall_time 836ms ± 13.3ms 822ms … 855ms 0 ( 0%) 0% peak_rss 2.05MB ± 73.0KB 1.94MB … 2.12MB 0 ( 0%) 0% cpu_cycles 2.85G ± 
32.8M 2.82G … 2.91G 0 ( 0%) 0% instructions 14.0G ± 58.7 14.0G … 14.0G 0 ( 0%) 0% cache_references 70.0M ± 6.48M 63.7M … 78.8M 0 ( 0%) 0% cache_misses 582K ± 172K 354K … 771K 0 ( 0%) 0% branch_misses 667K ± 4.55K 662K … 674K 0 ( 0%) 0% Benchmark 2 (6 runs): ./target/release/basenc --base16 --wrap 0 -- /dev/shm/one-random-gibibyte measurement mean ± σ min … max outliers delta wall_time 884ms ± 6.38ms 878ms … 895ms 0 ( 0%) 💩+ 5.7% ± 1.6% peak_rss 2.65MB ± 66.8KB 2.55MB … 2.74MB 0 ( 0%) 💩+ 29.3% ± 4.4% cpu_cycles 3.15G ± 8.61M 3.14G … 3.16G 0 ( 0%) 💩+ 10.6% ± 1.1% instructions 10.5G ± 275 10.5G … 10.5G 0 ( 0%) âš¡- 24.9% ± 0.0% cache_references 93.5M ± 6.10M 87.2M … 104M 0 ( 0%) 💩+ 33.7% ± 11.6% cache_misses 415K ± 52.3K 363K … 474K 0 ( 0%) - 28.8% ± 28.0% branch_misses 1.43M ± 4.82K 1.42M … 1.43M 0 ( 0%) 💩+113.9% ± 0.9% ``` ### Decoding performance #### "--base32hex" âž• Faster than GNU Core Utilities ``` ⯠hyperfine \ --sort \ command \ -- \ '/usr/bin/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null' \ './target/release/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null' Benchmark 1: /usr/bin/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null Time (mean ± σ): 7.154 s ± 0.082 s [User: 6.802 s, System: 0.323 s] Range (min … max): 7.051 s … 7.297 s 10 runs Benchmark 2: ./target/release/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null Time (mean ± σ): 2.679 s ± 0.025 s [User: 2.446 s, System: 0.221 s] Range (min … max): 2.649 s … 2.718 s 10 runs Relative speed comparison 2.67 ± 0.04 /usr/bin/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null 1.00 ./target/release/basenc --base32hex --decode -- /dev/shm/one-random-gibibyte-base32hex-encoded 1>/dev/null ``` #### "--z85", with "--ignore-garbage" âž• Faster than GNU Core Utilities ``` ⯠poop \ '/usr/bin/basenc --decode 
--ignore-garbage --z85 -- /dev/shm/one-random-gibibyte-z85-encoded' \ './target/release/basenc --decode --ignore-garbage --z85 -- /dev/shm/one-random-gibibyte-z85-encoded' Benchmark 1 (3 runs): /usr/bin/basenc --decode --ignore-garbage --z85 -- /dev/shm/one-random-gibibyte-z85-encoded measurement mean ± σ min … max outliers delta wall_time 14.4s ± 68.4ms 14.3s … 14.4s 0 ( 0%) 0% peak_rss 1.98MB ± 10.8KB 1.97MB … 1.99MB 0 ( 0%) 0% cpu_cycles 58.4G ± 211M 58.3G … 58.7G 0 ( 0%) 0% instructions 74.7G ± 64.0 74.7G … 74.7G 0 ( 0%) 0% cache_references 41.8M ± 624K 41.2M … 42.4M 0 ( 0%) 0% cache_misses 693K ± 118K 567K … 802K 0 ( 0%) 0% branch_misses 1.24G ± 183K 1.24G … 1.24G 0 ( 0%) 0% Benchmark 2 (3 runs): ./target/release/basenc --decode --ignore-garbage --z85 -- /dev/shm/one-random-gibibyte-z85-encoded measurement mean ± σ min … max outliers delta wall_time 2.80s ± 17.9ms 2.79s … 2.82s 0 ( 0%) âš¡- 80.5% ± 0.8% peak_rss 2.61MB ± 67.4KB 2.57MB … 2.69MB 0 ( 0%) 💩+ 31.9% ± 5.5% cpu_cycles 10.8G ± 27.9M 10.8G … 10.9G 0 ( 0%) âš¡- 81.5% ± 0.6% instructions 39.0G ± 353 39.0G … 39.0G 0 ( 0%) âš¡- 47.7% ± 0.0% cache_references 114M ± 2.43M 112M … 116M 0 ( 0%) 💩+173.3% ± 9.6% cache_misses 1.06M ± 288K 805K … 1.37M 0 ( 0%) + 52.6% ± 72.0% branch_misses 1.18M ± 14.7K 1.16M … 1.19M 0 ( 0%) âš¡- 99.9% ± 0.0% ``` [0]: https://github.com/sharkdp/hyperfine [1]: https://github.com/sharkdp/hyperfine?tab=readme-ov-file#installation [2]: https://github.com/andrewrk/poop [3]: https://landley.net/toybox/ coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/Cargo.toml000066400000000000000000000011621504311601400253470ustar00rootroot00000000000000[package] name = "uu_basenc" description = "basenc ~ (uutils) decode/encode input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/basenc" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true 
readme.workspace = true [lints] workspace = true [lib] path = "src/basenc.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["encoding"] } uu_base32 = { workspace = true } fluent = { workspace = true } [[bin]] name = "basenc" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/LICENSE000077700000000000000000000000001504311601400262652../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/locales000077700000000000000000000000001504311601400276162../base32/localesustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/src/000077500000000000000000000000001504311601400242065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/src/basenc.rs000066400000000000000000000053571504311601400260210ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore lsbf msbf use clap::{Arg, ArgAction, Command}; use uu_base32::base_common::{self, BASE_CMD_PARSE_ERROR, Config}; use uucore::error::UClapError; use uucore::translate; use uucore::{ encoding::Format, error::{UResult, UUsageError}, }; fn get_encodings() -> Vec<(&'static str, Format, String)> { vec![ ("base64", Format::Base64, translate!("basenc-help-base64")), ( "base64url", Format::Base64Url, translate!("basenc-help-base64url"), ), ("base32", Format::Base32, translate!("basenc-help-base32")), ( "base32hex", Format::Base32Hex, translate!("basenc-help-base32hex"), ), ("base16", Format::Base16, translate!("basenc-help-base16")), ( "base2lsbf", Format::Base2Lsbf, translate!("basenc-help-base2lsbf"), ), ( "base2msbf", Format::Base2Msbf, translate!("basenc-help-base2msbf"), ), ("z85", Format::Z85, translate!("basenc-help-z85")), ] } pub fn uu_app() -> Command { let about: &'static str = Box::leak(translate!("basenc-about").into_boxed_str()); let usage: &'static str = Box::leak(translate!("basenc-usage").into_boxed_str()); let encodings = get_encodings(); let mut command = base_common::base_app(about, usage); for encoding in &encodings { let raw_arg = Arg::new(encoding.0) .long(encoding.0) .help(&encoding.2) .action(ArgAction::SetTrue); let overriding_arg = encodings .iter() .fold(raw_arg, |arg, enc| arg.overrides_with(enc.0)); command = command.arg(overriding_arg); } command } fn parse_cmd_args(args: impl uucore::Args) -> UResult<(Config, Format)> { let matches = uu_app() .try_get_matches_from(args.collect_lossy()) .with_exit_code(1)?; let encodings = get_encodings(); let format = encodings .iter() .find(|encoding| matches.get_flag(encoding.0)) .ok_or_else(|| { UUsageError::new( BASE_CMD_PARSE_ERROR, translate!("basenc-error-missing-encoding-type"), ) })? 
.1; let config = Config::from(&matches)?; Ok((config, format)) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let (config, format) = parse_cmd_args(args)?; let mut input = base_common::get_input(&config)?; base_common::handle_input(&mut input, format, config) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/basenc/src/main.rs000066400000000000000000000000311504311601400254720ustar00rootroot00000000000000uucore::bin!(uu_basenc); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/000077500000000000000000000000001504311601400227335ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/Cargo.toml000066400000000000000000000016411504311601400246650ustar00rootroot00000000000000[package] name = "uu_cat" description = "cat ~ (uutils) concatenate and display input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/cat" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/cat.rs" [dependencies] clap = { workspace = true } memchr = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = ["fast-inc", "fs", "pipes"] } fluent = { workspace = true } [target.'cfg(unix)'.dependencies] nix = { workspace = true } [target.'cfg(windows)'.dependencies] winapi-util = { workspace = true } windows-sys = { workspace = true, features = ["Win32_Storage_FileSystem"] } [dev-dependencies] tempfile = { workspace = true } [[bin]] name = "cat" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/LICENSE000077700000000000000000000000001504311601400256012../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/locales/000077500000000000000000000000001504311601400243555ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/locales/en-US.ftl000066400000000000000000000002451504311601400260140ustar00rootroot00000000000000cat-about = Concatenate FILE(s), or standard input, to standard output With no FILE, or when FILE is -, read standard input. cat-usage = cat [OPTION]... [FILE]... coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/000077500000000000000000000000001504311601400235225ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/cat.rs000066400000000000000000000623751504311601400246540ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) nonprint nonblank nonprinting ELOOP mod platform; use crate::platform::is_unsafe_overwrite; use clap::{Arg, ArgAction, Command}; use memchr::memchr2; use std::fs::{File, metadata}; use std::io::{self, BufWriter, ErrorKind, IsTerminal, Read, Write}; /// Unix domain socket support #[cfg(unix)] use std::net::Shutdown; #[cfg(unix)] use std::os::fd::AsFd; #[cfg(unix)] use std::os::unix::fs::FileTypeExt; #[cfg(unix)] use std::os::unix::net::UnixStream; use thiserror::Error; use uucore::display::Quotable; use uucore::error::UResult; #[cfg(not(target_os = "windows"))] use uucore::libc; use uucore::translate; use uucore::{fast_inc::fast_inc_one, format_usage}; /// Linux splice support #[cfg(any(target_os = "linux", target_os = "android"))] mod splice; // Allocate 32 digits for the line number. 
// An estimate is that we can print about 1e8 lines/seconds, so 32 digits // would be enough for billions of universe lifetimes. const LINE_NUMBER_BUF_SIZE: usize = 32; struct LineNumber { buf: [u8; LINE_NUMBER_BUF_SIZE], print_start: usize, num_start: usize, num_end: usize, } // Logic to store a string for the line number. Manually incrementing the value // represented in a buffer like this is significantly faster than storing // a `usize` and using the standard Rust formatting macros to format a `usize` // to a string each time it's needed. // Buffer is initialized to " 1\t" and incremented each time `increment` is // called, using uucore's fast_inc function that operates on strings. impl LineNumber { fn new() -> Self { let mut buf = [b'0'; LINE_NUMBER_BUF_SIZE]; let init_str = " 1\t"; let print_start = buf.len() - init_str.len(); let num_start = buf.len() - 2; let num_end = buf.len() - 1; buf[print_start..].copy_from_slice(init_str.as_bytes()); LineNumber { buf, print_start, num_start, num_end, } } fn increment(&mut self) { fast_inc_one(&mut self.buf, &mut self.num_start, self.num_end); self.print_start = self.print_start.min(self.num_start); } #[inline] fn to_str(&self) -> &[u8] { &self.buf[self.print_start..] } fn write(&self, writer: &mut impl Write) -> io::Result<()> { writer.write_all(self.to_str()) } } #[derive(Error, Debug)] enum CatError { /// Wrapper around `io::Error` #[error("{0}")] Io(#[from] io::Error), /// Wrapper around `nix::Error` #[cfg(any(target_os = "linux", target_os = "android"))] #[error("{0}")] Nix(#[from] nix::Error), /// Unknown file type; it's not a regular file, socket, etc. 
#[error("unknown filetype: {ft_debug}")] UnknownFiletype { /// A debug print of the file type ft_debug: String, }, #[error("Is a directory")] IsDirectory, #[error("input file is output file")] OutputIsInput, #[error("Too many levels of symbolic links")] TooManySymlinks, } type CatResult = Result; #[derive(PartialEq)] enum NumberingMode { None, NonEmpty, All, } struct OutputOptions { /// Line numbering mode number: NumberingMode, /// Suppress repeated empty output lines squeeze_blank: bool, /// display TAB characters as `tab` show_tabs: bool, /// Show end of lines show_ends: bool, /// use ^ and M- notation, except for LF (\\n) and TAB (\\t) show_nonprint: bool, } impl OutputOptions { fn tab(&self) -> &'static str { if self.show_tabs { "^I" } else { "\t" } } fn end_of_line(&self) -> &'static str { if self.show_ends { "$\n" } else { "\n" } } /// We can write fast if we can simply copy the contents of the file to /// stdout, without augmenting the output with e.g. line numbers. fn can_write_fast(&self) -> bool { !(self.show_tabs || self.show_nonprint || self.show_ends || self.squeeze_blank || self.number != NumberingMode::None) } } /// State that persists between output of each file. This struct is only used /// when we can't write fast. struct OutputState { /// The current line number line_number: LineNumber, /// Whether the output cursor is at the beginning of a new line at_line_start: bool, /// Whether we skipped a \r, which still needs to be printed skipped_carriage_return: bool, /// Whether we have already printed a blank line one_blank_kept: bool, } #[cfg(unix)] trait FdReadable: Read + AsFd {} #[cfg(not(unix))] trait FdReadable: Read {} #[cfg(unix)] impl FdReadable for T where T: Read + AsFd {} #[cfg(not(unix))] impl FdReadable for T where T: Read {} /// Represents an open file handle, stream, or other device struct InputHandle { reader: R, is_interactive: bool, } /// Concrete enum of recognized file types. 
/// /// *Note*: `cat`-ing a directory should result in an /// [`CatError::IsDirectory`] enum InputType { Directory, File, StdIn, SymLink, #[cfg(unix)] BlockDevice, #[cfg(unix)] CharacterDevice, #[cfg(unix)] Fifo, #[cfg(unix)] Socket, } mod options { pub static FILE: &str = "file"; pub static SHOW_ALL: &str = "show-all"; pub static NUMBER_NONBLANK: &str = "number-nonblank"; pub static SHOW_NONPRINTING_ENDS: &str = "e"; pub static SHOW_ENDS: &str = "show-ends"; pub static NUMBER: &str = "number"; pub static SQUEEZE_BLANK: &str = "squeeze-blank"; pub static SHOW_NONPRINTING_TABS: &str = "t"; pub static SHOW_TABS: &str = "show-tabs"; pub static SHOW_NONPRINTING: &str = "show-nonprinting"; pub static IGNORED_U: &str = "ignored-u"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { // When we receive a SIGPIPE signal, we want to terminate the process so // that we don't print any error messages to stderr. Rust ignores SIGPIPE // (see https://github.com/rust-lang/rust/issues/62569), so we restore it's // default action here. 
#[cfg(not(target_os = "windows"))] unsafe { libc::signal(libc::SIGPIPE, libc::SIG_DFL); } let matches = uu_app().try_get_matches_from(args)?; let number_mode = if matches.get_flag(options::NUMBER_NONBLANK) { NumberingMode::NonEmpty } else if matches.get_flag(options::NUMBER) { NumberingMode::All } else { NumberingMode::None }; let show_nonprint = [ options::SHOW_ALL.to_owned(), options::SHOW_NONPRINTING_ENDS.to_owned(), options::SHOW_NONPRINTING_TABS.to_owned(), options::SHOW_NONPRINTING.to_owned(), ] .iter() .any(|v| matches.get_flag(v)); let show_ends = [ options::SHOW_ENDS.to_owned(), options::SHOW_ALL.to_owned(), options::SHOW_NONPRINTING_ENDS.to_owned(), ] .iter() .any(|v| matches.get_flag(v)); let show_tabs = [ options::SHOW_ALL.to_owned(), options::SHOW_TABS.to_owned(), options::SHOW_NONPRINTING_TABS.to_owned(), ] .iter() .any(|v| matches.get_flag(v)); let squeeze_blank = matches.get_flag(options::SQUEEZE_BLANK); let files: Vec = match matches.get_many::(options::FILE) { Some(v) => v.cloned().collect(), None => vec!["-".to_owned()], }; let options = OutputOptions { show_ends, number: number_mode, show_nonprint, show_tabs, squeeze_blank, }; cat_files(&files, &options) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .override_usage(format_usage(&translate!("cat-usage"))) .about(translate!("cat-about")) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::FILE) .hide(true) .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath), ) .arg( Arg::new(options::SHOW_ALL) .short('A') .long(options::SHOW_ALL) .help("equivalent to -vET") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NUMBER_NONBLANK) .short('b') .long(options::NUMBER_NONBLANK) .help("number nonempty output lines, overrides -n") // Note: This MUST NOT .overrides_with(options::NUMBER)! // In clap, overriding is symmetric, so "-b -n" counts as "-n", which is not what we want. 
.action(ArgAction::SetTrue), ) .arg( Arg::new(options::SHOW_NONPRINTING_ENDS) .short('e') .help("equivalent to -vE") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SHOW_ENDS) .short('E') .long(options::SHOW_ENDS) .help("display $ at end of each line") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NUMBER) .short('n') .long(options::NUMBER) .help("number all output lines") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SQUEEZE_BLANK) .short('s') .long(options::SQUEEZE_BLANK) .help("suppress repeated empty output lines") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SHOW_NONPRINTING_TABS) .short('t') .help("equivalent to -vT") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SHOW_TABS) .short('T') .long(options::SHOW_TABS) .help("display TAB characters at ^I") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SHOW_NONPRINTING) .short('v') .long(options::SHOW_NONPRINTING) .help("use ^ and M- notation, except for LF (\\n) and TAB (\\t)") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::IGNORED_U) .short('u') .help("(ignored)") .action(ArgAction::SetTrue), ) } fn cat_handle( handle: &mut InputHandle, options: &OutputOptions, state: &mut OutputState, ) -> CatResult<()> { if options.can_write_fast() { write_fast(handle) } else { write_lines(handle, options, state) } } fn cat_path(path: &str, options: &OutputOptions, state: &mut OutputState) -> CatResult<()> { match get_input_type(path)? 
{ InputType::StdIn => { let stdin = io::stdin(); if is_unsafe_overwrite(&stdin, &io::stdout()) { return Err(CatError::OutputIsInput); } let mut handle = InputHandle { reader: stdin, is_interactive: io::stdin().is_terminal(), }; cat_handle(&mut handle, options, state) } InputType::Directory => Err(CatError::IsDirectory), #[cfg(unix)] InputType::Socket => { let socket = UnixStream::connect(path)?; socket.shutdown(Shutdown::Write)?; let mut handle = InputHandle { reader: socket, is_interactive: false, }; cat_handle(&mut handle, options, state) } _ => { let file = File::open(path)?; if is_unsafe_overwrite(&file, &io::stdout()) { return Err(CatError::OutputIsInput); } let mut handle = InputHandle { reader: file, is_interactive: false, }; cat_handle(&mut handle, options, state) } } } fn cat_files(files: &[String], options: &OutputOptions) -> UResult<()> { let mut state = OutputState { line_number: LineNumber::new(), at_line_start: true, skipped_carriage_return: false, one_blank_kept: false, }; let mut error_messages: Vec = Vec::new(); for path in files { if let Err(err) = cat_path(path, options, &mut state) { error_messages.push(format!("{}: {err}", path.maybe_quote())); } } if state.skipped_carriage_return { print!("\r"); } if error_messages.is_empty() { Ok(()) } else { // each next line is expected to display "cat: …" let line_joiner = format!("\n{}: ", uucore::util_name()); Err(uucore::error::USimpleError::new( error_messages.len() as i32, error_messages.join(&line_joiner), )) } } /// Classifies the `InputType` of file at `path` if possible /// /// # Arguments /// /// * `path` - Path on a file system to classify metadata fn get_input_type(path: &str) -> CatResult { if path == "-" { return Ok(InputType::StdIn); } let ft = match metadata(path) { Ok(md) => md.file_type(), Err(e) => { if let Some(raw_error) = e.raw_os_error() { // On Unix-like systems, the error code for "Too many levels of symbolic links" is 40 (ELOOP). 
// we want to provide a proper error message in this case. #[cfg(not(any(target_os = "macos", target_os = "freebsd")))] let too_many_symlink_code = 40; #[cfg(any(target_os = "macos", target_os = "freebsd"))] let too_many_symlink_code = 62; if raw_error == too_many_symlink_code { return Err(CatError::TooManySymlinks); } } return Err(CatError::Io(e)); } }; match ft { #[cfg(unix)] ft if ft.is_block_device() => Ok(InputType::BlockDevice), #[cfg(unix)] ft if ft.is_char_device() => Ok(InputType::CharacterDevice), #[cfg(unix)] ft if ft.is_fifo() => Ok(InputType::Fifo), #[cfg(unix)] ft if ft.is_socket() => Ok(InputType::Socket), ft if ft.is_dir() => Ok(InputType::Directory), ft if ft.is_file() => Ok(InputType::File), ft if ft.is_symlink() => Ok(InputType::SymLink), _ => Err(CatError::UnknownFiletype { ft_debug: format!("{ft:?}"), }), } } /// Writes handle to stdout with no configuration. This allows a /// simple memory copy. fn write_fast(handle: &mut InputHandle) -> CatResult<()> { let stdout = io::stdout(); let mut stdout_lock = stdout.lock(); #[cfg(any(target_os = "linux", target_os = "android"))] { // If we're on Linux or Android, try to use the splice() system call // for faster writing. If it works, we're done. if !splice::write_fast_using_splice(handle, &stdout_lock)? { return Ok(()); } } // If we're not on Linux or Android, or the splice() call failed, // fall back on slower writing. let mut buf = [0; 1024 * 64]; loop { match handle.reader.read(&mut buf) { Ok(n) => { if n == 0 { break; } stdout_lock .write_all(&buf[..n]) .inspect_err(handle_broken_pipe)?; } Err(e) => return Err(e.into()), } } // If the splice() call failed and there has been some data written to // stdout via while loop above AND there will be second splice() call // that will succeed, data pushed through splice will be output before // the data buffered in stdout.lock. Therefore additional explicit flush // is required here. 
stdout_lock.flush().inspect_err(handle_broken_pipe)?; Ok(()) } /// Outputs file contents to stdout in a line-by-line fashion, /// propagating any errors that might occur. fn write_lines( handle: &mut InputHandle, options: &OutputOptions, state: &mut OutputState, ) -> CatResult<()> { let mut in_buf = [0; 1024 * 31]; let stdout = io::stdout(); let stdout = stdout.lock(); // Add a 32K buffer for stdout - this greatly improves performance. let mut writer = BufWriter::with_capacity(32 * 1024, stdout); while let Ok(n) = handle.reader.read(&mut in_buf) { if n == 0 { break; } let in_buf = &in_buf[..n]; let mut pos = 0; while pos < n { // skip empty line_number enumerating them if needed if in_buf[pos] == b'\n' { write_new_line(&mut writer, options, state, handle.is_interactive)?; state.at_line_start = true; pos += 1; continue; } if state.skipped_carriage_return { writer.write_all(b"\r")?; state.skipped_carriage_return = false; state.at_line_start = false; } state.one_blank_kept = false; if state.at_line_start && options.number != NumberingMode::None { state.line_number.write(&mut writer)?; state.line_number.increment(); } // print to end of line or end of buffer let offset = write_end(&mut writer, &in_buf[pos..], options); // end of buffer? if offset + pos == in_buf.len() { state.at_line_start = false; break; } if in_buf[pos + offset] == b'\r' { state.skipped_carriage_return = true; } else { assert_eq!(in_buf[pos + offset], b'\n'); // print suitable end of line write_end_of_line( &mut writer, options.end_of_line().as_bytes(), handle.is_interactive, )?; state.at_line_start = true; } pos += offset + 1; } // We need to flush the buffer each time around the loop in order to pass GNU tests. // When we are reading the input from a pipe, the `handle.reader.read` call at the top // of this loop will block (indefinitely) whist waiting for more data. 
The expectation // however is that anything that's ready for output should show up in the meantime, // and not be buffered internally to the `cat` process. // Hence it's necessary to flush our buffer before every time we could potentially block // on a `std::io::Read::read` call. writer.flush().inspect_err(handle_broken_pipe)?; } Ok(()) } /// `\r` followed by `\n` is printed as `^M` when `show_ends` is enabled, so that `\r\n` prints as `^M$` fn write_new_line( writer: &mut W, options: &OutputOptions, state: &mut OutputState, is_interactive: bool, ) -> CatResult<()> { if state.skipped_carriage_return { if options.show_ends { writer.write_all(b"^M")?; } else { writer.write_all(b"\r")?; } state.skipped_carriage_return = false; write_end_of_line(writer, options.end_of_line().as_bytes(), is_interactive)?; return Ok(()); } if !state.at_line_start || !options.squeeze_blank || !state.one_blank_kept { state.one_blank_kept = true; if state.at_line_start && options.number == NumberingMode::All { state.line_number.write(writer)?; state.line_number.increment(); } write_end_of_line(writer, options.end_of_line().as_bytes(), is_interactive)?; } Ok(()) } fn write_end(writer: &mut W, in_buf: &[u8], options: &OutputOptions) -> usize { if options.show_nonprint { write_nonprint_to_end(in_buf, writer, options.tab().as_bytes()) } else if options.show_tabs { write_tab_to_end(in_buf, writer) } else { write_to_end(in_buf, writer) } } // write***_to_end methods // Write all symbols till \n or \r or end of buffer is reached // We need to stop at \r because it may be written as ^M depending on the byte after and settings; // however, write_nonprint_to_end doesn't need to stop at \r because it will always write \r as ^M. 
// Return the number of written symbols fn write_to_end(in_buf: &[u8], writer: &mut W) -> usize { // using memchr2 significantly improves performances match memchr2(b'\n', b'\r', in_buf) { Some(p) => { writer.write_all(&in_buf[..p]).unwrap(); p } None => { writer.write_all(in_buf).unwrap(); in_buf.len() } } } fn write_tab_to_end(mut in_buf: &[u8], writer: &mut W) -> usize { let mut count = 0; loop { match in_buf .iter() .position(|c| *c == b'\n' || *c == b'\t' || *c == b'\r') { Some(p) => { writer.write_all(&in_buf[..p]).unwrap(); if in_buf[p] == b'\t' { writer.write_all(b"^I").unwrap(); in_buf = &in_buf[p + 1..]; count += p + 1; } else { // b'\n' or b'\r' return count + p; } } None => { writer.write_all(in_buf).unwrap(); return in_buf.len() + count; } } } } fn write_nonprint_to_end(in_buf: &[u8], writer: &mut W, tab: &[u8]) -> usize { let mut count = 0; for byte in in_buf.iter().copied() { if byte == b'\n' { break; } match byte { 9 => writer.write_all(tab), 0..=8 | 10..=31 => writer.write_all(&[b'^', byte + 64]), 32..=126 => writer.write_all(&[byte]), 127 => writer.write_all(b"^?"), 128..=159 => writer.write_all(&[b'M', b'-', b'^', byte - 64]), 160..=254 => writer.write_all(&[b'M', b'-', byte - 128]), _ => writer.write_all(b"M-^?"), } .unwrap(); count += 1; } count } fn write_end_of_line( writer: &mut W, end_of_line: &[u8], is_interactive: bool, ) -> CatResult<()> { writer.write_all(end_of_line)?; if is_interactive { writer.flush().inspect_err(handle_broken_pipe)?; } Ok(()) } fn handle_broken_pipe(error: &io::Error) { // SIGPIPE is not available on Windows. 
if cfg!(target_os = "windows") && error.kind() == ErrorKind::BrokenPipe { std::process::exit(13); } } #[cfg(test)] mod tests { use std::io::{BufWriter, stdout}; #[test] fn test_write_tab_to_end_with_newline() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = b"a\tb\tc\n"; assert_eq!(super::write_tab_to_end(in_buf, &mut writer), 5); } #[test] fn test_write_tab_to_end_no_newline() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = b"a\tb\tc"; assert_eq!(super::write_tab_to_end(in_buf, &mut writer), 5); } #[test] fn test_write_nonprint_to_end_new_line() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = b"\n"; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer().len(), 0); } #[test] fn test_write_nonprint_to_end_9() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[9u8]; let tab = b"tab"; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), tab); } #[test] fn test_write_nonprint_to_end_0_to_8() { for byte in 0u8..=8u8 { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[byte]; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), [b'^', byte + 64]); } } #[test] fn test_write_nonprint_to_end_10_to_31() { for byte in 11u8..=31u8 { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[byte]; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), [b'^', byte + 64]); } } #[test] fn test_incrementing_string() { let mut incrementing_string = super::LineNumber::new(); assert_eq!(b" 1\t", incrementing_string.to_str()); incrementing_string.increment(); assert_eq!(b" 2\t", incrementing_string.to_str()); // Run through to 100 for _ in 3..=100 { incrementing_string.increment(); } assert_eq!(b" 100\t", incrementing_string.to_str()); // Run through until 
we overflow the original size. for _ in 101..=1_000_000 { incrementing_string.increment(); } // Confirm that the start position moves when we overflow the original size. assert_eq!(b"1000000\t", incrementing_string.to_str()); incrementing_string.increment(); assert_eq!(b"1000001\t", incrementing_string.to_str()); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/main.rs000066400000000000000000000000261504311601400250120ustar00rootroot00000000000000uucore::bin!(uu_cat); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/platform/000077500000000000000000000000001504311601400253465ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/platform/mod.rs000066400000000000000000000005421504311601400264740ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. #[cfg(unix)] pub use self::unix::is_unsafe_overwrite; #[cfg(windows)] pub use self::windows::is_unsafe_overwrite; #[cfg(unix)] mod unix; #[cfg(windows)] mod windows; coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/platform/unix.rs000066400000000000000000000105631504311601400267040ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore lseek seekable use nix::fcntl::{FcntlArg, OFlag, fcntl}; use nix::unistd::{Whence, lseek}; use std::os::fd::AsFd; use uucore::fs::FileInformation; /// An unsafe overwrite occurs when the same nonempty file is used as both stdin and stdout, /// and the file offset of stdin is positioned earlier than that of stdout. 
/// In this scenario, bytes read from stdin are written to a later part of the file /// via stdout, which can then be read again by stdin and written again by stdout, /// causing an infinite loop and potential file corruption. pub fn is_unsafe_overwrite(input: &I, output: &O) -> bool { // `FileInformation::from_file` returns an error if the file descriptor is closed, invalid, // or refers to a non-regular file (e.g., socket, pipe, or special device). let Ok(input_info) = FileInformation::from_file(input) else { return false; }; let Ok(output_info) = FileInformation::from_file(output) else { return false; }; if input_info != output_info || output_info.file_size() == 0 { return false; } if is_appending(output) { return true; } // `lseek` returns an error if the file descriptor is closed or it refers to // a non-seekable resource (e.g., pipe, socket, or some devices). let Ok(input_pos) = lseek(input.as_fd(), 0, Whence::SeekCur) else { return false; }; let Ok(output_pos) = lseek(output.as_fd(), 0, Whence::SeekCur) else { return false; }; input_pos < output_pos } /// Whether the file is opened with the `O_APPEND` flag fn is_appending(file: &F) -> bool { let flags_raw = fcntl(file.as_fd(), FcntlArg::F_GETFL).unwrap_or_default(); let flags = OFlag::from_bits_truncate(flags_raw); flags.contains(OFlag::O_APPEND) } #[cfg(test)] mod tests { use crate::platform::unix::{is_appending, is_unsafe_overwrite}; use std::fs::OpenOptions; use std::io::{Seek, SeekFrom, Write}; use tempfile::NamedTempFile; #[test] fn test_is_appending() { let temp_file = NamedTempFile::new().unwrap(); assert!(!is_appending(&temp_file)); let read_file = OpenOptions::new().read(true).open(&temp_file).unwrap(); assert!(!is_appending(&read_file)); let write_file = OpenOptions::new().write(true).open(&temp_file).unwrap(); assert!(!is_appending(&write_file)); let append_file = OpenOptions::new().append(true).open(&temp_file).unwrap(); assert!(is_appending(&append_file)); } #[test] fn test_is_unsafe_overwrite() 
{ // Create two temp files one of which is empty let empty = NamedTempFile::new().unwrap(); let mut nonempty = NamedTempFile::new().unwrap(); nonempty.write_all(b"anything").unwrap(); nonempty.seek(SeekFrom::Start(0)).unwrap(); // Using a different file as input and output does not result in an overwrite assert!(!is_unsafe_overwrite(&empty, &nonempty)); // Overwriting an empty file is always safe assert!(!is_unsafe_overwrite(&empty, &empty)); // Overwriting a nonempty file with itself is safe assert!(!is_unsafe_overwrite(&nonempty, &nonempty)); // Overwriting an empty file opened in append mode is safe let empty_append = OpenOptions::new().append(true).open(&empty).unwrap(); assert!(!is_unsafe_overwrite(&empty, &empty_append)); // Overwriting a nonempty file opened in append mode is unsafe let nonempty_append = OpenOptions::new().append(true).open(&nonempty).unwrap(); assert!(is_unsafe_overwrite(&nonempty, &nonempty_append)); // Overwriting a file opened in write mode is safe let mut nonempty_write = OpenOptions::new().write(true).open(&nonempty).unwrap(); assert!(!is_unsafe_overwrite(&nonempty, &nonempty_write)); // Overwriting a file when the input and output file descriptors are pointing to // different offsets is safe if the input offset is further than the output offset nonempty_write.seek(SeekFrom::Start(1)).unwrap(); assert!(!is_unsafe_overwrite(&nonempty_write, &nonempty)); assert!(is_unsafe_overwrite(&nonempty, &nonempty_write)); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/platform/windows.rs000066400000000000000000000035041504311601400274100ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
use std::ffi::OsString; use std::os::windows::ffi::OsStringExt; use std::path::PathBuf; use uucore::fs::FileInformation; use winapi_util::AsHandleRef; use windows_sys::Win32::Storage::FileSystem::{ FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW, VOLUME_NAME_NT, }; /// An unsafe overwrite occurs when the same file is used as both stdin and stdout /// and the stdout file is not empty. pub fn is_unsafe_overwrite(input: &I, output: &O) -> bool { if !is_same_file_by_path(input, output) { return false; } // Check if the output file is empty FileInformation::from_file(output) .map(|info| info.file_size() > 0) .unwrap_or(false) } /// Get the file path for a file handle fn get_file_path_from_handle(file: &F) -> Option { let handle = file.as_raw(); let mut path_buf = vec![0u16; 4096]; // SAFETY: We should check how many bytes was written to `path_buf` // and only read that many bytes from it. let len = unsafe { GetFinalPathNameByHandleW( handle, path_buf.as_mut_ptr(), path_buf.len() as u32, FILE_NAME_NORMALIZED | VOLUME_NAME_NT, ) }; if len == 0 { return None; } let path = OsString::from_wide(&path_buf[..len as usize]); Some(PathBuf::from(path)) } /// Compare two file handles if they correspond to the same file fn is_same_file_by_path(a: &A, b: &B) -> bool { match (get_file_path_from_handle(a), get_file_path_from_handle(b)) { (Some(path1), Some(path2)) => path1 == path2, _ => false, } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cat/src/splice.rs000066400000000000000000000046611504311601400253560ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
use super::{CatResult, FdReadable, InputHandle}; use nix::unistd; use std::os::{fd::AsFd, unix::io::AsRawFd}; use uucore::pipes::{pipe, splice, splice_exact}; const SPLICE_SIZE: usize = 1024 * 128; const BUF_SIZE: usize = 1024 * 16; /// This function is called from `write_fast()` on Linux and Android. The /// function `splice()` is used to move data between two file descriptors /// without copying between kernel and user spaces. This results in a large /// speedup. /// /// The `bool` in the result value indicates if we need to fall back to normal /// copying or not. False means we don't have to. #[inline] pub(super) fn write_fast_using_splice( handle: &InputHandle, write_fd: &S, ) -> CatResult { let (pipe_rd, pipe_wr) = pipe()?; loop { match splice(&handle.reader, &pipe_wr, SPLICE_SIZE) { Ok(n) => { if n == 0 { return Ok(false); } if splice_exact(&pipe_rd, write_fd, n).is_err() { // If the first splice manages to copy to the intermediate // pipe, but the second splice to stdout fails for some reason // we can recover by copying the data that we have from the // intermediate pipe to stdout using normal read/write. Then // we tell the caller to fall back. copy_exact(&pipe_rd, write_fd, n)?; return Ok(true); } } Err(_) => { return Ok(true); } } } } /// Move exactly `num_bytes` bytes from `read_fd` to `write_fd`. /// /// Panics if not enough bytes can be read. fn copy_exact(read_fd: &impl AsFd, write_fd: &impl AsFd, num_bytes: usize) -> nix::Result<()> { let mut left = num_bytes; let mut buf = [0; BUF_SIZE]; while left > 0 { let read = unistd::read(read_fd, &mut buf)?; assert_ne!(read, 0, "unexpected end of pipe"); let mut written = 0; while written < read { match unistd::write(write_fd, &buf[written..read])? 
{ 0 => panic!(), n => written += n, } } left -= read; } Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/000077500000000000000000000000001504311601400232565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/Cargo.toml000066400000000000000000000013711504311601400252100ustar00rootroot00000000000000[package] name = "uu_chcon" description = "chcon ~ (uutils) change file security context" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/chcon" keywords = ["coreutils", "uutils", "cli", "utility"] version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/chcon.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["entries", "fs", "perms"] } selinux = { workspace = true } thiserror = { workspace = true } libc = { workspace = true } fts-sys = { workspace = true } fluent = { workspace = true } [[bin]] name = "chcon" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/LICENSE000077700000000000000000000000001504311601400261242../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/locales/000077500000000000000000000000001504311601400247005ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/locales/en-US.ftl000066400000000000000000000067341504311601400263500ustar00rootroot00000000000000chcon-about = Change the SELinux security context of each FILE to CONTEXT. With --reference, change the security context of each FILE to that of RFILE. chcon-usage = chcon [OPTION]... CONTEXT FILE... chcon [OPTION]... [-u USER] [-r ROLE] [-l RANGE] [-t TYPE] FILE... chcon [OPTION]... --reference=RFILE FILE... # Help messages chcon-help-help = Print help information. 
chcon-help-dereference = Affect the referent of each symbolic link (this is the default), rather than the symbolic link itself. chcon-help-no-dereference = Affect symbolic links instead of any referenced file. chcon-help-preserve-root = Fail to operate recursively on '/'. chcon-help-no-preserve-root = Do not treat '/' specially (the default). chcon-help-reference = Use security context of RFILE, rather than specifying a CONTEXT value. chcon-help-user = Set user USER in the target security context. chcon-help-role = Set role ROLE in the target security context. chcon-help-type = Set type TYPE in the target security context. chcon-help-range = Set range RANGE in the target security context. chcon-help-recursive = Operate on files and directories recursively. chcon-help-follow-arg-dir-symlink = If a command line argument is a symbolic link to a directory, traverse it. Only valid when -R is specified. chcon-help-follow-dir-symlinks = Traverse every symbolic link to a directory encountered. Only valid when -R is specified. chcon-help-no-follow-symlinks = Do not traverse any symbolic links (default). Only valid when -R is specified. chcon-help-verbose = Output a diagnostic for every file processed. # Error messages - basic validation chcon-error-no-context-specified = No context is specified chcon-error-no-files-specified = No files are specified chcon-error-data-out-of-range = Data is out of range chcon-error-operation-failed = { $operation } failed chcon-error-operation-failed-on = { $operation } failed on { $operand } # Error messages - argument validation chcon-error-invalid-context = Invalid security context '{ $context }'. 
chcon-error-recursive-no-dereference-require-p = '--recursive' with '--no-dereference' require '-P' chcon-error-recursive-dereference-require-h-or-l = '--recursive' with '--dereference' require either '-H' or '-L' # Operation strings for error context chcon-op-getting-security-context = Getting security context chcon-op-file-name-validation = File name validation chcon-op-getting-meta-data = Getting meta data chcon-op-modifying-root-path = Modifying root path chcon-op-accessing = Accessing chcon-op-reading-directory = Reading directory chcon-op-reading-cyclic-directory = Reading cyclic directory chcon-op-applying-partial-context = Applying partial security context to unlabeled file chcon-op-creating-security-context = Creating security context chcon-op-setting-security-context-user = Setting security context user chcon-op-setting-security-context = Setting security context # Verbose output chcon-verbose-changing-context = { $util_name }: changing security context of { $file } # Warning messages chcon-warning-dangerous-recursive-root = It is dangerous to operate recursively on '/'. Use --{ $option } to override this failsafe. chcon-warning-dangerous-recursive-dir = It is dangerous to operate recursively on { $dir } (same as '/'). Use --{ $option } to override this failsafe. chcon-warning-circular-directory = Circular directory structure. This almost certainly means that you have a corrupted file system. NOTIFY YOUR SYSTEM MANAGER. The following directory is part of the cycle { $file }. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/locales/fr-FR.ftl000066400000000000000000000100141504311601400263170ustar00rootroot00000000000000chcon-about = Changer le contexte de sécurité SELinux de chaque FICHIER vers CONTEXTE. Avec --reference, changer le contexte de sécurité de chaque FICHIER vers celui de RFICHIER. chcon-usage = chcon [OPTION]... CONTEXTE FICHIER... chcon [OPTION]... [-u UTILISATEUR] [-r RÔLE] [-l PLAGE] [-t TYPE] FICHIER... chcon [OPTION]... 
--reference=RFICHIER FICHIER... # Messages d'aide chcon-help-help = Afficher les informations d'aide. chcon-help-dereference = Affecter la cible de chaque lien symbolique (par défaut), plutôt que le lien symbolique lui-même. chcon-help-no-dereference = Affecter les liens symboliques au lieu de tout fichier référencé. chcon-help-preserve-root = Échouer lors de l'opération récursive sur '/'. chcon-help-no-preserve-root = Ne pas traiter '/' spécialement (par défaut). chcon-help-reference = Utiliser le contexte de sécurité de RFICHIER, plutôt que de spécifier une valeur CONTEXTE. chcon-help-user = Définir l'utilisateur UTILISATEUR dans le contexte de sécurité cible. chcon-help-role = Définir le rôle RÔLE dans le contexte de sécurité cible. chcon-help-type = Définir le type TYPE dans le contexte de sécurité cible. chcon-help-range = Définir la plage PLAGE dans le contexte de sécurité cible. chcon-help-recursive = Opérer sur les fichiers et répertoires de manière récursive. chcon-help-follow-arg-dir-symlink = Si un argument de ligne de commande est un lien symbolique vers un répertoire, le traverser. Valide uniquement quand -R est spécifié. chcon-help-follow-dir-symlinks = Traverser chaque lien symbolique vers un répertoire rencontré. Valide uniquement quand -R est spécifié. chcon-help-no-follow-symlinks = Ne traverser aucun lien symbolique (par défaut). Valide uniquement quand -R est spécifié. chcon-help-verbose = Afficher un diagnostic pour chaque fichier traité. # Messages d'erreur - validation de base chcon-error-no-context-specified = Aucun contexte n'est spécifié chcon-error-no-files-specified = Aucun fichier n'est spécifié chcon-error-data-out-of-range = Données hors limites chcon-error-operation-failed = { $operation } a échoué chcon-error-operation-failed-on = { $operation } a échoué sur { $operand } # Messages d'erreur - validation des arguments chcon-error-invalid-context = Contexte de sécurité invalide '{ $context }'. 
chcon-error-recursive-no-dereference-require-p = '--recursive' avec '--no-dereference' nécessite '-P' chcon-error-recursive-dereference-require-h-or-l = '--recursive' avec '--dereference' nécessite soit '-H' soit '-L' # Chaînes d'opération pour le contexte d'erreur chcon-op-getting-security-context = Obtention du contexte de sécurité chcon-op-file-name-validation = Validation du nom de fichier chcon-op-getting-meta-data = Obtention des métadonnées chcon-op-modifying-root-path = Modification du chemin racine chcon-op-accessing = Accès chcon-op-reading-directory = Lecture du répertoire chcon-op-reading-cyclic-directory = Lecture du répertoire cyclique chcon-op-applying-partial-context = Application d'un contexte de sécurité partiel à un fichier non étiqueté chcon-op-creating-security-context = Création du contexte de sécurité chcon-op-setting-security-context-user = Définition de l'utilisateur du contexte de sécurité chcon-op-setting-security-context = Définition du contexte de sécurité # Sortie détaillée chcon-verbose-changing-context = { $util_name } : changement du contexte de sécurité de { $file } # Messages d'avertissement chcon-warning-dangerous-recursive-root = Il est dangereux d'opérer récursivement sur '/'. Utilisez --{ $option } pour outrepasser cette protection. chcon-warning-dangerous-recursive-dir = Il est dangereux d'opérer récursivement sur { $dir } (identique à '/'). Utilisez --{ $option } pour outrepasser cette protection. chcon-warning-circular-directory = Structure de répertoire circulaire. Cela signifie presque certainement que vous avez un système de fichiers corrompu. NOTIFIEZ VOTRE ADMINISTRATEUR SYSTÈME. Le répertoire suivant fait partie du cycle { $file }. 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/src/000077500000000000000000000000001504311601400240455ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/src/chcon.rs000066400000000000000000000655761504311601400255300ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (vars) RFILE #![cfg(target_os = "linux")] #![allow(clippy::upper_case_acronyms)] use clap::builder::ValueParser; use uucore::error::{UResult, USimpleError, UUsageError}; use uucore::translate; use uucore::{display::Quotable, format_usage, show_error, show_warning}; use clap::{Arg, ArgAction, Command}; use selinux::{OpaqueSecurityContext, SecurityContext}; use std::borrow::Cow; use std::ffi::{CStr, CString, OsStr, OsString}; use std::os::raw::c_int; use std::path::{Path, PathBuf}; use std::{fs, io}; mod errors; mod fts; use errors::*; pub mod options { pub static HELP: &str = "help"; pub static VERBOSE: &str = "verbose"; pub static REFERENCE: &str = "reference"; pub static USER: &str = "user"; pub static ROLE: &str = "role"; pub static TYPE: &str = "type"; pub static RANGE: &str = "range"; pub static RECURSIVE: &str = "recursive"; pub mod sym_links { pub static FOLLOW_ARG_DIR_SYM_LINK: &str = "follow-arg-dir-sym-link"; pub static FOLLOW_DIR_SYM_LINKS: &str = "follow-dir-sym-links"; pub static NO_FOLLOW_SYM_LINKS: &str = "no-follow-sym-links"; } pub mod dereference { pub static DEREFERENCE: &str = "dereference"; pub static NO_DEREFERENCE: &str = "no-dereference"; } pub mod preserve_root { pub static PRESERVE_ROOT: &str = "preserve-root"; pub static NO_PRESERVE_ROOT: &str = "no-preserve-root"; } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let config = uu_app(); let options = match parse_command_line(config, args) { Ok(r) => r, Err(r) => 
{ if let Error::CommandLine(r) = r { return Err(r.into()); } return Err(UUsageError::new(libc::EXIT_FAILURE, format!("{r}.\n"))); } }; let context = match &options.mode { CommandLineMode::ReferenceBased { reference } => { let result = match SecurityContext::of_path(reference, true, false) { Ok(Some(context)) => Ok(context), Ok(None) => { let err = io::Error::from_raw_os_error(libc::ENODATA); Err(Error::from_io1( translate!("chcon-op-getting-security-context"), reference, err, )) } Err(r) => Err(Error::from_selinux( translate!("chcon-op-getting-security-context"), r, )), }; match result { Err(r) => { return Err(USimpleError::new( libc::EXIT_FAILURE, format!("{}.", report_full_error(&r)), )); } Ok(file_context) => SELinuxSecurityContext::File(file_context), } } CommandLineMode::ContextBased { context } => { let c_context = match os_str_to_c_string(context) { Ok(context) => context, Err(_r) => { return Err(USimpleError::new( libc::EXIT_FAILURE, translate!("chcon-error-invalid-context", "context" => context.quote()), )); } }; if SecurityContext::from_c_str(&c_context, false).check() == Some(false) { return Err(USimpleError::new( libc::EXIT_FAILURE, translate!("chcon-error-invalid-context", "context" => context.quote()), )); } SELinuxSecurityContext::String(Some(c_context)) } CommandLineMode::Custom { .. 
} => SELinuxSecurityContext::String(None), }; let root_dev_ino = if options.preserve_root && options.recursive_mode.is_recursive() { match get_root_dev_ino() { Ok(r) => Some(r), Err(r) => { return Err(USimpleError::new( libc::EXIT_FAILURE, format!("{}.", report_full_error(&r)), )); } } } else { None }; let results = process_files(&options, &context, root_dev_ino); if results.is_empty() { return Ok(()); } for result in &results { show_error!("{}.", report_full_error(result)); } Err(libc::EXIT_FAILURE.into()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("chcon-about")) .override_usage(format_usage(&translate!("chcon-usage"))) .infer_long_args(true) .disable_help_flag(true) .args_override_self(true) .arg( Arg::new(options::HELP) .long(options::HELP) .help(translate!("chcon-help-help")) .action(ArgAction::Help), ) .arg( Arg::new(options::dereference::DEREFERENCE) .long(options::dereference::DEREFERENCE) .overrides_with(options::dereference::NO_DEREFERENCE) .help(translate!("chcon-help-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::dereference::NO_DEREFERENCE) .short('h') .long(options::dereference::NO_DEREFERENCE) .help(translate!("chcon-help-no-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::preserve_root::PRESERVE_ROOT) .long(options::preserve_root::PRESERVE_ROOT) .overrides_with(options::preserve_root::NO_PRESERVE_ROOT) .help(translate!("chcon-help-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::preserve_root::NO_PRESERVE_ROOT) .long(options::preserve_root::NO_PRESERVE_ROOT) .help(translate!("chcon-help-no-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::REFERENCE) .long(options::REFERENCE) .value_name("RFILE") .value_hint(clap::ValueHint::FilePath) .conflicts_with_all([options::USER, options::ROLE, options::TYPE, options::RANGE]) .help(translate!("chcon-help-reference")) 
.value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::USER) .short('u') .long(options::USER) .value_name("USER") .value_hint(clap::ValueHint::Username) .help(translate!("chcon-help-user")) .value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::ROLE) .short('r') .long(options::ROLE) .value_name("ROLE") .help(translate!("chcon-help-role")) .value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::TYPE) .short('t') .long(options::TYPE) .value_name("TYPE") .help(translate!("chcon-help-type")) .value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::RANGE) .short('l') .long(options::RANGE) .value_name("RANGE") .help(translate!("chcon-help-range")) .value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::RECURSIVE) .short('R') .long(options::RECURSIVE) .help(translate!("chcon-help-recursive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::sym_links::FOLLOW_ARG_DIR_SYM_LINK) .short('H') .requires(options::RECURSIVE) .overrides_with_all([ options::sym_links::FOLLOW_DIR_SYM_LINKS, options::sym_links::NO_FOLLOW_SYM_LINKS, ]) .help(translate!("chcon-help-follow-arg-dir-symlink")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::sym_links::FOLLOW_DIR_SYM_LINKS) .short('L') .requires(options::RECURSIVE) .overrides_with_all([ options::sym_links::FOLLOW_ARG_DIR_SYM_LINK, options::sym_links::NO_FOLLOW_SYM_LINKS, ]) .help(translate!("chcon-help-follow-dir-symlinks")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::sym_links::NO_FOLLOW_SYM_LINKS) .short('P') .requires(options::RECURSIVE) .overrides_with_all([ options::sym_links::FOLLOW_ARG_DIR_SYM_LINK, options::sym_links::FOLLOW_DIR_SYM_LINKS, ]) .help(translate!("chcon-help-no-follow-symlinks")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::VERBOSE) .short('v') .long(options::VERBOSE) .help(translate!("chcon-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new("FILE") .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath) 
.num_args(1..) .value_parser(ValueParser::os_string()), ) } #[derive(Debug)] struct Options { verbose: bool, preserve_root: bool, recursive_mode: RecursiveMode, affect_symlink_referent: bool, mode: CommandLineMode, files: Vec, } fn parse_command_line(config: Command, args: impl uucore::Args) -> Result { let matches = config.try_get_matches_from(args)?; let verbose = matches.get_flag(options::VERBOSE); let (recursive_mode, affect_symlink_referent) = if matches.get_flag(options::RECURSIVE) { if matches.get_flag(options::sym_links::FOLLOW_DIR_SYM_LINKS) { if matches.get_flag(options::dereference::NO_DEREFERENCE) { return Err(Error::ArgumentsMismatch(translate!( "chcon-error-recursive-no-dereference-require-p" ))); } (RecursiveMode::RecursiveAndFollowAllDirSymLinks, true) } else if matches.get_flag(options::sym_links::FOLLOW_ARG_DIR_SYM_LINK) { if matches.get_flag(options::dereference::NO_DEREFERENCE) { return Err(Error::ArgumentsMismatch(translate!( "chcon-error-recursive-no-dereference-require-p" ))); } (RecursiveMode::RecursiveAndFollowArgDirSymLinks, true) } else { if matches.get_flag(options::dereference::DEREFERENCE) { return Err(Error::ArgumentsMismatch(translate!( "chcon-error-recursive-dereference-require-h-or-l" ))); } (RecursiveMode::RecursiveButDoNotFollowSymLinks, false) } } else { let no_dereference = matches.get_flag(options::dereference::NO_DEREFERENCE); (RecursiveMode::NotRecursive, !no_dereference) }; // By default, do not preserve root. 
let preserve_root = matches.get_flag(options::preserve_root::PRESERVE_ROOT); let mut files = matches.get_many::("FILE").unwrap_or_default(); let mode = if let Some(path) = matches.get_one::(options::REFERENCE) { CommandLineMode::ReferenceBased { reference: PathBuf::from(path), } } else if matches.contains_id(options::USER) || matches.contains_id(options::ROLE) || matches.contains_id(options::TYPE) || matches.contains_id(options::RANGE) { CommandLineMode::Custom { user: matches.get_one::(options::USER).map(Into::into), role: matches.get_one::(options::ROLE).map(Into::into), the_type: matches.get_one::(options::TYPE).map(Into::into), range: matches.get_one::(options::RANGE).map(Into::into), } } else if let Some(context) = files.next() { CommandLineMode::ContextBased { context: context.into(), } } else { return Err(Error::MissingContext); }; let files: Vec<_> = files.map(PathBuf::from).collect(); if files.is_empty() { return Err(Error::MissingFiles); } Ok(Options { verbose, preserve_root, recursive_mode, affect_symlink_referent, mode, files, }) } #[derive(Debug, Copy, Clone)] enum RecursiveMode { NotRecursive, /// Do not traverse any symbolic links. RecursiveButDoNotFollowSymLinks, /// Traverse every symbolic link to a directory encountered. RecursiveAndFollowAllDirSymLinks, /// If a command line argument is a symbolic link to a directory, traverse it. 
RecursiveAndFollowArgDirSymLinks, } impl RecursiveMode { fn is_recursive(self) -> bool { match self { Self::NotRecursive => false, Self::RecursiveButDoNotFollowSymLinks | Self::RecursiveAndFollowAllDirSymLinks | Self::RecursiveAndFollowArgDirSymLinks => true, } } fn fts_open_options(self) -> c_int { match self { Self::NotRecursive | Self::RecursiveButDoNotFollowSymLinks => fts_sys::FTS_PHYSICAL, Self::RecursiveAndFollowAllDirSymLinks => fts_sys::FTS_LOGICAL, Self::RecursiveAndFollowArgDirSymLinks => { fts_sys::FTS_PHYSICAL | fts_sys::FTS_COMFOLLOW } } } } #[derive(Debug)] enum CommandLineMode { ReferenceBased { reference: PathBuf, }, ContextBased { context: OsString, }, Custom { user: Option, role: Option, the_type: Option, range: Option, }, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] struct DeviceAndINode { device_id: u64, inode: u64, } #[cfg(unix)] impl From for DeviceAndINode { fn from(md: fs::Metadata) -> Self { use std::os::unix::fs::MetadataExt; Self { device_id: md.dev(), inode: md.ino(), } } } impl TryFrom<&libc::stat> for DeviceAndINode { type Error = Error; #[allow(clippy::useless_conversion)] fn try_from(st: &libc::stat) -> Result { let device_id = u64::try_from(st.st_dev).map_err(|_r| Error::OutOfRange)?; let inode = u64::try_from(st.st_ino).map_err(|_r| Error::OutOfRange)?; Ok(Self { device_id, inode }) } } fn process_files( options: &Options, context: &SELinuxSecurityContext, root_dev_ino: Option, ) -> Vec { let fts_options = options.recursive_mode.fts_open_options(); let mut fts = match fts::FTS::new(options.files.iter(), fts_options) { Ok(fts) => fts, Err(err) => return vec![err], }; let mut errors = Vec::default(); loop { match fts.read_next_entry() { Ok(true) => { if let Err(err) = process_file(options, context, &mut fts, root_dev_ino) { errors.push(err); } } Ok(false) => break, Err(err) => { errors.push(err); break; } } } errors } fn process_file( options: &Options, context: &SELinuxSecurityContext, fts: &mut fts::FTS, root_dev_ino: Option, ) 
-> Result<()> { let mut entry = fts.last_entry_ref().unwrap(); let file_full_name = entry.path().map(PathBuf::from).ok_or_else(|| { Error::from_io( translate!("chcon-op-file-name-validation"), io::ErrorKind::InvalidInput.into(), ) })?; let fts_access_path = entry.access_path().ok_or_else(|| { let err = io::ErrorKind::InvalidInput.into(); Error::from_io1( translate!("chcon-op-file-name-validation"), &file_full_name, err, ) })?; let err = |s, k: io::ErrorKind| Error::from_io1(s, &file_full_name, k.into()); let fts_err = |s| { let r = io::Error::from_raw_os_error(entry.errno()); Err(Error::from_io1(s, &file_full_name, r)) }; // SAFETY: If `entry.fts_statp` is not null, then is is assumed to be valid. let file_dev_ino: DeviceAndINode = if let Some(st) = entry.stat() { st.try_into()? } else { return Err(err( translate!("chcon-op-getting-meta-data"), io::ErrorKind::InvalidInput, )); }; let mut result = Ok(()); match entry.flags() { fts_sys::FTS_D => { if options.recursive_mode.is_recursive() { if root_dev_ino_check(root_dev_ino, file_dev_ino) { // This happens e.g., with "chcon -R --preserve-root ... /" // and with "chcon -RH --preserve-root ... symlink-to-root". root_dev_ino_warn(&file_full_name); // Tell fts not to traverse into this hierarchy. let _ignored = fts.set(fts_sys::FTS_SKIP); // Ensure that we do not process "/" on the second visit. let _ignored = fts.read_next_entry(); return Err(err( translate!("chcon-op-modifying-root-path"), io::ErrorKind::PermissionDenied, )); } return Ok(()); } } fts_sys::FTS_DP => { if !options.recursive_mode.is_recursive() { return Ok(()); } } fts_sys::FTS_NS => { // For a top-level file or directory, this FTS_NS (stat failed) indicator is determined // at the time of the initial fts_open call. With programs like chmod, chown, and chgrp, // that modify permissions, it is possible that the file in question is accessible when // control reaches this point. 
So, if this is the first time we've seen the FTS_NS for // this file, tell fts_read to stat it "again". if entry.level() == 0 && entry.number() == 0 { entry.set_number(1); let _ignored = fts.set(fts_sys::FTS_AGAIN); return Ok(()); } result = fts_err(translate!("chcon-op-accessing")); } fts_sys::FTS_ERR => result = fts_err(translate!("chcon-op-accessing")), fts_sys::FTS_DNR => result = fts_err(translate!("chcon-op-reading-directory")), fts_sys::FTS_DC => { if cycle_warning_required(options.recursive_mode.fts_open_options(), &entry) { emit_cycle_warning(&file_full_name); return Err(err( translate!("chcon-op-reading-cyclic-directory"), io::ErrorKind::InvalidData, )); } } _ => {} } if entry.flags() == fts_sys::FTS_DP && result.is_ok() && root_dev_ino_check(root_dev_ino, file_dev_ino) { root_dev_ino_warn(&file_full_name); result = Err(err( translate!("chcon-op-modifying-root-path"), io::ErrorKind::PermissionDenied, )); } if result.is_ok() { if options.verbose { println!( "{}", translate!("chcon-verbose-changing-context", "util_name" => uucore::util_name(), "file" => file_full_name.quote()) ); } result = change_file_context(options, context, fts_access_path); } if !options.recursive_mode.is_recursive() { let _ignored = fts.set(fts_sys::FTS_SKIP); } result } fn change_file_context( options: &Options, context: &SELinuxSecurityContext, path: &Path, ) -> Result<()> { match &options.mode { CommandLineMode::Custom { user, role, the_type, range, } => { let err0 = || -> Result<()> { // If the file doesn't have a context, and we're not setting all of the context // components, there isn't really an obvious default. Thus, we just give up. 
let op = translate!("chcon-op-applying-partial-context"); let err = io::ErrorKind::InvalidInput.into(); Err(Error::from_io1(op, path, err)) }; let file_context = match SecurityContext::of_path(path, options.affect_symlink_referent, false) { Ok(Some(context)) => context, Ok(None) => return err0(), Err(r) => { return Err(Error::from_selinux( translate!("chcon-op-getting-security-context"), r, )); } }; let c_file_context = match file_context.to_c_string() { Ok(Some(context)) => context, Ok(None) => return err0(), Err(r) => { return Err(Error::from_selinux( translate!("chcon-op-getting-security-context"), r, )); } }; let se_context = OpaqueSecurityContext::from_c_str(c_file_context.as_ref()).map_err(|_r| { let err = io::ErrorKind::InvalidInput.into(); Error::from_io1(translate!("chcon-op-creating-security-context"), path, err) })?; type SetValueProc = fn(&OpaqueSecurityContext, &CStr) -> selinux::errors::Result<()>; let list: &[(&Option, SetValueProc)] = &[ (user, OpaqueSecurityContext::set_user), (role, OpaqueSecurityContext::set_role), (the_type, OpaqueSecurityContext::set_type), (range, OpaqueSecurityContext::set_range), ]; for (new_value, set_value_proc) in list { if let Some(new_value) = new_value { let c_new_value = os_str_to_c_string(new_value).map_err(|_r| { let err = io::ErrorKind::InvalidInput.into(); Error::from_io1(translate!("chcon-op-creating-security-context"), path, err) })?; set_value_proc(&se_context, &c_new_value).map_err(|r| { Error::from_selinux(translate!("chcon-op-setting-security-context-user"), r) })?; } } let context_string = se_context.to_c_string().map_err(|r| { Error::from_selinux(translate!("chcon-op-getting-security-context"), r) })?; if c_file_context.as_ref().to_bytes() == context_string.as_ref().to_bytes() { Ok(()) // Nothing to change. 
} else { SecurityContext::from_c_str(&context_string, false) .set_for_path(path, options.affect_symlink_referent, false) .map_err(|r| { Error::from_selinux(translate!("chcon-op-setting-security-context"), r) }) } } CommandLineMode::ReferenceBased { .. } | CommandLineMode::ContextBased { .. } => { if let Some(c_context) = context.to_c_string()? { SecurityContext::from_c_str(c_context.as_ref(), false) .set_for_path(path, options.affect_symlink_referent, false) .map_err(|r| { Error::from_selinux(translate!("chcon-op-setting-security-context"), r) }) } else { let err = io::ErrorKind::InvalidInput.into(); Err(Error::from_io1( translate!("chcon-op-setting-security-context"), path, err, )) } } } } #[cfg(unix)] pub(crate) fn os_str_to_c_string(s: &OsStr) -> Result { use std::os::unix::ffi::OsStrExt; CString::new(s.as_bytes()) .map_err(|_r| Error::from_io("CString::new()", io::ErrorKind::InvalidInput.into())) } /// Call `lstat()` to get the device and inode numbers for `/`. #[cfg(unix)] fn get_root_dev_ino() -> Result { fs::symlink_metadata("/") .map(DeviceAndINode::from) .map_err(|r| Error::from_io1("std::fs::symlink_metadata", "/", r)) } fn root_dev_ino_check(root_dev_ino: Option, dir_dev_ino: DeviceAndINode) -> bool { root_dev_ino == Some(dir_dev_ino) } fn root_dev_ino_warn(dir_name: &Path) { if dir_name.as_os_str() == "/" { show_warning!( "{}", translate!("chcon-warning-dangerous-recursive-root", "option" => options::preserve_root::NO_PRESERVE_ROOT) ); } else { show_warning!( "{}", translate!("chcon-warning-dangerous-recursive-dir", "dir" => dir_name.to_string_lossy(), "option" => options::preserve_root::NO_PRESERVE_ROOT) ); } } /// When `fts_read` returns [`fts_sys::FTS_DC`] to indicate a directory cycle, it may or may not indicate /// a real problem. /// When a program like chgrp performs a recursive traversal that requires traversing symbolic links, /// it is *not* a problem. /// However, when invoked with "-P -R", it deserves a warning. 
/// The `fts_options` parameter records the options that control this aspect of fts behavior, /// so test that. fn cycle_warning_required(fts_options: c_int, entry: &fts::EntryRef) -> bool { // When dereferencing no symlinks, or when dereferencing only those listed on the command line // and we're not processing a command-line argument, then a cycle is a serious problem. ((fts_options & fts_sys::FTS_PHYSICAL) != 0) && (((fts_options & fts_sys::FTS_COMFOLLOW) == 0) || entry.level() != 0) } fn emit_cycle_warning(file_name: &Path) { show_warning!( "{}", translate!("chcon-warning-circular-directory", "file" => file_name.to_string_lossy()) ); } #[derive(Debug)] enum SELinuxSecurityContext<'t> { File(SecurityContext<'t>), String(Option), } impl SELinuxSecurityContext<'_> { fn to_c_string(&self) -> Result>> { match self { Self::File(context) => context .to_c_string() .map_err(|r| Error::from_selinux("SELinuxSecurityContext::to_c_string()", r)), Self::String(context) => Ok(context.as_deref().map(Cow::Borrowed)), } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/src/errors.rs000066400000000000000000000046401504311601400257330ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
#![cfg(target_os = "linux")] use std::ffi::OsString; use std::fmt::Write; use std::io; use thiserror::Error; use uucore::display::Quotable; use uucore::translate; pub(crate) type Result = std::result::Result; #[derive(Error, Debug)] pub(crate) enum Error { #[error("{}", translate!("chcon-error-no-context-specified"))] MissingContext, #[error("{}", translate!("chcon-error-no-files-specified"))] MissingFiles, #[error("{}", translate!("chcon-error-data-out-of-range"))] OutOfRange, #[error("{0}")] ArgumentsMismatch(String), #[error(transparent)] CommandLine(#[from] clap::Error), #[error("{}", translate!("chcon-error-operation-failed", "operation" => operation.clone()))] SELinux { operation: String, #[source] source: selinux::errors::Error, }, #[error("{}", translate!("chcon-error-operation-failed", "operation" => operation.clone()))] Io { operation: String, #[source] source: io::Error, }, #[error("{}", translate!("chcon-error-operation-failed-on", "operation" => operation.clone(), "operand" => operand1.quote()))] Io1 { operation: String, operand1: OsString, #[source] source: io::Error, }, } impl Error { pub(crate) fn from_io(operation: impl Into, source: io::Error) -> Self { Self::Io { operation: operation.into(), source, } } pub(crate) fn from_io1( operation: impl Into, operand1: impl Into, source: io::Error, ) -> Self { Self::Io1 { operation: operation.into(), operand1: operand1.into(), source, } } pub(crate) fn from_selinux( operation: impl Into, source: selinux::errors::Error, ) -> Self { Self::SELinux { operation: operation.into(), source, } } } pub(crate) fn report_full_error(mut err: &dyn std::error::Error) -> String { let mut desc = String::with_capacity(256); write!(desc, "{err}").unwrap(); while let Some(source) = err.source() { err = source; write!(desc, ". 
{err}").unwrap(); } desc } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/src/fts.rs000066400000000000000000000136551504311601400252210ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. #![cfg(target_os = "linux")] use std::ffi::{CStr, CString, OsStr}; use std::marker::PhantomData; use std::os::raw::{c_int, c_long, c_short}; use std::path::Path; use std::{io, iter, ptr, slice}; use crate::errors::{Error, Result}; use crate::os_str_to_c_string; #[derive(Debug)] pub(crate) struct FTS { fts: ptr::NonNull, entry: Option>, _phantom_data: PhantomData, } impl FTS { pub(crate) fn new(paths: I, options: c_int) -> Result where I: IntoIterator, I::Item: AsRef, { let files_paths: Vec = paths .into_iter() .map(|s| os_str_to_c_string(s.as_ref())) .collect::>()?; if files_paths.is_empty() { return Err(Error::from_io( "FTS::new()", io::ErrorKind::InvalidInput.into(), )); } let path_argv: Vec<_> = files_paths .iter() .map(CString::as_ref) .map(CStr::as_ptr) .chain(iter::once(ptr::null())) .collect(); // SAFETY: We assume calling fts_open() is safe: // - `path_argv` is an array holding at least one path, and null-terminated. // - `compar` is None. let fts = unsafe { fts_sys::fts_open(path_argv.as_ptr().cast(), options, None) }; let fts = ptr::NonNull::new(fts) .ok_or_else(|| Error::from_io("fts_open()", io::Error::last_os_error()))?; Ok(Self { fts, entry: None, _phantom_data: PhantomData, }) } pub(crate) fn last_entry_ref(&mut self) -> Option { self.entry.map(move |entry| EntryRef::new(self, entry)) } pub(crate) fn read_next_entry(&mut self) -> Result { // SAFETY: We assume calling fts_read() is safe with a non-null `fts` // pointer assumed to be valid. 
let new_entry = unsafe { fts_sys::fts_read(self.fts.as_ptr()) }; self.entry = ptr::NonNull::new(new_entry); if self.entry.is_none() { let r = io::Error::last_os_error(); if let Some(0) = r.raw_os_error() { Ok(false) } else { Err(Error::from_io("fts_read()", r)) } } else { Ok(true) } } pub(crate) fn set(&mut self, instr: c_int) -> Result<()> { let fts = self.fts.as_ptr(); let entry = self .entry .ok_or_else(|| Error::from_io("FTS::set()", io::ErrorKind::UnexpectedEof.into()))?; // SAFETY: We assume calling fts_set() is safe with non-null `fts` // and `entry` pointers assumed to be valid. if unsafe { fts_sys::fts_set(fts, entry.as_ptr(), instr) } == -1 { Err(Error::from_io("fts_set()", io::Error::last_os_error())) } else { Ok(()) } } } impl Drop for FTS { fn drop(&mut self) { // SAFETY: We assume calling fts_close() is safe with a non-null `fts` // pointer assumed to be valid. unsafe { fts_sys::fts_close(self.fts.as_ptr()) }; } } #[derive(Debug)] pub(crate) struct EntryRef<'fts> { pub(crate) pointer: ptr::NonNull, _fts: PhantomData<&'fts FTS>, _phantom_data: PhantomData, } impl<'fts> EntryRef<'fts> { fn new(_fts: &'fts FTS, entry: ptr::NonNull) -> Self { Self { pointer: entry, _fts: PhantomData, _phantom_data: PhantomData, } } fn as_ref(&self) -> &fts_sys::FTSENT { // SAFETY: `self.pointer` is a non-null pointer that is assumed to be valid. unsafe { self.pointer.as_ref() } } fn as_mut(&mut self) -> &mut fts_sys::FTSENT { // SAFETY: `self.pointer` is a non-null pointer that is assumed to be valid. 
unsafe { self.pointer.as_mut() } } pub(crate) fn flags(&self) -> c_int { c_int::from(self.as_ref().fts_info) } pub(crate) fn errno(&self) -> c_int { self.as_ref().fts_errno } pub(crate) fn level(&self) -> c_short { self.as_ref().fts_level } pub(crate) fn number(&self) -> c_long { self.as_ref().fts_number } pub(crate) fn set_number(&mut self, new_number: c_long) { self.as_mut().fts_number = new_number; } pub(crate) fn path(&self) -> Option<&Path> { let entry = self.as_ref(); if entry.fts_pathlen == 0 { return None; } ptr::NonNull::new(entry.fts_path) .map(|path_ptr| { let path_size = usize::from(entry.fts_pathlen).saturating_add(1); // SAFETY: `entry.fts_path` is a non-null pointer that is assumed to be valid. unsafe { slice::from_raw_parts(path_ptr.as_ptr().cast(), path_size) } }) .and_then(|bytes| CStr::from_bytes_with_nul(bytes).ok()) .map(c_str_to_os_str) .map(Path::new) } pub(crate) fn access_path(&self) -> Option<&Path> { ptr::NonNull::new(self.as_ref().fts_accpath) .map(|path_ptr| { // SAFETY: `entry.fts_accpath` is a non-null pointer that is assumed to be valid. unsafe { CStr::from_ptr(path_ptr.as_ptr()) } }) .map(c_str_to_os_str) .map(Path::new) } pub(crate) fn stat(&self) -> Option<&libc::stat> { ptr::NonNull::new(self.as_ref().fts_statp).map(|stat_ptr| { // SAFETY: `entry.fts_statp` is a non-null pointer that is assumed to be valid. 
unsafe { stat_ptr.as_ref() } }) } } #[cfg(unix)] fn c_str_to_os_str(s: &CStr) -> &OsStr { use std::os::unix::ffi::OsStrExt; OsStr::from_bytes(s.to_bytes()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chcon/src/main.rs000066400000000000000000000000651504311601400253400ustar00rootroot00000000000000#![cfg(target_os = "linux")] uucore::bin!(uu_chcon); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/000077500000000000000000000000001504311601400232675ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/Cargo.toml000066400000000000000000000011511504311601400252150ustar00rootroot00000000000000[package] name = "uu_chgrp" description = "chgrp ~ (uutils) change the group ownership of FILE" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/chgrp" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/chgrp.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["entries", "fs", "perms"] } fluent = { workspace = true } [[bin]] name = "chgrp" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/LICENSE000077700000000000000000000000001504311601400261352../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/locales/000077500000000000000000000000001504311601400247115ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/locales/en-US.ftl000066400000000000000000000020001504311601400263370ustar00rootroot00000000000000chgrp-about = Change the group of each FILE to GROUP. chgrp-usage = chgrp [OPTION]... GROUP FILE... chgrp [OPTION]... --reference=RFILE FILE... # Help messages chgrp-help-print-help = Print help information. 
chgrp-help-changes = like verbose but report only when a change is made chgrp-help-quiet = suppress most error messages chgrp-help-verbose = output a diagnostic for every file processed chgrp-help-preserve-root = fail to operate recursively on '/' chgrp-help-no-preserve-root = do not treat '/' specially (the default) chgrp-help-reference = use RFILE's group rather than specifying GROUP values chgrp-help-from = change the group only if its current group matches GROUP chgrp-help-recursive = operate on files and directories recursively # Error messages chgrp-error-invalid-group-id = invalid group id: '{ $gid_str }' chgrp-error-invalid-group = invalid group: '{ $group }' chgrp-error-failed-to-get-attributes = failed to get attributes of { $file } chgrp-error-invalid-user = invalid user: '{ $from_group }' coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/locales/fr-FR.ftl000066400000000000000000000022411504311601400263330ustar00rootroot00000000000000chgrp-about = Changer le groupe de chaque FICHIER vers GROUPE. chgrp-usage = chgrp [OPTION]... GROUPE FICHIER... chgrp [OPTION]... --reference=RFICHIER FICHIER... # Messages d'aide chgrp-help-print-help = Afficher les informations d'aide. 
chgrp-help-changes = comme verbeux mais rapporter seulement lors d'un changement chgrp-help-quiet = supprimer la plupart des messages d'erreur chgrp-help-verbose = afficher un diagnostic pour chaque fichier traité chgrp-help-preserve-root = échouer à opérer récursivement sur '/' chgrp-help-no-preserve-root = ne pas traiter '/' spécialement (par défaut) chgrp-help-reference = utiliser le groupe de RFICHIER plutôt que spécifier les valeurs de GROUPE chgrp-help-from = changer le groupe seulement si son groupe actuel correspond à GROUPE chgrp-help-recursive = opérer sur les fichiers et répertoires récursivement # Messages d'erreur chgrp-error-invalid-group-id = identifiant de groupe invalide : '{ $gid_str }' chgrp-error-invalid-group = groupe invalide : '{ $group }' chgrp-error-failed-to-get-attributes = échec de l'obtention des attributs de { $file } chgrp-error-invalid-user = utilisateur invalide : '{ $from_group }' coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/src/000077500000000000000000000000001504311601400240565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/src/chgrp.rs000066400000000000000000000133151504311601400255320ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) COMFOLLOW Chowner RFILE RFILE's derefer dgid nonblank nonprint nonprinting use uucore::display::Quotable; pub use uucore::entries; use uucore::error::{FromIo, UResult, USimpleError}; use uucore::format_usage; use uucore::perms::{GidUidOwnerFilter, IfFrom, chown_base, options}; use uucore::translate; use clap::{Arg, ArgAction, ArgMatches, Command}; use std::fs; use std::os::unix::fs::MetadataExt; fn parse_gid_from_str(group: &str) -> Result { if let Some(gid_str) = group.strip_prefix(':') { // Handle :gid format gid_str .parse::() .map_err(|_| translate!("chgrp-error-invalid-group-id", "gid_str" => gid_str)) } else { // Try as group name first match entries::grp2gid(group) { Ok(g) => Ok(g), // If group name lookup fails, try parsing as raw number Err(_) => group .parse::() .map_err(|_| translate!("chgrp-error-invalid-group", "group" => group)), } } } fn get_dest_gid(matches: &ArgMatches) -> UResult<(Option, String)> { let mut raw_group = String::new(); let dest_gid = if let Some(file) = matches.get_one::(options::REFERENCE) { fs::metadata(file) .map(|meta| { let gid = meta.gid(); raw_group = entries::gid2grp(gid).unwrap_or_else(|_| gid.to_string()); Some(gid) }) .map_err_context( || translate!("chgrp-error-failed-to-get-attributes", "file" => file.quote()), )? 
} else { let group = matches .get_one::(options::ARG_GROUP) .map(|s| s.as_str()) .unwrap_or_default(); raw_group = group.to_string(); if group.is_empty() { None } else { match parse_gid_from_str(group) { Ok(g) => Some(g), Err(e) => return Err(USimpleError::new(1, e)), } } }; Ok((dest_gid, raw_group)) } fn parse_gid_and_uid(matches: &ArgMatches) -> UResult { let (dest_gid, raw_group) = get_dest_gid(matches)?; // Handle --from option let filter = if let Some(from_group) = matches.get_one::(options::FROM) { match parse_gid_from_str(from_group) { Ok(g) => IfFrom::Group(g), Err(_) => { return Err(USimpleError::new( 1, translate!("chgrp-error-invalid-user", "from_group" => from_group), )); } } } else { IfFrom::All }; Ok(GidUidOwnerFilter { dest_gid, dest_uid: None, raw_owner: raw_group, filter, }) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { chown_base(uu_app(), args, options::ARG_GROUP, parse_gid_and_uid, true) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("chgrp-about")) .override_usage(format_usage(&translate!("chgrp-usage"))) .infer_long_args(true) .disable_help_flag(true) .arg( Arg::new(options::HELP) .long(options::HELP) .help(translate!("chgrp-help-print-help")) .action(ArgAction::Help), ) .arg( Arg::new(options::verbosity::CHANGES) .short('c') .long(options::verbosity::CHANGES) .help(translate!("chgrp-help-changes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::verbosity::SILENT) .short('f') .long(options::verbosity::SILENT) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::verbosity::QUIET) .long(options::verbosity::QUIET) .help(translate!("chgrp-help-quiet")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::verbosity::VERBOSE) .short('v') .long(options::verbosity::VERBOSE) .help(translate!("chgrp-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::preserve_root::PRESERVE) .long(options::preserve_root::PRESERVE) 
.help(translate!("chgrp-help-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::preserve_root::NO_PRESERVE) .long(options::preserve_root::NO_PRESERVE) .help(translate!("chgrp-help-no-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::REFERENCE) .long(options::REFERENCE) .value_name("RFILE") .value_hint(clap::ValueHint::FilePath) .help(translate!("chgrp-help-reference")), ) .arg( Arg::new(options::FROM) .long(options::FROM) .value_name("GROUP") .help(translate!("chgrp-help-from")), ) .arg( Arg::new(options::RECURSIVE) .short('R') .long(options::RECURSIVE) .help(translate!("chgrp-help-recursive")) .action(ArgAction::SetTrue), ) // Add common arguments with chgrp, chown & chmod .args(uucore::perms::common_args()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chgrp/src/main.rs000066400000000000000000000000301504311601400253410ustar00rootroot00000000000000uucore::bin!(uu_chgrp); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/000077500000000000000000000000001504311601400232565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/Cargo.toml000066400000000000000000000012031504311601400252020ustar00rootroot00000000000000[package] name = "uu_chmod" description = "chmod ~ (uutils) change mode of FILE" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/chmod" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/chmod.rs" [dependencies] clap = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = ["entries", "fs", "mode", "perms"] } fluent = { workspace = true } [[bin]] name = "chmod" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/LICENSE000077700000000000000000000000001504311601400261242../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/locales/000077500000000000000000000000001504311601400247005ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/locales/en-US.ftl000066400000000000000000000036741504311601400263500ustar00rootroot00000000000000chmod-about = Change the mode of each FILE to MODE. With --reference, change the mode of each FILE to that of RFILE. chmod-usage = chmod [OPTION]... MODE[,MODE]... FILE... chmod [OPTION]... OCTAL-MODE FILE... chmod [OPTION]... --reference=RFILE FILE... chmod-after-help = Each MODE is of the form [ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+. chmod-error-cannot-stat = cannot stat attributes of {$file} chmod-error-dangling-symlink = cannot operate on dangling symlink {$file} chmod-error-no-such-file = cannot access {$file}: No such file or directory chmod-error-preserve-root = it is dangerous to operate recursively on {$file} chmod: use --no-preserve-root to override this failsafe chmod-error-permission-denied = {$file}: Permission denied chmod-error-new-permissions = {$file}: new permissions are {$actual}, not {$expected} chmod-error-missing-operand = missing operand # Help messages chmod-help-print-help = Print help information. 
chmod-help-changes = like verbose but report only when a change is made chmod-help-quiet = suppress most error messages chmod-help-verbose = output a diagnostic for every file processed chmod-help-no-preserve-root = do not treat '/' specially (the default) chmod-help-preserve-root = fail to operate recursively on '/' chmod-help-recursive = change files and directories recursively chmod-help-reference = use RFILE's mode instead of MODE values # Verbose messages chmod-verbose-failed-dangling = failed to change mode of {$file} from 0000 (---------) to 1500 (r-x-----T) chmod-verbose-neither-changed = neither symbolic link {$file} nor referent has been changed chmod-verbose-mode-retained = mode of {$file} retained as {$mode_octal} ({$mode_display}) chmod-verbose-failed-change = failed to change mode of file {$file} from {$old_mode} ({$old_mode_display}) to {$new_mode} ({$new_mode_display}) chmod-verbose-mode-changed = mode of {$file} changed from {$old_mode} ({$old_mode_display}) to {$new_mode} ({$new_mode_display}) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/locales/fr-FR.ftl000066400000000000000000000043101504311601400263210ustar00rootroot00000000000000chmod-about = Changer le mode de chaque FICHIER vers MODE. Avec --reference, changer le mode de chaque FICHIER vers celui de RFICHIER. chmod-usage = chmod [OPTION]... MODE[,MODE]... FICHIER... chmod [OPTION]... MODE-OCTAL FICHIER... chmod [OPTION]... --reference=RFICHIER FICHIER... chmod-after-help = Chaque MODE est de la forme [ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+. # Messages d'aide chmod-help-print-help = Afficher les informations d'aide. 
chmod-help-changes = comme verbeux mais rapporter seulement lors d'un changement chmod-help-quiet = supprimer la plupart des messages d'erreur chmod-help-verbose = afficher un diagnostic pour chaque fichier traité chmod-help-no-preserve-root = ne pas traiter '/' spécialement (par défaut) chmod-help-preserve-root = échouer à opérer récursivement sur '/' chmod-help-recursive = changer les fichiers et répertoires récursivement chmod-help-reference = utiliser le mode de RFICHIER au lieu des valeurs de MODE # Messages d'erreur chmod-error-cannot-stat = impossible d'obtenir les attributs de {$file} chmod-error-dangling-symlink = impossible d'opérer sur le lien symbolique pendouillant {$file} chmod-error-no-such-file = impossible d'accéder à {$file} : Aucun fichier ou dossier de ce type chmod-error-preserve-root = il est dangereux d'opérer récursivement sur {$file} chmod: utiliser --no-preserve-root pour outrepasser cette protection chmod-error-permission-denied = {$file} : Permission refusée chmod-error-new-permissions = {$file} : les nouvelles permissions sont {$actual}, pas {$expected} chmod-error-missing-operand = opérande manquant # Messages verbeux/de statut chmod-verbose-failed-dangling = échec du changement de mode de {$file} de 0000 (---------) vers 1500 (r-x-----T) chmod-verbose-neither-changed = ni le lien symbolique {$file} ni la référence n'ont été changés chmod-verbose-mode-retained = mode de {$file} conservé comme {$mode_octal} ({$mode_display}) chmod-verbose-failed-change = échec du changement de mode du fichier {$file} de {$old_mode} ({$old_mode_display}) vers {$new_mode} ({$new_mode_display}) chmod-verbose-mode-changed = mode de {$file} changé de {$old_mode} ({$old_mode_display}) vers {$new_mode} ({$new_mode_display}) 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/src/000077500000000000000000000000001504311601400240455ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/src/chmod.rs000066400000000000000000000456751504311601400255260ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) Chmoder cmode fmode fperm fref ugoa RFILE RFILE's use clap::{Arg, ArgAction, Command}; use std::ffi::OsString; use std::fs; use std::os::unix::fs::{MetadataExt, PermissionsExt}; use std::path::Path; use thiserror::Error; use uucore::display::Quotable; use uucore::error::{ExitCode, UError, UResult, USimpleError, UUsageError, set_exit_code}; use uucore::fs::display_permissions_unix; use uucore::libc::mode_t; #[cfg(not(windows))] use uucore::mode; use uucore::perms::{TraverseSymlinks, configure_symlink_and_recursion}; use uucore::{format_usage, show, show_error}; use uucore::translate; #[derive(Debug, Error)] enum ChmodError { #[error("{}", translate!("chmod-error-cannot-stat", "file" => _0.quote()))] CannotStat(String), #[error("{}", translate!("chmod-error-dangling-symlink", "file" => _0.quote()))] DanglingSymlink(String), #[error("{}", translate!("chmod-error-no-such-file", "file" => _0.quote()))] NoSuchFile(String), #[error("{}", translate!("chmod-error-preserve-root", "file" => _0.quote()))] PreserveRoot(String), #[error("{}", translate!("chmod-error-permission-denied", "file" => _0.quote()))] PermissionDenied(String), #[error("{}", translate!("chmod-error-new-permissions", "file" => _0.clone(), "actual" => _1.clone(), "expected" => _2.clone()))] NewPermissions(String, String, String), } impl UError for ChmodError {} mod options { pub const HELP: &str = "help"; pub const CHANGES: &str = "changes"; pub const QUIET: &str = "quiet"; // 
visible_alias("silent") pub const VERBOSE: &str = "verbose"; pub const NO_PRESERVE_ROOT: &str = "no-preserve-root"; pub const PRESERVE_ROOT: &str = "preserve-root"; pub const REFERENCE: &str = "RFILE"; pub const RECURSIVE: &str = "recursive"; pub const MODE: &str = "MODE"; pub const FILE: &str = "FILE"; } /// Extract negative modes (starting with '-') from the rest of the arguments. /// /// This is mainly required for GNU compatibility, where "non-positional negative" modes are used /// as the actual positional MODE. Some examples of these cases are: /// * "chmod -w -r file", which is the same as "chmod -w,-r file" /// * "chmod -w file -r", which is the same as "chmod -w,-r file" /// /// These can currently not be handled by clap. /// Therefore it might be possible that a pseudo MODE is inserted to pass clap parsing. /// The pseudo MODE is later replaced by the extracted (and joined) negative modes. fn extract_negative_modes(mut args: impl uucore::Args) -> (Option, Vec) { // we look up the args until "--" is found // "-mode" will be extracted into parsed_cmode_vec let (parsed_cmode_vec, pre_double_hyphen_args): (Vec, Vec) = args.by_ref().take_while(|a| a != "--").partition(|arg| { let arg = if let Some(arg) = arg.to_str() { arg.to_string() } else { return false; }; arg.len() >= 2 && arg.starts_with('-') && matches!( arg.chars().nth(1).unwrap(), 'r' | 'w' | 'x' | 'X' | 's' | 't' | 'u' | 'g' | 'o' | '0'..='7' ) }); let mut clean_args = Vec::new(); if !parsed_cmode_vec.is_empty() { // we need a pseudo cmode for clap, which won't be used later. // this is required because clap needs the default "chmod MODE FILE" scheme. 
clean_args.push("w".into()); } clean_args.extend(pre_double_hyphen_args); if let Some(arg) = args.next() { // as there is still something left in the iterator, we previously consumed the "--" // -> add it to the args again clean_args.push("--".into()); clean_args.push(arg); } clean_args.extend(args); let parsed_cmode = Some( parsed_cmode_vec .iter() .map(|s| s.to_str().unwrap()) .collect::>() .join(","), ) .filter(|s| !s.is_empty()); (parsed_cmode, clean_args) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let (parsed_cmode, args) = extract_negative_modes(args.skip(1)); // skip binary name let matches = uu_app() .after_help(translate!("chmod-after-help")) .try_get_matches_from(args)?; let changes = matches.get_flag(options::CHANGES); let quiet = matches.get_flag(options::QUIET); let verbose = matches.get_flag(options::VERBOSE); let preserve_root = matches.get_flag(options::PRESERVE_ROOT); let fmode = match matches.get_one::(options::REFERENCE) { Some(fref) => match fs::metadata(fref) { Ok(meta) => Some(meta.mode() & 0o7777), Err(_) => { return Err(ChmodError::CannotStat(fref.to_string()).into()); } }, None => None, }; let modes = matches.get_one::(options::MODE); let cmode = if let Some(parsed_cmode) = parsed_cmode { parsed_cmode } else { modes.unwrap().to_string() // modes is required }; // FIXME: enable non-utf8 paths let mut files: Vec = matches .get_many::(options::FILE) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let cmode = if fmode.is_some() { // "--reference" and MODE are mutually exclusive // if "--reference" was used MODE needs to be interpreted as another FILE // it wasn't possible to implement this behavior directly with clap files.push(cmode); None } else { Some(cmode) }; if files.is_empty() { return Err(UUsageError::new( 1, translate!("chmod-error-missing-operand"), )); } let (recursive, dereference, traverse_symlinks) = configure_symlink_and_recursion(&matches, TraverseSymlinks::First)?; let chmoder 
= Chmoder { changes, quiet, verbose, preserve_root, recursive, fmode, cmode, traverse_symlinks, dereference, }; chmoder.chmod(&files) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("chmod-about")) .override_usage(format_usage(&translate!("chmod-usage"))) .args_override_self(true) .infer_long_args(true) .no_binary_name(true) .disable_help_flag(true) .arg( Arg::new(options::HELP) .long(options::HELP) .help(translate!("chmod-help-print-help")) .action(ArgAction::Help), ) .arg( Arg::new(options::CHANGES) .long(options::CHANGES) .short('c') .help(translate!("chmod-help-changes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::QUIET) .long(options::QUIET) .visible_alias("silent") .short('f') .help(translate!("chmod-help-quiet")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::VERBOSE) .long(options::VERBOSE) .short('v') .help(translate!("chmod-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_PRESERVE_ROOT) .long(options::NO_PRESERVE_ROOT) .help(translate!("chmod-help-no-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRESERVE_ROOT) .long(options::PRESERVE_ROOT) .help(translate!("chmod-help-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::RECURSIVE) .long(options::RECURSIVE) .short('R') .help(translate!("chmod-help-recursive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::REFERENCE) .long("reference") .value_hint(clap::ValueHint::FilePath) .help(translate!("chmod-help-reference")), ) .arg( Arg::new(options::MODE).required_unless_present(options::REFERENCE), // It would be nice if clap could parse with delimiter, e.g. "g-x,u+x", // however .multiple_occurrences(true) cannot be used here because FILE already needs that. 
// Only one positional argument with .multiple_occurrences(true) set is allowed per command ) .arg( Arg::new(options::FILE) .required_unless_present(options::MODE) .action(ArgAction::Append) .value_hint(clap::ValueHint::AnyPath), ) // Add common arguments with chgrp, chown & chmod .args(uucore::perms::common_args()) } struct Chmoder { changes: bool, quiet: bool, verbose: bool, preserve_root: bool, recursive: bool, fmode: Option, cmode: Option, traverse_symlinks: TraverseSymlinks, dereference: bool, } impl Chmoder { fn chmod(&self, files: &[String]) -> UResult<()> { let mut r = Ok(()); for filename in files { let filename = &filename[..]; let file = Path::new(filename); if !file.exists() { if file.is_symlink() { if !self.dereference && !self.recursive { // The file is a symlink and we should not follow it // Don't try to change the mode of the symlink itself continue; } if self.recursive && self.traverse_symlinks == TraverseSymlinks::None { continue; } if !self.quiet { show!(ChmodError::DanglingSymlink(filename.to_string())); set_exit_code(1); } if self.verbose { println!( "{}", translate!("chmod-verbose-failed-dangling", "file" => filename.quote()) ); } } else if !self.quiet { show!(ChmodError::NoSuchFile(filename.to_string())); } // GNU exits with exit code 1 even if -q or --quiet are passed // So we set the exit code, because it hasn't been set yet if `self.quiet` is true. 
set_exit_code(1); continue; } else if !self.dereference && file.is_symlink() { // The file is a symlink and we should not follow it // chmod 755 --no-dereference a/link // should not change the permissions in this case continue; } if self.recursive && self.preserve_root && filename == "/" { return Err(ChmodError::PreserveRoot(filename.to_string()).into()); } if self.recursive { r = self.walk_dir(file); } else { r = self.chmod_file(file).and(r); } } r } fn walk_dir(&self, file_path: &Path) -> UResult<()> { let mut r = self.chmod_file(file_path); // Determine whether to traverse symlinks based on `self.traverse_symlinks` let should_follow_symlink = match self.traverse_symlinks { TraverseSymlinks::All => true, TraverseSymlinks::First => { file_path == file_path.canonicalize().unwrap_or(file_path.to_path_buf()) } TraverseSymlinks::None => false, }; // If the path is a directory (or we should follow symlinks), recurse into it if (!file_path.is_symlink() || should_follow_symlink) && file_path.is_dir() { for dir_entry in file_path.read_dir()? 
{ let path = dir_entry?.path(); if !path.is_symlink() { r = self.walk_dir(path.as_path()); } else if should_follow_symlink { r = self.chmod_file(path.as_path()).and(r); } } } r } #[cfg(windows)] fn chmod_file(&self, file: &Path) -> UResult<()> { // chmod is useless on Windows // it doesn't set any permissions at all // instead it just sets the readonly attribute on the file Ok(()) } #[cfg(unix)] fn chmod_file(&self, file: &Path) -> UResult<()> { use uucore::{mode::get_umask, perms::get_metadata}; let metadata = get_metadata(file, self.dereference); let fperm = match metadata { Ok(meta) => meta.mode() & 0o7777, Err(err) => { // Handle dangling symlinks or other errors return if file.is_symlink() && !self.dereference { if self.verbose { println!( "neither symbolic link {} nor referent has been changed", file.quote() ); } Ok(()) // Skip dangling symlinks } else if err.kind() == std::io::ErrorKind::PermissionDenied { // These two filenames would normally be conditionally // quoted, but GNU's tests expect them to always be quoted Err(ChmodError::PermissionDenied(file.to_string_lossy().to_string()).into()) } else { Err(ChmodError::CannotStat(file.to_string_lossy().to_string()).into()) }; } }; // Determine the new permissions to apply match self.fmode { Some(mode) => self.change_file(fperm, mode, file)?, None => { let cmode_unwrapped = self.cmode.clone().unwrap(); let mut new_mode = fperm; let mut naively_expected_new_mode = new_mode; for mode in cmode_unwrapped.split(',') { let result = if mode.chars().any(|c| c.is_ascii_digit()) { mode::parse_numeric(new_mode, mode, file.is_dir()).map(|v| (v, v)) } else { mode::parse_symbolic(new_mode, mode, get_umask(), file.is_dir()).map(|m| { // calculate the new mode as if umask was 0 let naive_mode = mode::parse_symbolic( naively_expected_new_mode, mode, 0, file.is_dir(), ) .unwrap(); // we know that mode must be valid, so this cannot fail (m, naive_mode) }) }; match result { Ok((mode, naive_mode)) => { new_mode = mode; 
naively_expected_new_mode = naive_mode; } Err(f) => { return if self.quiet { Err(ExitCode::new(1)) } else { Err(USimpleError::new(1, f)) }; } } } self.change_file(fperm, new_mode, file)?; // if a permission would have been removed if umask was 0, but it wasn't because umask was not 0, print an error and fail if (new_mode & !naively_expected_new_mode) != 0 { return Err(ChmodError::NewPermissions( file.to_string_lossy().to_string(), display_permissions_unix(new_mode as mode_t, false), display_permissions_unix(naively_expected_new_mode as mode_t, false), ) .into()); } } } Ok(()) } #[cfg(unix)] fn change_file(&self, fperm: u32, mode: u32, file: &Path) -> Result<(), i32> { if fperm == mode { if self.verbose && !self.changes { println!( "mode of {} retained as {fperm:04o} ({})", file.quote(), display_permissions_unix(fperm as mode_t, false), ); } Ok(()) } else if let Err(err) = fs::set_permissions(file, fs::Permissions::from_mode(mode)) { if !self.quiet { show_error!("{err}"); } if self.verbose { println!( "failed to change mode of file {} from {fperm:04o} ({}) to {mode:04o} ({})", file.quote(), display_permissions_unix(fperm as mode_t, false), display_permissions_unix(mode as mode_t, false) ); } Err(1) } else { if self.verbose || self.changes { println!( "mode of {} changed from {fperm:04o} ({}) to {mode:04o} ({})", file.quote(), display_permissions_unix(fperm as mode_t, false), display_permissions_unix(mode as mode_t, false) ); } Ok(()) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_extract_negative_modes() { // "chmod -w -r file" becomes "chmod -w,-r file". clap does not accept "-w,-r" as MODE. // Therefore, "w" is added as pseudo mode to pass clap. let (c, a) = extract_negative_modes(["-w", "-r", "file"].iter().map(OsString::from)); assert_eq!(c, Some("-w,-r".to_string())); assert_eq!(a, ["w", "file"]); // "chmod -w file -r" becomes "chmod -w,-r file". clap does not accept "-w,-r" as MODE. // Therefore, "w" is added as pseudo mode to pass clap. 
let (c, a) = extract_negative_modes(["-w", "file", "-r"].iter().map(OsString::from)); assert_eq!(c, Some("-w,-r".to_string())); assert_eq!(a, ["w", "file"]); // "chmod -w -- -r file" becomes "chmod -w -r file", where "-r" is interpreted as file. // Again, "w" is needed as pseudo mode. let (c, a) = extract_negative_modes(["-w", "--", "-r", "f"].iter().map(OsString::from)); assert_eq!(c, Some("-w".to_string())); assert_eq!(a, ["w", "--", "-r", "f"]); // "chmod -- -r file" becomes "chmod -r file". let (c, a) = extract_negative_modes(["--", "-r", "file"].iter().map(OsString::from)); assert_eq!(c, None); assert_eq!(a, ["--", "-r", "file"]); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chmod/src/main.rs000066400000000000000000000000301504311601400253300ustar00rootroot00000000000000uucore::bin!(uu_chmod); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/000077500000000000000000000000001504311601400233025ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/Cargo.toml000066400000000000000000000011431504311601400252310ustar00rootroot00000000000000[package] name = "uu_chown" description = "chown ~ (uutils) change the ownership of FILE" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/chown" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/chown.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["entries", "fs", "perms"] } fluent = { workspace = true } [[bin]] name = "chown" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/LICENSE000077700000000000000000000000001504311601400261502../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/locales/000077500000000000000000000000001504311601400247245ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/locales/en-US.ftl000066400000000000000000000022131504311601400263600ustar00rootroot00000000000000chown-about = Change file owner and group chown-usage = chown [OPTION]... [OWNER][:[GROUP]] FILE... chown [OPTION]... --reference=RFILE FILE... # Help messages chown-help-print-help = Print help information. chown-help-changes = like verbose but report only when a change is made chown-help-from = change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute chown-help-preserve-root = fail to operate recursively on '/' chown-help-no-preserve-root = do not treat '/' specially (the default) chown-help-quiet = suppress most error messages chown-help-recursive = operate on files and directories recursively chown-help-reference = use RFILE's owner and group rather than specifying OWNER:GROUP values chown-help-verbose = output a diagnostic for every file processed # Error messages chown-error-failed-to-get-attributes = failed to get attributes of { $file } chown-error-invalid-user = invalid user: { $user } chown-error-invalid-group = invalid group: { $group } chown-error-invalid-spec = invalid spec: { $spec } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/locales/fr-FR.ftl000066400000000000000000000025641504311601400263560ustar00rootroot00000000000000chown-about = Changer le propriétaire et le groupe des fichiers chown-usage = chown [OPTION]... [PROPRIÉTAIRE][:[GROUPE]] FICHIER... chown [OPTION]... --reference=RFICHIER FICHIER... 
# Messages d'aide chown-help-print-help = Afficher les informations d'aide. chown-help-changes = comme verbeux mais rapporter seulement lors d'un changement chown-help-from = changer le propriétaire et/ou le groupe de chaque fichier seulement si son propriétaire et/ou groupe actuel correspondent à ceux spécifiés ici. L'un ou l'autre peut être omis, auquel cas une correspondance n'est pas requise pour l'attribut omis chown-help-preserve-root = échouer à opérer récursivement sur '/' chown-help-no-preserve-root = ne pas traiter '/' spécialement (par défaut) chown-help-quiet = supprimer la plupart des messages d'erreur chown-help-recursive = opérer sur les fichiers et répertoires récursivement chown-help-reference = utiliser le propriétaire et groupe de RFICHIER plutôt que spécifier les valeurs PROPRIÉTAIRE:GROUPE chown-help-verbose = afficher un diagnostic pour chaque fichier traité # Messages d'erreur chown-error-failed-to-get-attributes = échec de l'obtention des attributs de { $file } chown-error-invalid-user = utilisateur invalide : { $user } chown-error-invalid-group = groupe invalide : { $group } chown-error-invalid-spec = spécification invalide : { $spec } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/src/000077500000000000000000000000001504311601400240715ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/src/chown.rs000066400000000000000000000215671504311601400255700ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) COMFOLLOW Passwd RFILE RFILE's derefer dgid duid groupname use uucore::display::Quotable; pub use uucore::entries::{self, Group, Locate, Passwd}; use uucore::format_usage; use uucore::perms::{GidUidOwnerFilter, IfFrom, chown_base, options}; use uucore::translate; use uucore::error::{FromIo, UResult, USimpleError}; use clap::{Arg, ArgAction, ArgMatches, Command}; use std::fs; use std::os::unix::fs::MetadataExt; fn parse_gid_uid_and_filter(matches: &ArgMatches) -> UResult { let filter = if let Some(spec) = matches.get_one::(options::FROM) { match parse_spec(spec, ':')? { (Some(uid), None) => IfFrom::User(uid), (None, Some(gid)) => IfFrom::Group(gid), (Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid), (None, None) => IfFrom::All, } } else { IfFrom::All }; let dest_uid: Option; let dest_gid: Option; let raw_owner: String; if let Some(file) = matches.get_one::(options::REFERENCE) { let meta = fs::metadata(file).map_err_context( || translate!("chown-error-failed-to-get-attributes", "file" => file.quote()), )?; let gid = meta.gid(); let uid = meta.uid(); dest_gid = Some(gid); dest_uid = Some(uid); raw_owner = format!( "{}:{}", entries::uid2usr(uid).unwrap_or_else(|_| uid.to_string()), entries::gid2grp(gid).unwrap_or_else(|_| gid.to_string()) ); } else { raw_owner = matches .get_one::(options::ARG_OWNER) .unwrap() .into(); let (u, g) = parse_spec(&raw_owner, ':')?; dest_uid = u; dest_gid = g; } Ok(GidUidOwnerFilter { dest_gid, dest_uid, raw_owner, filter, }) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { chown_base( uu_app(), args, options::ARG_OWNER, parse_gid_uid_and_filter, false, ) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("chown-about")) .override_usage(format_usage(&translate!("chown-usage"))) .infer_long_args(true) .disable_help_flag(true) .arg( Arg::new(options::HELP) .long(options::HELP) 
.help(translate!("chown-help-print-help")) .action(ArgAction::Help), ) .arg( Arg::new(options::verbosity::CHANGES) .short('c') .long(options::verbosity::CHANGES) .help(translate!("chown-help-changes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FROM) .long(options::FROM) .help(translate!("chown-help-from")) .value_name("CURRENT_OWNER:CURRENT_GROUP"), ) .arg( Arg::new(options::preserve_root::PRESERVE) .long(options::preserve_root::PRESERVE) .help(translate!("chown-help-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::preserve_root::NO_PRESERVE) .long(options::preserve_root::NO_PRESERVE) .help(translate!("chown-help-no-preserve-root")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::verbosity::QUIET) .long(options::verbosity::QUIET) .help(translate!("chown-help-quiet")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::RECURSIVE) .short('R') .long(options::RECURSIVE) .help(translate!("chown-help-recursive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::REFERENCE) .long(options::REFERENCE) .help(translate!("chown-help-reference")) .value_name("RFILE") .value_hint(clap::ValueHint::FilePath) .num_args(1..), ) .arg( Arg::new(options::verbosity::SILENT) .short('f') .long(options::verbosity::SILENT) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::verbosity::VERBOSE) .long(options::verbosity::VERBOSE) .short('v') .help(translate!("chown-help-verbose")) .action(ArgAction::SetTrue), ) // Add common arguments with chgrp, chown & chmod .args(uucore::perms::common_args()) } /// Parses the user string to extract the UID. fn parse_uid(user: &str, spec: &str, sep: char) -> UResult> { if user.is_empty() { return Ok(None); } match Passwd::locate(user) { Ok(u) => Ok(Some(u.uid)), // We have been able to get the uid Err(_) => { // we have NOT been able to find the uid // but we could be in the case where we have user.group if spec.contains('.') && !spec.contains(':') && sep == ':' { // but the input contains a '.' 
but not a ':' // we might have something like username.groupname // So, try to parse it this way parse_spec(spec, '.').map(|(uid, _)| uid) } else { // It's possible that the `user` string contains a // numeric user ID, in which case, we respect that. match user.parse() { Ok(uid) => Ok(Some(uid)), Err(_) => Err(USimpleError::new( 1, translate!("chown-error-invalid-user", "user" => spec.quote()), )), } } } } } /// Parses the group string to extract the GID. fn parse_gid(group: &str, spec: &str) -> UResult> { if group.is_empty() { return Ok(None); } match Group::locate(group) { Ok(g) => Ok(Some(g.gid)), Err(_) => match group.parse() { Ok(gid) => Ok(Some(gid)), Err(_) => Err(USimpleError::new( 1, translate!("chown-error-invalid-group", "group" => spec.quote()), )), }, } } /// Parse the owner/group specifier string into a user ID and a group ID. /// /// The `spec` can be of the form: /// /// * `"owner:group"`, /// * `"owner"`, /// * `":group"`, /// /// and the owner or group can be specified either as an ID or a /// name. The `sep` argument specifies which character to use as a /// separator between the owner and group; calling code should set /// this to `':'`. 
fn parse_spec(spec: &str, sep: char) -> UResult<(Option, Option)> { assert!(['.', ':'].contains(&sep)); let mut args = spec.splitn(2, sep); let user = args.next().unwrap_or(""); let group = args.next().unwrap_or(""); let uid = parse_uid(user, spec, sep)?; let gid = parse_gid(group, spec)?; if user.chars().next().is_some_and(char::is_numeric) && group.is_empty() && spec != user { // if the arg starts with an id numeric value, the group isn't set but the separator is provided, // we should fail with an error return Err(USimpleError::new( 1, translate!("chown-error-invalid-spec", "spec" => spec.quote()), )); } Ok((uid, gid)) } #[cfg(test)] mod test { use super::*; use std::env; use uucore::locale; #[test] fn test_parse_spec() { unsafe { env::set_var("LANG", "C"); } let _ = locale::setup_localization("chown"); assert!(matches!(parse_spec(":", ':'), Ok((None, None)))); assert!(matches!(parse_spec(".", ':'), Ok((None, None)))); assert!(matches!(parse_spec(".", '.'), Ok((None, None)))); assert!(format!("{}", parse_spec("::", ':').err().unwrap()).starts_with("invalid group: ")); assert!(format!("{}", parse_spec("..", ':').err().unwrap()).starts_with("invalid group: ")); } /// Test for parsing IDs that don't correspond to a named user or group. #[test] fn test_parse_spec_nameless_ids() { // This assumes that there is no named user with ID 12345. assert!(matches!(parse_spec("12345", ':'), Ok((Some(12345), None)))); // This assumes that there is no named group with ID 54321. 
assert!(matches!(parse_spec(":54321", ':'), Ok((None, Some(54321))))); assert!(matches!( parse_spec("12345:54321", ':'), Ok((Some(12345), Some(54321))) )); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chown/src/main.rs000066400000000000000000000000301504311601400253540ustar00rootroot00000000000000uucore::bin!(uu_chown); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/000077500000000000000000000000001504311601400234625ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/Cargo.toml000066400000000000000000000012121504311601400254060ustar00rootroot00000000000000[package] name = "uu_chroot" description = "chroot ~ (uutils) run COMMAND under a new root directory" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/chroot" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/chroot.rs" [dependencies] clap = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = ["entries", "fs"] } fluent = { workspace = true } [[bin]] name = "chroot" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/LICENSE000077700000000000000000000000001504311601400263302../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/locales/000077500000000000000000000000001504311601400251045ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/locales/en-US.ftl000066400000000000000000000027261504311601400265510ustar00rootroot00000000000000chroot-about = Run COMMAND with root directory set to NEWROOT. chroot-usage = chroot [OPTION]... NEWROOT [COMMAND [ARG]...] 
# Help messages chroot-help-groups = Comma-separated list of groups to switch to chroot-help-userspec = Colon-separated user and group to switch to. chroot-help-skip-chdir = Use this option to not change the working directory to / after changing the root directory to newroot, i.e., inside the chroot. # Error messages chroot-error-skip-chdir-only-permitted = option --skip-chdir only permitted if NEWROOT is old '/' chroot-error-cannot-enter = cannot chroot to { $dir }: { $err } chroot-error-command-failed = failed to run command { $cmd }: { $err } chroot-error-command-not-found = failed to run command { $cmd }: { $err } chroot-error-groups-parsing-failed = --groups parsing failed chroot-error-invalid-group = invalid group: { $group } chroot-error-invalid-group-list = invalid group list: { $list } chroot-error-missing-newroot = Missing operand: NEWROOT Try '{ $util_name } --help' for more information. chroot-error-no-group-specified = no group specified for unknown uid: { $uid } chroot-error-no-such-user = invalid user chroot-error-no-such-group = invalid group chroot-error-no-such-directory = cannot change root directory to { $dir }: no such directory chroot-error-set-gid-failed = cannot set gid to { $gid }: { $err } chroot-error-set-groups-failed = cannot set groups: { $err } chroot-error-set-user-failed = cannot set user to { $user }: { $err } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/locales/fr-FR.ftl000066400000000000000000000034371504311601400265360ustar00rootroot00000000000000chroot-about = Exécuter COMMANDE avec le répertoire racine défini à NOUVRACINE. chroot-usage = chroot [OPTION]... NOUVRACINE [COMMANDE [ARG]...] # Messages d'aide chroot-help-groups = Liste de groupes séparés par des virgules vers lesquels basculer chroot-help-userspec = Utilisateur et groupe séparés par deux-points vers lesquels basculer. 
chroot-help-skip-chdir = Utiliser cette option pour ne pas changer le répertoire de travail vers / après avoir changé le répertoire racine vers nouvracine, c.-à-d., à l'intérieur du chroot. # Messages d'erreur chroot-error-skip-chdir-only-permitted = l'option --skip-chdir n'est autorisée que si NOUVRACINE est l'ancien '/' chroot-error-cannot-enter = impossible de faire chroot vers { $dir } : { $err } chroot-error-command-failed = échec de l'exécution de la commande { $cmd } : { $err } chroot-error-command-not-found = échec de l'exécution de la commande { $cmd } : { $err } chroot-error-groups-parsing-failed = échec de l'analyse de --groups chroot-error-invalid-group = groupe invalide : { $group } chroot-error-invalid-group-list = liste de groupes invalide : { $list } chroot-error-missing-newroot = Opérande manquant : NOUVRACINE Essayez '{ $util_name } --help' pour plus d'informations. chroot-error-no-group-specified = aucun groupe spécifié pour l'uid inconnu : { $uid } chroot-error-no-such-user = utilisateur invalide chroot-error-no-such-group = groupe invalide chroot-error-no-such-directory = impossible de changer le répertoire racine vers { $dir } : aucun répertoire de ce type chroot-error-set-gid-failed = impossible de définir le gid à { $gid } : { $err } chroot-error-set-groups-failed = impossible de définir les groupes : { $err } chroot-error-set-user-failed = impossible de définir l'utilisateur à { $user } : { $err } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/src/000077500000000000000000000000001504311601400242515ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/src/chroot.rs000066400000000000000000000352401504311601400261210ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) NEWROOT Userspec pstatus chdir mod error; use crate::error::ChrootError; use clap::{Arg, ArgAction, Command}; use std::ffi::CString; use std::io::Error; use std::os::unix::prelude::OsStrExt; use std::path::{Path, PathBuf}; use std::process; use uucore::entries::{Locate, Passwd, grp2gid, usr2uid}; use uucore::error::{UClapError, UResult, UUsageError, set_exit_code}; use uucore::fs::{MissingHandling, ResolveMode, canonicalize}; use uucore::libc::{self, chroot, setgid, setgroups, setuid}; use uucore::{format_usage, show}; use uucore::translate; mod options { pub const NEWROOT: &str = "newroot"; pub const GROUPS: &str = "groups"; pub const USERSPEC: &str = "userspec"; pub const COMMAND: &str = "command"; pub const SKIP_CHDIR: &str = "skip-chdir"; } /// A user and group specification, where each is optional. enum UserSpec { NeitherGroupNorUser, UserOnly(String), GroupOnly(String), UserAndGroup(String, String), } struct Options { /// Path to the new root directory. newroot: PathBuf, /// Whether to change to the new root directory. skip_chdir: bool, /// List of groups under which the command will be run. groups: Option>, /// The user and group (each optional) under which the command will be run. userspec: Option, } /// Parse a user and group from the argument to `--userspec`. /// /// The `spec` must be of the form `[USER][:[GROUP]]`, otherwise an /// error is returned. fn parse_userspec(spec: &str) -> UserSpec { match spec.split_once(':') { // "" None if spec.is_empty() => UserSpec::NeitherGroupNorUser, // "usr" None => UserSpec::UserOnly(spec.to_string()), // ":" Some(("", "")) => UserSpec::NeitherGroupNorUser, // ":grp" Some(("", grp)) => UserSpec::GroupOnly(grp.to_string()), // "usr:" Some((usr, "")) => UserSpec::UserOnly(usr.to_string()), // "usr:grp" Some((usr, grp)) => UserSpec::UserAndGroup(usr.to_string(), grp.to_string()), } } /// Pre-condition: `list_str` is non-empty. 
fn parse_group_list(list_str: &str) -> Result, ChrootError> { let split: Vec<&str> = list_str.split(',').collect(); if split.len() == 1 { let name = split[0].trim(); if name.is_empty() { // --groups=" " // chroot: invalid group ' ' Err(ChrootError::InvalidGroup(name.to_string())) } else { // --groups="blah" Ok(vec![name.to_string()]) } } else if split.iter().all(|s| s.is_empty()) { // --groups="," // chroot: invalid group list ',' Err(ChrootError::InvalidGroupList(list_str.to_string())) } else { let mut result = vec![]; let mut err = false; for name in split { let trimmed_name = name.trim(); if trimmed_name.is_empty() { if name.is_empty() { // --groups="," continue; } // --groups=", " // chroot: invalid group ' ' show!(ChrootError::InvalidGroup(name.to_string())); err = true; } else { // TODO Figure out a better condition here. if trimmed_name.starts_with(char::is_numeric) && trimmed_name.ends_with(|c: char| !c.is_numeric()) { // --groups="0trail" // chroot: invalid group '0trail' show!(ChrootError::InvalidGroup(name.to_string())); err = true; } else { result.push(trimmed_name.to_string()); } } } if err { Err(ChrootError::GroupsParsingFailed) } else { Ok(result) } } } impl Options { /// Parse parameters from the command-line arguments. fn from(matches: &clap::ArgMatches) -> UResult { let newroot = match matches.get_one::(options::NEWROOT) { Some(v) => Path::new(v).to_path_buf(), None => return Err(ChrootError::MissingNewRoot.into()), }; let groups = match matches.get_one::(options::GROUPS) { None => None, Some(s) => { if s.is_empty() { Some(vec![]) } else { Some(parse_group_list(s)?) 
} } }; let skip_chdir = matches.get_flag(options::SKIP_CHDIR); let userspec = matches .get_one::(options::USERSPEC) .map(|s| parse_userspec(s)); Ok(Self { newroot, skip_chdir, groups, userspec, }) } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args).with_exit_code(125)?; let default_shell: &'static str = "/bin/sh"; let default_option: &'static str = "-i"; let user_shell = std::env::var("SHELL"); let options = Options::from(&matches)?; // We are resolving the path in case it is a symlink or /. or /../ if options.skip_chdir && canonicalize( &options.newroot, MissingHandling::Normal, ResolveMode::Logical, ) .unwrap() .to_str() != Some("/") { return Err(UUsageError::new( 125, translate!("chroot-error-skip-chdir-only-permitted"), )); } if !options.newroot.is_dir() { return Err(ChrootError::NoSuchDirectory(format!("{}", options.newroot.display())).into()); } let commands = match matches.get_many::(options::COMMAND) { Some(v) => v.map(|s| s.as_str()).collect(), None => vec![], }; // TODO: refactor the args and command matching // See: https://github.com/uutils/coreutils/pull/2365#discussion_r647849967 let command: Vec<&str> = match commands.len() { 0 => { let shell: &str = match user_shell { Err(_) => default_shell, Ok(ref s) => s.as_ref(), }; vec![shell, default_option] } _ => commands, }; assert!(!command.is_empty()); let chroot_command = command[0]; let chroot_args = &command[1..]; // NOTE: Tests can only trigger code beyond this point if they're invoked with root permissions set_context(&options)?; let pstatus = match process::Command::new(chroot_command) .args(chroot_args) .status() { Ok(status) => status, Err(e) => { return Err(if e.kind() == std::io::ErrorKind::NotFound { ChrootError::CommandNotFound(command[0].to_string(), e) } else { ChrootError::CommandFailed(command[0].to_string(), e) } .into()); } }; let code = if pstatus.success() { 0 } else { pstatus.code().unwrap_or(-1) }; 
set_exit_code(code); Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("chroot-about")) .override_usage(format_usage(&translate!("chroot-usage"))) .infer_long_args(true) .trailing_var_arg(true) .arg( Arg::new(options::NEWROOT) .value_hint(clap::ValueHint::DirPath) .hide(true) .required(true) .index(1), ) .arg( Arg::new(options::GROUPS) .long(options::GROUPS) .overrides_with(options::GROUPS) .help(translate!("chroot-help-groups")) .value_name("GROUP1,GROUP2..."), ) .arg( Arg::new(options::USERSPEC) .long(options::USERSPEC) .help(translate!("chroot-help-userspec")) .value_name("USER:GROUP"), ) .arg( Arg::new(options::SKIP_CHDIR) .long(options::SKIP_CHDIR) .help(translate!("chroot-help-skip-chdir")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::COMMAND) .action(ArgAction::Append) .value_hint(clap::ValueHint::CommandName) .hide(true) .index(2), ) } /// Get the UID for the given username, falling back to numeric parsing. /// /// According to the documentation of GNU `chroot`, "POSIX requires that /// these commands first attempt to resolve the specified string as a /// name, and only once that fails, then try to interpret it as an ID." fn name_to_uid(name: &str) -> Result { match usr2uid(name) { Ok(uid) => Ok(uid), Err(_) => name .parse::() .map_err(|_| ChrootError::NoSuchUser), } } /// Get the GID for the given group name, falling back to numeric parsing. /// /// According to the documentation of GNU `chroot`, "POSIX requires that /// these commands first attempt to resolve the specified string as a /// name, and only once that fails, then try to interpret it as an ID." fn name_to_gid(name: &str) -> Result { match grp2gid(name) { Ok(gid) => Ok(gid), Err(_) => name .parse::() .map_err(|_| ChrootError::NoSuchGroup), } } /// Get the list of group IDs for the given user. 
/// /// According to the GNU documentation, "the supplementary groups are /// set according to the system defined list for that user". This /// function gets that list. fn supplemental_gids(uid: libc::uid_t) -> Vec { match Passwd::locate(uid) { Err(_) => vec![], Ok(passwd) => passwd.belongs_to(), } } /// Set the supplemental group IDs for this process. fn set_supplemental_gids(gids: &[libc::gid_t]) -> std::io::Result<()> { #[cfg(any(target_vendor = "apple", target_os = "freebsd", target_os = "openbsd"))] let n = gids.len() as libc::c_int; #[cfg(any(target_os = "linux", target_os = "android"))] let n = gids.len() as libc::size_t; let err = unsafe { setgroups(n, gids.as_ptr()) }; if err == 0 { Ok(()) } else { Err(Error::last_os_error()) } } /// Set the group ID of this process. fn set_gid(gid: libc::gid_t) -> std::io::Result<()> { let err = unsafe { setgid(gid) }; if err == 0 { Ok(()) } else { Err(Error::last_os_error()) } } /// Set the user ID of this process. fn set_uid(uid: libc::uid_t) -> std::io::Result<()> { let err = unsafe { setuid(uid) }; if err == 0 { Ok(()) } else { Err(Error::last_os_error()) } } /// What to do when the `--groups` argument is missing. enum Strategy { /// Do nothing. Nothing, /// Use the list of supplemental groups for the given user. /// /// If the `bool` parameter is `false` and the list of groups for /// the given user is empty, then this will result in an error. FromUID(libc::uid_t, bool), } /// Set supplemental groups when the `--groups` argument is not specified. 
fn handle_missing_groups(strategy: Strategy) -> Result<(), ChrootError> { match strategy { Strategy::Nothing => Ok(()), Strategy::FromUID(uid, false) => { let gids = supplemental_gids(uid); if gids.is_empty() { Err(ChrootError::NoGroupSpecified(uid)) } else { set_supplemental_gids(&gids).map_err(ChrootError::SetGroupsFailed) } } Strategy::FromUID(uid, true) => { let gids = supplemental_gids(uid); set_supplemental_gids(&gids).map_err(ChrootError::SetGroupsFailed) } } } /// Set supplemental groups for this process. fn set_supplemental_gids_with_strategy( strategy: Strategy, groups: Option<&Vec>, ) -> Result<(), ChrootError> { match groups { None => handle_missing_groups(strategy), Some(groups) => { let mut gids = vec![]; for group in groups { gids.push(name_to_gid(group)?); } set_supplemental_gids(&gids).map_err(ChrootError::SetGroupsFailed) } } } /// Change the root, set the user ID, and set the group IDs for this process. fn set_context(options: &Options) -> UResult<()> { enter_chroot(&options.newroot, options.skip_chdir)?; match &options.userspec { None | Some(UserSpec::NeitherGroupNorUser) => { let strategy = Strategy::Nothing; set_supplemental_gids_with_strategy(strategy, options.groups.as_ref())?; } Some(UserSpec::UserOnly(user)) => { let uid = name_to_uid(user)?; let gid = uid as libc::gid_t; let strategy = Strategy::FromUID(uid, false); set_supplemental_gids_with_strategy(strategy, options.groups.as_ref())?; set_gid(gid).map_err(|e| ChrootError::SetGidFailed(user.to_string(), e))?; set_uid(uid).map_err(|e| ChrootError::SetUserFailed(user.to_string(), e))?; } Some(UserSpec::GroupOnly(group)) => { let gid = name_to_gid(group)?; let strategy = Strategy::Nothing; set_supplemental_gids_with_strategy(strategy, options.groups.as_ref())?; set_gid(gid).map_err(|e| ChrootError::SetGidFailed(group.to_string(), e))?; } Some(UserSpec::UserAndGroup(user, group)) => { let uid = name_to_uid(user)?; let gid = name_to_gid(group)?; let strategy = Strategy::FromUID(uid, true); 
set_supplemental_gids_with_strategy(strategy, options.groups.as_ref())?; set_gid(gid).map_err(|e| ChrootError::SetGidFailed(group.to_string(), e))?; set_uid(uid).map_err(|e| ChrootError::SetUserFailed(user.to_string(), e))?; } } Ok(()) } fn enter_chroot(root: &Path, skip_chdir: bool) -> UResult<()> { let err = unsafe { chroot( CString::new(root.as_os_str().as_bytes().to_vec()) .map_err(|e| ChrootError::CannotEnter("root".to_string(), e.into()))? .as_bytes_with_nul() .as_ptr() .cast::(), ) }; if err == 0 { if !skip_chdir { std::env::set_current_dir("/")?; } Ok(()) } else { Err(ChrootError::CannotEnter(format!("{}", root.display()), Error::last_os_error()).into()) } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/src/error.rs000066400000000000000000000055741504311601400257630ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore NEWROOT Userspec userspec //! Errors returned by chroot. use std::io::Error; use thiserror::Error; use uucore::display::Quotable; use uucore::error::UError; use uucore::libc; use uucore::translate; /// Errors that can happen while executing chroot. #[derive(Debug, Error)] pub enum ChrootError { /// Failed to enter the specified directory. #[error("{}", translate!("chroot-error-cannot-enter", "dir" => _0.quote(), "err" => _1))] CannotEnter(String, #[source] Error), /// Failed to execute the specified command. #[error("{}", translate!("chroot-error-command-failed", "cmd" => _0.quote(), "err" => _1))] CommandFailed(String, #[source] Error), /// Failed to find the specified command. 
#[error("{}", translate!("chroot-error-command-not-found", "cmd" => _0.quote(), "err" => _1))] CommandNotFound(String, #[source] Error), #[error("{}", translate!("chroot-error-groups-parsing-failed"))] GroupsParsingFailed, #[error("{}", translate!("chroot-error-invalid-group", "group" => _0.quote()))] InvalidGroup(String), #[error("{}", translate!("chroot-error-invalid-group-list", "list" => _0.quote()))] InvalidGroupList(String), /// The new root directory was not given. #[error("{}", translate!("chroot-error-missing-newroot", "util_name" => uucore::execution_phrase()))] MissingNewRoot, #[error("{}", translate!("chroot-error-no-group-specified", "uid" => _0))] NoGroupSpecified(libc::uid_t), /// Failed to find the specified user. #[error("{}", translate!("chroot-error-no-such-user"))] NoSuchUser, /// Failed to find the specified group. #[error("{}", translate!("chroot-error-no-such-group"))] NoSuchGroup, /// The given directory does not exist. #[error("{}", translate!("chroot-error-no-such-directory", "dir" => _0.quote()))] NoSuchDirectory(String), /// The call to `setgid()` failed. #[error("{}", translate!("chroot-error-set-gid-failed", "gid" => _0, "err" => _1))] SetGidFailed(String, #[source] Error), /// The call to `setgroups()` failed. #[error("{}", translate!("chroot-error-set-groups-failed", "err" => _0))] SetGroupsFailed(Error), /// The call to `setuid()` failed. 
#[error("{}", translate!("chroot-error-set-user-failed", "user" => _0.maybe_quote(), "err" => _1))] SetUserFailed(String, #[source] Error), } impl UError for ChrootError { /// 125 if chroot itself fails /// 126 if command is found but cannot be invoked /// 127 if command cannot be found fn code(&self) -> i32 { match self { Self::CommandFailed(_, _) => 126, Self::CommandNotFound(_, _) => 127, _ => 125, } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/chroot/src/main.rs000066400000000000000000000000311504311601400255350ustar00rootroot00000000000000uucore::bin!(uu_chroot); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/000077500000000000000000000000001504311601400233065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/Cargo.toml000066400000000000000000000012041504311601400252330ustar00rootroot00000000000000[package] name = "uu_cksum" description = "cksum ~ (uutils) display CRC and size of input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/cksum" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/cksum.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["checksum", "encoding", "sum"] } hex = { workspace = true } fluent = { workspace = true } [[bin]] name = "cksum" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/LICENSE000077700000000000000000000000001504311601400261542../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/locales/000077500000000000000000000000001504311601400247305ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/locales/en-US.ftl000066400000000000000000000032611504311601400263700ustar00rootroot00000000000000cksum-about = Print CRC and size for each file cksum-usage = cksum [OPTIONS] [FILE]... cksum-after-help = DIGEST determines the digest algorithm and default output format: - sysv: (equivalent to sum -s) - bsd: (equivalent to sum -r) - crc: (equivalent to cksum) - crc32b: (only available through cksum) - md5: (equivalent to md5sum) - sha1: (equivalent to sha1sum) - sha224: (equivalent to sha224sum) - sha256: (equivalent to sha256sum) - sha384: (equivalent to sha384sum) - sha512: (equivalent to sha512sum) - blake2b: (equivalent to b2sum) - sm3: (only available through cksum) # Help messages cksum-help-algorithm = select the digest type to use. 
See DIGEST below cksum-help-untagged = create a reversed style checksum, without digest type cksum-help-tag = create a BSD style checksum, undo --untagged (default) cksum-help-length = digest length in bits; must not exceed the max for the blake2 algorithm and must be a multiple of 8 cksum-help-raw = emit a raw binary digest, not hexadecimal cksum-help-strict = exit non-zero for improperly formatted checksum lines cksum-help-check = read hashsums from the FILEs and check them cksum-help-base64 = emit a base64 digest, not hexadecimal cksum-help-warn = warn about improperly formatted checksum lines cksum-help-status = don't output anything, status code shows success cksum-help-quiet = don't print OK for each successfully verified file cksum-help-ignore-missing = don't fail or report status for missing files cksum-help-zero = end each output line with NUL, not newline, and disable file name escaping # Error messages cksum-error-is-directory = { $file }: Is a directory cksum-error-failed-to-read-input = failed to read input coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/locales/fr-FR.ftl000066400000000000000000000037561504311601400263660ustar00rootroot00000000000000cksum-about = Afficher le CRC et la taille de chaque fichier cksum-usage = cksum [OPTION]... [FICHIER]... cksum-after-help = DIGEST détermine l'algorithme de condensé et le format de sortie par défaut : - sysv : (équivalent à sum -s) - bsd : (équivalent à sum -r) - crc : (équivalent à cksum) - crc32b : (disponible uniquement via cksum) - md5 : (équivalent à md5sum) - sha1 : (équivalent à sha1sum) - sha224 : (équivalent à sha224sum) - sha256 : (équivalent à sha256sum) - sha384 : (équivalent à sha384sum) - sha512 : (équivalent à sha512sum) - blake2b : (équivalent à b2sum) - sm3 : (disponible uniquement via cksum) # Messages d'aide cksum-help-algorithm = sélectionner le type de condensé à utiliser. 
Voir DIGEST ci-dessous cksum-help-untagged = créer une somme de contrôle de style inversé, sans type de condensé cksum-help-tag = créer une somme de contrôle de style BSD, annuler --untagged (par défaut) cksum-help-length = longueur du condensé en bits ; ne doit pas dépasser le maximum pour l'algorithme blake2 et doit être un multiple de 8 cksum-help-raw = émettre un condensé binaire brut, pas hexadécimal cksum-help-strict = sortir avec un code non-zéro pour les lignes de somme de contrôle mal formatées cksum-help-check = lire les sommes de hachage des FICHIERs et les vérifier cksum-help-base64 = émettre un condensé base64, pas hexadécimal cksum-help-warn = avertir des lignes de somme de contrôle mal formatées cksum-help-status = ne rien afficher, le code de statut indique le succès cksum-help-quiet = ne pas afficher OK pour chaque fichier vérifié avec succès cksum-help-ignore-missing = ne pas échouer ou signaler le statut pour les fichiers manquants cksum-help-zero = terminer chaque ligne de sortie avec NUL, pas un saut de ligne, et désactiver l'échappement des noms de fichiers # Messages d'erreur cksum-error-is-directory = { $file } : Est un répertoire cksum-error-failed-to-read-input = échec de la lecture de l'entrée coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/src/000077500000000000000000000000001504311601400240755ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/src/cksum.rs000066400000000000000000000367701504311601400256020ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) fname, algo use clap::builder::ValueParser; use clap::{Arg, ArgAction, Command, value_parser}; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::io::{self, BufReader, Read, Write, stdin, stdout}; use std::iter; use std::path::Path; use uucore::checksum::{ ALGORITHM_OPTIONS_BLAKE2B, ALGORITHM_OPTIONS_BSD, ALGORITHM_OPTIONS_CRC, ALGORITHM_OPTIONS_CRC32B, ALGORITHM_OPTIONS_SYSV, ChecksumError, ChecksumOptions, ChecksumVerbose, SUPPORTED_ALGORITHMS, calculate_blake2b_length, detect_algo, digest_reader, perform_checksum_validation, }; use uucore::translate; use uucore::{ encoding, error::{FromIo, UResult, USimpleError}, format_usage, line_ending::LineEnding, os_str_as_bytes, show, sum::Digest, }; #[derive(Debug, PartialEq)] enum OutputFormat { Hexadecimal, Raw, Base64, } struct Options { algo_name: &'static str, digest: Box, output_bits: usize, tag: bool, // will cover the --untagged option length: Option, output_format: OutputFormat, asterisk: bool, // if we display an asterisk or not (--binary/--text) line_ending: LineEnding, } /// Calculate checksum /// /// # Arguments /// /// * `options` - CLI options for the assigning checksum algorithm /// * `files` - A iterator of [`OsStr`] which is a bunch of files that are using for calculating checksum #[allow(clippy::cognitive_complexity)] fn cksum<'a, I>(mut options: Options, files: I) -> UResult<()> where I: Iterator, { let files: Vec<_> = files.collect(); if options.output_format == OutputFormat::Raw && files.len() > 1 { return Err(Box::new(ChecksumError::RawMultipleFiles)); } for filename in files { let filename = Path::new(filename); let stdin_buf; let file_buf; let not_file = filename == OsStr::new("-"); // Handle the file input let mut file = BufReader::new(if not_file { stdin_buf = stdin(); Box::new(stdin_buf) as Box } else if filename.is_dir() { Box::new(BufReader::new(io::empty())) as Box } else { file_buf = match File::open(filename) { Ok(file) => file, Err(err) => { 
show!(err.map_err_context(|| filename.to_string_lossy().to_string())); continue; } }; Box::new(file_buf) as Box }); if filename.is_dir() { show!(USimpleError::new( 1, translate!("cksum-error-is-directory", "file" => filename.display()) )); continue; } let (sum_hex, sz) = digest_reader(&mut options.digest, &mut file, false, options.output_bits) .map_err_context(|| translate!("cksum-error-failed-to-read-input"))?; let sum = match options.output_format { OutputFormat::Raw => { let bytes = match options.algo_name { ALGORITHM_OPTIONS_CRC => sum_hex.parse::().unwrap().to_be_bytes().to_vec(), ALGORITHM_OPTIONS_SYSV | ALGORITHM_OPTIONS_BSD => { sum_hex.parse::().unwrap().to_be_bytes().to_vec() } _ => hex::decode(sum_hex).unwrap(), }; // Cannot handle multiple files anyway, output immediately. stdout().write_all(&bytes)?; return Ok(()); } OutputFormat::Hexadecimal => sum_hex, OutputFormat::Base64 => match options.algo_name { ALGORITHM_OPTIONS_CRC | ALGORITHM_OPTIONS_CRC32B | ALGORITHM_OPTIONS_SYSV | ALGORITHM_OPTIONS_BSD => sum_hex, _ => encoding::for_cksum::BASE64.encode(&hex::decode(sum_hex).unwrap()), }, }; // The BSD checksum output is 5 digit integer let bsd_width = 5; let (before_filename, should_print_filename, after_filename) = match options.algo_name { ALGORITHM_OPTIONS_SYSV => ( format!( "{} {}{}", sum.parse::().unwrap(), sz.div_ceil(options.output_bits), if not_file { "" } else { " " } ), !not_file, String::new(), ), ALGORITHM_OPTIONS_BSD => ( format!( "{:0bsd_width$} {:bsd_width$}{}", sum.parse::().unwrap(), sz.div_ceil(options.output_bits), if not_file { "" } else { " " } ), !not_file, String::new(), ), ALGORITHM_OPTIONS_CRC | ALGORITHM_OPTIONS_CRC32B => ( format!("{sum} {sz}{}", if not_file { "" } else { " " }), !not_file, String::new(), ), ALGORITHM_OPTIONS_BLAKE2B if options.tag => { ( if let Some(length) = options.length { // Multiply by 8 here, as we want to print the length in bits. 
format!("BLAKE2b-{} (", length * 8) } else { "BLAKE2b (".to_owned() }, true, format!(") = {sum}"), ) } _ => { if options.tag { ( format!("{} (", options.algo_name.to_ascii_uppercase()), true, format!(") = {sum}"), ) } else { let prefix = if options.asterisk { "*" } else { " " }; (format!("{sum} {prefix}"), true, String::new()) } } }; print!("{before_filename}"); if should_print_filename { // The filename might not be valid UTF-8, and filename.display() would mangle the names. // Therefore, emit the bytes directly to stdout, without any attempt at encoding them. let _dropped_result = stdout().write_all(os_str_as_bytes(filename.as_os_str())?); } print!("{after_filename}{}", options.line_ending); } Ok(()) } mod options { pub const ALGORITHM: &str = "algorithm"; pub const FILE: &str = "file"; pub const UNTAGGED: &str = "untagged"; pub const TAG: &str = "tag"; pub const LENGTH: &str = "length"; pub const RAW: &str = "raw"; pub const BASE64: &str = "base64"; pub const CHECK: &str = "check"; pub const STRICT: &str = "strict"; pub const TEXT: &str = "text"; pub const BINARY: &str = "binary"; pub const STATUS: &str = "status"; pub const WARN: &str = "warn"; pub const IGNORE_MISSING: &str = "ignore-missing"; pub const QUIET: &str = "quiet"; pub const ZERO: &str = "zero"; } /*** * cksum has a bunch of legacy behavior. * We handle this in this function to make sure they are self contained * and "easier" to understand */ fn handle_tag_text_binary_flags>( args: impl Iterator, ) -> UResult<(bool, bool)> { let mut tag = true; let mut binary = false; // --binary, --tag and --untagged are tight together: none of them // conflicts with each other but --tag will reset "binary" and set "tag". 
for arg in args { let arg = arg.as_ref(); if arg == "-b" || arg == "--binary" { binary = true; } else if arg == "--tag" { tag = true; binary = false; } else if arg == "--untagged" { tag = false; } } Ok((tag, !tag && binary)) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let check = matches.get_flag(options::CHECK); let algo_name: &str = match matches.get_one::(options::ALGORITHM) { Some(v) => v, None => { if check { // if we are doing a --check, we should not default to crc "" } else { ALGORITHM_OPTIONS_CRC } } }; let input_length = matches.get_one::(options::LENGTH); let length = match input_length { Some(length) => { if algo_name == ALGORITHM_OPTIONS_BLAKE2B { calculate_blake2b_length(*length)? } else { return Err(ChecksumError::LengthOnlyForBlake2b.into()); } } None => None, }; if ["bsd", "crc", "sysv", "crc32b"].contains(&algo_name) && check { return Err(ChecksumError::AlgorithmNotSupportedWithCheck.into()); } if check { let text_flag = matches.get_flag(options::TEXT); let binary_flag = matches.get_flag(options::BINARY); let strict = matches.get_flag(options::STRICT); let status = matches.get_flag(options::STATUS); let warn = matches.get_flag(options::WARN); let ignore_missing = matches.get_flag(options::IGNORE_MISSING); let quiet = matches.get_flag(options::QUIET); let tag = matches.get_flag(options::TAG); if tag || binary_flag || text_flag { return Err(ChecksumError::BinaryTextConflict.into()); } // Determine the appropriate algorithm option to pass let algo_option = if algo_name.is_empty() { None } else { Some(algo_name) }; // Execute the checksum validation based on the presence of files or the use of stdin let files = matches.get_many::(options::FILE).map_or_else( || iter::once(OsStr::new("-")).collect::>(), |files| files.map(OsStr::new).collect::>(), ); let verbose = ChecksumVerbose::new(status, quiet, warn); let opts = ChecksumOptions { binary: binary_flag, ignore_missing, 
strict, verbose, }; return perform_checksum_validation(files.iter().copied(), algo_option, length, opts); } let (tag, asterisk) = handle_tag_text_binary_flags(std::env::args_os())?; let algo = detect_algo(algo_name, length)?; let line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO)); let output_format = if matches.get_flag(options::RAW) { OutputFormat::Raw } else if matches.get_flag(options::BASE64) { OutputFormat::Base64 } else { OutputFormat::Hexadecimal }; let opts = Options { algo_name: algo.name, digest: (algo.create_fn)(), output_bits: algo.bits, length, tag, output_format, asterisk, line_ending, }; match matches.get_many::(options::FILE) { Some(files) => cksum(opts, files.map(OsStr::new))?, None => cksum(opts, iter::once(OsStr::new("-")))?, } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("cksum-about")) .override_usage(format_usage(&translate!("cksum-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::FILE) .hide(true) .action(ArgAction::Append) .value_parser(ValueParser::os_string()) .value_hint(clap::ValueHint::FilePath), ) .arg( Arg::new(options::ALGORITHM) .long(options::ALGORITHM) .short('a') .help(translate!("cksum-help-algorithm")) .value_name("ALGORITHM") .value_parser(SUPPORTED_ALGORITHMS), ) .arg( Arg::new(options::UNTAGGED) .long(options::UNTAGGED) .help(translate!("cksum-help-untagged")) .action(ArgAction::SetTrue) .overrides_with(options::TAG), ) .arg( Arg::new(options::TAG) .long(options::TAG) .help(translate!("cksum-help-tag")) .action(ArgAction::SetTrue) .overrides_with(options::UNTAGGED), ) .arg( Arg::new(options::LENGTH) .long(options::LENGTH) .value_parser(value_parser!(usize)) .short('l') .help(translate!("cksum-help-length")) .action(ArgAction::Set), ) .arg( Arg::new(options::RAW) .long(options::RAW) .help(translate!("cksum-help-raw")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::STRICT) 
.long(options::STRICT) .help(translate!("cksum-help-strict")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CHECK) .short('c') .long(options::CHECK) .help(translate!("cksum-help-check")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::BASE64) .long(options::BASE64) .help(translate!("cksum-help-base64")) .action(ArgAction::SetTrue) // Even though this could easily just override an earlier '--raw', // GNU cksum does not permit these flags to be combined: .conflicts_with(options::RAW), ) .arg( Arg::new(options::TEXT) .long(options::TEXT) .short('t') .hide(true) .overrides_with(options::BINARY) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::BINARY) .long(options::BINARY) .short('b') .hide(true) .overrides_with(options::TEXT) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::WARN) .short('w') .long("warn") .help(translate!("cksum-help-warn")) .action(ArgAction::SetTrue) .overrides_with_all([options::STATUS, options::QUIET]), ) .arg( Arg::new(options::STATUS) .long("status") .help(translate!("cksum-help-status")) .action(ArgAction::SetTrue) .overrides_with_all([options::WARN, options::QUIET]), ) .arg( Arg::new(options::QUIET) .long(options::QUIET) .help(translate!("cksum-help-quiet")) .action(ArgAction::SetTrue) .overrides_with_all([options::WARN, options::STATUS]), ) .arg( Arg::new(options::IGNORE_MISSING) .long(options::IGNORE_MISSING) .help(translate!("cksum-help-ignore-missing")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ZERO) .long(options::ZERO) .short('z') .help(translate!("cksum-help-zero")) .action(ArgAction::SetTrue), ) .after_help(translate!("cksum-after-help")) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cksum/src/main.rs000066400000000000000000000000301504311601400253600ustar00rootroot00000000000000uucore::bin!(uu_cksum); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/000077500000000000000000000000001504311601400231175ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/Cargo.toml000066400000000000000000000011031504311601400250420ustar00rootroot00000000000000[package] name = "uu_comm" description = "comm ~ (uutils) compare sorted inputs" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/comm" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/comm.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["fs"] } fluent = { workspace = true } [[bin]] name = "comm" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/LICENSE000077700000000000000000000000001504311601400257652../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/locales/000077500000000000000000000000001504311601400245415ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/locales/en-US.ftl000066400000000000000000000023041504311601400261760ustar00rootroot00000000000000comm-about = Compare two sorted files line by line. When FILE1 or FILE2 (not both) is -, read standard input. With no options, produce three-column output. Column one contains lines unique to FILE1, column two contains lines unique to FILE2, and column three contains lines common to both files. comm-usage = comm [OPTION]... 
FILE1 FILE2 # Help messages comm-help-column-1 = suppress column 1 (lines unique to FILE1) comm-help-column-2 = suppress column 2 (lines unique to FILE2) comm-help-column-3 = suppress column 3 (lines that appear in both files) comm-help-delimiter = separate columns with STR comm-help-zero-terminated = line delimiter is NUL, not newline comm-help-total = output a summary comm-help-check-order = check that the input is correctly sorted, even if all input lines are pairable comm-help-no-check-order = do not check that the input is correctly sorted # Error messages comm-error-file-not-sorted = comm: file { $file_num } is not in sorted order comm-error-input-not-sorted = comm: input is not in sorted order comm-error-is-directory = Is a directory comm-error-multiple-conflicting-delimiters = multiple conflicting output delimiters specified # Other messages comm-total = total coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/locales/fr-FR.ftl000066400000000000000000000026341504311601400261710ustar00rootroot00000000000000comm-about = Comparer deux fichiers triés ligne par ligne. Lorsque FICHIER1 ou FICHIER2 (pas les deux) est -, lire l'entrée standard. Sans options, produit une sortie à trois colonnes. La colonne un contient les lignes uniques à FICHIER1, la colonne deux contient les lignes uniques à FICHIER2, et la colonne trois contient les lignes communes aux deux fichiers. comm-usage = comm [OPTION]... 
FICHIER1 FICHIER2 # Messages d'aide comm-help-column-1 = supprimer la colonne 1 (lignes uniques à FICHIER1) comm-help-column-2 = supprimer la colonne 2 (lignes uniques à FICHIER2) comm-help-column-3 = supprimer la colonne 3 (lignes qui apparaissent dans les deux fichiers) comm-help-delimiter = séparer les colonnes avec STR comm-help-zero-terminated = le délimiteur de ligne est NUL, pas nouvelle ligne comm-help-total = afficher un résumé comm-help-check-order = vérifier que l'entrée est correctement triée, même si toutes les lignes d'entrée sont appariables comm-help-no-check-order = ne pas vérifier que l'entrée est correctement triée # Messages d'erreur comm-error-file-not-sorted = comm : le fichier { $file_num } n'est pas dans l'ordre trié comm-error-input-not-sorted = comm : l'entrée n'est pas dans l'ordre trié comm-error-is-directory = Est un répertoire comm-error-multiple-conflicting-delimiters = plusieurs délimiteurs de sortie en conflit spécifiés # Autres messages comm-total = total coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/src/000077500000000000000000000000001504311601400237065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/src/comm.rs000066400000000000000000000300561504311601400252130ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) delim mkdelim pairable use std::cmp::Ordering; use std::fs::{File, metadata}; use std::io::{self, BufRead, BufReader, Read, Stdin, stdin}; use uucore::error::{FromIo, UResult, USimpleError}; use uucore::format_usage; use uucore::fs::paths_refer_to_same_file; use uucore::line_ending::LineEnding; use uucore::translate; use clap::{Arg, ArgAction, ArgMatches, Command}; mod options { pub const COLUMN_1: &str = "1"; pub const COLUMN_2: &str = "2"; pub const COLUMN_3: &str = "3"; pub const DELIMITER: &str = "output-delimiter"; pub const DELIMITER_DEFAULT: &str = "\t"; pub const FILE_1: &str = "FILE1"; pub const FILE_2: &str = "FILE2"; pub const TOTAL: &str = "total"; pub const ZERO_TERMINATED: &str = "zero-terminated"; pub const CHECK_ORDER: &str = "check-order"; pub const NO_CHECK_ORDER: &str = "nocheck-order"; } #[derive(Debug, Clone, Copy)] enum FileNumber { One, Two, } impl FileNumber { fn as_str(&self) -> &'static str { match self { FileNumber::One => "1", FileNumber::Two => "2", } } } struct OrderChecker { last_line: Vec, file_num: FileNumber, check_order: bool, has_error: bool, } enum Input { Stdin(Stdin), FileIn(BufReader), } struct LineReader { line_ending: LineEnding, input: Input, } impl LineReader { fn new(input: Input, line_ending: LineEnding) -> Self { Self { line_ending, input } } fn read_line(&mut self, buf: &mut Vec) -> io::Result { let line_ending = self.line_ending.into(); let result = match &mut self.input { Input::Stdin(r) => r.lock().read_until(line_ending, buf), Input::FileIn(r) => r.read_until(line_ending, buf), }; if !buf.ends_with(&[line_ending]) { buf.push(line_ending); } result } } impl OrderChecker { fn new(file_num: FileNumber, check_order: bool) -> Self { Self { last_line: Vec::new(), file_num, check_order, has_error: false, } } fn verify_order(&mut self, current_line: &[u8]) -> bool { if self.last_line.is_empty() { self.last_line = current_line.to_vec(); return true; } let is_ordered = *current_line >= 
*self.last_line; if !is_ordered && !self.has_error { eprintln!( "{}", translate!("comm-error-file-not-sorted", "file_num" => self.file_num.as_str()) ); self.has_error = true; } self.last_line = current_line.to_vec(); is_ordered || !self.check_order } } // Check if two files are identical by comparing their contents pub fn are_files_identical(path1: &str, path2: &str) -> io::Result { // First compare file sizes let metadata1 = metadata(path1)?; let metadata2 = metadata(path2)?; if metadata1.len() != metadata2.len() { return Ok(false); } let file1 = File::open(path1)?; let file2 = File::open(path2)?; let mut reader1 = BufReader::new(file1); let mut reader2 = BufReader::new(file2); let mut buffer1 = [0; 8192]; let mut buffer2 = [0; 8192]; loop { let bytes1 = reader1.read(&mut buffer1)?; let bytes2 = reader2.read(&mut buffer2)?; if bytes1 != bytes2 { return Ok(false); } if bytes1 == 0 { return Ok(true); } if buffer1[..bytes1] != buffer2[..bytes2] { return Ok(false); } } } fn comm(a: &mut LineReader, b: &mut LineReader, delim: &str, opts: &ArgMatches) -> UResult<()> { let width_col_1 = usize::from(!opts.get_flag(options::COLUMN_1)); let width_col_2 = usize::from(!opts.get_flag(options::COLUMN_2)); let delim_col_2 = delim.repeat(width_col_1); let delim_col_3 = delim.repeat(width_col_1 + width_col_2); let ra = &mut Vec::new(); let mut na = a.read_line(ra); let rb = &mut Vec::new(); let mut nb = b.read_line(rb); let mut total_col_1 = 0; let mut total_col_2 = 0; let mut total_col_3 = 0; let check_order = opts.get_flag(options::CHECK_ORDER); let no_check_order = opts.get_flag(options::NO_CHECK_ORDER); // Determine if we should perform order checking let should_check_order = !no_check_order && (check_order || if let (Some(file1), Some(file2)) = ( opts.get_one::(options::FILE_1), opts.get_one::(options::FILE_2), ) { !(paths_refer_to_same_file(file1, file2, true) || are_files_identical(file1, file2).unwrap_or(false)) } else { true }); let mut checker1 = 
OrderChecker::new(FileNumber::One, check_order); let mut checker2 = OrderChecker::new(FileNumber::Two, check_order); let mut input_error = false; while na.is_ok() || nb.is_ok() { let ord = match (na.is_ok(), nb.is_ok()) { (false, true) => Ordering::Greater, (true, false) => Ordering::Less, (true, true) => match (&na, &nb) { (&Ok(0), &Ok(0)) => break, (&Ok(0), _) => Ordering::Greater, (_, &Ok(0)) => Ordering::Less, _ => ra.cmp(&rb), }, _ => unreachable!(), }; match ord { Ordering::Less => { if should_check_order && !checker1.verify_order(ra) { break; } if !opts.get_flag(options::COLUMN_1) { print!("{}", String::from_utf8_lossy(ra)); } ra.clear(); na = a.read_line(ra); total_col_1 += 1; } Ordering::Greater => { if should_check_order && !checker2.verify_order(rb) { break; } if !opts.get_flag(options::COLUMN_2) { print!("{delim_col_2}{}", String::from_utf8_lossy(rb)); } rb.clear(); nb = b.read_line(rb); total_col_2 += 1; } Ordering::Equal => { if should_check_order && (!checker1.verify_order(ra) || !checker2.verify_order(rb)) { break; } if !opts.get_flag(options::COLUMN_3) { print!("{delim_col_3}{}", String::from_utf8_lossy(ra)); } ra.clear(); rb.clear(); na = a.read_line(ra); nb = b.read_line(rb); total_col_3 += 1; } } // Track if we've seen any order errors if (checker1.has_error || checker2.has_error) && !input_error && !check_order { input_error = true; } } if opts.get_flag(options::TOTAL) { let line_ending = LineEnding::from_zero_flag(opts.get_flag(options::ZERO_TERMINATED)); print!( "{total_col_1}{delim}{total_col_2}{delim}{total_col_3}{delim}{}{line_ending}", translate!("comm-total") ); } if should_check_order && (checker1.has_error || checker2.has_error) { // Print the input error message once at the end if input_error { eprintln!("{}", translate!("comm-error-input-not-sorted")); } Err(USimpleError::new(1, "")) } else { Ok(()) } } fn open_file(name: &str, line_ending: LineEnding) -> io::Result { if name == "-" { Ok(LineReader::new(Input::Stdin(stdin()), 
line_ending)) } else { if metadata(name)?.is_dir() { return Err(io::Error::other(translate!("comm-error-is-directory"))); } let f = File::open(name)?; Ok(LineReader::new( Input::FileIn(BufReader::new(f)), line_ending, )) } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO_TERMINATED)); let filename1 = matches.get_one::(options::FILE_1).unwrap(); let filename2 = matches.get_one::(options::FILE_2).unwrap(); let mut f1 = open_file(filename1, line_ending).map_err_context(|| filename1.to_string())?; let mut f2 = open_file(filename2, line_ending).map_err_context(|| filename2.to_string())?; // Due to default_value(), there must be at least one value here, thus unwrap() must not panic. let all_delimiters = matches .get_many::(options::DELIMITER) .unwrap() .map(String::from) .collect::>(); for delim in &all_delimiters[1..] { // Note that this check is very different from ".conflicts_with_self(true).action(ArgAction::Set)", // as this accepts duplicate *identical* arguments. if delim != &all_delimiters[0] { // Note: This intentionally deviate from the GNU error message by inserting the word "conflicting". 
return Err(USimpleError::new( 1, translate!("comm-error-multiple-conflicting-delimiters"), )); } } let delim = match &*all_delimiters[0] { "" => "\0", delim => delim, }; comm(&mut f1, &mut f2, delim, &matches) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("comm-about")) .override_usage(format_usage(&translate!("comm-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::COLUMN_1) .short('1') .help(translate!("comm-help-column-1")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::COLUMN_2) .short('2') .help(translate!("comm-help-column-2")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::COLUMN_3) .short('3') .help(translate!("comm-help-column-3")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DELIMITER) .long(options::DELIMITER) .help(translate!("comm-help-delimiter")) .value_name("STR") .default_value(options::DELIMITER_DEFAULT) .allow_hyphen_values(true) .action(ArgAction::Append) .hide_default_value(true), ) .arg( Arg::new(options::ZERO_TERMINATED) .long(options::ZERO_TERMINATED) .short('z') .overrides_with(options::ZERO_TERMINATED) .help(translate!("comm-help-zero-terminated")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILE_1) .required(true) .value_hint(clap::ValueHint::FilePath), ) .arg( Arg::new(options::FILE_2) .required(true) .value_hint(clap::ValueHint::FilePath), ) .arg( Arg::new(options::TOTAL) .long(options::TOTAL) .help(translate!("comm-help-total")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CHECK_ORDER) .long(options::CHECK_ORDER) .help(translate!("comm-help-check-order")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_CHECK_ORDER) .long(options::NO_CHECK_ORDER) .help(translate!("comm-help-no-check-order")) .action(ArgAction::SetTrue) .conflicts_with(options::CHECK_ORDER), ) } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/comm/src/main.rs000066400000000000000000000000271504311601400251770ustar00rootroot00000000000000uucore::bin!(uu_comm); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/000077500000000000000000000000001504311601400225665ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/Cargo.toml000066400000000000000000000021701504311601400245160ustar00rootroot00000000000000[package] name = "uu_cp" description = "cp ~ (uutils) copy SOURCE to DESTINATION" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/cp" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/cp.rs" [dependencies] clap = { workspace = true } filetime = { workspace = true } libc = { workspace = true } linux-raw-sys = { workspace = true, features = ["ioctl"] } selinux = { workspace = true, optional = true } uucore = { workspace = true, features = [ "backup-control", "buf-copy", "entries", "fs", "fsxattr", "parser", "perms", "mode", "update-control", ] } walkdir = { workspace = true } indicatif = { workspace = true } thiserror = { workspace = true } fluent = { workspace = true } [target.'cfg(unix)'.dependencies] xattr = { workspace = true } exacl = { workspace = true, optional = true } [[bin]] name = "cp" path = "src/main.rs" [features] feat_selinux = ["selinux", "uucore/selinux"] feat_acl = ["exacl"] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/LICENSE000077700000000000000000000000001504311601400254342../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/README.md000066400000000000000000000015331504311601400240470ustar00rootroot00000000000000 # Feature list ## To Do - [ ] cli-symbolic-links - [ ] context - [ ] copy-contents - [ ] sparse 
## Completed - [x] archive - [x] attributes-only - [x] backup - [x] dereference - [x] force (Not implemented on Windows) - [x] interactive - [x] link - [x] no-clobber - [x] no-dereference - [x] no-dereference-preserve-links - [x] no-preserve - [x] no-target-directory - [x] one-file-system - [x] parents - [x] paths - [x] preserve - [x] preserve-default-attributes - [x] recursive - [x] reflink - [x] remove-destination (On Windows, current only works for writeable files) - [x] strip-trailing-slashes - [x] suffix - [x] symbolic-link - [x] target-directory - [x] update - [x] verbose - [x] version coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/locales/000077500000000000000000000000001504311601400242105ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/locales/en-US.ftl000066400000000000000000000175501504311601400256560ustar00rootroot00000000000000cp-about = Copy SOURCE to DEST, or multiple SOURCE(s) to DIRECTORY. cp-usage = cp [OPTION]... [-T] SOURCE DEST cp [OPTION]... SOURCE... DIRECTORY cp [OPTION]... -t DIRECTORY SOURCE... cp-after-help = Do not copy a non-directory that has an existing destination with the same or newer modification timestamp; instead, silently skip the file without failing. If timestamps are being preserved, the comparison is to the source timestamp truncated to the resolutions of the destination file system and of the system calls used to update timestamps; this avoids duplicate work if several cp -pu commands are executed with the same source and destination. This option is ignored if the -n or --no-clobber option is also specified. Also, if --preserve=links is also specified (like with cp -au for example), that will take precedence; consequently, depending on the order that files are processed from the source, newer files in the destination may be replaced, to mirror hard links in the source. 
which gives more control over which existing files in the destination are replaced, and its value can be one of the following: - all This is the default operation when an --update option is not specified, and results in all existing files in the destination being replaced. - none This is similar to the --no-clobber option, in that no files in the destination are replaced, but also skipping a file does not induce a failure. - older This is the default operation when --update is specified, and results in files being replaced if they're older than the corresponding source file. # Help messages cp-help-target-directory = copy all SOURCE arguments into target-directory cp-help-no-target-directory = Treat DEST as a regular file and not a directory cp-help-interactive = ask before overwriting files cp-help-link = hard-link files instead of copying cp-help-no-clobber = don't overwrite a file that already exists cp-help-recursive = copy directories recursively cp-help-strip-trailing-slashes = remove any trailing slashes from each SOURCE argument cp-help-debug = explain how a file is copied. Implies -v cp-help-verbose = explicitly state what is being done cp-help-symbolic-link = make symbolic links instead of copying cp-help-force = if an existing destination file cannot be opened, remove it and try again (this option is ignored when the -n option is also used). Currently not implemented for Windows. cp-help-remove-destination = remove each existing destination file before attempting to open it (contrast with --force). On Windows, currently only works for writeable files. cp-help-reflink = control clone/CoW copies. 
See below cp-help-attributes-only = Don't copy the file data, just the attributes cp-help-preserve = Preserve the specified attributes (default: mode, ownership (unix only), timestamps), if possible additional attributes: context, links, xattr, all cp-help-preserve-default = same as --preserve=mode,ownership(unix only),timestamps cp-help-no-preserve = don't preserve the specified attributes cp-help-parents = use full source file name under DIRECTORY cp-help-no-dereference = never follow symbolic links in SOURCE cp-help-dereference = always follow symbolic links in SOURCE cp-help-cli-symbolic-links = follow command-line symbolic links in SOURCE cp-help-archive = Same as -dR --preserve=all cp-help-no-dereference-preserve-links = same as --no-dereference --preserve=links cp-help-one-file-system = stay on this file system cp-help-sparse = control creation of sparse files. See below cp-help-selinux = set SELinux security context of destination file to default type cp-help-context = like -Z, or if CTX is specified then set the SELinux or SMACK security context to CTX cp-help-progress = Display a progress bar. Note: this feature is not supported by GNU coreutils. 
cp-help-copy-contents = NotImplemented: copy contents of special files when recursive # Error messages cp-error-missing-file-operand = missing file operand cp-error-missing-destination-operand = missing destination file operand after { $source } cp-error-extra-operand = extra operand { $operand } cp-error-same-file = { $source } and { $dest } are the same file cp-error-backing-up-destroy-source = backing up { $dest } might destroy source; { $source } not copied cp-error-cannot-open-for-reading = cannot open { $source } for reading cp-error-not-writing-dangling-symlink = not writing through dangling symlink { $dest } cp-error-failed-to-clone = failed to clone { $source } from { $dest }: { $error } cp-error-cannot-change-attribute = cannot change attribute { $dest }: Source file is a non regular file cp-error-cannot-stat = cannot stat { $source }: No such file or directory cp-error-cannot-create-symlink = cannot create symlink { $dest } to { $source } cp-error-cannot-create-hard-link = cannot create hard link { $dest } to { $source } cp-error-omitting-directory = -r not specified; omitting directory { $dir } cp-error-cannot-copy-directory-into-itself = cannot copy a directory, { $source }, into itself, { $dest } cp-error-will-not-copy-through-symlink = will not copy { $source } through just-created symlink { $dest } cp-error-will-not-overwrite-just-created = will not overwrite just-created { $dest } with { $source } cp-error-target-not-directory = target: { $target } is not a directory cp-error-cannot-overwrite-directory-with-non-directory = cannot overwrite directory { $dir } with non-directory cp-error-cannot-overwrite-non-directory-with-directory = cannot overwrite non-directory with directory cp-error-with-parents-dest-must-be-dir = with --parents, the destination must be a directory cp-error-not-replacing = not replacing { $file } cp-error-failed-get-current-dir = failed to get current directory { $error } cp-error-failed-set-permissions = cannot set permissions 
{ $path } cp-error-backup-mutually-exclusive = options --backup and --no-clobber are mutually exclusive cp-error-invalid-argument = invalid argument { $arg } for '{ $option }' cp-error-option-not-implemented = Option '{ $option }' not yet implemented. cp-error-not-all-files-copied = Not all files were copied cp-error-reflink-always-sparse-auto = `--reflink=always` can be used only with --sparse=auto cp-error-file-exists = { $path }: File exists cp-error-invalid-backup-argument = --backup is mutually exclusive with -n or --update=none-fail cp-error-reflink-not-supported = --reflink is only supported on linux and macOS cp-error-sparse-not-supported = --sparse is only supported on linux cp-error-not-a-directory = { $path } is not a directory cp-error-selinux-not-enabled = SELinux was not enabled during the compile time! cp-error-selinux-set-context = failed to set the security context of { $path }: { $error } cp-error-selinux-get-context = failed to get security context of { $path } cp-error-selinux-error = SELinux error: { $error } cp-error-cannot-create-fifo = cannot create fifo { $path }: File exists cp-error-invalid-attribute = invalid attribute { $value } cp-error-failed-to-create-whole-tree = failed to create whole tree cp-error-failed-to-create-directory = Failed to create directory: { $error } cp-error-backup-format = cp: { $error } Try '{ $exec } --help' for more information. 
# Debug enum strings cp-debug-enum-no = no cp-debug-enum-yes = yes cp-debug-enum-avoided = avoided cp-debug-enum-unsupported = unsupported cp-debug-enum-unknown = unknown cp-debug-enum-zeros = zeros cp-debug-enum-seek-hole = SEEK_HOLE cp-debug-enum-seek-hole-zeros = SEEK_HOLE + zeros # Warning messages cp-warning-source-specified-more-than-once = source { $file_type } { $source } specified more than once # Verbose and debug messages cp-verbose-copied = { $source } -> { $dest } cp-debug-skipped = skipped { $path } cp-verbose-created-directory = { $source } -> { $dest } cp-debug-copy-offload = copy offload: { $offload }, reflink: { $reflink }, sparse detection: { $sparse } # Prompts cp-prompt-overwrite = overwrite { $path }? cp-prompt-overwrite-with-mode = replace { $path }, overriding mode coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/locales/fr-FR.ftl000066400000000000000000000216671504311601400256470ustar00rootroot00000000000000cp-about = Copier SOURCE vers DEST, ou plusieurs SOURCE(s) vers RÉPERTOIRE. cp-usage = cp [OPTION]... [-T] SOURCE DEST cp [OPTION]... SOURCE... RÉPERTOIRE cp [OPTION]... -t RÉPERTOIRE SOURCE... cp-after-help = Ne pas copier un non-répertoire qui a une destination existante avec le même horodatage de modification ou plus récent ; à la place, ignorer silencieusement le fichier sans échec. Si les horodatages sont préservés, la comparaison est faite avec l'horodatage source tronqué aux résolutions du système de fichiers de destination et des appels système utilisés pour mettre à jour les horodatages ; cela évite le travail en double si plusieurs commandes cp -pu sont exécutées avec la même source et destination. Cette option est ignorée si l'option -n ou --no-clobber est également spécifiée. 
De plus, si --preserve=links est également spécifié (comme avec cp -au par exemple), cela aura la priorité ; par conséquent, selon l'ordre dans lequel les fichiers sont traités depuis la source, les fichiers plus récents dans la destination peuvent être remplacés, pour refléter les liens durs dans la source. ce qui donne plus de contrôle sur les fichiers existants dans la destination qui sont remplacés, et sa valeur peut être l'une des suivantes : - all C'est l'opération par défaut lorsqu'une option --update n'est pas spécifiée, et entraîne le remplacement de tous les fichiers existants dans la destination. - none Cela est similaire à l'option --no-clobber, en ce sens qu'aucun fichier dans la destination n'est remplacé, mais ignorer un fichier n'induit pas d'échec. - older C'est l'opération par défaut lorsque --update est spécifié, et entraîne le remplacement des fichiers s'ils sont plus anciens que le fichier source correspondant. # Messages d'aide cp-help-target-directory = copier tous les arguments SOURCE dans le répertoire cible cp-help-no-target-directory = Traiter DEST comme un fichier régulier et non comme un répertoire cp-help-interactive = demander avant d'écraser les fichiers cp-help-link = créer des liens durs au lieu de copier cp-help-no-clobber = ne pas écraser un fichier qui existe déjà cp-help-recursive = copier les répertoires récursivement cp-help-strip-trailing-slashes = supprimer les barres obliques finales de chaque argument SOURCE cp-help-debug = expliquer comment un fichier est copié. Implique -v cp-help-verbose = indiquer explicitement ce qui est fait cp-help-symbolic-link = créer des liens symboliques au lieu de copier cp-help-force = si un fichier de destination existant ne peut pas être ouvert, le supprimer et réessayer (cette option est ignorée lorsque l'option -n est également utilisée). Actuellement non implémenté pour Windows. 
cp-help-remove-destination = supprimer chaque fichier de destination existant avant de tenter de l'ouvrir (contraste avec --force). Sur Windows, ne fonctionne actuellement que pour les fichiers inscriptibles. cp-help-reflink = contrôler les copies clone/CoW. Voir ci-dessous cp-help-attributes-only = Ne pas copier les données du fichier, juste les attributs cp-help-preserve = Préserver les attributs spécifiés (par défaut : mode, propriété (unix uniquement), horodatages), si possible attributs supplémentaires : contexte, liens, xattr, all cp-help-preserve-default = identique à --preserve=mode,ownership(unix uniquement),timestamps cp-help-no-preserve = ne pas préserver les attributs spécifiés cp-help-parents = utiliser le nom complet du fichier source sous RÉPERTOIRE cp-help-no-dereference = ne jamais suivre les liens symboliques dans SOURCE cp-help-dereference = toujours suivre les liens symboliques dans SOURCE cp-help-cli-symbolic-links = suivre les liens symboliques de la ligne de commande dans SOURCE cp-help-archive = Identique à -dR --preserve=all cp-help-no-dereference-preserve-links = identique à --no-dereference --preserve=links cp-help-one-file-system = rester sur ce système de fichiers cp-help-sparse = contrôler la création de fichiers épars. Voir ci-dessous cp-help-selinux = définir le contexte de sécurité SELinux du fichier de destination au type par défaut cp-help-context = comme -Z, ou si CTX est spécifié, définir le contexte de sécurité SELinux ou SMACK à CTX cp-help-progress = Afficher une barre de progression. Note : cette fonctionnalité n'est pas supportée par GNU coreutils. 
cp-help-copy-contents = Non implémenté : copier le contenu des fichiers spéciaux lors de la récursion # Messages d'erreur cp-error-missing-file-operand = opérande fichier manquant cp-error-missing-destination-operand = opérande fichier de destination manquant après { $source } cp-error-extra-operand = opérande supplémentaire { $operand } cp-error-same-file = { $source } et { $dest } sont le même fichier cp-error-backing-up-destroy-source = sauvegarder { $dest } pourrait détruire la source ; { $source } non copié cp-error-cannot-open-for-reading = impossible d'ouvrir { $source } en lecture cp-error-not-writing-dangling-symlink = ne pas écrire à travers le lien symbolique pendant { $dest } cp-error-failed-to-clone = échec du clonage de { $source } depuis { $dest } : { $error } cp-error-cannot-change-attribute = impossible de changer l'attribut { $dest } : Le fichier source n'est pas un fichier régulier cp-error-cannot-stat = impossible de faire stat sur { $source } : Aucun fichier ou répertoire de ce type cp-error-cannot-create-symlink = impossible de créer le lien symbolique { $dest } vers { $source } cp-error-cannot-create-hard-link = impossible de créer le lien dur { $dest } vers { $source } cp-error-omitting-directory = -r non spécifié ; répertoire { $dir } omis cp-error-cannot-copy-directory-into-itself = impossible de copier un répertoire, { $source }, dans lui-même, { $dest } cp-error-will-not-copy-through-symlink = ne copiera pas { $source } à travers le lien symbolique tout juste créé { $dest } cp-error-will-not-overwrite-just-created = n'écrasera pas le fichier tout juste créé { $dest } avec { $source } cp-error-target-not-directory = cible : { $target } n'est pas un répertoire cp-error-cannot-overwrite-directory-with-non-directory = impossible d'écraser le répertoire { $dir } avec un non-répertoire cp-error-cannot-overwrite-non-directory-with-directory = impossible d'écraser un non-répertoire avec un répertoire cp-error-with-parents-dest-must-be-dir = avec 
--parents, la destination doit être un répertoire cp-error-not-replacing = ne remplace pas { $file } cp-error-failed-get-current-dir = échec de l'obtention du répertoire actuel { $error } cp-error-failed-set-permissions = impossible de définir les permissions { $path } cp-error-backup-mutually-exclusive = les options --backup et --no-clobber sont mutuellement exclusives cp-error-invalid-argument = argument invalide { $arg } pour '{ $option }' cp-error-option-not-implemented = Option '{ $option }' pas encore implémentée. cp-error-not-all-files-copied = Tous les fichiers n'ont pas été copiés cp-error-reflink-always-sparse-auto = `--reflink=always` ne peut être utilisé qu'avec --sparse=auto cp-error-file-exists = { $path } : Le fichier existe cp-error-invalid-backup-argument = --backup est mutuellement exclusif avec -n ou --update=none-fail cp-error-reflink-not-supported = --reflink n'est supporté que sur linux et macOS cp-error-sparse-not-supported = --sparse n'est supporté que sur linux cp-error-not-a-directory = { $path } n'est pas un répertoire cp-error-selinux-not-enabled = SELinux n'était pas activé lors de la compilation ! cp-error-selinux-set-context = échec de la définition du contexte de sécurité de { $path } : { $error } cp-error-selinux-get-context = échec de l'obtention du contexte de sécurité de { $path } cp-error-selinux-error = Erreur SELinux : { $error } cp-error-cannot-create-fifo = impossible de créer le fifo { $path } : Le fichier existe cp-error-invalid-attribute = attribut invalide { $value } cp-error-failed-to-create-whole-tree = échec de la création de l'arborescence complète cp-error-failed-to-create-directory = Échec de la création du répertoire : { $error } cp-error-backup-format = cp : { $error } Tentez '{ $exec } --help' pour plus d'informations. 
# Debug enum strings cp-debug-enum-no = non cp-debug-enum-yes = oui cp-debug-enum-avoided = évité cp-debug-enum-unsupported = non supporté cp-debug-enum-unknown = inconnu cp-debug-enum-zeros = zéros cp-debug-enum-seek-hole = SEEK_HOLE cp-debug-enum-seek-hole-zeros = SEEK_HOLE + zéros # Messages d'avertissement cp-warning-source-specified-more-than-once = { $file_type } source { $source } spécifié plus d'une fois # Messages verbeux et de débogage cp-verbose-copied = { $source } -> { $dest } cp-debug-skipped = { $path } ignoré cp-verbose-created-directory = { $source } -> { $dest } cp-debug-copy-offload = copy offload : { $offload }, reflink : { $reflink }, sparse detection : { $sparse } # Invites cp-prompt-overwrite = écraser { $path } ? cp-prompt-overwrite-with-mode = remplacer { $path }, en écrasant le mode coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/000077500000000000000000000000001504311601400233555ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/copydir.rs000066400000000000000000000500751504311601400254030ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore TODO canonicalizes direntry pathbuf symlinked IRWXO IRWXG //! Recursively copy the contents of a directory. //! //! See the [`copy_directory`] function for more information. 
#[cfg(windows)] use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::env; use std::fs; use std::io; use std::path::{Path, PathBuf, StripPrefixError}; use indicatif::ProgressBar; use uucore::display::Quotable; use uucore::error::UIoError; use uucore::fs::{ FileInformation, MissingHandling, ResolveMode, canonicalize, path_ends_with_terminator, }; use uucore::translate; use uucore::show; use uucore::show_error; use uucore::uio_error; use walkdir::{DirEntry, WalkDir}; use crate::{ CopyResult, CpError, Options, aligned_ancestors, context_for, copy_attributes, copy_file, copy_link, }; /// Ensure a Windows path starts with a `\\?`. #[cfg(target_os = "windows")] fn adjust_canonicalization(p: &Path) -> Cow { // In some cases, \\? can be missing on some Windows paths. Add it at the // beginning unless the path is prefixed with a device namespace. const VERBATIM_PREFIX: &str = r"\\?"; const DEVICE_NS_PREFIX: &str = r"\\."; let has_prefix = p .components() .next() .and_then(|comp| comp.as_os_str().to_str()) .is_some_and(|p_str| { p_str.starts_with(VERBATIM_PREFIX) || p_str.starts_with(DEVICE_NS_PREFIX) }); if has_prefix { p.into() } else { Path::new(VERBATIM_PREFIX).join(p).into() } } /// Get a descendant path relative to the given parent directory. /// /// If `root_parent` is `None`, then this just returns the `path` /// itself. Otherwise, this function strips the parent prefix from the /// given `path`, leaving only the portion of the path relative to the /// parent. fn get_local_to_root_parent( path: &Path, root_parent: Option<&Path>, ) -> Result { match root_parent { Some(parent) => { // On Windows, some paths are starting with \\? 
// but not always, so, make sure that we are consistent for strip_prefix // See https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file for more info #[cfg(windows)] let (path, parent) = ( adjust_canonicalization(path), adjust_canonicalization(parent), ); let path = path.strip_prefix(parent)?; Ok(path.to_path_buf()) } None => Ok(path.to_path_buf()), } } /// Given an iterator, return all its items except the last. fn skip_last(mut iter: impl Iterator) -> impl Iterator { let last = iter.next(); iter.scan(last, |state, item| state.replace(item)) } /// Paths that are invariant throughout the traversal when copying a directory. struct Context<'a> { /// The current working directory at the time of starting the traversal. current_dir: PathBuf, /// The path to the parent of the source directory, if any. root_parent: Option, /// The target path to which the directory will be copied. target: &'a Path, /// The source path from which the directory will be copied. root: &'a Path, } impl<'a> Context<'a> { fn new(root: &'a Path, target: &'a Path) -> io::Result { let current_dir = env::current_dir()?; let root_path = current_dir.join(root); let root_parent = if target.exists() && !root.to_str().unwrap().ends_with("/.") { root_path.parent().map(|p| p.to_path_buf()) } else { Some(root_path) }; Ok(Self { current_dir, root_parent, target, root, }) } } /// Data needed to perform a single copy operation while traversing a directory. /// /// For convenience while traversing a directory, the [`Entry::new`] /// function allows creating an entry from a [`Context`] and a /// [`walkdir::DirEntry`]. 
/// /// # Examples /// /// For example, if the source directory structure is `a/b/c`, the /// target is `d/`, a directory that already exists, and the copy /// command is `cp -r a/b/c d`, then the overall set of copy /// operations could be represented as three entries, /// /// ```rust,ignore /// let operations = [ /// Entry { /// source_absolute: "/tmp/a".into(), /// source_relative: "a".into(), /// local_to_target: "d/a".into(), /// target_is_file: false, /// } /// Entry { /// source_absolute: "/tmp/a/b".into(), /// source_relative: "a/b".into(), /// local_to_target: "d/a/b".into(), /// target_is_file: false, /// } /// Entry { /// source_absolute: "/tmp/a/b/c".into(), /// source_relative: "a/b/c".into(), /// local_to_target: "d/a/b/c".into(), /// target_is_file: false, /// } /// ]; /// ``` struct Entry { /// The absolute path to file or directory to copy. source_absolute: PathBuf, /// The relative path to file or directory to copy. source_relative: PathBuf, /// The path to the destination, relative to the target. local_to_target: PathBuf, /// Whether the destination is a file. 
target_is_file: bool, } impl Entry { fn new>( context: &Context, source: A, no_target_dir: bool, ) -> Result { let source = source.as_ref(); let source_relative = source.to_path_buf(); let source_absolute = context.current_dir.join(&source_relative); let mut descendant = get_local_to_root_parent(&source_absolute, context.root_parent.as_deref())?; if no_target_dir { let source_is_dir = source.is_dir(); if path_ends_with_terminator(context.target) && source_is_dir { if let Err(e) = fs::create_dir_all(context.target) { eprintln!( "{}", translate!("cp-error-failed-to-create-directory", "error" => e) ); } } else { descendant = descendant.strip_prefix(context.root)?.to_path_buf(); } } let local_to_target = context.target.join(descendant); let target_is_file = context.target.is_file(); Ok(Self { source_absolute, source_relative, local_to_target, target_is_file, }) } } #[allow(clippy::too_many_arguments)] /// Copy a single entry during a directory traversal. fn copy_direntry( progress_bar: Option<&ProgressBar>, entry: Entry, options: &Options, symlinked_files: &mut HashSet, preserve_hard_links: bool, copied_destinations: &HashSet, copied_files: &mut HashMap, ) -> CopyResult<()> { let Entry { source_absolute, source_relative, local_to_target, target_is_file, } = entry; // If the source is a symbolic link and the options tell us not to // dereference the link, then copy the link object itself. if source_absolute.is_symlink() && !options.dereference { return copy_link(&source_absolute, &local_to_target, symlinked_files, options); } // If the source is a directory and the destination does not // exist, ... 
if source_absolute.is_dir() && !local_to_target.exists() { return if target_is_file { Err(translate!("cp-error-cannot-overwrite-non-directory-with-directory").into()) } else { build_dir(&local_to_target, false, options, Some(&source_absolute))?; if options.verbose { println!("{}", context_for(&source_relative, &local_to_target)); } Ok(()) }; } // If the source is not a directory, then we need to copy the file. if !source_absolute.is_dir() { if let Err(err) = copy_file( progress_bar, &source_absolute, local_to_target.as_path(), options, symlinked_files, copied_destinations, copied_files, false, ) { if preserve_hard_links { if !source_absolute.is_symlink() { return Err(err); } // silent the error with a symlink // In case we do --archive, we might copy the symlink // before the file itself } else { // At this point, `path` is just a plain old file. // Terminate this function immediately if there is any // kind of error *except* a "permission denied" error. // // TODO What other kinds of errors, if any, should // cause us to continue walking the directory? match err { CpError::IoErrContext(e, _) if e.kind() == io::ErrorKind::PermissionDenied => { show!(uio_error!( e, "{}", translate!("cp-error-cannot-open-for-reading", "source" => source_relative.quote()), )); } e => return Err(e), } } } } // In any other case, there is nothing to do, so we just return to // continue the traversal. Ok(()) } /// Read the contents of the directory `root` and recursively copy the /// contents to `target`. /// /// Any errors encountered copying files in the tree will be logged but /// will not cause a short-circuit. 
#[allow(clippy::too_many_arguments)] pub(crate) fn copy_directory( progress_bar: Option<&ProgressBar>, root: &Path, target: &Path, options: &Options, symlinked_files: &mut HashSet, copied_destinations: &HashSet, copied_files: &mut HashMap, source_in_command_line: bool, ) -> CopyResult<()> { // if no-dereference is enabled and this is a symlink, copy it as a file if !options.dereference(source_in_command_line) && root.is_symlink() { return copy_file( progress_bar, root, target, options, symlinked_files, copied_destinations, copied_files, source_in_command_line, ); } if !options.recursive { return Err(translate!("cp-error-omitting-directory", "dir" => root.quote()).into()); } // check if root is a prefix of target if path_has_prefix(target, root)? { return Err(translate!("cp-error-cannot-copy-directory-into-itself", "source" => root.quote(), "dest" => target.join(root.file_name().unwrap()).quote()) .into()); } // If in `--parents` mode, create all the necessary ancestor directories. // // For example, if the command is `cp --parents a/b/c d`, that // means we need to copy the two ancestor directories first: // // a -> d/a // a/b -> d/a/b // let tmp = if options.parents { if let Some(parent) = root.parent() { let new_target = target.join(parent); build_dir(&new_target, true, options, None)?; if options.verbose { // For example, if copying file `a/b/c` and its parents // to directory `d/`, then print // // a -> d/a // a/b -> d/a/b // for (x, y) in aligned_ancestors(root, &target.join(root)) { println!("{} -> {}", x.display(), y.display()); } } new_target } else { target.to_path_buf() } } else { target.to_path_buf() }; let target = tmp.as_path(); let preserve_hard_links = options.preserve_hard_links(); // Collect some paths here that are invariant during the traversal // of the given directory, like the current working directory and // the target directory. 
let context = match Context::new(root, target) { Ok(c) => c, Err(e) => { return Err(translate!("cp-error-failed-get-current-dir", "error" => e).into()); } }; // The directory we were in during the previous iteration let mut last_iter: Option = None; // Traverse the contents of the directory, copying each one. for direntry_result in WalkDir::new(root) .same_file_system(options.one_file_system) .follow_links(options.dereference) { match direntry_result { Ok(direntry) => { let entry = Entry::new(&context, direntry.path(), options.no_target_dir)?; copy_direntry( progress_bar, entry, options, symlinked_files, preserve_hard_links, copied_destinations, copied_files, )?; // We omit certain permissions when creating directories // to prevent other users from accessing them before they're done. // We thus need to fix the permissions of each directory we copy // once it's contents are ready. // This "fixup" is implemented here in a memory-efficient manner. // // We detect iterations where we "walk up" the directory tree, // and fix permissions on all the directories we exited. // (Note that there can be more than one! We might step out of // `./a/b/c` into `./a/`, in which case we'll need to fix the // permissions of both `./a/b/c` and `./a/b`, in that order.) if direntry.file_type().is_dir() { // If true, last_iter is not a parent of this iter. // The means we just exited a directory. let went_up = if let Some(last_iter) = &last_iter { last_iter.path().strip_prefix(direntry.path()).is_ok() } else { false }; if went_up { // Compute the "difference" between `last_iter` and `direntry`. // For example, if... // - last_iter = `a/b/c/d` // - direntry = `a/b` // then diff = `c/d` // // All the unwraps() here are unreachable. let last_iter = last_iter.as_ref().unwrap(); let diff = last_iter.path().strip_prefix(direntry.path()).unwrap(); // Fix permissions for every entry in `diff`, inside-out. 
// We skip the last directory (which will be `.`) because // its permissions will be fixed when we walk _out_ of it. // (at this point, we might not be done copying `.`!) for p in skip_last(diff.ancestors()) { let src = direntry.path().join(p); let entry = Entry::new(&context, &src, options.no_target_dir)?; copy_attributes( &entry.source_absolute, &entry.local_to_target, &options.attributes, )?; } } last_iter = Some(direntry); } } // Print an error message, but continue traversing the directory. Err(e) => show_error!("{e}"), } } // Handle final directory permission fixes. // This is almost the same as the permission-fixing code above, // with minor differences (commented) if let Some(last_iter) = last_iter { let diff = last_iter.path().strip_prefix(root).unwrap(); // Do _not_ skip `.` this time, since we know we're done. // This is where we fix the permissions of the top-level // directory we just copied. for p in diff.ancestors() { let src = root.join(p); let entry = Entry::new(&context, &src, options.no_target_dir)?; copy_attributes( &entry.source_absolute, &entry.local_to_target, &options.attributes, )?; } } // Also fix permissions for parent directories, // if we were asked to create them. if options.parents { let dest = target.join(root.file_name().unwrap()); for (x, y) in aligned_ancestors(root, dest.as_path()) { if let Ok(src) = canonicalize(x, MissingHandling::Normal, ResolveMode::Physical) { copy_attributes(&src, y, &options.attributes)?; } } } Ok(()) } /// Decide whether the second path is a prefix of the first. /// /// This function canonicalizes the paths via /// [`uucore::fs::canonicalize`] before comparing. /// /// # Errors /// /// If there is an error determining the canonical, absolute form of /// either path. 
/// /// # Examples /// /// ```rust,ignore /// assert!(path_has_prefix(Path::new("/usr/bin"), Path::new("/usr"))) /// assert!(!path_has_prefix(Path::new("/usr"), Path::new("/usr/bin"))) /// assert!(!path_has_prefix(Path::new("/usr/bin"), Path::new("/var/log"))) /// ``` pub fn path_has_prefix(p1: &Path, p2: &Path) -> io::Result { let pathbuf1 = canonicalize(p1, MissingHandling::Normal, ResolveMode::Logical)?; let pathbuf2 = canonicalize(p2, MissingHandling::Normal, ResolveMode::Logical)?; Ok(pathbuf1.starts_with(pathbuf2)) } /// Builds a directory at the specified path with the given options. /// /// # Notes /// - If `copy_attributes_from` is `Some`, the new directory's attributes will be /// copied from the provided file. Otherwise, the new directory will have the default /// attributes for the current user. /// - This method excludes certain permissions if ownership or special mode bits could /// potentially change. (See `test_dir_perm_race_with_preserve_mode_and_ownership`) /// - The `recursive` flag determines whether parent directories should be created /// if they do not already exist. // we need to allow unused_variable since `options` might be unused in non unix systems #[allow(unused_variables)] fn build_dir( path: &PathBuf, recursive: bool, options: &Options, copy_attributes_from: Option<&Path>, ) -> CopyResult<()> { let mut builder = fs::DirBuilder::new(); builder.recursive(recursive); // To prevent unauthorized access before the folder is ready, // exclude certain permissions if ownership or special mode bits // could potentially change. #[cfg(unix)] { use crate::Preserve; use std::os::unix::fs::PermissionsExt; // we need to allow trivial casts here because some systems like linux have u32 constants in // in libc while others don't. #[allow(clippy::unnecessary_cast)] let mut excluded_perms = if matches!(options.attributes.ownership, Preserve::Yes { .. 
}) { libc::S_IRWXG | libc::S_IRWXO // exclude rwx for group and other } else if matches!(options.attributes.mode, Preserve::Yes { .. }) { libc::S_IWGRP | libc::S_IWOTH //exclude w for group and other } else { 0 } as u32; let umask = if copy_attributes_from.is_some() && matches!(options.attributes.mode, Preserve::Yes { .. }) { !fs::symlink_metadata(copy_attributes_from.unwrap())? .permissions() .mode() } else { uucore::mode::get_umask() }; excluded_perms |= umask; let mode = !excluded_perms & 0o777; //use only the last three octet bits std::os::unix::fs::DirBuilderExt::mode(&mut builder, mode); } builder.create(path)?; Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/cp.rs000066400000000000000000002752131504311601400243370ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) copydir ficlone fiemap ftruncate linkgs lstat nlink nlinks pathbuf pwrite reflink strs xattrs symlinked deduplicated advcpmv nushell IRWXG IRWXO IRWXU IRWXUGO IRWXU IRWXG IRWXO IRWXUGO use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; use std::ffi::OsString; use std::fmt::Display; use std::fs::{self, Metadata, OpenOptions, Permissions}; #[cfg(unix)] use std::os::unix::fs::{FileTypeExt, PermissionsExt}; use std::path::{Path, PathBuf, StripPrefixError}; use std::{fmt, io}; #[cfg(all(unix, not(target_os = "android")))] use uucore::fsxattr::copy_xattrs; use uucore::translate; use clap::{Arg, ArgAction, ArgMatches, Command, builder::ValueParser, value_parser}; use filetime::FileTime; use indicatif::{ProgressBar, ProgressStyle}; use thiserror::Error; use platform::copy_on_write; use uucore::display::Quotable; use uucore::error::{UError, UResult, UUsageError, set_exit_code}; #[cfg(unix)] use uucore::fs::make_fifo; use uucore::fs::{ FileInformation, MissingHandling, 
ResolveMode, are_hardlinks_to_same_file, canonicalize, get_filename, is_symlink_loop, normalize_path, path_ends_with_terminator, paths_refer_to_same_file, }; use uucore::{backup_control, update_control}; // These are exposed for projects (e.g. nushell) that want to create an `Options` value, which // requires these enum. pub use uucore::{backup_control::BackupMode, update_control::UpdateMode}; use uucore::{ format_usage, parser::shortcut_value_parser::ShortcutValueParser, prompt_yes, show_error, show_warning, }; use crate::copydir::copy_directory; mod copydir; mod platform; #[derive(Debug, Error)] pub enum CpError { /// Simple [`io::Error`] wrapper #[error("{0}")] IoErr(#[from] io::Error), /// Wrapper for [`io::Error`] with path context #[error("{1}: {0}")] IoErrContext(io::Error, String), /// General copy error #[error("{0}")] Error(String), /// Represents the state when a non-fatal error has occurred /// and not all files were copied. #[error("{}", translate!("cp-error-not-all-files-copied"))] NotAllFilesCopied, /// Simple [`walkdir::Error`] wrapper #[error("{0}")] WalkDirErr(#[from] walkdir::Error), /// Simple [`StripPrefixError`] wrapper #[error(transparent)] StripPrefixError(#[from] StripPrefixError), /// Result of a skipped file /// Currently happens when "no" is selected in interactive mode or when /// `no-clobber` flag is set and destination is already present. /// `exit with error` is used to determine which exit code should be returned. #[error("Skipped copying file (exit with error = {0})")] Skipped(bool), /// Invalid argument error #[error("{0}")] InvalidArgument(String), /// All standard options are included as an implementation /// path, but those that are not implemented yet should return /// a `NotImplemented` error. 
#[error("{}", translate!("cp-error-option-not-implemented", "option" => 0))] NotImplemented(String), /// Invalid arguments to backup #[error(transparent)] Backup(#[from] BackupError), #[error("{}", translate!("cp-error-not-a-directory", "path" => .0.quote()))] NotADirectory(PathBuf), } // Manual impl for &str impl From<&'static str> for CpError { fn from(s: &'static str) -> Self { Self::Error(s.to_string()) } } impl From for CpError { fn from(s: String) -> Self { Self::Error(s) } } #[derive(Debug)] pub struct BackupError(String); impl Display for BackupError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", translate!("cp-error-backup-format", "error" => self.0.clone(), "exec" => uucore::execution_phrase()) ) } } impl std::error::Error for BackupError {} impl UError for CpError { fn code(&self) -> i32 { EXIT_ERR } } pub type CopyResult = Result; /// Specifies how to overwrite files. #[derive(Debug, Clone, Copy, Eq, PartialEq, Default)] pub enum ClobberMode { Force, RemoveDestination, #[default] Standard, } /// Specifies whether files should be overwritten. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum OverwriteMode { /// [Default] Always overwrite existing files Clobber(ClobberMode), /// Prompt before overwriting a file Interactive(ClobberMode), /// Never overwrite a file NoClobber, } impl Default for OverwriteMode { fn default() -> Self { Self::Clobber(ClobberMode::default()) } } /// Possible arguments for `--reflink`. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum ReflinkMode { Always, Auto, Never, } impl Default for ReflinkMode { #[allow(clippy::derivable_impls)] fn default() -> Self { #[cfg(any(target_os = "linux", target_os = "android", target_os = "macos"))] { ReflinkMode::Auto } #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "macos")))] { ReflinkMode::Never } } } /// Possible arguments for `--sparse`. 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)] pub enum SparseMode { Always, #[default] Auto, Never, } /// The expected file type of copy target #[derive(Copy, Clone)] pub enum TargetType { Directory, File, } /// Copy action to perform #[derive(Debug, Clone, Eq, PartialEq, Default)] pub enum CopyMode { Link, SymLink, #[default] Copy, Update, AttrOnly, } /// Preservation settings for various attributes /// /// It should be derived from options as follows: /// /// - if there is a list of attributes to preserve (i.e. `--preserve=ATTR_LIST`) parse that list with [`Attributes::parse_iter`], /// - if `-p` or `--preserve` is given without arguments, use [`Attributes::DEFAULT`], /// - if `-a`/`--archive` is passed, use [`Attributes::ALL`], /// - if `-d` is passed use [`Attributes::LINKS`], /// - otherwise, use [`Attributes::NONE`]. /// /// For full compatibility with GNU, these options should also combine. We /// currently only do a best effort imitation of that behavior, because it is /// difficult to achieve in clap, especially with `--no-preserve`. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct Attributes { #[cfg(unix)] pub ownership: Preserve, pub mode: Preserve, pub timestamps: Preserve, pub context: Preserve, pub links: Preserve, pub xattr: Preserve, } impl Default for Attributes { fn default() -> Self { Self::NONE } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Preserve { // explicit means whether the --no-preserve flag is used or not to distinguish out the default value. // e.g. --no-preserve=mode means mode = No { explicit = true } No { explicit: bool }, Yes { required: bool }, } impl PartialOrd for Preserve { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl Ord for Preserve { fn cmp(&self, other: &Self) -> Ordering { match (self, other) { (Self::No { .. }, Self::No { .. }) => Ordering::Equal, (Self::Yes { .. }, Self::No { .. }) => Ordering::Greater, (Self::No { .. }, Self::Yes { .. 
}) => Ordering::Less, ( Self::Yes { required: req_self }, Self::Yes { required: req_other, }, ) => req_self.cmp(req_other), } } } /// Options for the `cp` command /// /// All options are public so that the options can be programmatically /// constructed by other crates, such as nushell. That means that this struct /// is part of our public API. It should therefore not be changed without good /// reason. /// /// The fields are documented with the arguments that determine their value. #[allow(dead_code)] #[derive(Debug, Clone, Eq, PartialEq)] pub struct Options { /// `--attributes-only` pub attributes_only: bool, /// `--backup[=CONTROL]`, `-b` pub backup: BackupMode, /// `--copy-contents` pub copy_contents: bool, /// `-H` pub cli_dereference: bool, /// Determines the type of copying that should be done /// /// Set by the following arguments: /// - `-l`, `--link`: [`CopyMode::Link`] /// - `-s`, `--symbolic-link`: [`CopyMode::SymLink`] /// - `-u`, `--update[=WHEN]`: [`CopyMode::Update`] /// - `--attributes-only`: [`CopyMode::AttrOnly`] /// - otherwise: [`CopyMode::Copy`] pub copy_mode: CopyMode, /// `-L`, `--dereference` pub dereference: bool, /// `-T`, `--no-target-dir` pub no_target_dir: bool, /// `-x`, `--one-file-system` pub one_file_system: bool, /// Specifies what to do with an existing destination /// /// Set by the following arguments: /// - `-i`, `--interactive`: [`OverwriteMode::Interactive`] /// - `-n`, `--no-clobber`: [`OverwriteMode::NoClobber`] /// - otherwise: [`OverwriteMode::Clobber`] /// /// The `Interactive` and `Clobber` variants have a [`ClobberMode`] argument, /// set by the following arguments: /// - `-f`, `--force`: [`ClobberMode::Force`] /// - `--remove-destination`: [`ClobberMode::RemoveDestination`] /// - otherwise: [`ClobberMode::Standard`] pub overwrite: OverwriteMode, /// `--parents` pub parents: bool, /// `--sparse[=WHEN]` pub sparse_mode: SparseMode, /// `--strip-trailing-slashes` pub strip_trailing_slashes: bool, /// `--reflink[=WHEN]` 
pub reflink_mode: ReflinkMode, /// `--preserve=[=ATTRIBUTE_LIST]` and `--no-preserve=ATTRIBUTE_LIST` pub attributes: Attributes, /// `-R`, `-r`, `--recursive` pub recursive: bool, /// `-S`, `--suffix` pub backup_suffix: String, /// `-t`, `--target-directory` pub target_dir: Option, /// `--update[=UPDATE]` pub update: UpdateMode, /// `--debug` pub debug: bool, /// `-v`, `--verbose` pub verbose: bool, /// `-g`, `--progress` pub progress_bar: bool, /// -Z pub set_selinux_context: bool, // --context pub context: Option, } impl Default for Options { fn default() -> Self { Self { attributes_only: false, backup: BackupMode::default(), copy_contents: false, cli_dereference: false, copy_mode: CopyMode::default(), dereference: false, no_target_dir: false, one_file_system: false, overwrite: OverwriteMode::default(), parents: false, sparse_mode: SparseMode::default(), strip_trailing_slashes: false, reflink_mode: ReflinkMode::default(), attributes: Attributes::default(), recursive: false, backup_suffix: backup_control::DEFAULT_BACKUP_SUFFIX.to_owned(), target_dir: None, update: UpdateMode::default(), debug: false, verbose: false, progress_bar: false, set_selinux_context: false, context: None, } } } /// Enum representing if a file has been skipped. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum PerformedAction { Copied, Skipped, } /// Enum representing various debug states of the offload and reflink actions. #[derive(Debug)] #[allow(dead_code)] // All of them are used on Linux enum OffloadReflinkDebug { Unknown, No, Yes, Avoided, Unsupported, } /// Enum representing various debug states of the sparse detection. #[derive(Debug)] #[allow(dead_code)] // silent for now until we use them enum SparseDebug { Unknown, No, Zeros, SeekHole, SeekHoleZeros, Unsupported, } /// Struct that contains the debug state for each action in a file copy operation. 
#[derive(Debug)] struct CopyDebug { offload: OffloadReflinkDebug, reflink: OffloadReflinkDebug, sparse_detection: SparseDebug, } impl Display for OffloadReflinkDebug { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let msg = match self { Self::No => translate!("cp-debug-enum-no"), Self::Yes => translate!("cp-debug-enum-yes"), Self::Avoided => translate!("cp-debug-enum-avoided"), Self::Unsupported => translate!("cp-debug-enum-unsupported"), Self::Unknown => translate!("cp-debug-enum-unknown"), }; write!(f, "{msg}") } } impl Display for SparseDebug { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let msg = match self { Self::No => translate!("cp-debug-enum-no"), Self::Zeros => translate!("cp-debug-enum-zeros"), Self::SeekHole => translate!("cp-debug-enum-seek-hole"), Self::SeekHoleZeros => translate!("cp-debug-enum-seek-hole-zeros"), Self::Unsupported => translate!("cp-debug-enum-unsupported"), Self::Unknown => translate!("cp-debug-enum-unknown"), }; write!(f, "{msg}") } } /// This function prints the debug information of a file copy operation if /// no hard link or symbolic link is required, and data copy is required. /// It prints the debug information of the offload, reflink, and sparse detection actions. 
fn show_debug(copy_debug: &CopyDebug) { println!( "{}", translate!("cp-debug-copy-offload", "offload" => copy_debug.offload, "reflink" => copy_debug.reflink, "sparse" => copy_debug.sparse_detection) ); } static EXIT_ERR: i32 = 1; // Argument constants mod options { pub const ARCHIVE: &str = "archive"; pub const ATTRIBUTES_ONLY: &str = "attributes-only"; pub const CLI_SYMBOLIC_LINKS: &str = "cli-symbolic-links"; pub const CONTEXT: &str = "context"; pub const COPY_CONTENTS: &str = "copy-contents"; pub const DEREFERENCE: &str = "dereference"; pub const FORCE: &str = "force"; pub const INTERACTIVE: &str = "interactive"; pub const LINK: &str = "link"; pub const NO_CLOBBER: &str = "no-clobber"; pub const NO_DEREFERENCE: &str = "no-dereference"; pub const NO_DEREFERENCE_PRESERVE_LINKS: &str = "no-dereference-preserve-links"; pub const NO_PRESERVE: &str = "no-preserve"; pub const NO_TARGET_DIRECTORY: &str = "no-target-directory"; pub const ONE_FILE_SYSTEM: &str = "one-file-system"; pub const PARENT: &str = "parent"; pub const PARENTS: &str = "parents"; pub const PATHS: &str = "paths"; pub const PROGRESS_BAR: &str = "progress"; pub const PRESERVE: &str = "preserve"; pub const PRESERVE_DEFAULT_ATTRIBUTES: &str = "preserve-default-attributes"; pub const RECURSIVE: &str = "recursive"; pub const REFLINK: &str = "reflink"; pub const REMOVE_DESTINATION: &str = "remove-destination"; pub const SELINUX: &str = "Z"; pub const SPARSE: &str = "sparse"; pub const STRIP_TRAILING_SLASHES: &str = "strip-trailing-slashes"; pub const SYMBOLIC_LINK: &str = "symbolic-link"; pub const TARGET_DIRECTORY: &str = "target-directory"; pub const DEBUG: &str = "debug"; pub const VERBOSE: &str = "verbose"; } #[cfg(unix)] static PRESERVABLE_ATTRIBUTES: &[&str] = &[ "mode", "ownership", "timestamps", "context", "links", "xattr", "all", ]; #[cfg(not(unix))] static PRESERVABLE_ATTRIBUTES: &[&str] = &["mode", "timestamps", "context", "links", "xattr", "all"]; const PRESERVE_DEFAULT_VALUES: &str = if 
cfg!(unix) { "mode,ownership,timestamp" } else { "mode,timestamp" }; pub fn uu_app() -> Command { const MODE_ARGS: &[&str] = &[ options::LINK, options::REFLINK, options::SYMBOLIC_LINK, options::ATTRIBUTES_ONLY, options::COPY_CONTENTS, ]; Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("cp-about")) .override_usage(format_usage(&translate!("cp-usage"))) .after_help(format!( "{}\n\n{}", translate!("cp-after-help"), backup_control::BACKUP_CONTROL_LONG_HELP )) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::TARGET_DIRECTORY) .short('t') .conflicts_with(options::NO_TARGET_DIRECTORY) .long(options::TARGET_DIRECTORY) .value_name(options::TARGET_DIRECTORY) .value_hint(clap::ValueHint::DirPath) .value_parser(ValueParser::path_buf()) .help(translate!("cp-help-target-directory")), ) .arg( Arg::new(options::NO_TARGET_DIRECTORY) .short('T') .long(options::NO_TARGET_DIRECTORY) .conflicts_with(options::TARGET_DIRECTORY) .help(translate!("cp-help-no-target-directory")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::INTERACTIVE) .short('i') .long(options::INTERACTIVE) .overrides_with(options::NO_CLOBBER) .help(translate!("cp-help-interactive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::LINK) .short('l') .long(options::LINK) .overrides_with_all(MODE_ARGS) .help(translate!("cp-help-link")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_CLOBBER) .short('n') .long(options::NO_CLOBBER) .overrides_with(options::INTERACTIVE) .help(translate!("cp-help-no-clobber")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::RECURSIVE) .short('R') .visible_short_alias('r') .long(options::RECURSIVE) // --archive sets this option .help(translate!("cp-help-recursive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::STRIP_TRAILING_SLASHES) .long(options::STRIP_TRAILING_SLASHES) .help(translate!("cp-help-strip-trailing-slashes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DEBUG) 
.long(options::DEBUG) .help(translate!("cp-help-debug")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::VERBOSE) .short('v') .long(options::VERBOSE) .help(translate!("cp-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SYMBOLIC_LINK) .short('s') .long(options::SYMBOLIC_LINK) .overrides_with_all(MODE_ARGS) .help(translate!("cp-help-symbolic-link")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FORCE) .short('f') .long(options::FORCE) .help(translate!("cp-help-force")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::REMOVE_DESTINATION) .long(options::REMOVE_DESTINATION) .overrides_with(options::FORCE) .help(translate!("cp-help-remove-destination")) .action(ArgAction::SetTrue), ) .arg(backup_control::arguments::backup()) .arg(backup_control::arguments::backup_no_args()) .arg(backup_control::arguments::suffix()) .arg(update_control::arguments::update()) .arg(update_control::arguments::update_no_args()) .arg( Arg::new(options::REFLINK) .long(options::REFLINK) .value_name("WHEN") .overrides_with_all(MODE_ARGS) .require_equals(true) .default_missing_value("always") .value_parser(ShortcutValueParser::new(["auto", "always", "never"])) .num_args(0..=1) .help(translate!("cp-help-reflink")), ) .arg( Arg::new(options::ATTRIBUTES_ONLY) .long(options::ATTRIBUTES_ONLY) .overrides_with_all(MODE_ARGS) .help(translate!("cp-help-attributes-only")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRESERVE) .long(options::PRESERVE) .action(ArgAction::Append) .use_value_delimiter(true) .value_parser(ShortcutValueParser::new(PRESERVABLE_ATTRIBUTES)) .num_args(0..) 
.require_equals(true) .value_name("ATTR_LIST") .default_missing_value(PRESERVE_DEFAULT_VALUES) // -d sets this option // --archive sets this option .help(translate!("cp-help-preserve")), ) .arg( Arg::new(options::PRESERVE_DEFAULT_ATTRIBUTES) .short('p') .long(options::PRESERVE_DEFAULT_ATTRIBUTES) .help(translate!("cp-help-preserve-default")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_PRESERVE) .long(options::NO_PRESERVE) .action(ArgAction::Append) .use_value_delimiter(true) .value_parser(ShortcutValueParser::new(PRESERVABLE_ATTRIBUTES)) .num_args(0..) .require_equals(true) .value_name("ATTR_LIST") .help(translate!("cp-help-no-preserve")), ) .arg( Arg::new(options::PARENTS) .long(options::PARENTS) .alias(options::PARENT) .help(translate!("cp-help-parents")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_DEREFERENCE) .short('P') .long(options::NO_DEREFERENCE) .overrides_with(options::DEREFERENCE) // -d sets this option .help(translate!("cp-help-no-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DEREFERENCE) .short('L') .long(options::DEREFERENCE) .overrides_with(options::NO_DEREFERENCE) .help(translate!("cp-help-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CLI_SYMBOLIC_LINKS) .short('H') .help(translate!("cp-help-cli-symbolic-links")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ARCHIVE) .short('a') .long(options::ARCHIVE) .help(translate!("cp-help-archive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_DEREFERENCE_PRESERVE_LINKS) .short('d') .help(translate!("cp-help-no-dereference-preserve-links")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ONE_FILE_SYSTEM) .short('x') .long(options::ONE_FILE_SYSTEM) .help(translate!("cp-help-one-file-system")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SPARSE) .long(options::SPARSE) .value_name("WHEN") .value_parser(ShortcutValueParser::new(["never", "auto", "always"])) .help(translate!("cp-help-sparse")), ) 
.arg( Arg::new(options::SELINUX) .short('Z') .help(translate!("cp-help-selinux")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CONTEXT) .long(options::CONTEXT) .value_name("CTX") .value_parser(value_parser!(String)) .help(translate!("cp-help-context")) .num_args(0..=1) .require_equals(true) .default_missing_value(""), ) .arg( // The 'g' short flag is modeled after advcpmv // See this repo: https://github.com/jarun/advcpmv Arg::new(options::PROGRESS_BAR) .long(options::PROGRESS_BAR) .short('g') .action(ArgAction::SetTrue) .help(translate!("cp-help-progress")), ) // TODO: implement the following args .arg( Arg::new(options::COPY_CONTENTS) .long(options::COPY_CONTENTS) .overrides_with(options::ATTRIBUTES_ONLY) .help(translate!("cp-help-copy-contents")) .action(ArgAction::SetTrue), ) // END TODO .arg( Arg::new(options::PATHS) .action(ArgAction::Append) .num_args(1..) .required(true) .value_hint(clap::ValueHint::AnyPath) .value_parser(ValueParser::os_string()), ) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let options = Options::from_matches(&matches)?; if options.overwrite == OverwriteMode::NoClobber && options.backup != BackupMode::None { return Err(UUsageError::new( EXIT_ERR, translate!("cp-error-backup-mutually-exclusive"), )); } let paths: Vec = matches .get_many::(options::PATHS) .map(|v| v.map(PathBuf::from).collect()) .unwrap_or_default(); let (sources, target) = parse_path_args(paths, &options)?; if let Err(error) = copy(&sources, &target, &options) { match error { // Error::NotAllFilesCopied is non-fatal, but the error // code should still be EXIT_ERR as does GNU cp CpError::NotAllFilesCopied => {} // Else we caught a fatal bubbled-up error, log it to stderr _ => show_error!("{error}"), } set_exit_code(EXIT_ERR); } Ok(()) } impl ClobberMode { fn from_matches(matches: &ArgMatches) -> Self { if matches.get_flag(options::FORCE) { Self::Force } else if 
matches.get_flag(options::REMOVE_DESTINATION) { Self::RemoveDestination } else { Self::Standard } } } impl OverwriteMode { fn from_matches(matches: &ArgMatches) -> Self { if matches.get_flag(options::INTERACTIVE) { Self::Interactive(ClobberMode::from_matches(matches)) } else if matches.get_flag(options::NO_CLOBBER) { Self::NoClobber } else { Self::Clobber(ClobberMode::from_matches(matches)) } } } impl CopyMode { fn from_matches(matches: &ArgMatches) -> Self { if matches.get_flag(options::LINK) { Self::Link } else if matches.get_flag(options::SYMBOLIC_LINK) { Self::SymLink } else if matches .get_one::(update_control::arguments::OPT_UPDATE) .is_some() || matches.get_flag(update_control::arguments::OPT_UPDATE_NO_ARG) { Self::Update } else if matches.get_flag(options::ATTRIBUTES_ONLY) { if matches.get_flag(options::REMOVE_DESTINATION) { Self::Copy } else { Self::AttrOnly } } else { Self::Copy } } } impl Attributes { pub const ALL: Self = Self { #[cfg(unix)] ownership: Preserve::Yes { required: true }, mode: Preserve::Yes { required: true }, timestamps: Preserve::Yes { required: true }, context: { #[cfg(feature = "feat_selinux")] { Preserve::Yes { required: false } } #[cfg(not(feature = "feat_selinux"))] { Preserve::No { explicit: false } } }, links: Preserve::Yes { required: true }, xattr: Preserve::Yes { required: false }, }; pub const NONE: Self = Self { #[cfg(unix)] ownership: Preserve::No { explicit: false }, mode: Preserve::No { explicit: false }, timestamps: Preserve::No { explicit: false }, context: Preserve::No { explicit: false }, links: Preserve::No { explicit: false }, xattr: Preserve::No { explicit: false }, }; // TODO: ownership is required if the user is root, for non-root users it's not required. 
pub const DEFAULT: Self = Self { #[cfg(unix)] ownership: Preserve::Yes { required: true }, mode: Preserve::Yes { required: true }, timestamps: Preserve::Yes { required: true }, xattr: Preserve::Yes { required: true }, ..Self::NONE }; pub const LINKS: Self = Self { links: Preserve::Yes { required: true }, ..Self::NONE }; pub fn union(self, other: &Self) -> Self { Self { #[cfg(unix)] ownership: self.ownership.max(other.ownership), context: self.context.max(other.context), timestamps: self.timestamps.max(other.timestamps), mode: self.mode.max(other.mode), links: self.links.max(other.links), xattr: self.xattr.max(other.xattr), } } /// Set the field to `Preserve::No { explicit: true }` if the corresponding field /// in other is set to `Preserve::Yes { .. }`. pub fn diff(self, other: &Self) -> Self { fn update_preserve_field(current: Preserve, other: Preserve) -> Preserve { if matches!(other, Preserve::Yes { .. }) { Preserve::No { explicit: true } } else { current } } Self { #[cfg(unix)] ownership: update_preserve_field(self.ownership, other.ownership), mode: update_preserve_field(self.mode, other.mode), timestamps: update_preserve_field(self.timestamps, other.timestamps), context: update_preserve_field(self.context, other.context), links: update_preserve_field(self.links, other.links), xattr: update_preserve_field(self.xattr, other.xattr), } } pub fn parse_iter(values: impl Iterator) -> CopyResult where T: AsRef, { let mut new = Self::NONE; for value in values { new = new.union(&Self::parse_single_string(value.as_ref())?); } Ok(new) } /// Tries to match string containing a parameter to preserve with the corresponding entry in the /// Attributes struct. 
fn parse_single_string(value: &str) -> CopyResult { let value = value.to_lowercase(); if value == "all" { return Ok(Self::ALL); } let mut new = Self::NONE; let attribute = match value.as_ref() { "mode" => &mut new.mode, #[cfg(unix)] "ownership" => &mut new.ownership, "timestamps" => &mut new.timestamps, "context" => &mut new.context, "link" | "links" => &mut new.links, "xattr" => &mut new.xattr, _ => { return Err(CpError::InvalidArgument( translate!("cp-error-invalid-attribute", "value" => value.quote()), )); } }; *attribute = Preserve::Yes { required: true }; Ok(new) } } impl Options { #[allow(clippy::cognitive_complexity)] fn from_matches(matches: &ArgMatches) -> CopyResult { let not_implemented_opts = vec![ #[cfg(not(any(windows, unix)))] options::ONE_FILE_SYSTEM, #[cfg(windows)] options::FORCE, ]; for not_implemented_opt in not_implemented_opts { if matches.contains_id(not_implemented_opt) && matches.value_source(not_implemented_opt) == Some(clap::parser::ValueSource::CommandLine) { return Err(CpError::NotImplemented(not_implemented_opt.to_string())); } } let recursive = matches.get_flag(options::RECURSIVE) || matches.get_flag(options::ARCHIVE); let backup_mode = match backup_control::determine_backup_mode(matches) { Err(e) => return Err(CpError::Backup(BackupError(format!("{e}")))), Ok(mode) => mode, }; let update_mode = update_control::determine_update_mode(matches); if backup_mode != BackupMode::None && matches .get_one::(update_control::arguments::OPT_UPDATE) .is_some_and(|v| v == "none" || v == "none-fail") { return Err(CpError::InvalidArgument( translate!("cp-error-invalid-backup-argument").to_string(), )); } let backup_suffix = backup_control::determine_backup_suffix(matches); let overwrite = OverwriteMode::from_matches(matches); // Parse target directory options let no_target_dir = matches.get_flag(options::NO_TARGET_DIRECTORY); let target_dir = matches .get_one::(options::TARGET_DIRECTORY) .cloned(); if let Some(dir) = &target_dir { if !dir.is_dir() { 
return Err(CpError::NotADirectory(dir.clone())); } } // cp follows POSIX conventions for overriding options such as "-a", // "-d", "--preserve", and "--no-preserve". We can use clap's // override-all behavior to achieve this, but there's a challenge: when // clap overrides an argument, it removes all traces of it from the // match. This poses a problem because flags like "-a" expand to "-dR // --preserve=all", and we only want to override the "--preserve=all" // part. Additionally, we need to handle multiple occurrences of the // same flags. To address this, we create an overriding order from the // matches here. let mut overriding_order: Vec<(usize, &str, Vec<&String>)> = vec![]; // We iterate through each overriding option, adding each occurrence of // the option along with its value and index as a tuple, and push it to // `overriding_order`. for option in [ options::PRESERVE, options::NO_PRESERVE, options::ARCHIVE, options::PRESERVE_DEFAULT_ATTRIBUTES, options::NO_DEREFERENCE_PRESERVE_LINKS, ] { if let (Ok(Some(val)), Some(index)) = ( matches.try_get_one::(option), // even though it says in the doc that `index_of` would give us // the first index of the argument, when it comes to flag it // gives us the last index where the flag appeared (probably // because it overrides itself). Since it is a flag and it would // have same value across the occurrences we just need the last // index. matches.index_of(option), ) { if *val { overriding_order.push((index, option, vec![])); } } else if let (Some(occurrences), Some(mut indices)) = ( matches.get_occurrences::(option), matches.indices_of(option), ) { occurrences.for_each(|val| { if let Some(index) = indices.next() { let val = val.collect::>(); // As mentioned in the documentation of the indices_of // function, it provides the indices of the individual // values. 
Therefore, to get the index of the first // value of the next occurrence in the next iteration, // we need to advance the indices iterator by the length // of the current occurrence's values. for _ in 1..val.len() { indices.next(); } overriding_order.push((index, option, val)); } }); } } overriding_order.sort_by(|a, b| a.0.cmp(&b.0)); let mut attributes = Attributes::NONE; // Iterate through the `overriding_order` and adjust the attributes accordingly. for (_, option, val) in overriding_order { match option { options::ARCHIVE => { attributes = Attributes::ALL; } options::PRESERVE_DEFAULT_ATTRIBUTES => { attributes = attributes.union(&Attributes::DEFAULT); } options::NO_DEREFERENCE_PRESERVE_LINKS => { attributes = attributes.union(&Attributes::LINKS); } options::PRESERVE => { attributes = attributes.union(&Attributes::parse_iter(val.into_iter())?); } options::NO_PRESERVE => { if !val.is_empty() { attributes = attributes.diff(&Attributes::parse_iter(val.into_iter())?); } } _ => (), } } #[cfg(not(feature = "selinux"))] if let Preserve::Yes { required } = attributes.context { let selinux_disabled_error = CpError::Error(translate!("cp-error-selinux-not-enabled")); if required { return Err(selinux_disabled_error); } show_error_if_needed(&selinux_disabled_error); } // Extract the SELinux related flags and options let set_selinux_context = matches.get_flag(options::SELINUX); let context = if matches.contains_id(options::CONTEXT) { matches.get_one::(options::CONTEXT).cloned() } else { None }; let options = Self { attributes_only: matches.get_flag(options::ATTRIBUTES_ONLY), copy_contents: matches.get_flag(options::COPY_CONTENTS), cli_dereference: matches.get_flag(options::CLI_SYMBOLIC_LINKS), copy_mode: CopyMode::from_matches(matches), // No dereference is set with -p, -d and --archive dereference: !(matches.get_flag(options::NO_DEREFERENCE) || matches.get_flag(options::NO_DEREFERENCE_PRESERVE_LINKS) || matches.get_flag(options::ARCHIVE) // cp normally follows the link only 
when not copying recursively or when // --link (-l) is used || (recursive && CopyMode::from_matches(matches)!= CopyMode::Link )) || matches.get_flag(options::DEREFERENCE), one_file_system: matches.get_flag(options::ONE_FILE_SYSTEM), parents: matches.get_flag(options::PARENTS), update: update_mode, debug: matches.get_flag(options::DEBUG), verbose: matches.get_flag(options::VERBOSE) || matches.get_flag(options::DEBUG), strip_trailing_slashes: matches.get_flag(options::STRIP_TRAILING_SLASHES), reflink_mode: { if let Some(reflink) = matches.get_one::(options::REFLINK) { match reflink.as_str() { "always" => ReflinkMode::Always, "auto" => ReflinkMode::Auto, "never" => ReflinkMode::Never, value => { return Err(CpError::InvalidArgument( translate!("cp-error-invalid-argument", "arg" => value.quote(), "option" => "reflink"), )); } } } else { ReflinkMode::default() } }, sparse_mode: { if let Some(val) = matches.get_one::(options::SPARSE) { match val.as_str() { "always" => SparseMode::Always, "auto" => SparseMode::Auto, "never" => SparseMode::Never, _ => { return Err(CpError::InvalidArgument( translate!("cp-error-invalid-argument", "arg" => val, "option" => "sparse"), )); } } } else { SparseMode::Auto } }, backup: backup_mode, backup_suffix, overwrite, no_target_dir, attributes, recursive, target_dir, progress_bar: matches.get_flag(options::PROGRESS_BAR), set_selinux_context: set_selinux_context || context.is_some(), context, }; Ok(options) } fn dereference(&self, in_command_line: bool) -> bool { self.dereference || (in_command_line && self.cli_dereference) } fn preserve_hard_links(&self) -> bool { match self.attributes.links { Preserve::No { .. } => false, Preserve::Yes { .. } => true, } } #[cfg(unix)] fn preserve_mode(&self) -> (bool, bool) { match self.attributes.mode { Preserve::No { explicit } => { if explicit { (false, true) } else { (false, false) } } Preserve::Yes { .. } => (true, false), } } /// Whether to force overwriting the destination file. 
fn force(&self) -> bool { matches!(self.overwrite, OverwriteMode::Clobber(ClobberMode::Force)) } } impl TargetType { /// Return [`TargetType`] required for `target`. /// /// Treat target as a dir if we have multiple sources or the target /// exists and already is a directory fn determine(sources: &[PathBuf], target: &Path) -> Self { if sources.len() > 1 || target.is_dir() { Self::Directory } else { Self::File } } } /// Returns tuple of (Source paths, Target) fn parse_path_args( mut paths: Vec, options: &Options, ) -> CopyResult<(Vec, PathBuf)> { if paths.is_empty() { // No files specified return Err(translate!("cp-error-missing-file-operand").into()); } else if paths.len() == 1 && options.target_dir.is_none() { // Only one file specified return Err(translate!("cp-error-missing-destination-operand", "source" => paths[0].quote()) .into()); } // Return an error if the user requested to copy more than one // file source to a file target if options.no_target_dir && options.target_dir.is_none() && paths.len() > 2 { return Err(translate!("cp-error-extra-operand", "operand" => paths[2].quote()) .into()); } let target = match options.target_dir { Some(ref target) => { // All path args are sources, and the target dir was // specified separately target.clone() } None => { // If there was no explicit target-dir, then use the last // path_arg paths.pop().unwrap() } }; if options.strip_trailing_slashes { // clippy::assigning_clones added with Rust 1.78 // Rust version = 1.76 on OpenBSD stable/7.5 #[cfg_attr(not(target_os = "openbsd"), allow(clippy::assigning_clones))] for source in &mut paths { *source = source.components().as_path().to_owned(); } } Ok((paths, target)) } /// When handling errors, we don't always want to show them to the user. This function handles that. 
fn show_error_if_needed(error: &CpError) {
    match error {
        // When using --no-clobber, we don't want to show
        // an error message
        CpError::NotAllFilesCopied => {
            // Need to return an error code
        }
        CpError::Skipped(_) => {
            // touch a b && echo "n"|cp -i a b && echo $?
            // should return an error from GNU 9.2
        }
        _ => {
            show_error!("{error}");
        }
    }
}

/// Copy all `sources` to `target`.
///
/// Returns an `Err(Error::NotAllFilesCopied)` if at least one non-fatal error
/// was encountered.
///
/// Behavior is determined by the `options` parameter, see [`Options`] for details.
pub fn copy(sources: &[PathBuf], target: &Path, options: &Options) -> CopyResult<()> {
    let target_type = TargetType::determine(sources, target);
    verify_target_type(target, &target_type)?;

    let mut non_fatal_errors = false;
    let mut seen_sources = HashSet::with_capacity(sources.len());
    let mut symlinked_files = HashSet::new();

    // to remember the copied files for further usage.
    // the FileInformation implemented the Hash trait by using
    // 1. inode number
    // 2. device number
    // the combination of a file's inode number and device number is unique throughout all the file systems.
    //
    // key is the source file's information and the value is the destination filepath.
    //
    // NOTE(review): generic arguments on the two maps below were garbled in
    // transit; restored per the comments above and the inserts further down.
    let mut copied_files: HashMap<FileInformation, PathBuf> =
        HashMap::with_capacity(sources.len());
    // remember the copied destinations for further usage.
    // we can't use copied_files as it is because the key is the source file's information.
    let mut copied_destinations: HashSet<PathBuf> = HashSet::with_capacity(sources.len());

    let progress_bar = if options.progress_bar {
        let pb = ProgressBar::new(disk_usage(sources, options.recursive)?)
            .with_style(
                ProgressStyle::with_template(
                    "{msg}: [{elapsed_precise}] {wide_bar} {bytes:>7}/{total_bytes:7}",
                )
                .unwrap(),
            )
            .with_message(uucore::util_name());
        pb.tick();
        Some(pb)
    } else {
        None
    };

    for source in sources {
        let normalized_source = normalize_path(source);
        if options.backup == BackupMode::None && seen_sources.contains(&normalized_source) {
            let file_type = if source.symlink_metadata()?.file_type().is_dir() {
                "directory"
            } else {
                "file"
            };
            let msg = translate!("cp-warning-source-specified-more-than-once", "file_type" => file_type, "source" => source.quote());
            show_warning!("{msg}");
        } else {
            let dest = construct_dest_path(source, target, target_type, options)
                .unwrap_or_else(|_| target.to_path_buf());

            if fs::metadata(&dest).is_ok() && !fs::symlink_metadata(&dest)?.file_type().is_symlink()
                // if both `source` and `dest` are symlinks, it should be considered as an overwrite.
                || fs::metadata(source).is_ok() && fs::symlink_metadata(source)?.file_type().is_symlink()
                || matches!(options.copy_mode, CopyMode::SymLink)
            {
                // There is already a file and it isn't a symlink (managed in a different place)
                if copied_destinations.contains(&dest) && options.backup != BackupMode::Numbered {
                    // If the target file was already created in this cp call, do not overwrite
                    return Err(CpError::Error(
                        translate!("cp-error-will-not-overwrite-just-created", "dest" => dest.quote(), "source" => source.quote()),
                    ));
                }
            }

            if let Err(error) = copy_source(
                progress_bar.as_ref(),
                source,
                target,
                target_type,
                options,
                &mut symlinked_files,
                &copied_destinations,
                &mut copied_files,
            ) {
                show_error_if_needed(&error);
                if !matches!(error, CpError::Skipped(false)) {
                    non_fatal_errors = true;
                }
            } else {
                copied_destinations.insert(dest.clone());
            }
        }
        seen_sources.insert(normalized_source);
    }

    if let Some(pb) = progress_bar {
        pb.finish();
    }

    if non_fatal_errors {
        Err(CpError::NotAllFilesCopied)
    } else {
        Ok(())
    }
}

fn construct_dest_path(
    source_path: &Path,
    target: &Path,
    target_type: TargetType,
    options:
&Options, ) -> CopyResult { if options.no_target_dir && target.is_dir() { return Err( translate!("cp-error-cannot-overwrite-directory-with-non-directory", "dir" => target.quote()) .into(), ); } if options.parents && !target.is_dir() { return Err(translate!("cp-error-with-parents-dest-must-be-dir").into()); } Ok(match target_type { TargetType::Directory => { let root = if options.parents { if source_path.has_root() && cfg!(unix) { Path::new("/") } else { Path::new("") } } else { source_path.parent().unwrap_or(source_path) }; localize_to_target(root, source_path, target)? } TargetType::File => target.to_path_buf(), }) } #[allow(clippy::too_many_arguments)] fn copy_source( progress_bar: Option<&ProgressBar>, source: &Path, target: &Path, target_type: TargetType, options: &Options, symlinked_files: &mut HashSet, copied_destinations: &HashSet, copied_files: &mut HashMap, ) -> CopyResult<()> { let source_path = Path::new(&source); if source_path.is_dir() && (options.dereference || !source_path.is_symlink()) { // Copy as directory copy_directory( progress_bar, source, target, options, symlinked_files, copied_destinations, copied_files, true, ) } else { // Copy as file let dest = construct_dest_path(source_path, target, target_type, options)?; let res = copy_file( progress_bar, source_path, dest.as_path(), options, symlinked_files, copied_destinations, copied_files, true, ); if options.parents { for (x, y) in aligned_ancestors(source, dest.as_path()) { if let Ok(src) = canonicalize(x, MissingHandling::Normal, ResolveMode::Physical) { copy_attributes(&src, y, &options.attributes)?; } } } res } } /// If `path` does not have `S_IWUSR` set, returns a tuple of the file's /// mode in octal (index 0) and human-readable (index 1) formats. 
/// /// If the destination of a copy operation is a file that is not writeable to /// the owner (bit `S_IWUSR`), extra information needs to be added to the /// interactive mode prompt: the mode (permissions) of the file in octal and /// human-readable format. // TODO // The destination metadata can be read multiple times in the course of a single execution of `cp`. // This fix adds yet another metadata read. // Should this metadata be read once and then reused throughout the execution? // https://github.com/uutils/coreutils/issues/6658 fn file_mode_for_interactive_overwrite( #[cfg_attr(not(unix), allow(unused_variables))] path: &Path, ) -> Option<(String, String)> { // Retain outer braces to ensure only one branch is included { #[cfg(unix)] { use libc::{S_IWUSR, mode_t}; use std::os::unix::prelude::MetadataExt; match path.metadata() { Ok(me) => { // Cast is necessary on some platforms let mode: mode_t = me.mode() as mode_t; // It looks like this extra information is added to the prompt iff the file's user write bit is 0 // write permission, owner if uucore::has!(mode, S_IWUSR) { None } else { // Discard leading digits let mode_without_leading_digits = mode & 0o7777; Some(( format!("{mode_without_leading_digits:04o}"), uucore::fs::display_permissions_unix(mode, false), )) } } // TODO: How should failure to read the metadata be handled? Ignoring for now. 
Err(_) => None, } } #[cfg(not(unix))] { None } } } impl OverwriteMode { fn verify(&self, path: &Path, debug: bool) -> CopyResult<()> { match *self { Self::NoClobber => { if debug { println!("{}", translate!("cp-debug-skipped", "path" => path.quote())); } Err(CpError::Skipped(false)) } Self::Interactive(_) => { let prompt_yes_result = if let Some((octal, human_readable)) = file_mode_for_interactive_overwrite(path) { let prompt_msg = translate!("cp-prompt-overwrite-with-mode", "path" => path.quote()); prompt_yes!("{prompt_msg} {octal} ({human_readable})?") } else { let prompt_msg = translate!("cp-prompt-overwrite", "path" => path.quote()); prompt_yes!("{prompt_msg}") }; if prompt_yes_result { Ok(()) } else { Err(CpError::Skipped(true)) } } Self::Clobber(_) => Ok(()), } } } /// Handles errors for attributes preservation. If the attribute is not required, and /// errored, tries to show error (see `show_error_if_needed` for additional behavior details). /// If it's required, then the error is thrown. fn handle_preserve CopyResult<()>>(p: &Preserve, f: F) -> CopyResult<()> { match p { Preserve::No { .. } => {} Preserve::Yes { required } => { let result = f(); if *required { result?; } else if let Err(error) = result { show_error_if_needed(&error); } } } Ok(()) } /// Copies extended attributes (xattrs) from `source` to `dest`, ensuring that `dest` is temporarily /// user-writable if needed and restoring its original permissions afterward. This avoids "Operation /// not permitted" errors on read-only files. Returns an error if permission or metadata operations fail, /// or if xattr copying fails. #[cfg(all(unix, not(target_os = "android")))] fn copy_extended_attrs(source: &Path, dest: &Path) -> CopyResult<()> { let metadata = fs::symlink_metadata(dest)?; // Check if the destination file is currently read-only for the user. let mut perms = metadata.permissions(); let was_readonly = perms.readonly(); // Temporarily grant user write if it was read-only. 
if was_readonly { #[allow(clippy::permissions_set_readonly_false)] perms.set_readonly(false); fs::set_permissions(dest, perms)?; } // Perform the xattr copy and capture any potential error, // so we can restore permissions before returning. let copy_xattrs_result = copy_xattrs(source, dest); // Restore read-only if we changed it. if was_readonly { let mut revert_perms = fs::symlink_metadata(dest)?.permissions(); revert_perms.set_readonly(true); fs::set_permissions(dest, revert_perms)?; } // If copying xattrs failed, propagate that error now. copy_xattrs_result?; Ok(()) } /// Copy the specified attributes from one path to another. pub(crate) fn copy_attributes( source: &Path, dest: &Path, attributes: &Attributes, ) -> CopyResult<()> { let context = &*format!("{} -> {}", source.quote(), dest.quote()); let source_metadata = fs::symlink_metadata(source).map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; // Ownership must be changed first to avoid interfering with mode change. #[cfg(unix)] handle_preserve(&attributes.ownership, || -> CopyResult<()> { use std::os::unix::prelude::MetadataExt; use uucore::perms::Verbosity; use uucore::perms::VerbosityLevel; use uucore::perms::wrap_chown; let dest_uid = source_metadata.uid(); let dest_gid = source_metadata.gid(); let meta = &dest .symlink_metadata() .map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; let try_chown = { |uid| { wrap_chown( dest, meta, uid, Some(dest_gid), false, Verbosity { groups_only: false, level: VerbosityLevel::Silent, }, ) } }; // gnu compatibility: cp doesn't report an error if it fails to set the ownership, // and will fall back to changing only the gid if possible. if try_chown(Some(dest_uid)).is_err() { let _ = try_chown(None); } Ok(()) })?; handle_preserve(&attributes.mode, || -> CopyResult<()> { // The `chmod()` system call that underlies the // `fs::set_permissions()` call is unable to change the // permissions of a symbolic link. 
In that case, we just // do nothing, since every symbolic link has the same // permissions. if !dest.is_symlink() { fs::set_permissions(dest, source_metadata.permissions()) .map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; // FIXME: Implement this for windows as well #[cfg(feature = "feat_acl")] exacl::getfacl(source, None) .and_then(|acl| exacl::setfacl(&[dest], &acl, None)) .map_err(|err| CpError::Error(err.to_string()))?; } Ok(()) })?; handle_preserve(&attributes.timestamps, || -> CopyResult<()> { let atime = FileTime::from_last_access_time(&source_metadata); let mtime = FileTime::from_last_modification_time(&source_metadata); if dest.is_symlink() { filetime::set_symlink_file_times(dest, atime, mtime)?; } else { filetime::set_file_times(dest, atime, mtime)?; } Ok(()) })?; #[cfg(feature = "selinux")] handle_preserve(&attributes.context, || -> CopyResult<()> { // Get the source context and apply it to the destination if let Ok(context) = selinux::SecurityContext::of_path(source, false, false) { if let Some(context) = context { if let Err(e) = context.set_for_path(dest, false, false) { return Err(CpError::Error( translate!("cp-error-selinux-set-context", "path" => dest.display(), "error" => e), )); } } } else { return Err(CpError::Error( translate!("cp-error-selinux-get-context", "path" => source.display()), )); } Ok(()) })?; handle_preserve(&attributes.xattr, || -> CopyResult<()> { #[cfg(all(unix, not(target_os = "android")))] { copy_extended_attrs(source, dest)?; } #[cfg(not(all(unix, not(target_os = "android"))))] { // The documentation for GNU cp states: // // > Try to preserve SELinux security context and // > extended attributes (xattr), but ignore any failure // > to do that and print no corresponding diagnostic. // // so we simply do nothing here. // // TODO Silently ignore failures in the `#[cfg(unix)]` // block instead of terminating immediately on errors. 
} Ok(()) })?; Ok(()) } fn symlink_file( source: &Path, dest: &Path, symlinked_files: &mut HashSet, ) -> CopyResult<()> { #[cfg(not(windows))] { std::os::unix::fs::symlink(source, dest).map_err(|e| { CpError::IoErrContext( e, translate!("cp-error-cannot-create-symlink", "dest" => get_filename(dest).unwrap_or("?").quote(), "source" => get_filename(source).unwrap_or("?").quote()), ) })?; } #[cfg(windows)] { std::os::windows::fs::symlink_file(source, dest).map_err(|e| { CpError::IoErrContext( e, translate!("cp-error-cannot-create-symlink", "dest" => get_filename(dest).unwrap_or("?").quote(), "source" => get_filename(source).unwrap_or("?").quote()), ) })?; } if let Ok(file_info) = FileInformation::from_path(dest, false) { symlinked_files.insert(file_info); } Ok(()) } fn context_for(src: &Path, dest: &Path) -> String { format!("{} -> {}", src.quote(), dest.quote()) } /// Implements a simple backup copy for the destination file . /// if `is_dest_symlink` flag is set to true dest will be renamed to `backup_path` /// TODO: for the backup, should this function be replaced by `copy_file(...)`? fn backup_dest(dest: &Path, backup_path: &Path, is_dest_symlink: bool) -> CopyResult { if is_dest_symlink { fs::rename(dest, backup_path)?; } else { fs::copy(dest, backup_path)?; } Ok(backup_path.into()) } /// Decide whether source and destination files are the same and /// copying is forbidden. /// /// Copying to the same file is only allowed if both `--backup` and /// `--force` are specified and the file is a regular file. fn is_forbidden_to_copy_to_same_file( source: &Path, dest: &Path, options: &Options, source_in_command_line: bool, ) -> bool { // TODO To match the behavior of GNU cp, we also need to check // that the file is a regular file. 
let source_is_symlink = source.is_symlink(); let dest_is_symlink = dest.is_symlink(); // only disable dereference if both source and dest is symlink and dereference flag is disabled let dereference_to_compare = options.dereference(source_in_command_line) || (!source_is_symlink || !dest_is_symlink); if !paths_refer_to_same_file(source, dest, dereference_to_compare) { return false; } if options.backup != BackupMode::None { if options.force() && !source_is_symlink { return false; } if source_is_symlink && !options.dereference { return false; } if dest_is_symlink { return false; } if !dest_is_symlink && !source_is_symlink && dest != source { return false; } } if options.copy_mode == CopyMode::Link { return false; } if options.copy_mode == CopyMode::SymLink && dest_is_symlink { return false; } // If source and dest are both the same symlink but with different names, then allow the copy. // This can occur, for example, if source and dest are both hardlinks to the same symlink. if dest_is_symlink && source_is_symlink && source.file_name() != dest.file_name() && !options.dereference { return false; } true } /// Back up, remove, or leave intact the destination file, depending on the options. fn handle_existing_dest( source: &Path, dest: &Path, options: &Options, source_in_command_line: bool, copied_files: &HashMap, ) -> CopyResult<()> { // Disallow copying a file to itself, unless `--force` and // `--backup` are both specified. 
if is_forbidden_to_copy_to_same_file(source, dest, options, source_in_command_line) { return Err(translate!("cp-error-same-file", "source" => source.quote(), "dest" => dest.quote()) .into()); } if options.update == UpdateMode::None { if options.debug { println!("skipped {}", dest.quote()); } return Err(CpError::Skipped(false)); } if options.update != UpdateMode::IfOlder { options.overwrite.verify(dest, options.debug)?; } let mut is_dest_removed = false; let backup_path = backup_control::get_backup_path(options.backup, dest, &options.backup_suffix); if let Some(backup_path) = backup_path { if paths_refer_to_same_file(source, &backup_path, true) { return Err(translate!("cp-error-backing-up-destroy-source", "dest" => dest.quote(), "source" => source.quote()) .into()); } is_dest_removed = dest.is_symlink(); backup_dest(dest, &backup_path, is_dest_removed)?; } if !is_dest_removed { delete_dest_if_needed_and_allowed( source, dest, options, source_in_command_line, copied_files, )?; } Ok(()) } /// Checks if: /// * `dest` needs to be deleted before the copy operation can proceed /// * the provided options allow this deletion /// /// If so, deletes `dest`. fn delete_dest_if_needed_and_allowed( source: &Path, dest: &Path, options: &Options, source_in_command_line: bool, copied_files: &HashMap, ) -> CopyResult<()> { let delete_dest = match options.overwrite { OverwriteMode::Clobber(cl) | OverwriteMode::Interactive(cl) => { match cl { // FIXME: print that the file was removed if --verbose is enabled ClobberMode::Force => { // TODO // Using `readonly` here to check if `dest` needs to be deleted is not correct: // "On Unix-based platforms this checks if any of the owner, group or others write permission bits are set. It does not check if the current user is in the file's assigned group. It also does not check ACLs. Therefore the return value of this function cannot be relied upon to predict whether attempts to read or write the file will actually succeed." 
// This results in some copy operations failing, because this necessary deletion is being skipped. is_symlink_loop(dest) || fs::metadata(dest)?.permissions().readonly() } ClobberMode::RemoveDestination => true, ClobberMode::Standard => { // Consider the following files: // // * `src/f` - a regular file // * `src/link` - a hard link to `src/f` // * `dest/src/f` - a different regular file // // In this scenario, if we do `cp -a src/ dest/`, it is // possible that the order of traversal causes `src/link` // to get copied first (to `dest/src/link`). In that case, // in order to make sure `dest/src/link` is a hard link to // `dest/src/f` and `dest/src/f` has the contents of // `src/f`, we delete the existing file to allow the hard // linking. options.preserve_hard_links() && // only try to remove dest file only if the current source // is hardlink to a file that is already copied copied_files.contains_key( &FileInformation::from_path( source, options.dereference(source_in_command_line) ).map_err(|e| CpError::IoErrContext(e, format!("cannot stat {}", source.quote())))? ) } } } OverwriteMode::NoClobber => false, }; if delete_dest { fs::remove_file(dest)?; } Ok(()) } /// Decide whether the given path exists. fn file_or_link_exists(path: &Path) -> bool { // Using `Path.exists()` or `Path.try_exists()` is not sufficient, // because if `path` is a symbolic link and there are too many // levels of symbolic link, then those methods will return false // or an OS error. path.symlink_metadata().is_ok() } /// Zip the ancestors of a source path and destination path. /// /// # Examples /// /// ```rust,ignore /// let actual = aligned_ancestors(&Path::new("a/b/c"), &Path::new("d/a/b/c")); /// let expected = vec![ /// (Path::new("a"), Path::new("d/a")), /// (Path::new("a/b"), Path::new("d/a/b")), /// ]; /// assert_eq!(actual, expected); /// ``` fn aligned_ancestors<'a>(source: &'a Path, dest: &'a Path) -> Vec<(&'a Path, &'a Path)> { // Collect the ancestors of each. 
For example, if `source` is // "a/b/c", then the ancestors are "a/b/c", "a/b", "a/", and "". let source_ancestors: Vec<&Path> = source.ancestors().collect(); let dest_ancestors: Vec<&Path> = dest.ancestors().collect(); // For this particular application, we don't care about the null // path "" and we don't care about the full path (e.g. "a/b/c"), // so we exclude those. let n = source_ancestors.len(); let source_ancestors = &source_ancestors[1..n - 1]; // Get the matching number of elements from the ancestors of the // destination path (for example, get "d/a" and "d/a/b"). let k = source_ancestors.len(); let dest_ancestors = &dest_ancestors[1..=k]; // Now we have two slices of the same length, so we zip them. let mut result = vec![]; for (x, y) in source_ancestors .iter() .rev() .zip(dest_ancestors.iter().rev()) { result.push((*x, *y)); } result } fn print_verbose_output( parents: bool, progress_bar: Option<&ProgressBar>, source: &Path, dest: &Path, ) { if let Some(pb) = progress_bar { // Suspend (hide) the progress bar so the println won't overlap with the progress bar. pb.suspend(|| { print_paths(parents, source, dest); }); } else { print_paths(parents, source, dest); } } fn print_paths(parents: bool, source: &Path, dest: &Path) { if parents { // For example, if copying file `a/b/c` and its parents // to directory `d/`, then print // // a -> d/a // a/b -> d/a/b // for (x, y) in aligned_ancestors(source, dest) { println!( "{}", translate!("cp-verbose-created-directory", "source" => x.display(), "dest" => y.display()) ); } } println!("{}", context_for(source, dest)); } /// Handles the copy mode for a file copy operation. /// /// This function determines how to copy a file based on the provided options. /// It supports different copy modes, including hard linking, copying, symbolic linking, updating, and attribute-only copying. /// It also handles file backups, overwriting, and dereferencing based on the provided options. 
/// /// # Returns /// /// * `Ok(())` - The file was copied successfully. /// * `Err(CopyError)` - An error occurred while copying the file. #[allow(clippy::too_many_arguments)] fn handle_copy_mode( source: &Path, dest: &Path, options: &Options, context: &str, source_metadata: &Metadata, symlinked_files: &mut HashSet, source_in_command_line: bool, source_is_fifo: bool, #[cfg(unix)] source_is_stream: bool, ) -> CopyResult { let source_is_symlink = source_metadata.is_symlink(); match options.copy_mode { CopyMode::Link => { if dest.exists() { let backup_path = backup_control::get_backup_path(options.backup, dest, &options.backup_suffix); if let Some(backup_path) = backup_path { backup_dest(dest, &backup_path, dest.is_symlink())?; fs::remove_file(dest)?; } if options.overwrite == OverwriteMode::Clobber(ClobberMode::Force) { fs::remove_file(dest)?; } } if options.dereference(source_in_command_line) && source.is_symlink() { let resolved = canonicalize(source, MissingHandling::Missing, ResolveMode::Physical).unwrap(); fs::hard_link(resolved, dest) } else { fs::hard_link(source, dest) } .map_err(|e| { CpError::IoErrContext( e, translate!("cp-error-cannot-create-hard-link", "dest" => get_filename(dest).unwrap_or("?").quote(), "source" => get_filename(source).unwrap_or("?").quote()) ) })?; } CopyMode::Copy => { copy_helper( source, dest, options, context, source_is_symlink, source_is_fifo, symlinked_files, #[cfg(unix)] source_is_stream, )?; } CopyMode::SymLink => { if dest.exists() && options.overwrite == OverwriteMode::Clobber(ClobberMode::Force) { fs::remove_file(dest)?; } symlink_file(source, dest, symlinked_files)?; } CopyMode::Update => { if dest.exists() { match options.update { UpdateMode::All => { copy_helper( source, dest, options, context, source_is_symlink, source_is_fifo, symlinked_files, #[cfg(unix)] source_is_stream, )?; } UpdateMode::None => { if options.debug { println!("skipped {}", dest.quote()); } return Ok(PerformedAction::Skipped); } UpdateMode::NoneFail 
=> { return Err(CpError::Error( translate!("cp-error-not-replacing", "file" => dest.quote()), )); } UpdateMode::IfOlder => { let dest_metadata = fs::symlink_metadata(dest)?; let src_time = source_metadata.modified()?; let dest_time = dest_metadata.modified()?; if src_time <= dest_time { return Ok(PerformedAction::Skipped); } options.overwrite.verify(dest, options.debug)?; copy_helper( source, dest, options, context, source_is_symlink, source_is_fifo, symlinked_files, #[cfg(unix)] source_is_stream, )?; } } } else { copy_helper( source, dest, options, context, source_is_symlink, source_is_fifo, symlinked_files, #[cfg(unix)] source_is_stream, )?; } } CopyMode::AttrOnly => { OpenOptions::new() .write(true) .truncate(false) .create(true) .open(dest) .unwrap(); } } Ok(PerformedAction::Copied) } /// Calculates the permissions for the destination file in a copy operation. /// /// If the destination file already exists, its current permissions are returned. /// If the destination file does not exist, the source file's permissions are used, /// with the `no-preserve` option and the umask taken into account on Unix platforms. /// # Returns /// /// * `Ok(Permissions)` - The calculated permissions for the destination file. /// * `Err(CopyError)` - An error occurred while getting the metadata of the destination file. /// // Allow unused variables for Windows (on options) #[allow(unused_variables)] fn calculate_dest_permissions( dest: &Path, source_metadata: &Metadata, options: &Options, context: &str, ) -> CopyResult { if dest.exists() { Ok(dest .symlink_metadata() .map_err(|e| CpError::IoErrContext(e, context.to_owned()))? 
.permissions()) } else { #[cfg(unix)] { let mut permissions = source_metadata.permissions(); let mode = handle_no_preserve_mode(options, permissions.mode()); // Apply umask use uucore::mode::get_umask; let mode = mode & !get_umask(); permissions.set_mode(mode); Ok(permissions) } #[cfg(not(unix))] { let permissions = source_metadata.permissions(); Ok(permissions) } } } /// Copy the a file from `source` to `dest`. `source` will be dereferenced if /// `options.dereference` is set to true. `dest` will be dereferenced only if /// the source was not a symlink. /// /// Behavior when copying to existing files is contingent on the /// `options.overwrite` mode. If a file is skipped, the return type /// should be `Error:Skipped` /// /// The original permissions of `source` will be copied to `dest` /// after a successful copy. #[allow(clippy::cognitive_complexity, clippy::too_many_arguments)] fn copy_file( progress_bar: Option<&ProgressBar>, source: &Path, dest: &Path, options: &Options, symlinked_files: &mut HashSet, copied_destinations: &HashSet, copied_files: &mut HashMap, source_in_command_line: bool, ) -> CopyResult<()> { let source_is_symlink = source.is_symlink(); let dest_is_symlink = dest.is_symlink(); // Fail if dest is a dangling symlink or a symlink this program created previously if dest_is_symlink { if FileInformation::from_path(dest, false) .map(|info| symlinked_files.contains(&info)) .unwrap_or(false) { return Err(CpError::Error( translate!("cp-error-will-not-copy-through-symlink", "source" => source.quote(), "dest" => dest.quote()), )); } // Fail if cp tries to copy two sources of the same name into a single symlink // Example: "cp file1 dir1/file1 tmp" where "tmp" is a directory containing a symlink "file1" pointing to a file named "foo". 
// foo will contain the contents of "file1" and "dir1/file1" will not be copied over to "tmp/file1" if copied_destinations.contains(dest) { return Err(CpError::Error( translate!("cp-error-will-not-copy-through-symlink", "source" => source.quote(), "dest" => dest.quote()), )); } let copy_contents = options.dereference(source_in_command_line) || !source_is_symlink; if copy_contents && !dest.exists() && !matches!( options.overwrite, OverwriteMode::Clobber(ClobberMode::RemoveDestination) ) && !is_symlink_loop(dest) && std::env::var_os("POSIXLY_CORRECT").is_none() { return Err(CpError::Error( translate!("cp-error-not-writing-dangling-symlink", "dest" => dest.quote()), )); } if paths_refer_to_same_file(source, dest, true) && matches!( options.overwrite, OverwriteMode::Clobber(ClobberMode::RemoveDestination) ) && options.backup == BackupMode::None { fs::remove_file(dest)?; } } if are_hardlinks_to_same_file(source, dest) && source != dest && matches!( options.overwrite, OverwriteMode::Clobber(ClobberMode::RemoveDestination) ) { fs::remove_file(dest)?; } if file_or_link_exists(dest) && (!options.attributes_only || matches!( options.overwrite, OverwriteMode::Clobber(ClobberMode::RemoveDestination) )) { if paths_refer_to_same_file(source, dest, true) && options.copy_mode == CopyMode::Link { if source_is_symlink { if !dest_is_symlink { return Ok(()); } if !options.dereference { return Ok(()); } } else if options.backup != BackupMode::None && !dest_is_symlink { if source == dest { if !options.force() { return Ok(()); } } else { return Ok(()); } } } handle_existing_dest(source, dest, options, source_in_command_line, copied_files)?; if are_hardlinks_to_same_file(source, dest) { if options.copy_mode == CopyMode::Copy { return Ok(()); } if options.copy_mode == CopyMode::Link && (!source_is_symlink || !dest_is_symlink) { return Ok(()); } } } if options.attributes_only && source_is_symlink && !matches!( options.overwrite, OverwriteMode::Clobber(ClobberMode::RemoveDestination) ) { 
return Err(translate!("cp-error-cannot-change-attribute", "dest" => dest.quote()).into()); } if options.preserve_hard_links() { // if we encounter a matching device/inode pair in the source tree // we can arrange to create a hard link between the corresponding names // in the destination tree. if let Some(new_source) = copied_files.get( &FileInformation::from_path(source, options.dereference(source_in_command_line)) .map_err(|e| CpError::IoErrContext(e, format!("cannot stat {}", source.quote())))?, ) { fs::hard_link(new_source, dest)?; if options.verbose { print_verbose_output(options.parents, progress_bar, source, dest); } return Ok(()); } } // Calculate the context upfront before canonicalizing the path let context = context_for(source, dest); let context = context.as_str(); let source_metadata = { let result = if options.dereference(source_in_command_line) { fs::metadata(source) } else { fs::symlink_metadata(source) }; // this is just for gnu tests compatibility result.map_err(|err| { if err.to_string().contains("No such file or directory") { return translate!("cp-error-cannot-stat", "source" => source.quote()); } err.to_string() })? }; let dest_permissions = calculate_dest_permissions(dest, &source_metadata, options, context)?; #[cfg(unix)] let source_is_fifo = source_metadata.file_type().is_fifo(); #[cfg(not(unix))] let source_is_fifo = false; let source_is_stream = is_stream(&source_metadata); let performed_action = handle_copy_mode( source, dest, options, context, &source_metadata, symlinked_files, source_in_command_line, source_is_fifo, #[cfg(unix)] source_is_stream, )?; if options.verbose && performed_action != PerformedAction::Skipped { print_verbose_output(options.parents, progress_bar, source, dest); } // TODO: implement something similar to gnu's lchown if !dest_is_symlink { // Here, to match GNU semantics, we quietly ignore an error // if a user does not have the correct ownership to modify // the permissions of a file. 
// // FWIW, the OS will throw an error later, on the write op, if // the user does not have permission to write to the file. fs::set_permissions(dest, dest_permissions).ok(); } if options.dereference(source_in_command_line) { if let Ok(src) = canonicalize(source, MissingHandling::Normal, ResolveMode::Physical) { if src.exists() { copy_attributes(&src, dest, &options.attributes)?; } } } else if source_is_stream && !source.exists() { // Some stream files may not exist after we have copied it, // like anonymous pipes. Thus, we can't really copy its // attributes. However, this is already handled in the stream // copy function (see `copy_stream` under platform/linux.rs). } else { copy_attributes(source, dest, &options.attributes)?; } #[cfg(feature = "selinux")] if options.set_selinux_context && uucore::selinux::is_selinux_enabled() { // Set the given selinux permissions on the copied file. if let Err(e) = uucore::selinux::set_selinux_security_context(dest, options.context.as_ref()) { return Err(CpError::Error( translate!("cp-error-selinux-error", "error" => e), )); } } copied_files.insert( FileInformation::from_path(source, options.dereference(source_in_command_line))?, dest.to_path_buf(), ); if let Some(progress_bar) = progress_bar { progress_bar.inc(fs::metadata(source)?.len()); } Ok(()) } fn is_stream(metadata: &Metadata) -> bool { #[cfg(unix)] { let file_type = metadata.file_type(); file_type.is_fifo() || file_type.is_char_device() || file_type.is_block_device() } #[cfg(not(unix))] { let _ = metadata; false } } #[cfg(unix)] fn handle_no_preserve_mode(options: &Options, org_mode: u32) -> u32 { let (is_preserve_mode, is_explicit_no_preserve_mode) = options.preserve_mode(); if !is_preserve_mode { use libc::{ S_IRGRP, S_IROTH, S_IRUSR, S_IRWXG, S_IRWXO, S_IRWXU, S_IWGRP, S_IWOTH, S_IWUSR, }; #[cfg(not(any( target_os = "android", target_os = "macos", target_os = "freebsd", target_os = "redox", )))] { const MODE_RW_UGO: u32 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | 
S_IROTH | S_IWOTH; const S_IRWXUGO: u32 = S_IRWXU | S_IRWXG | S_IRWXO; return if is_explicit_no_preserve_mode { MODE_RW_UGO } else { org_mode & S_IRWXUGO }; } #[cfg(any( target_os = "android", target_os = "macos", target_os = "freebsd", target_os = "redox", ))] { const MODE_RW_UGO: u32 = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH) as u32; const S_IRWXUGO: u32 = (S_IRWXU | S_IRWXG | S_IRWXO) as u32; return if is_explicit_no_preserve_mode { MODE_RW_UGO } else { org_mode & S_IRWXUGO }; } } org_mode } /// Copy the file from `source` to `dest` either using the normal `fs::copy` or a /// copy-on-write scheme if --reflink is specified and the filesystem supports it. #[allow(clippy::too_many_arguments)] fn copy_helper( source: &Path, dest: &Path, options: &Options, context: &str, source_is_symlink: bool, source_is_fifo: bool, symlinked_files: &mut HashSet, #[cfg(unix)] source_is_stream: bool, ) -> CopyResult<()> { if options.parents { let parent = dest.parent().unwrap_or(dest); fs::create_dir_all(parent)?; } if path_ends_with_terminator(dest) && !dest.is_dir() { return Err(CpError::NotADirectory(dest.to_path_buf())); } if source_is_fifo && options.recursive && !options.copy_contents { #[cfg(unix)] copy_fifo(dest, options.overwrite, options.debug)?; } else if source_is_symlink { copy_link(source, dest, symlinked_files, options)?; } else { let copy_debug = copy_on_write( source, dest, options.reflink_mode, options.sparse_mode, context, #[cfg(unix)] source_is_stream, )?; if !options.attributes_only && options.debug { show_debug(©_debug); } } Ok(()) } // "Copies" a FIFO by creating a new one. This workaround is because Rust's // built-in fs::copy does not handle FIFOs (see rust-lang/rust/issues/79390). 
#[cfg(unix)] fn copy_fifo(dest: &Path, overwrite: OverwriteMode, debug: bool) -> CopyResult<()> { if dest.exists() { overwrite.verify(dest, debug)?; fs::remove_file(dest)?; } make_fifo(dest) .map_err(|_| translate!("cp-error-cannot-create-fifo", "path" => dest.quote()).into()) } fn copy_link( source: &Path, dest: &Path, symlinked_files: &mut HashSet, options: &Options, ) -> CopyResult<()> { // Here, we will copy the symlink itself (actually, just recreate it) let link = fs::read_link(source)?; // we always need to remove the file to be able to create a symlink, // even if it is writeable. if dest.is_symlink() || dest.is_file() { fs::remove_file(dest)?; } symlink_file(&link, dest, symlinked_files)?; copy_attributes(source, dest, &options.attributes) } /// Generate an error message if `target` is not the correct `target_type` pub fn verify_target_type(target: &Path, target_type: &TargetType) -> CopyResult<()> { match (target_type, target.is_dir()) { (&TargetType::Directory, false) => Err(translate!("cp-error-target-not-directory", "target" => target.quote()) .into()), (&TargetType::File, true) => Err(translate!("cp-error-cannot-overwrite-directory-with-non-directory", "dir" => target.quote()) .into()), _ => Ok(()), } } /// Remove the `root` prefix from `source` and prefix it with `target` /// to create a file that is local to `target` /// # Examples /// /// ```ignore /// assert!(uu_cp::localize_to_target( /// &Path::new("a/source/"), /// &Path::new("a/source/c.txt"), /// &Path::new("target/"), /// ).unwrap() == Path::new("target/c.txt")) /// ``` pub fn localize_to_target(root: &Path, source: &Path, target: &Path) -> CopyResult { let local_to_root = source.strip_prefix(root)?; Ok(target.join(local_to_root)) } /// Get the total size of a slice of files and directories. /// /// This function is much like the `du` utility, by recursively getting the sizes of files in directories. /// Files are not deduplicated when appearing in multiple sources. 
If `recursive` is set to `false`, the /// directories in `paths` will be ignored. fn disk_usage(paths: &[PathBuf], recursive: bool) -> io::Result { let mut total = 0; for p in paths { let md = fs::metadata(p)?; if md.file_type().is_dir() { if recursive { total += disk_usage_directory(p)?; } } else { total += md.len(); } } Ok(total) } /// A helper for `disk_usage` specialized for directories. fn disk_usage_directory(p: &Path) -> io::Result { let mut total = 0; for entry in fs::read_dir(p)? { let entry = entry?; if entry.file_type()?.is_dir() { total += disk_usage_directory(&entry.path())?; } else { total += entry.metadata()?.len(); } } Ok(total) } #[cfg(test)] mod tests { use crate::{Attributes, Preserve, aligned_ancestors, localize_to_target}; use std::path::Path; #[test] fn test_cp_localize_to_target() { let root = Path::new("a/source/"); let source = Path::new("a/source/c.txt"); let target = Path::new("target/"); let actual = localize_to_target(root, source, target).unwrap(); let expected = Path::new("target/c.txt"); assert_eq!(actual, expected); } #[test] fn test_aligned_ancestors() { let actual = aligned_ancestors(Path::new("a/b/c"), Path::new("d/a/b/c")); let expected = vec![ (Path::new("a"), Path::new("d/a")), (Path::new("a/b"), Path::new("d/a/b")), ]; assert_eq!(actual, expected); } #[test] fn test_diff_attrs() { assert_eq!( Attributes::ALL.diff(&Attributes { context: Preserve::Yes { required: true }, xattr: Preserve::Yes { required: true }, ..Attributes::ALL }), Attributes { #[cfg(unix)] ownership: Preserve::No { explicit: true }, mode: Preserve::No { explicit: true }, timestamps: Preserve::No { explicit: true }, context: Preserve::No { explicit: true }, links: Preserve::No { explicit: true }, xattr: Preserve::No { explicit: true } } ); assert_eq!( Attributes { context: Preserve::Yes { required: true }, xattr: Preserve::Yes { required: true }, ..Attributes::ALL } .diff(&Attributes::NONE), Attributes { context: Preserve::Yes { required: true }, xattr: 
Preserve::Yes { required: true }, ..Attributes::ALL } ); assert_eq!( Attributes::NONE.diff(&Attributes { context: Preserve::Yes { required: true }, xattr: Preserve::Yes { required: true }, ..Attributes::ALL }), Attributes { #[cfg(unix)] ownership: Preserve::No { explicit: true }, mode: Preserve::No { explicit: true }, timestamps: Preserve::No { explicit: true }, context: Preserve::No { explicit: true }, links: Preserve::No { explicit: true }, xattr: Preserve::No { explicit: true } } ); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/main.rs000066400000000000000000000000251504311601400246440ustar00rootroot00000000000000uucore::bin!(uu_cp); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/000077500000000000000000000000001504311601400252015ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/linux.rs000066400000000000000000000537451504311601400267240ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore ficlone reflink ftruncate pwrite fiemap lseek use libc::{SEEK_DATA, SEEK_HOLE}; use std::fs::{File, OpenOptions}; use std::io::Read; use std::os::unix::fs::FileExt; use std::os::unix::fs::MetadataExt; use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; use std::os::unix::io::AsRawFd; use std::path::Path; use uucore::buf_copy; use uucore::mode::get_umask; use uucore::translate; use crate::{ CopyDebug, CopyResult, CpError, OffloadReflinkDebug, ReflinkMode, SparseDebug, SparseMode, is_stream, }; /// The fallback behavior for [`clone`] on failed system call. #[derive(Clone, Copy)] enum CloneFallback { /// Raise an error. Error, /// Use [`std::fs::copy`]. 
FSCopy, /// Use [`sparse_copy`] SparseCopy, /// Use [`sparse_copy_without_hole`] SparseCopyWithoutHole, } /// Type of method used for copying files #[derive(Clone, Copy)] enum CopyMethod { /// Do a sparse copy SparseCopy, /// Use [`std::fs::copy`]. FSCopy, /// Default (can either be [`CopyMethod::SparseCopy`] or [`CopyMethod::FSCopy`]) Default, /// Use [`sparse_copy_without_hole`] SparseCopyWithoutHole, } /// Use the Linux `ioctl_ficlone` API to do a copy-on-write clone. /// /// `fallback` controls what to do if the system call fails. #[cfg(any(target_os = "linux", target_os = "android"))] fn clone

(source: P, dest: P, fallback: CloneFallback) -> std::io::Result<()> where P: AsRef, { let src_file = File::open(&source)?; let dst_file = File::create(&dest)?; let src_fd = src_file.as_raw_fd(); let dst_fd = dst_file.as_raw_fd(); // Using .try_into().unwrap() is required as glibc, musl & android all have different type for ioctl() #[allow(clippy::unnecessary_fallible_conversions)] let result = unsafe { libc::ioctl( dst_fd, linux_raw_sys::ioctl::FICLONE.try_into().unwrap(), src_fd, ) }; if result == 0 { return Ok(()); } match fallback { CloneFallback::Error => Err(std::io::Error::last_os_error()), CloneFallback::FSCopy => std::fs::copy(source, dest).map(|_| ()), CloneFallback::SparseCopy => sparse_copy(source, dest), CloneFallback::SparseCopyWithoutHole => sparse_copy_without_hole(source, dest), } } /// Checks whether a file contains any non null bytes i.e. any byte != 0x0 /// This function returns a tuple of (bool, u64, u64) signifying a tuple of (whether a file has /// data, its size, no of blocks it has allocated in disk) #[cfg(any(target_os = "linux", target_os = "android"))] fn check_for_data(source: &Path) -> Result<(bool, u64, u64), std::io::Error> { let mut src_file = File::open(source)?; let metadata = src_file.metadata()?; let size = metadata.size(); let blocks = metadata.blocks(); // checks edge case of virtual files in /proc which have a size of zero but contains data if size == 0 { let mut buf: Vec = vec![0; metadata.blksize() as usize]; // Directly use metadata.blksize() let _ = src_file.read(&mut buf)?; return Ok((buf.iter().any(|&x| x != 0x0), size, 0)); } let src_fd = src_file.as_raw_fd(); let result = unsafe { libc::lseek(src_fd, 0, SEEK_DATA) }; match result { -1 => Ok((false, size, blocks)), // No data found or end of file _ if result >= 0 => Ok((true, size, blocks)), // Data found _ => Err(std::io::Error::last_os_error()), } } #[cfg(any(target_os = "linux", target_os = "android"))] /// Checks whether a file is sparse i.e. 
it contains holes, uses the crude heuristic blocks < size / 512 /// Reference:`` fn check_sparse_detection(source: &Path) -> Result { let src_file = File::open(source)?; let metadata = src_file.metadata()?; let size = metadata.size(); let blocks = metadata.blocks(); if blocks < size / 512 { return Ok(true); } Ok(false) } /// Optimized [`sparse_copy`] doesn't create holes for large sequences of zeros in non `sparse_files` /// Used when `--sparse=auto` #[cfg(any(target_os = "linux", target_os = "android"))] fn sparse_copy_without_hole

(source: P, dest: P) -> std::io::Result<()> where P: AsRef, { let src_file = File::open(source)?; let dst_file = File::create(dest)?; let dst_fd = dst_file.as_raw_fd(); let size = src_file.metadata()?.size(); if unsafe { libc::ftruncate(dst_fd, size.try_into().unwrap()) } < 0 { return Err(std::io::Error::last_os_error()); } let src_fd = src_file.as_raw_fd(); let mut current_offset: isize = 0; // Maximize the data read at once to 16 MiB to avoid memory hogging with large files // 16 MiB chunks should saturate an SSD let step = std::cmp::min(size, 16 * 1024 * 1024) as usize; let mut buf: Vec = vec![0x0; step]; loop { let result = unsafe { libc::lseek(src_fd, current_offset.try_into().unwrap(), SEEK_DATA) } .try_into() .unwrap(); current_offset = result; let hole: isize = unsafe { libc::lseek(src_fd, current_offset.try_into().unwrap(), SEEK_HOLE) } .try_into() .unwrap(); if result == -1 || hole == -1 { break; } if result <= -2 || hole <= -2 { return Err(std::io::Error::last_os_error()); } let len: isize = hole - current_offset; // Read and write data in chunks of `step` while reusing the same buffer for i in (0..len).step_by(step) { // Ensure we don't read past the end of the file or the start of the next hole let read_len = std::cmp::min((len - i) as usize, step); let buf = &mut buf[..read_len]; src_file.read_exact_at(buf, (current_offset + i) as u64)?; dst_file.write_all_at(buf, (current_offset + i) as u64)?; } current_offset = hole; } Ok(()) } /// Perform a sparse copy from one file to another. /// Creates a holes for large sequences of zeros in `non_sparse_files`, used for `--sparse=always` #[cfg(any(target_os = "linux", target_os = "android"))] fn sparse_copy

(source: P, dest: P) -> std::io::Result<()> where P: AsRef, { let mut src_file = File::open(source)?; let dst_file = File::create(dest)?; let dst_fd = dst_file.as_raw_fd(); let size: usize = src_file.metadata()?.size().try_into().unwrap(); if unsafe { libc::ftruncate(dst_fd, size.try_into().unwrap()) } < 0 { return Err(std::io::Error::last_os_error()); } let blksize = dst_file.metadata()?.blksize(); let mut buf: Vec = vec![0; blksize.try_into().unwrap()]; let mut current_offset: usize = 0; // TODO Perhaps we can employ the "fiemap ioctl" API to get the // file extent mappings: // https://www.kernel.org/doc/html/latest/filesystems/fiemap.html while current_offset < size { let this_read = src_file.read(&mut buf)?; let buf = &buf[..this_read]; if buf.iter().any(|&x| x != 0) { dst_file.write_all_at(buf, current_offset.try_into().unwrap())?; } current_offset += this_read; } Ok(()) } #[cfg(any(target_os = "linux", target_os = "android"))] /// Checks whether an existing destination is a fifo fn check_dest_is_fifo(dest: &Path) -> bool { // If our destination file exists and its a fifo , we do a standard copy . let file_type = std::fs::metadata(dest); match file_type { Ok(f) => f.file_type().is_fifo(), _ => false, } } /// Copy the contents of a stream from `source` to `dest`. fn copy_stream

(source: P, dest: P) -> std::io::Result where P: AsRef, { // For some reason, // // cp --preserve=ownership --copy-contents fifo fifo2 // // causes `fifo2` to be created with limited permissions (mode 622 // or maybe 600 it seems), and then after `fifo` is closed, the // permissions get updated to match those of `fifo`. This doesn't // make much sense to me but the behavior appears in // `tests/cp/file-perm-race.sh`. // // So it seems that if `--preserve=ownership` is true then what we // need to do is create the destination file with limited // permissions, copy the contents, then update the permissions. If // `--preserve=ownership` is not true, however, then we can just // match the mode of the source file. // // TODO Update the code below to respect the case where // `--preserve=ownership` is not true. let mut src_file = File::open(&source)?; let mode = 0o622 & !get_umask(); let mut dst_file = OpenOptions::new() .create(true) .write(true) .mode(mode) .open(&dest)?; let dest_is_stream = is_stream(&dst_file.metadata()?); if !dest_is_stream { // `copy_stream` doesn't clear the dest file, if dest is not a stream, we should clear it manually. dst_file.set_len(0)?; } let num_bytes_copied = buf_copy::copy_stream(&mut src_file, &mut dst_file) .map_err(|_| std::io::Error::from(std::io::ErrorKind::Other))?; Ok(num_bytes_copied) } /// Copies `source` to `dest` using copy-on-write if possible. 
pub(crate) fn copy_on_write( source: &Path, dest: &Path, reflink_mode: ReflinkMode, sparse_mode: SparseMode, context: &str, source_is_stream: bool, ) -> CopyResult { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::No, }; let result = match (reflink_mode, sparse_mode) { (ReflinkMode::Never, SparseMode::Always) => { copy_debug.sparse_detection = SparseDebug::Zeros; // Default SparseDebug val for SparseMode::Always copy_debug.reflink = OffloadReflinkDebug::No; if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Avoided; copy_stream(source, dest).map(|_| ()) } else { let mut copy_method = CopyMethod::Default; let result = handle_reflink_never_sparse_always(source, dest); if let Ok((debug, method)) = result { copy_debug = debug; copy_method = method; } match copy_method { CopyMethod::FSCopy => std::fs::copy(source, dest).map(|_| ()), _ => sparse_copy(source, dest), } } } (ReflinkMode::Never, SparseMode::Never) => { copy_debug.reflink = OffloadReflinkDebug::No; if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Avoided; copy_stream(source, dest).map(|_| ()) } else { let result = handle_reflink_never_sparse_never(source); if let Ok(debug) = result { copy_debug = debug; } std::fs::copy(source, dest).map(|_| ()) } } (ReflinkMode::Never, SparseMode::Auto) => { copy_debug.reflink = OffloadReflinkDebug::No; if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Avoided; copy_stream(source, dest).map(|_| ()) } else { let mut copy_method = CopyMethod::Default; let result = handle_reflink_never_sparse_auto(source, dest); if let Ok((debug, method)) = result { copy_debug = debug; copy_method = method; } match copy_method { CopyMethod::SparseCopyWithoutHole => sparse_copy_without_hole(source, dest), _ => std::fs::copy(source, dest).map(|_| ()), } } } (ReflinkMode::Auto, SparseMode::Always) => { copy_debug.sparse_detection = SparseDebug::Zeros; // 
Default SparseDebug val for // SparseMode::Always if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Avoided; copy_stream(source, dest).map(|_| ()) } else { let mut copy_method = CopyMethod::Default; let result = handle_reflink_auto_sparse_always(source, dest); if let Ok((debug, method)) = result { copy_debug = debug; copy_method = method; } match copy_method { CopyMethod::FSCopy => clone(source, dest, CloneFallback::FSCopy), _ => clone(source, dest, CloneFallback::SparseCopy), } } } (ReflinkMode::Auto, SparseMode::Never) => { copy_debug.reflink = OffloadReflinkDebug::No; if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Avoided; copy_stream(source, dest).map(|_| ()) } else { let result = handle_reflink_auto_sparse_never(source); if let Ok(debug) = result { copy_debug = debug; } clone(source, dest, CloneFallback::FSCopy) } } (ReflinkMode::Auto, SparseMode::Auto) => { if source_is_stream { copy_debug.offload = OffloadReflinkDebug::Unsupported; copy_stream(source, dest).map(|_| ()) } else { let mut copy_method = CopyMethod::Default; let result = handle_reflink_auto_sparse_auto(source, dest); if let Ok((debug, method)) = result { copy_debug = debug; copy_method = method; } match copy_method { CopyMethod::SparseCopyWithoutHole => { clone(source, dest, CloneFallback::SparseCopyWithoutHole) } _ => clone(source, dest, CloneFallback::FSCopy), } } } (ReflinkMode::Always, SparseMode::Auto) => { copy_debug.sparse_detection = SparseDebug::No; copy_debug.reflink = OffloadReflinkDebug::Yes; clone(source, dest, CloneFallback::Error) } (ReflinkMode::Always, _) => { return Err(translate!("cp-error-reflink-always-sparse-auto").into()); } }; result.map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; Ok(copy_debug) } /// Handles debug results when flags are "--reflink=auto" and "--sparse=always" and specifies what /// type of copy should be used fn handle_reflink_auto_sparse_always( source: &Path, dest: &Path, ) -> Result<(CopyDebug, CopyMethod), 
std::io::Error> { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::Zeros, }; let mut copy_method = CopyMethod::Default; let (data_flag, size, blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; if data_flag || size < 512 { copy_debug.offload = OffloadReflinkDebug::Avoided; } match (sparse_flag, data_flag, blocks) { (true, true, 0) => { // Handling funny files with 0 block allocation but has data // in it copy_method = CopyMethod::FSCopy; copy_debug.sparse_detection = SparseDebug::SeekHoleZeros; } (false, true, 0) => copy_method = CopyMethod::FSCopy, (true, false, 0) => copy_debug.sparse_detection = SparseDebug::SeekHole, (true, true, _) => copy_debug.sparse_detection = SparseDebug::SeekHoleZeros, (true, false, _) => copy_debug.sparse_detection = SparseDebug::SeekHole, (_, _, _) => (), } if check_dest_is_fifo(dest) { copy_method = CopyMethod::FSCopy; } Ok((copy_debug, copy_method)) } /// Handles debug results when flags are "--reflink=auto" and "--sparse=auto" and specifies what /// type of copy should be used fn handle_reflink_never_sparse_never(source: &Path) -> Result { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::No, sparse_detection: SparseDebug::No, }; let (data_flag, size, _blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; if sparse_flag { copy_debug.sparse_detection = SparseDebug::SeekHole; } if data_flag || size < 512 { copy_debug.offload = OffloadReflinkDebug::Avoided; } Ok(copy_debug) } /// Handles debug results when flags are "--reflink=auto" and "--sparse=never", files will be copied /// through cloning them with fallback switching to [`std::fs::copy`] fn handle_reflink_auto_sparse_never(source: &Path) -> Result { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::No, 
sparse_detection: SparseDebug::No, }; let (data_flag, size, _blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; if sparse_flag { copy_debug.sparse_detection = SparseDebug::SeekHole; } if data_flag || size < 512 { copy_debug.offload = OffloadReflinkDebug::Avoided; } Ok(copy_debug) } /// Handles debug results when flags are "--reflink=auto" and "--sparse=auto" and specifies what /// type of copy should be used fn handle_reflink_auto_sparse_auto( source: &Path, dest: &Path, ) -> Result<(CopyDebug, CopyMethod), std::io::Error> { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::No, }; let mut copy_method = CopyMethod::Default; let (data_flag, size, blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; if (data_flag && size != 0) || (size > 0 && size < 512) { copy_debug.offload = OffloadReflinkDebug::Yes; } if data_flag && size == 0 { // Handling /proc/ files copy_debug.offload = OffloadReflinkDebug::Unsupported; } if sparse_flag { if blocks == 0 && data_flag { // Handling other "virtual" files copy_debug.offload = OffloadReflinkDebug::Unsupported; copy_method = CopyMethod::FSCopy; // Doing a standard copy for the virtual files } else { copy_method = CopyMethod::SparseCopyWithoutHole; } // Since sparse_flag is true, sparse_detection shall be SeekHole for any non virtual // regular sparse file and the file will be sparsely copied copy_debug.sparse_detection = SparseDebug::SeekHole; } if check_dest_is_fifo(dest) { copy_method = CopyMethod::FSCopy; } Ok((copy_debug, copy_method)) } /// Handles debug results when flags are "--reflink=never" and "--sparse=auto" and specifies what /// type of copy should be used fn handle_reflink_never_sparse_auto( source: &Path, dest: &Path, ) -> Result<(CopyDebug, CopyMethod), std::io::Error> { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: 
OffloadReflinkDebug::No, sparse_detection: SparseDebug::No, }; let (data_flag, size, blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; let mut copy_method = CopyMethod::Default; if data_flag || size < 512 { copy_debug.offload = OffloadReflinkDebug::Avoided; } if sparse_flag { if blocks == 0 && data_flag { copy_method = CopyMethod::FSCopy; // Handles virtual files which have size > 0 but no // disk allocation } else { copy_method = CopyMethod::SparseCopyWithoutHole; // Handles regular sparse-files } copy_debug.sparse_detection = SparseDebug::SeekHole; } if check_dest_is_fifo(dest) { copy_method = CopyMethod::FSCopy; } Ok((copy_debug, copy_method)) } /// Handles debug results when flags are "--reflink=never" and "--sparse=always" and specifies what /// type of copy should be used fn handle_reflink_never_sparse_always( source: &Path, dest: &Path, ) -> Result<(CopyDebug, CopyMethod), std::io::Error> { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::No, sparse_detection: SparseDebug::Zeros, }; let mut copy_method = CopyMethod::SparseCopy; let (data_flag, size, blocks) = check_for_data(source)?; let sparse_flag = check_sparse_detection(source)?; if data_flag || size < 512 { copy_debug.offload = OffloadReflinkDebug::Avoided; } match (sparse_flag, data_flag, blocks) { (true, true, 0) => { // Handling funny files with 0 block allocation but has data // in it, e.g. 
files in /sys and other virtual files copy_method = CopyMethod::FSCopy; copy_debug.sparse_detection = SparseDebug::SeekHoleZeros; } (false, true, 0) => copy_method = CopyMethod::FSCopy, // Handling data containing zero sized // files in /proc (true, false, 0) => copy_debug.sparse_detection = SparseDebug::SeekHole, // Handles files // with 0 blocks allocated in disk and (true, true, _) => copy_debug.sparse_detection = SparseDebug::SeekHoleZeros, // Any // sparse_files with data in it will display SeekHoleZeros (true, false, _) => { copy_debug.offload = OffloadReflinkDebug::Unknown; copy_debug.sparse_detection = SparseDebug::SeekHole; } (_, _, _) => (), } if check_dest_is_fifo(dest) { copy_method = CopyMethod::FSCopy; } Ok((copy_debug, copy_method)) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/macos.rs000066400000000000000000000114371504311601400266570ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore reflink use std::ffi::CString; use std::fs::{self, File, OpenOptions}; use std::os::unix::ffi::OsStrExt; use std::os::unix::fs::OpenOptionsExt; use std::path::Path; use uucore::buf_copy; use uucore::translate; use uucore::mode::get_umask; use crate::{ CopyDebug, CopyResult, CpError, OffloadReflinkDebug, ReflinkMode, SparseDebug, SparseMode, is_stream, }; /// Copies `source` to `dest` using copy-on-write if possible. 
pub(crate) fn copy_on_write( source: &Path, dest: &Path, reflink_mode: ReflinkMode, sparse_mode: SparseMode, context: &str, source_is_stream: bool, ) -> CopyResult { if sparse_mode != SparseMode::Auto { return Err(translate!("cp-error-sparse-not-supported") .to_string() .into()); } let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::Unsupported, }; // Extract paths in a form suitable to be passed to a syscall. // The unwrap() is safe because they come from the command-line and so contain non nul // character. let src = CString::new(source.as_os_str().as_bytes()).unwrap(); let dst = CString::new(dest.as_os_str().as_bytes()).unwrap(); // clonefile(2) was introduced in macOS 10.12 so we cannot statically link against it // for backward compatibility. let clonefile = CString::new("clonefile").unwrap(); let raw_pfn = unsafe { libc::dlsym(libc::RTLD_NEXT, clonefile.as_ptr()) }; let mut error = 0; if !raw_pfn.is_null() { // Call clonefile(2). // Safety: Casting a C function pointer to a rust function value is one of the few // blessed uses of `transmute()`. unsafe { let pfn: extern "C" fn( src: *const libc::c_char, dst: *const libc::c_char, flags: u32, ) -> libc::c_int = std::mem::transmute(raw_pfn); error = pfn(src.as_ptr(), dst.as_ptr(), 0); if std::io::Error::last_os_error().kind() == std::io::ErrorKind::AlreadyExists // Only remove the `dest` if the `source` and `dest` are not the same && source != dest { // clonefile(2) fails if the destination exists. Remove it and try again. Do not // bother to check if removal worked because we're going to try to clone again. // first lets make sure the dest file is not read only if fs::metadata(dest).is_ok_and(|md| !md.permissions().readonly()) { // remove and copy again // TODO: rewrite this to better match linux behavior // linux first opens the source file and destination file then uses the file // descriptors to do the clone. 
let _ = fs::remove_file(dest); error = pfn(src.as_ptr(), dst.as_ptr(), 0); } } } } if raw_pfn.is_null() || error != 0 { // clonefile(2) is either not supported or it errored out (possibly because the FS does not // support COW). match reflink_mode { ReflinkMode::Always => { return Err(translate!("cp-error-failed-to-clone", "source" => source.display(), "dest" => dest.display(), "error" => error) .into()); } _ => { copy_debug.reflink = OffloadReflinkDebug::Yes; if source_is_stream { let mut src_file = File::open(source)?; let mode = 0o622 & !get_umask(); let mut dst_file = OpenOptions::new() .create(true) .write(true) .mode(mode) .open(dest)?; let dest_is_stream = is_stream(&dst_file.metadata()?); if !dest_is_stream { // `copy_stream` doesn't clear the dest file, if dest is not a stream, we should clear it manually. dst_file.set_len(0)?; } buf_copy::copy_stream(&mut src_file, &mut dst_file) .map_err(|_| std::io::Error::from(std::io::ErrorKind::Other)) .map_err(|e| CpError::IoErrContext(e, context.to_owned()))? } else { fs::copy(source, dest) .map_err(|e| CpError::IoErrContext(e, context.to_owned()))? } } }; } Ok(copy_debug) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/mod.rs000066400000000000000000000017421504311601400263320ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
#[cfg(all( unix, not(any(target_os = "macos", target_os = "linux", target_os = "android")) ))] mod other_unix; #[cfg(all( unix, not(any(target_os = "macos", target_os = "linux", target_os = "android")) ))] pub(crate) use self::other_unix::copy_on_write; #[cfg(target_os = "macos")] mod macos; #[cfg(target_os = "macos")] pub(crate) use self::macos::copy_on_write; #[cfg(any(target_os = "linux", target_os = "android"))] mod linux; #[cfg(any(target_os = "linux", target_os = "android"))] pub(crate) use self::linux::copy_on_write; #[cfg(not(any( unix, any(target_os = "macos", target_os = "linux", target_os = "android") )))] mod other; #[cfg(not(any( unix, any(target_os = "macos", target_os = "linux", target_os = "android") )))] pub(crate) use self::other::copy_on_write; coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/other.rs000066400000000000000000000023341504311601400266720ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore reflink use std::fs; use std::path::Path; use uucore::translate; use crate::{ CopyDebug, CopyResult, CpError, OffloadReflinkDebug, ReflinkMode, SparseDebug, SparseMode, }; /// Copies `source` to `dest` for systems without copy-on-write pub(crate) fn copy_on_write( source: &Path, dest: &Path, reflink_mode: ReflinkMode, sparse_mode: SparseMode, context: &str, ) -> CopyResult { if reflink_mode != ReflinkMode::Never { return Err(translate!("cp-error-reflink-not-supported") .to_string() .into()); } if sparse_mode != SparseMode::Auto { return Err(translate!("cp-error-sparse-not-supported") .to_string() .into()); } let copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unsupported, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::Unsupported, }; fs::copy(source, dest).map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; Ok(copy_debug) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cp/src/platform/other_unix.rs000066400000000000000000000041641504311601400277400ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore reflink use std::fs::{self, File, OpenOptions}; use std::os::unix::fs::OpenOptionsExt; use std::path::Path; use uucore::buf_copy; use uucore::mode::get_umask; use uucore::translate; use crate::{ CopyDebug, CopyResult, CpError, OffloadReflinkDebug, ReflinkMode, SparseDebug, SparseMode, is_stream, }; /// Copies `source` to `dest` for systems without copy-on-write pub(crate) fn copy_on_write( source: &Path, dest: &Path, reflink_mode: ReflinkMode, sparse_mode: SparseMode, context: &str, source_is_stream: bool, ) -> CopyResult { if reflink_mode != ReflinkMode::Never { return Err(translate!("cp-error-reflink-not-supported") .to_string() .into()); } if sparse_mode != SparseMode::Auto { return Err(translate!("cp-error-sparse-not-supported") .to_string() .into()); } let copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unsupported, reflink: OffloadReflinkDebug::Unsupported, sparse_detection: SparseDebug::Unsupported, }; if source_is_stream { let mut src_file = File::open(source)?; let mode = 0o622 & !get_umask(); let mut dst_file = OpenOptions::new() .create(true) .write(true) .mode(mode) .open(dest)?; let dest_is_stream = is_stream(&dst_file.metadata()?); if !dest_is_stream { // `copy_stream` doesn't clear the dest file, if dest is not a stream, we should clear it manually. 
dst_file.set_len(0)?; } buf_copy::copy_stream(&mut src_file, &mut dst_file) .map_err(|_| std::io::Error::from(std::io::ErrorKind::Other)) .map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; return Ok(copy_debug); } fs::copy(source, dest).map_err(|e| CpError::IoErrContext(e, context.to_owned()))?; Ok(copy_debug) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/000077500000000000000000000000001504311601400234625ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/Cargo.toml000066400000000000000000000014121504311601400254100ustar00rootroot00000000000000[package] name = "uu_csplit" description = "csplit ~ (uutils) Output pieces of FILE separated by PATTERN(s) to files 'xx00', 'xx01', ..., and output byte counts of each piece to standard output" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/ls" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/csplit.rs" [dependencies] clap = { workspace = true } thiserror = { workspace = true } regex = { workspace = true } uucore = { workspace = true, features = ["entries", "fs", "format"] } fluent = { workspace = true } [[bin]] name = "csplit" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/LICENSE000077700000000000000000000000001504311601400263302../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/locales/000077500000000000000000000000001504311601400251045ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/locales/en-US.ftl000066400000000000000000000036221504311601400265450ustar00rootroot00000000000000csplit-about = Split a file into sections determined by context lines csplit-usage = csplit [OPTION]... 
FILE PATTERN... csplit-after-help = Output pieces of FILE separated by PATTERN(s) to files 'xx00', 'xx01', ..., and output byte counts of each piece to standard output. # Help messages csplit-help-suffix-format = use sprintf FORMAT instead of %02d csplit-help-prefix = use PREFIX instead of 'xx' csplit-help-keep-files = do not remove output files on errors csplit-help-suppress-matched = suppress the lines matching PATTERN csplit-help-digits = use specified number of digits instead of 2 csplit-help-quiet = do not print counts of output file sizes csplit-help-elide-empty-files = remove empty output files # Error messages csplit-error-line-out-of-range = { $pattern }: line number out of range csplit-error-line-out-of-range-on-repetition = { $pattern }: line number out of range on repetition { $repetition } csplit-error-match-not-found = { $pattern }: match not found csplit-error-match-not-found-on-repetition = { $pattern }: match not found on repetition { $repetition } csplit-error-line-number-is-zero = 0: line number must be greater than zero csplit-error-line-number-smaller-than-previous = line number '{ $current }' is smaller than preceding line number, { $previous } csplit-error-invalid-pattern = { $pattern }: invalid pattern csplit-error-invalid-number = invalid number: { $number } csplit-error-suffix-format-incorrect = incorrect conversion specification in suffix csplit-error-suffix-format-too-many-percents = too many % conversion specifications in suffix csplit-error-not-regular-file = { $file } is not a regular file csplit-warning-line-number-same-as-previous = line number '{ $line_number }' is the same as preceding line number csplit-stream-not-utf8 = stream did not contain valid UTF-8 csplit-read-error = read error csplit-write-split-not-created = trying to write to a split that was not created 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/locales/fr-FR.ftl000066400000000000000000000042751504311601400265370ustar00rootroot00000000000000csplit-about = Diviser un fichier en sections déterminées par des lignes de contexte csplit-usage = csplit [OPTION]... FICHIER MOTIF... csplit-after-help = Sortir les morceaux de FICHIER séparés par MOTIF(S) dans les fichiers 'xx00', 'xx01', ..., et sortir le nombre d'octets de chaque morceau sur la sortie standard. # Messages d'aide csplit-help-suffix-format = utiliser le FORMAT sprintf au lieu de %02d csplit-help-prefix = utiliser PRÉFIXE au lieu de 'xx' csplit-help-keep-files = ne pas supprimer les fichiers de sortie en cas d'erreurs csplit-help-suppress-matched = supprimer les lignes correspondant au MOTIF csplit-help-digits = utiliser le nombre spécifié de chiffres au lieu de 2 csplit-help-quiet = ne pas afficher le nombre d'octets des fichiers de sortie csplit-help-elide-empty-files = supprimer les fichiers de sortie vides # Messages d'erreur csplit-error-line-out-of-range = { $pattern } : numéro de ligne hors limites csplit-error-line-out-of-range-on-repetition = { $pattern } : numéro de ligne hors limites à la répétition { $repetition } csplit-error-match-not-found = { $pattern } : correspondance non trouvée csplit-error-match-not-found-on-repetition = { $pattern } : correspondance non trouvée à la répétition { $repetition } csplit-error-line-number-is-zero = 0 : le numéro de ligne doit être supérieur à zéro csplit-error-line-number-smaller-than-previous = le numéro de ligne '{ $current }' est plus petit que le numéro de ligne précédent, { $previous } csplit-error-invalid-pattern = { $pattern } : motif invalide csplit-error-invalid-number = nombre invalide : { $number } csplit-error-suffix-format-incorrect = spécification de conversion incorrecte dans le suffixe csplit-error-suffix-format-too-many-percents = trop de spécifications de conversion % dans le suffixe csplit-error-not-regular-file = { 
$file } n'est pas un fichier régulier csplit-warning-line-number-same-as-previous = le numéro de ligne '{ $line_number }' est identique au numéro de ligne précédent csplit-stream-not-utf8 = le flux ne contenait pas d'UTF-8 valide csplit-read-error = erreur de lecture csplit-write-split-not-created = tentative d'écriture dans une division qui n'a pas été créée coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/000077500000000000000000000000001504311601400242515ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/csplit.rs000066400000000000000000000736341504311601400261320ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore rustdoc #![allow(rustdoc::private_intra_doc_links)] use std::cmp::Ordering; use std::io::{self, BufReader, ErrorKind}; use std::{ fs::{File, remove_file}, io::{BufRead, BufWriter, Write}, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use regex::Regex; use uucore::display::Quotable; use uucore::error::{FromIo, UResult}; use uucore::format_usage; mod csplit_error; mod patterns; mod split_name; use crate::csplit_error::CsplitError; use crate::split_name::SplitName; use uucore::translate; mod options { pub const SUFFIX_FORMAT: &str = "suffix-format"; pub const SUPPRESS_MATCHED: &str = "suppress-matched"; pub const DIGITS: &str = "digits"; pub const PREFIX: &str = "prefix"; pub const KEEP_FILES: &str = "keep-files"; pub const QUIET: &str = "quiet"; pub const ELIDE_EMPTY_FILES: &str = "elide-empty-files"; pub const FILE: &str = "file"; pub const PATTERN: &str = "pattern"; } /// Command line options for csplit. 
pub struct CsplitOptions { split_name: SplitName, keep_files: bool, quiet: bool, elide_empty_files: bool, suppress_matched: bool, } impl CsplitOptions { fn new(matches: &ArgMatches) -> Result { let keep_files = matches.get_flag(options::KEEP_FILES); let quiet = matches.get_flag(options::QUIET); let elide_empty_files = matches.get_flag(options::ELIDE_EMPTY_FILES); let suppress_matched = matches.get_flag(options::SUPPRESS_MATCHED); Ok(Self { split_name: SplitName::new( matches.get_one::(options::PREFIX).cloned(), matches.get_one::(options::SUFFIX_FORMAT).cloned(), matches.get_one::(options::DIGITS).cloned(), )?, keep_files, quiet, elide_empty_files, suppress_matched, }) } } pub struct LinesWithNewlines { inner: T, } impl LinesWithNewlines { fn new(s: T) -> Self { Self { inner: s } } } impl Iterator for LinesWithNewlines { type Item = io::Result; fn next(&mut self) -> Option { fn ret(v: Vec) -> io::Result { String::from_utf8(v).map_err(|_| { io::Error::new(ErrorKind::InvalidData, translate!("csplit-stream-not-utf8")) }) } let mut v = Vec::new(); match self.inner.read_until(b'\n', &mut v) { Ok(0) => None, Ok(_) => Some(ret(v)), Err(e) => Some(Err(e)), } } } /// Splits a file into severals according to the command line patterns. /// /// # Errors /// /// - [`io::Error`] if there is some problem reading/writing from/to a file. /// - [`CsplitError::LineOutOfRange`] if the line number pattern is larger than the number of input /// lines. /// - [`CsplitError::LineOutOfRangeOnRepetition`], like previous but after applying the pattern /// more than once. /// - [`CsplitError::MatchNotFound`] if no line matched a regular expression. /// - [`CsplitError::MatchNotFoundOnRepetition`], like previous but after applying the pattern /// more than once. 
pub fn csplit(options: &CsplitOptions, patterns: &[String], input: T) -> Result<(), CsplitError> where T: BufRead, { let enumerated_input_lines = LinesWithNewlines::new(input) .map(|line| line.map_err_context(|| translate!("csplit-read-error"))) .enumerate(); let mut input_iter = InputSplitter::new(enumerated_input_lines); let mut split_writer = SplitWriter::new(options); let patterns: Vec = patterns::get_patterns(patterns)?; let ret = do_csplit(&mut split_writer, patterns, &mut input_iter); // consume the rest, unless there was an error if ret.is_ok() { input_iter.rewind_buffer(); if let Some((_, line)) = input_iter.next() { split_writer.new_writer()?; split_writer.writeln(&line?)?; for (_, line) in input_iter { split_writer.writeln(&line?)?; } split_writer.finish_split(); } } // delete files on error by default if ret.is_err() && !options.keep_files { split_writer.delete_all_splits()?; } ret } fn do_csplit( split_writer: &mut SplitWriter, patterns: Vec, input_iter: &mut InputSplitter, ) -> Result<(), CsplitError> where I: Iterator)>, { // split the file based on patterns for pattern in patterns { let pattern_as_str = pattern.to_string(); let is_skip = matches!(pattern, patterns::Pattern::SkipToMatch(_, _, _)); match pattern { patterns::Pattern::UpToLine(n, ex) => { let mut up_to_line = n; for (_, ith) in ex.iter() { split_writer.new_writer()?; match split_writer.do_to_line(&pattern_as_str, up_to_line, input_iter) { // the error happened when applying the pattern more than once Err(CsplitError::LineOutOfRange(_)) if ith != 1 => { return Err(CsplitError::LineOutOfRangeOnRepetition( pattern_as_str.to_string(), ith - 1, )); } Err(err) => return Err(err), // continue the splitting process Ok(()) => (), } up_to_line += n; } } patterns::Pattern::UpToMatch(regex, offset, ex) | patterns::Pattern::SkipToMatch(regex, offset, ex) => { for (max, ith) in ex.iter() { if is_skip { // when skipping a part of the input, no writer is created split_writer.as_dev_null(); } else { 
split_writer.new_writer()?; } match ( split_writer.do_to_match(&pattern_as_str, ®ex, offset, input_iter), max, ) { // in case of ::pattern::ExecutePattern::Always, then it's fine not to find a // matching line (Err(CsplitError::MatchNotFound(_)), None) => { return Ok(()); } // the error happened when applying the pattern more than once (Err(CsplitError::MatchNotFound(_)), Some(m)) if m != 1 && ith != 1 => { return Err(CsplitError::MatchNotFoundOnRepetition( pattern_as_str.to_string(), ith - 1, )); } (Err(err), _) => return Err(err), // continue the splitting process (Ok(()), _) => (), } } } } } Ok(()) } /// Write a portion of the input file into a split which filename is based on an incrementing /// counter. struct SplitWriter<'a> { /// the options set through the command line options: &'a CsplitOptions, /// a split counter counter: usize, /// the writer to the current split current_writer: Option>, /// the size in bytes of the current split size: usize, /// flag to indicate that no content should be written to a split dev_null: bool, } impl Drop for SplitWriter<'_> { fn drop(&mut self) { if self.options.elide_empty_files && self.size == 0 { let file_name = self.options.split_name.get(self.counter); // In the case of `echo a | csplit -z - %a%1`, the file // `xx00` does not exist because the positive offset // advanced past the end of the input. Since there is no // file to remove in that case, `remove_file` would return // an error, so we just ignore it. let _ = remove_file(file_name); } } } impl SplitWriter<'_> { fn new(options: &CsplitOptions) -> SplitWriter { SplitWriter { options, counter: 0, current_writer: None, size: 0, dev_null: false, } } /// Creates a new split and returns its filename. /// /// # Errors /// /// The creation of the split file may fail with some [`io::Error`]. 
fn new_writer(&mut self) -> io::Result<()> { let file_name = self.options.split_name.get(self.counter); let file = File::create(file_name)?; self.current_writer = Some(BufWriter::new(file)); self.counter += 1; self.size = 0; self.dev_null = false; Ok(()) } /// The current split will not keep any of the read input lines. fn as_dev_null(&mut self) { self.dev_null = true; } /// Writes the line to the current split. /// If [`self.dev_null`] is true, then the line is discarded. /// /// # Errors /// /// Some [`io::Error`] may occur when attempting to write the line. fn writeln(&mut self, line: &str) -> io::Result<()> { if !self.dev_null { match self.current_writer { Some(ref mut current_writer) => { let bytes = line.as_bytes(); current_writer.write_all(bytes)?; self.size += bytes.len(); } None => panic!("{}", translate!("csplit-write-split-not-created")), } } Ok(()) } /// Perform some operations after completing a split, i.e., either remove it /// if the [`options::ELIDE_EMPTY_FILES`] option is enabled, or print how much bytes were written /// to it if [`options::QUIET`] is disabled. /// /// # Errors /// /// Some [`io::Error`] if the split could not be removed in case it should be elided. fn finish_split(&mut self) { if !self.dev_null { if self.options.elide_empty_files && self.size == 0 { self.counter -= 1; } else if !self.options.quiet { println!("{}", self.size); } } } /// Removes all the split files that were created. /// /// # Errors /// /// Returns an [`io::Error`] if there was a problem removing a split. fn delete_all_splits(&self) -> io::Result<()> { let mut ret = Ok(()); for ith in 0..self.counter { let file_name = self.options.split_name.get(ith); if let Err(err) = remove_file(file_name) { ret = Err(err); } } ret } /// Split the input stream up to the line number `n`. /// /// If the line number `n` is smaller than the current position in the input, then an empty /// split is created. 
/// /// # Errors /// /// In addition to errors reading/writing from/to a file, if the line number /// `n` is greater than the total available lines, then a /// [`CsplitError::LineOutOfRange`] error is returned. fn do_to_line( &mut self, pattern_as_str: &str, n: usize, input_iter: &mut InputSplitter, ) -> Result<(), CsplitError> where I: Iterator)>, { input_iter.rewind_buffer(); input_iter.set_size_of_buffer(1); let mut ret = Err(CsplitError::LineOutOfRange(pattern_as_str.to_string())); while let Some((ln, line)) = input_iter.next() { let line = line?; match n.cmp(&(&ln + 1)) { Ordering::Less => { assert!( input_iter.add_line_to_buffer(ln, line).is_none(), "the buffer is big enough to contain 1 line" ); ret = Ok(()); break; } Ordering::Equal => { assert!( self.options.suppress_matched || input_iter.add_line_to_buffer(ln, line).is_none(), "the buffer is big enough to contain 1 line" ); ret = Ok(()); break; } Ordering::Greater => (), } self.writeln(&line)?; } self.finish_split(); ret } /// Read lines up to the line matching a [`Regex`]. With a non-zero offset, /// the block of relevant lines can be extended (if positive), or reduced /// (if negative). /// /// # Errors /// /// In addition to errors reading/writing from/to a file, the following errors may be returned: /// - if no line matched, an [`CsplitError::MatchNotFound`]. /// - if there are not enough lines to accommodate the offset, an /// [`CsplitError::LineOutOfRange`]. #[allow(clippy::cognitive_complexity)] fn do_to_match( &mut self, pattern_as_str: &str, regex: &Regex, mut offset: i32, input_iter: &mut InputSplitter, ) -> Result<(), CsplitError> where I: Iterator)>, { if offset >= 0 { // The offset is zero or positive, no need for a buffer on the lines read. // NOTE: drain the buffer of input_iter, no match should be done within. 
for line in input_iter.drain_buffer() { self.writeln(&line)?; } // retain the matching line input_iter.set_size_of_buffer(1); while let Some((ln, line)) = input_iter.next() { let line = line?; let l = line .strip_suffix("\r\n") .unwrap_or_else(|| line.strip_suffix('\n').unwrap_or(&line)); if regex.is_match(l) { let mut next_line_suppress_matched = false; match (self.options.suppress_matched, offset) { // no offset, add the line to the next split (false, 0) => { assert!( input_iter.add_line_to_buffer(ln, line).is_none(), "the buffer is big enough to contain 1 line" ); } // a positive offset, some more lines need to be added to the current split (false, _) => self.writeln(&line)?, // suppress matched option true, but there is a positive offset, so the line is printed (true, 1..) => { next_line_suppress_matched = true; self.writeln(&line)?; } _ => (), } offset -= 1; // write the extra lines required by the offset while offset > 0 { match input_iter.next() { Some((_, line)) => { self.writeln(&line?)?; } None => { self.finish_split(); return Err(CsplitError::LineOutOfRange( pattern_as_str.to_string(), )); } } offset -= 1; } self.finish_split(); // if we have to suppress one line after we take the next and do nothing if next_line_suppress_matched { input_iter.next(); } return Ok(()); } self.writeln(&line)?; } } else { // With a negative offset we use a buffer to keep the lines within the offset. // NOTE: do not drain the buffer of input_iter, in case of an LineOutOfRange error // but do not rewind it either since no match should be done within. // The consequence is that the buffer may already be full with lines from a previous // split, which is taken care of when calling `shrink_buffer_to_size`. 
let offset_usize = -offset as usize; input_iter.set_size_of_buffer(offset_usize); while let Some((ln, line)) = input_iter.next() { let line = line?; let l = line .strip_suffix("\r\n") .unwrap_or_else(|| line.strip_suffix('\n').unwrap_or(&line)); if regex.is_match(l) { for line in input_iter.shrink_buffer_to_size() { self.writeln(&line)?; } if self.options.suppress_matched { // since offset_usize is for sure greater than 0 // the first element of the buffer should be removed and this // line inserted to be coherent with GNU implementation input_iter.add_line_to_buffer(ln, line); } else { // add 1 to the buffer size to make place for the matched line input_iter.set_size_of_buffer(offset_usize + 1); assert!( input_iter.add_line_to_buffer(ln, line).is_none(), "should be big enough to hold every lines" ); } self.finish_split(); if input_iter.buffer_len() < offset_usize { return Err(CsplitError::LineOutOfRange(pattern_as_str.to_string())); } return Ok(()); } if let Some(line) = input_iter.add_line_to_buffer(ln, line) { self.writeln(&line)?; } } // no match, drain the buffer into the current split for line in input_iter.drain_buffer() { self.writeln(&line)?; } } self.finish_split(); Err(CsplitError::MatchNotFound(pattern_as_str.to_string())) } } /// An iterator which can output items from a buffer filled externally. /// This is used to pass matching lines to the next split and to support patterns with a negative offset. struct InputSplitter where I: Iterator)>, { iter: I, buffer: Vec<::Item>, /// the number of elements the buffer may hold size: usize, /// flag to indicate content off the buffer should be returned instead of off the wrapped /// iterator rewind: bool, } impl InputSplitter where I: Iterator)>, { fn new(iter: I) -> Self { Self { iter, buffer: Vec::new(), rewind: false, size: 1, } } /// Rewind the iteration by outputting the buffer's content. 
fn rewind_buffer(&mut self) { self.rewind = true; } /// Shrink the buffer so that its length is equal to the set size, returning an iterator for /// the elements that were too much. fn shrink_buffer_to_size(&mut self) -> impl Iterator + '_ { let shrink_offset = if self.buffer.len() > self.size { self.buffer.len() - self.size } else { 0 }; self.buffer .drain(..shrink_offset) .map(|(_, line)| line.unwrap()) } /// Drain the content of the buffer. fn drain_buffer(&mut self) -> impl Iterator + '_ { self.buffer.drain(..).map(|(_, line)| line.unwrap()) } /// Set the maximum number of lines to keep. fn set_size_of_buffer(&mut self, size: usize) { self.size = size; } /// Add a line to the buffer. If the buffer has [`self.size`] elements, then its head is removed and /// the new line is pushed to the buffer. The removed head is then available in the returned /// option. fn add_line_to_buffer(&mut self, ln: usize, line: String) -> Option { if self.rewind { self.buffer.insert(0, (ln, Ok(line))); None } else if self.buffer.len() >= self.size { let (_, head_line) = self.buffer.remove(0); self.buffer.push((ln, Ok(line))); Some(head_line.unwrap()) } else { self.buffer.push((ln, Ok(line))); None } } /// Returns the number of lines stored in the buffer fn buffer_len(&self) -> usize { self.buffer.len() } } impl Iterator for InputSplitter where I: Iterator)>, { type Item = ::Item; fn next(&mut self) -> Option { if self.rewind { if !self.buffer.is_empty() { return Some(self.buffer.remove(0)); } self.rewind = false; } self.iter.next() } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; // get the file to split let file_name = matches.get_one::(options::FILE).unwrap(); // get the patterns to split on let patterns: Vec = matches .get_many::(options::PATTERN) .unwrap() .map(|s| s.to_string()) .collect(); let options = CsplitOptions::new(&matches)?; if file_name == "-" { let stdin = io::stdin(); Ok(csplit(&options, 
&patterns, stdin.lock())?) } else { let file = File::open(file_name) .map_err_context(|| format!("cannot open {} for reading", file_name.quote()))?; Ok(csplit(&options, &patterns, BufReader::new(file))?) } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("csplit-about")) .override_usage(format_usage(&translate!("csplit-usage"))) .args_override_self(true) .infer_long_args(true) .arg( Arg::new(options::SUFFIX_FORMAT) .short('b') .long(options::SUFFIX_FORMAT) .value_name("FORMAT") .help(translate!("csplit-help-suffix-format")), ) .arg( Arg::new(options::PREFIX) .short('f') .long(options::PREFIX) .value_name("PREFIX") .help(translate!("csplit-help-prefix")), ) .arg( Arg::new(options::KEEP_FILES) .short('k') .long(options::KEEP_FILES) .help(translate!("csplit-help-keep-files")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SUPPRESS_MATCHED) .long(options::SUPPRESS_MATCHED) .help(translate!("csplit-help-suppress-matched")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DIGITS) .short('n') .long(options::DIGITS) .value_name("DIGITS") .help(translate!("csplit-help-digits")), ) .arg( Arg::new(options::QUIET) .short('q') .long(options::QUIET) .visible_short_alias('s') .visible_alias("silent") .help(translate!("csplit-help-quiet")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ELIDE_EMPTY_FILES) .short('z') .long(options::ELIDE_EMPTY_FILES) .help(translate!("csplit-help-elide-empty-files")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILE) .hide(true) .required(true) .value_hint(clap::ValueHint::FilePath), ) .arg( Arg::new(options::PATTERN) .hide(true) .action(ArgAction::Append) .required(true), ) .after_help(translate!("csplit-after-help")) } #[cfg(test)] mod tests { use super::*; #[test] #[allow(clippy::cognitive_complexity)] fn input_splitter() { let input = vec![ Ok(String::from("aaa")), Ok(String::from("bbb")), Ok(String::from("ccc")), Ok(String::from("ddd")), ]; let 
mut input_splitter = InputSplitter::new(input.into_iter().enumerate()); input_splitter.set_size_of_buffer(2); assert_eq!(input_splitter.buffer_len(), 0); match input_splitter.next() { Some((0, Ok(line))) => { assert_eq!(line, String::from("aaa")); assert_eq!(input_splitter.add_line_to_buffer(0, line), None); assert_eq!(input_splitter.buffer_len(), 1); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((1, Ok(line))) => { assert_eq!(line, String::from("bbb")); assert_eq!(input_splitter.add_line_to_buffer(1, line), None); assert_eq!(input_splitter.buffer_len(), 2); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((2, Ok(line))) => { assert_eq!(line, String::from("ccc")); assert_eq!( input_splitter.add_line_to_buffer(2, line), Some(String::from("aaa")) ); assert_eq!(input_splitter.buffer_len(), 2); } item => panic!("wrong item: {item:?}"), } input_splitter.rewind_buffer(); match input_splitter.next() { Some((1, Ok(line))) => { assert_eq!(line, String::from("bbb")); assert_eq!(input_splitter.buffer_len(), 1); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((2, Ok(line))) => { assert_eq!(line, String::from("ccc")); assert_eq!(input_splitter.buffer_len(), 0); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((3, Ok(line))) => { assert_eq!(line, String::from("ddd")); assert_eq!(input_splitter.buffer_len(), 0); } item => panic!("wrong item: {item:?}"), } assert!(input_splitter.next().is_none()); } #[test] #[allow(clippy::cognitive_complexity)] fn input_splitter_interrupt_rewind() { let input = vec![ Ok(String::from("aaa")), Ok(String::from("bbb")), Ok(String::from("ccc")), Ok(String::from("ddd")), ]; let mut input_splitter = InputSplitter::new(input.into_iter().enumerate()); input_splitter.set_size_of_buffer(3); assert_eq!(input_splitter.buffer_len(), 0); match input_splitter.next() { Some((0, Ok(line))) => { assert_eq!(line, String::from("aaa")); 
assert_eq!(input_splitter.add_line_to_buffer(0, line), None); assert_eq!(input_splitter.buffer_len(), 1); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((1, Ok(line))) => { assert_eq!(line, String::from("bbb")); assert_eq!(input_splitter.add_line_to_buffer(1, line), None); assert_eq!(input_splitter.buffer_len(), 2); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((2, Ok(line))) => { assert_eq!(line, String::from("ccc")); assert_eq!(input_splitter.add_line_to_buffer(2, line), None); assert_eq!(input_splitter.buffer_len(), 3); } item => panic!("wrong item: {item:?}"), } input_splitter.rewind_buffer(); match input_splitter.next() { Some((0, Ok(line))) => { assert_eq!(line, String::from("aaa")); assert_eq!(input_splitter.add_line_to_buffer(0, line), None); assert_eq!(input_splitter.buffer_len(), 3); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((0, Ok(line))) => { assert_eq!(line, String::from("aaa")); assert_eq!(input_splitter.buffer_len(), 2); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((1, Ok(line))) => { assert_eq!(line, String::from("bbb")); assert_eq!(input_splitter.buffer_len(), 1); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((2, Ok(line))) => { assert_eq!(line, String::from("ccc")); assert_eq!(input_splitter.buffer_len(), 0); } item => panic!("wrong item: {item:?}"), } match input_splitter.next() { Some((3, Ok(line))) => { assert_eq!(line, String::from("ddd")); assert_eq!(input_splitter.buffer_len(), 0); } item => panic!("wrong item: {item:?}"), } assert!(input_splitter.next().is_none()); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/csplit_error.rs000066400000000000000000000041761504311601400273360ustar00rootroot00000000000000// This file is part of the uutils coreutils package. 
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use std::io; use thiserror::Error; use uucore::display::Quotable; use uucore::error::UError; use uucore::translate; /// Errors thrown by the csplit command #[derive(Debug, Error)] pub enum CsplitError { #[error("IO error: {}", _0)] IoError(#[from] io::Error), #[error("{}", translate!("csplit-error-line-out-of-range", "pattern" => _0.quote()))] LineOutOfRange(String), #[error("{}", translate!("csplit-error-line-out-of-range-on-repetition", "pattern" => _0.quote(), "repetition" => _1))] LineOutOfRangeOnRepetition(String, usize), #[error("{}", translate!("csplit-error-match-not-found", "pattern" => _0.quote()))] MatchNotFound(String), #[error("{}", translate!("csplit-error-match-not-found-on-repetition", "pattern" => _0.quote(), "repetition" => _1))] MatchNotFoundOnRepetition(String, usize), #[error("{}", translate!("csplit-error-line-number-is-zero"))] LineNumberIsZero, #[error("{}", translate!("csplit-error-line-number-smaller-than-previous", "current" => _0, "previous" => _1))] LineNumberSmallerThanPrevious(usize, usize), #[error("{}", translate!("csplit-error-invalid-pattern", "pattern" => _0.quote()))] InvalidPattern(String), #[error("{}", translate!("csplit-error-invalid-number", "number" => _0.quote()))] InvalidNumber(String), #[error("{}", translate!("csplit-error-suffix-format-incorrect"))] SuffixFormatIncorrect, #[error("{}", translate!("csplit-error-suffix-format-too-many-percents"))] SuffixFormatTooManyPercents, #[error("{}", translate!("csplit-error-not-regular-file", "file" => _0.quote()))] NotRegularFile(String), #[error("{}", _0)] UError(Box), } impl From> for CsplitError { fn from(error: Box) -> Self { Self::UError(error) } } impl UError for CsplitError { fn code(&self) -> i32 { match self { Self::UError(e) => e.code(), _ => 1, } } } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/main.rs000066400000000000000000000000311504311601400255350ustar00rootroot00000000000000uucore::bin!(uu_csplit); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/patterns.rs000066400000000000000000000332151504311601400264630ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (regex) SKIPTO UPTO ; (vars) ntimes use crate::csplit_error::CsplitError; use regex::Regex; use uucore::show_warning; use uucore::translate; /// The definition of a pattern to match on a line. #[derive(Debug)] pub enum Pattern { /// Copy the file's content to a split up to, not including, the given line number. The number /// of times the pattern is executed is detailed in [`ExecutePattern`]. UpToLine(usize, ExecutePattern), /// Copy the file's content to a split up to, not including, the line matching the regex. The /// integer is an offset relative to the matched line of what to include (if positive) or /// to exclude (if negative). The number of times the pattern is executed is detailed in /// [`ExecutePattern`]. UpToMatch(Regex, i32, ExecutePattern), /// Skip the file's content up to, not including, the line matching the regex. The integer /// is an offset relative to the matched line of what to include (if positive) or to exclude /// (if negative). The number of times the pattern is executed is detailed in [`ExecutePattern`]. 
SkipToMatch(Regex, i32, ExecutePattern), } impl std::fmt::Display for Pattern { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::UpToLine(n, _) => write!(f, "{n}"), Self::UpToMatch(regex, 0, _) => write!(f, "/{}/", regex.as_str()), Self::UpToMatch(regex, offset, _) => write!(f, "/{}/{offset:+}", regex.as_str()), Self::SkipToMatch(regex, 0, _) => write!(f, "%{}%", regex.as_str()), Self::SkipToMatch(regex, offset, _) => write!(f, "%{}%{offset:+}", regex.as_str()), } } } /// The number of times a pattern can be used. #[derive(Debug)] pub enum ExecutePattern { /// Execute the pattern as many times as possible Always, /// Execute the pattern a fixed number of times Times(usize), } impl ExecutePattern { pub fn iter(&self) -> ExecutePatternIter { match self { Self::Times(n) => ExecutePatternIter::new(Some(*n)), Self::Always => ExecutePatternIter::new(None), } } } pub struct ExecutePatternIter { max: Option, cur: usize, } impl ExecutePatternIter { fn new(max: Option) -> Self { Self { max, cur: 0 } } } impl Iterator for ExecutePatternIter { type Item = (Option, usize); fn next(&mut self) -> Option<(Option, usize)> { match self.max { // iterate until m is reached Some(m) => { if self.cur == m { None } else { self.cur += 1; Some((self.max, self.cur)) } } // no limit, just increment a counter None => { self.cur += 1; Some((None, self.cur)) } } } } /// Parses the definitions of patterns given on the command line into a list of [`Pattern`]s. /// /// # Errors /// /// If a pattern is incorrect, a [`CsplitError::InvalidPattern`] error is returned, which may be /// due to, e.g.,: /// - an invalid regular expression; /// - an invalid number for, e.g., the offset. 
pub fn get_patterns(args: &[String]) -> Result, CsplitError> { let patterns = extract_patterns(args)?; validate_line_numbers(&patterns)?; Ok(patterns) } fn extract_patterns(args: &[String]) -> Result, CsplitError> { let mut patterns = Vec::with_capacity(args.len()); let to_match_reg = Regex::new(r"^(/(?P.+)/|%(?P.+)%)(?P[\+-]?[0-9]+)?$").unwrap(); let execute_ntimes_reg = Regex::new(r"^\{(?P[0-9]+)|\*\}$").unwrap(); let mut iter = args.iter().peekable(); while let Some(arg) = iter.next() { // get the number of times a pattern is repeated, which is at least once plus whatever is // in the quantifier. let execute_ntimes = match iter.peek() { None => ExecutePattern::Times(1), Some(&next_item) => { match execute_ntimes_reg.captures(next_item) { None => ExecutePattern::Times(1), Some(r) => { // skip the next item iter.next(); if let Some(times) = r.name("TIMES") { ExecutePattern::Times(times.as_str().parse::().unwrap() + 1) } else { ExecutePattern::Always } } } } }; // get the pattern definition if let Some(captures) = to_match_reg.captures(arg) { let offset = match captures.name("OFFSET") { None => 0, Some(m) => m.as_str().parse().unwrap(), }; if let Some(up_to_match) = captures.name("UPTO") { let pattern = Regex::new(up_to_match.as_str()) .map_err(|_| CsplitError::InvalidPattern(arg.to_string()))?; patterns.push(Pattern::UpToMatch(pattern, offset, execute_ntimes)); } else if let Some(skip_to_match) = captures.name("SKIPTO") { let pattern = Regex::new(skip_to_match.as_str()) .map_err(|_| CsplitError::InvalidPattern(arg.to_string()))?; patterns.push(Pattern::SkipToMatch(pattern, offset, execute_ntimes)); } } else if let Ok(line_number) = arg.parse::() { patterns.push(Pattern::UpToLine(line_number, execute_ntimes)); } else { return Err(CsplitError::InvalidPattern(arg.to_string())); } } Ok(patterns) } /// Asserts the line numbers are in increasing order, starting at 1. 
fn validate_line_numbers(patterns: &[Pattern]) -> Result<(), CsplitError> { patterns .iter() .filter_map(|pattern| match pattern { Pattern::UpToLine(line_number, _) => Some(line_number), _ => None, }) .try_fold(0, |prev_ln, ¤t_ln| match (prev_ln, current_ln) { // a line number cannot be zero (_, 0) => Err(CsplitError::LineNumberIsZero), // two consecutive numbers should not be equal (n, m) if n == m => { show_warning!( "{}", translate!("csplit-warning-line-number-same-as-previous", "line_number" => n) ); Ok(n) } // a number cannot be greater than the one that follows (n, m) if n > m => Err(CsplitError::LineNumberSmallerThanPrevious(m, n)), (_, m) => Ok(m), })?; Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn bad_pattern() { let input = vec!["bad".to_string()]; assert!(get_patterns(input.as_slice()).is_err()); } #[test] fn up_to_line_pattern() { let input: Vec = vec!["24", "42", "{*}", "50", "{4}"] .into_iter() .map(|v| v.to_string()) .collect(); let patterns = get_patterns(input.as_slice()).unwrap(); assert_eq!(patterns.len(), 3); match patterns.first() { Some(Pattern::UpToLine(24, ExecutePattern::Times(1))) => (), _ => panic!("expected UpToLine pattern"), } match patterns.get(1) { Some(Pattern::UpToLine(42, ExecutePattern::Always)) => (), _ => panic!("expected UpToLine pattern"), } match patterns.get(2) { Some(Pattern::UpToLine(50, ExecutePattern::Times(5))) => (), _ => panic!("expected UpToLine pattern"), } } #[test] #[allow(clippy::cognitive_complexity)] fn up_to_match_pattern() { let input: Vec = vec![ "/test1.*end$/", "/test2.*end$/", "{*}", "/test3.*end$/", "{4}", "/test4.*end$/3", "/test5.*end$/+3", "/test6.*end$/-3", ] .into_iter() .map(|v| v.to_string()) .collect(); let patterns = get_patterns(input.as_slice()).unwrap(); assert_eq!(patterns.len(), 6); match patterns.first() { Some(Pattern::UpToMatch(reg, 0, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test1.*end$"); } _ => panic!("expected UpToMatch 
pattern"), } match patterns.get(1) { Some(Pattern::UpToMatch(reg, 0, ExecutePattern::Always)) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test2.*end$"); } _ => panic!("expected UpToMatch pattern"), } match patterns.get(2) { Some(Pattern::UpToMatch(reg, 0, ExecutePattern::Times(5))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test3.*end$"); } _ => panic!("expected UpToMatch pattern"), } match patterns.get(3) { Some(Pattern::UpToMatch(reg, 3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test4.*end$"); } _ => panic!("expected UpToMatch pattern"), } match patterns.get(4) { Some(Pattern::UpToMatch(reg, 3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test5.*end$"); } _ => panic!("expected UpToMatch pattern"), } match patterns.get(5) { Some(Pattern::UpToMatch(reg, -3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test6.*end$"); } _ => panic!("expected UpToMatch pattern"), } } #[test] #[allow(clippy::cognitive_complexity)] fn skip_to_match_pattern() { let input: Vec = vec![ "%test1.*end$%", "%test2.*end$%", "{*}", "%test3.*end$%", "{4}", "%test4.*end$%3", "%test5.*end$%+3", "%test6.*end$%-3", ] .into_iter() .map(|v| v.to_string()) .collect(); let patterns = get_patterns(input.as_slice()).unwrap(); assert_eq!(patterns.len(), 6); match patterns.first() { Some(Pattern::SkipToMatch(reg, 0, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test1.*end$"); } _ => panic!("expected SkipToMatch pattern"), } match patterns.get(1) { Some(Pattern::SkipToMatch(reg, 0, ExecutePattern::Always)) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test2.*end$"); } _ => panic!("expected SkipToMatch pattern"), } match patterns.get(2) { Some(Pattern::SkipToMatch(reg, 0, ExecutePattern::Times(5))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, 
"test3.*end$"); } _ => panic!("expected SkipToMatch pattern"), } match patterns.get(3) { Some(Pattern::SkipToMatch(reg, 3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test4.*end$"); } _ => panic!("expected SkipToMatch pattern"), } match patterns.get(4) { Some(Pattern::SkipToMatch(reg, 3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test5.*end$"); } _ => panic!("expected SkipToMatch pattern"), } match patterns.get(5) { Some(Pattern::SkipToMatch(reg, -3, ExecutePattern::Times(1))) => { let parsed_reg = format!("{reg}"); assert_eq!(parsed_reg, "test6.*end$"); } _ => panic!("expected SkipToMatch pattern"), } } #[test] fn line_number_zero() { let patterns = vec![Pattern::UpToLine(0, ExecutePattern::Times(1))]; match validate_line_numbers(&patterns) { Err(CsplitError::LineNumberIsZero) => (), _ => panic!("expected LineNumberIsZero error"), } } #[test] fn line_number_smaller_than_previous() { let input: Vec = vec!["10".to_string(), "5".to_string()]; match get_patterns(input.as_slice()) { Err(CsplitError::LineNumberSmallerThanPrevious(5, 10)) => (), _ => panic!("expected LineNumberSmallerThanPrevious error"), } } #[test] fn line_number_smaller_than_previous_separate() { let input: Vec = vec!["10".to_string(), "/20/".to_string(), "5".to_string()]; match get_patterns(input.as_slice()) { Err(CsplitError::LineNumberSmallerThanPrevious(5, 10)) => (), _ => panic!("expected LineNumberSmallerThanPrevious error"), } } #[test] fn line_number_zero_separate() { let input: Vec = vec!["10".to_string(), "/20/".to_string(), "0".to_string()]; match get_patterns(input.as_slice()) { Err(CsplitError::LineNumberIsZero) => (), _ => panic!("expected LineNumberIsZero error"), } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/csplit/src/split_name.rs000066400000000000000000000177741504311601400267720ustar00rootroot00000000000000// This file is part of the uutils coreutils package. 
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (regex) diuox use uucore::format::{Format, FormatError, num_format::UnsignedInt}; use crate::csplit_error::CsplitError; /// Computes the filename of a split, taking into consideration a possible user-defined suffix /// format. pub struct SplitName { prefix: Vec, format: Format, } impl SplitName { /// Creates a new [`SplitName`] with the given user-defined options: /// - `prefix_opt` specifies a prefix for all splits. /// - `format_opt` specifies a custom format for the suffix part of the filename, using the /// `sprintf` format notation. /// - `n_digits_opt` defines the width of the split number. /// /// # Caveats /// /// If `prefix_opt` and `format_opt` are defined, and the `format_opt` has some string appearing /// before the conversion pattern (e.g., "here-%05d"), then it is appended to the passed prefix /// via `prefix_opt`. /// /// If `n_digits_opt` and `format_opt` are defined, then width defined in `format_opt` is /// taken. pub fn new( prefix_opt: Option, format_opt: Option, n_digits_opt: Option, ) -> Result { // get the prefix let prefix = prefix_opt.unwrap_or_else(|| "xx".to_string()); // the width for the split offset let n_digits = n_digits_opt .map(|opt| { opt.parse::() .map_err(|_| CsplitError::InvalidNumber(opt)) }) .transpose()? .unwrap_or(2); let format_string = format_opt.unwrap_or_else(|| format!("%0{n_digits}u")); let format = match Format::::parse(format_string) { Ok(format) => Ok(format), Err(FormatError::TooManySpecs(_)) => Err(CsplitError::SuffixFormatTooManyPercents), Err(_) => Err(CsplitError::SuffixFormatIncorrect), }?; Ok(Self { prefix: prefix.as_bytes().to_owned(), format, }) } /// Returns the filename of the i-th split. 
pub fn get(&self, n: usize) -> String { let mut v = self.prefix.clone(); self.format.fmt(&mut v, n as u64).unwrap(); String::from_utf8_lossy(&v).to_string() } } #[cfg(test)] mod tests { // spell-checker:ignore (path) xxcst use super::*; #[test] fn invalid_number() { let split_name = SplitName::new(None, None, Some(String::from("bad"))); match split_name { Err(CsplitError::InvalidNumber(_)) => (), _ => panic!("should fail with InvalidNumber"), } } #[test] fn invalid_suffix_format1() { let split_name = SplitName::new(None, Some(String::from("no conversion string")), None); match split_name { Err(CsplitError::SuffixFormatIncorrect) => (), _ => panic!("should fail with SuffixFormatIncorrect"), } } #[test] fn invalid_suffix_format2() { let split_name = SplitName::new(None, Some(String::from("%042a")), None); match split_name { Err(CsplitError::SuffixFormatIncorrect) => (), _ => panic!("should fail with SuffixFormatIncorrect"), } } #[test] fn default_formatter() { let split_name = SplitName::new(None, None, None).unwrap(); assert_eq!(split_name.get(2), "xx02"); } #[test] fn default_formatter_with_prefix() { let split_name = SplitName::new(Some(String::from("aaa")), None, None).unwrap(); assert_eq!(split_name.get(2), "aaa02"); } #[test] fn default_formatter_with_width() { let split_name = SplitName::new(None, None, Some(String::from("5"))).unwrap(); assert_eq!(split_name.get(2), "xx00002"); } #[test] fn no_padding_decimal() { let split_name = SplitName::new(None, Some(String::from("cst-%d-")), None).unwrap(); assert_eq!(split_name.get(2), "xxcst-2-"); } #[test] fn zero_padding_decimal1() { let split_name = SplitName::new(None, Some(String::from("cst-%03d-")), None).unwrap(); assert_eq!(split_name.get(2), "xxcst-002-"); } #[test] fn zero_padding_decimal2() { let split_name = SplitName::new( Some(String::from("pre-")), Some(String::from("cst-%03d-post")), None, ) .unwrap(); assert_eq!(split_name.get(2), "pre-cst-002-post"); } #[test] fn zero_padding_decimal3() { let 
split_name = SplitName::new( None, Some(String::from("cst-%03d-")), Some(String::from("42")), ) .unwrap(); assert_eq!(split_name.get(2), "xxcst-002-"); } #[test] fn zero_padding_decimal4() { let split_name = SplitName::new(None, Some(String::from("cst-%03i-")), None).unwrap(); assert_eq!(split_name.get(2), "xxcst-002-"); } #[test] fn zero_padding_decimal5() { let split_name = SplitName::new(None, Some(String::from("cst-%03u-")), None).unwrap(); assert_eq!(split_name.get(2), "xxcst-002-"); } #[test] fn zero_padding_octal() { let split_name = SplitName::new(None, Some(String::from("cst-%03o-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-052-"); } #[test] fn zero_padding_lower_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%03x-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-02a-"); } #[test] fn zero_padding_upper_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%03X-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-02A-"); } #[test] fn alternate_form_octal() { let split_name = SplitName::new(None, Some(String::from("cst-%#10o-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst- 052-"); } #[test] fn alternate_form_lower_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%#10x-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst- 0x2a-"); } #[test] fn alternate_form_upper_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%#10X-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst- 0X2A-"); } #[test] fn left_adjusted_decimal1() { let split_name = SplitName::new(None, Some(String::from("cst-%-10d-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-42 -"); } #[test] fn left_adjusted_decimal2() { let split_name = SplitName::new(None, Some(String::from("cst-%-10i-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-42 -"); } #[test] fn left_adjusted_decimal3() { let split_name = SplitName::new(None, Some(String::from("cst-%-10u-")), 
None).unwrap(); assert_eq!(split_name.get(42), "xxcst-42 -"); } #[test] fn left_adjusted_octal() { let split_name = SplitName::new(None, Some(String::from("cst-%-10o-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-52 -"); } #[test] fn left_adjusted_lower_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%-10x-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-2a -"); } #[test] fn left_adjusted_upper_hex() { let split_name = SplitName::new(None, Some(String::from("cst-%-10X-")), None).unwrap(); assert_eq!(split_name.get(42), "xxcst-2A -"); } #[test] fn too_many_percent() { let split_name = SplitName::new(None, Some(String::from("%02d-%-3x")), None); match split_name { Err(CsplitError::SuffixFormatTooManyPercents) => (), _ => panic!("should fail with SuffixFormatTooManyPercents"), } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/000077500000000000000000000000001504311601400227575ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/BENCHMARKING.md000066400000000000000000000032461504311601400251360ustar00rootroot00000000000000# Benchmarking cut ## Performance profile In normal use cases a significant amount of the total execution time of `cut` is spent performing I/O. When invoked with the `-f` option (cut fields) some CPU time is spent on detecting fields (in `Searcher::next`). Other than that some small amount of CPU time is spent on breaking the input stream into lines. ## How to When fixing bugs or adding features you might want to compare performance before and after your code changes. - `hyperfine` can be used to accurately measure and compare the total execution time of one or more commands. ```shell cargo build --release --package uu_cut hyperfine -w3 "./target/release/cut -f2-4,8 -d' ' input.txt" "cut -f2-4,8 -d' ' input.txt" ``` You can put those two commands in a shell script to be sure that you don't forget to build after making any changes. 
When optimizing or fixing performance regressions seeing the number of times a function is called, and the amount of time it takes can be useful. - `cargo flamegraph` generates flame graphs from function level metrics it records using `perf` or `dtrace` ```shell cargo flamegraph --bin cut --package uu_cut -- -f1,3-4 input.txt > /dev/null ``` ## What to benchmark There are four different performance paths in `cut` to benchmark. - Byte ranges `-c`/`--characters` or `-b`/`--bytes` e.g. `cut -c 2,4,6-` - Byte ranges with output delimiters e.g. `cut -c 4- --output-delimiter=/` - Fields e.g. `cut -f -4` - Fields with output delimiters e.g. `cut -f 7-10 --output-delimiter=:` Choose a test input file with large number of lines so that program startup time does not significantly affect the benchmark. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/Cargo.toml000066400000000000000000000012201504311601400247020ustar00rootroot00000000000000[package] name = "uu_cut" description = "cut ~ (uutils) display byte/field columns of input lines" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/cut" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/cut.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["ranges"] } memchr = { workspace = true } bstr = { workspace = true } fluent = { workspace = true } [[bin]] name = "cut" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/LICENSE000077700000000000000000000000001504311601400256252../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/locales/000077500000000000000000000000001504311601400244015ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/locales/en-US.ftl000066400000000000000000000117631504311601400260470ustar00rootroot00000000000000cut-about = Prints specified byte or field columns from each line of stdin or the input files cut-usage = cut OPTION... [FILE]... cut-after-help = Each call must specify a mode (what to use for columns), a sequence (which columns to print), and provide a data source ### Specifying a mode Use --bytes (-b) or --characters (-c) to specify byte mode Use --fields (-f) to specify field mode, where each line is broken into fields identified by a delimiter character. For example for a typical CSV you could use this in combination with setting comma as the delimiter ### Specifying a sequence A sequence is a group of 1 or more numbers or inclusive ranges separated by a commas. cut -f 2,5-7 some_file.txt will display the 2nd, 5th, 6th, and 7th field for each source line Ranges can extend to the end of the row by excluding the second number cut -f 3- some_file.txt will display the 3rd field and all fields after for each source line The first number of a range can be excluded, and this is effectively the same as using 1 as the first number: it causes the range to begin at the first column. 
Ranges can also display a single column cut -f 1,3-5 some_file.txt will display the 1st, 3rd, 4th, and 5th field for each source line The --complement option, when used, inverts the effect of the sequence cut --complement -f 4-6 some_file.txt will display the every field but the 4th, 5th, and 6th ### Specifying a data source If no sourcefile arguments are specified, stdin is used as the source of lines to print If sourcefile arguments are specified, stdin is ignored and all files are read in consecutively if a sourcefile is not successfully read, a warning will print to stderr, and the eventual status code will be 1, but cut will continue to read through proceeding sourcefiles To print columns from both STDIN and a file argument, use - (dash) as a sourcefile argument to represent stdin. ### Field Mode options The fields in each line are identified by a delimiter (separator) #### Set the delimiter Set the delimiter which separates fields in the file using the --delimiter (-d) option. Setting the delimiter is optional. If not set, a default delimiter of Tab will be used. If the -w option is provided, fields will be separated by any number of whitespace characters (Space and Tab). The output delimiter will be a Tab unless explicitly specified. Only one of -d or -w option can be specified. This is an extension adopted from FreeBSD. #### Optionally Filter based on delimiter If the --only-delimited (-s) flag is provided, only lines which contain the delimiter will be printed #### Replace the delimiter If the --output-delimiter option is provided, the argument used for it will replace the delimiter character in each line printed. This is useful for transforming tabular data - e.g. to convert a CSV to a TSV (tab-separated file) ### Line endings When the --zero-terminated (-z) option is used, cut sees \\0 (null) as the 'line ending' character (both for the purposes of reading lines and separating printed lines) instead of \\n (newline). 
This is useful for tabular data where some of the cells may contain newlines echo 'ab\\0cd' | cut -z -c 1 will result in 'a\\0c\\0' # Help messages cut-help-bytes = filter byte columns from the input source cut-help-characters = alias for character mode cut-help-delimiter = specify the delimiter character that separates fields in the input source. Defaults to Tab. cut-help-whitespace-delimited = Use any number of whitespace (Space, Tab) to separate fields in the input source (FreeBSD extension). cut-help-fields = filter field columns from the input source cut-help-complement = invert the filter - instead of displaying only the filtered columns, display all but those columns cut-help-only-delimited = in field mode, only print lines which contain the delimiter cut-help-zero-terminated = instead of filtering columns based on line, filter columns based on \\0 (NULL character) cut-help-output-delimiter = in field mode, replace the delimiter in output lines with this option's argument # Error messages cut-error-is-directory = Is a directory cut-error-write-error = write error cut-error-delimiter-and-whitespace-conflict = invalid input: Only one of --delimiter (-d) or -w option can be specified cut-error-delimiter-must-be-single-character = the delimiter must be a single character cut-error-multiple-mode-args = invalid usage: expects no more than one of --fields (-f), --chars (-c) or --bytes (-b) cut-error-missing-mode-arg = invalid usage: expects one of --fields (-f), --chars (-c) or --bytes (-b) cut-error-delimiter-only-with-fields = invalid input: The '--delimiter' ('-d') option only usable if printing a sequence of fields cut-error-whitespace-only-with-fields = invalid input: The '-w' option only usable if printing a sequence of fields cut-error-only-delimited-only-with-fields = invalid input: The '--only-delimited' ('-s') option only usable if printing a sequence of fields 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/locales/fr-FR.ftl000066400000000000000000000133451504311601400260320ustar00rootroot00000000000000cut-about = Affiche les colonnes d'octets ou de champs spécifiées de chaque ligne de stdin ou des fichiers d'entrée cut-usage = cut OPTION... [FICHIER]... cut-after-help = Chaque appel doit spécifier un mode (quoi utiliser pour les colonnes), une séquence (quelles colonnes afficher), et fournir une source de données ### Spécifier un mode Utilisez --bytes (-b) ou --characters (-c) pour spécifier le mode octet Utilisez --fields (-f) pour spécifier le mode champ, où chaque ligne est divisée en champs identifiés par un caractère délimiteur. Par exemple pour un CSV typique vous pourriez utiliser ceci en combinaison avec la définition de la virgule comme délimiteur ### Spécifier une séquence Une séquence est un groupe de 1 ou plusieurs nombres ou plages inclusives séparés par des virgules. cut -f 2,5-7 quelque_fichier.txt affichera les 2ème, 5ème, 6ème, et 7ème champs pour chaque ligne source Les plages peuvent s'étendre jusqu'à la fin de la ligne en excluant le second nombre cut -f 3- quelque_fichier.txt affichera le 3ème champ et tous les champs suivants pour chaque ligne source Le premier nombre d'une plage peut être exclu, et ceci est effectivement identique à utiliser 1 comme premier nombre : cela fait commencer la plage à la première colonne. 
Les plages peuvent aussi afficher une seule colonne cut -f 1,3-5 quelque_fichier.txt affichera les 1er, 3ème, 4ème, et 5ème champs pour chaque ligne source L'option --complement, quand utilisée, inverse l'effet de la séquence cut --complement -f 4-6 quelque_fichier.txt affichera tous les champs sauf les 4ème, 5ème, et 6ème ### Spécifier une source de données Si aucun argument de fichier source n'est spécifié, stdin est utilisé comme source de lignes à afficher Si des arguments de fichier source sont spécifiés, stdin est ignoré et tous les fichiers sont lus consécutivement si un fichier source n'est pas lu avec succès, un avertissement sera affiché sur stderr, et le code de statut final sera 1, mais cut continuera à lire les fichiers sources suivants Pour afficher les colonnes depuis STDIN et un argument de fichier, utilisez - (tiret) comme argument de fichier source pour représenter stdin. ### Options du Mode Champ Les champs dans chaque ligne sont identifiés par un délimiteur (séparateur) #### Définir le délimiteur Définissez le délimiteur qui sépare les champs dans le fichier en utilisant l'option --delimiter (-d). Définir le délimiteur est optionnel. Si non défini, un délimiteur par défaut de Tab sera utilisé. Si l'option -w est fournie, les champs seront séparés par tout nombre de caractères d'espacement (Espace et Tab). Le délimiteur de sortie sera un Tab sauf si explicitement spécifié. Seulement une des options -d ou -w peut être spécifiée. Ceci est une extension adoptée de FreeBSD. #### Filtrage optionnel basé sur le délimiteur Si le drapeau --only-delimited (-s) est fourni, seules les lignes qui contiennent le délimiteur seront affichées #### Remplacer le délimiteur Si l'option --output-delimiter est fournie, l'argument utilisé pour elle remplacera le caractère délimiteur dans chaque ligne affichée. Ceci est utile pour transformer les données tabulaires - par ex. 
pour convertir un CSV en TSV (fichier séparé par tabulations) ### Fins de ligne Quand l'option --zero-terminated (-z) est utilisée, cut voit \\0 (null) comme le caractère de 'fin de ligne' (à la fois pour lire les lignes et séparer les lignes affichées) au lieu de \\n (nouvelle ligne). Ceci est utile pour les données tabulaires où certaines cellules peuvent contenir des nouvelles lignes echo 'ab\\0cd' | cut -z -c 1 donnera comme résultat 'a\\0c\\0' # Messages d'aide cut-help-bytes = filtrer les colonnes d'octets depuis la source d'entrée cut-help-characters = alias pour le mode caractère cut-help-delimiter = spécifier le caractère délimiteur qui sépare les champs dans la source d'entrée. Par défaut Tab. cut-help-whitespace-delimited = Utiliser tout nombre d'espaces (Espace, Tab) pour séparer les champs dans la source d'entrée (extension FreeBSD). cut-help-fields = filtrer les colonnes de champs depuis la source d'entrée cut-help-complement = inverser le filtre - au lieu d'afficher seulement les colonnes filtrées, afficher toutes sauf ces colonnes cut-help-only-delimited = en mode champ, afficher seulement les lignes qui contiennent le délimiteur cut-help-zero-terminated = au lieu de filtrer les colonnes basées sur la ligne, filtrer les colonnes basées sur \\0 (caractère NULL) cut-help-output-delimiter = en mode champ, remplacer le délimiteur dans les lignes de sortie avec l'argument de cette option # Messages d'erreur cut-error-is-directory = Est un répertoire cut-error-write-error = erreur d'écriture cut-error-delimiter-and-whitespace-conflict = entrée invalide : Seulement une des options --delimiter (-d) ou -w peut être spécifiée cut-error-delimiter-must-be-single-character = le délimiteur doit être un caractère unique cut-error-multiple-mode-args = usage invalide : attend au plus une des options --fields (-f), --chars (-c) ou --bytes (-b) cut-error-missing-mode-arg = usage invalide : attend une des options --fields (-f), --chars (-c) ou --bytes (-b) 
cut-error-delimiter-only-with-fields = entrée invalide : L'option '--delimiter' ('-d') n'est utilisable que si on affiche une séquence de champs cut-error-whitespace-only-with-fields = entrée invalide : L'option '-w' n'est utilisable que si on affiche une séquence de champs cut-error-only-delimited-only-with-fields = entrée invalide : L'option '--only-delimited' ('-s') n'est utilisable que si on affiche une séquence de champs coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/src/000077500000000000000000000000001504311601400235465ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/src/cut.rs000066400000000000000000000535751504311601400247260ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) delim sourcefiles use bstr::io::BufReadExt; use clap::{Arg, ArgAction, ArgMatches, Command, builder::ValueParser}; use std::ffi::OsString; use std::fs::File; use std::io::{BufRead, BufReader, BufWriter, IsTerminal, Read, Write, stdin, stdout}; use std::path::Path; use uucore::display::Quotable; use uucore::error::{FromIo, UResult, USimpleError, set_exit_code}; use uucore::line_ending::LineEnding; use uucore::os_str_as_bytes; use self::searcher::Searcher; use matcher::{ExactMatcher, Matcher, WhitespaceMatcher}; use uucore::ranges::Range; use uucore::translate; use uucore::{format_usage, show_error, show_if_err}; mod matcher; mod searcher; struct Options<'a> { out_delimiter: Option<&'a [u8]>, line_ending: LineEnding, field_opts: Option>, } enum Delimiter<'a> { Whitespace, Slice(&'a [u8]), } struct FieldOptions<'a> { delimiter: Delimiter<'a>, only_delimited: bool, } enum Mode<'a> { Bytes(Vec, Options<'a>), Characters(Vec, Options<'a>), Fields(Vec, Options<'a>), } impl Default for Delimiter<'_> { fn default() -> Self { Self::Slice(b"\t") } } 
impl<'a> From<&'a OsString> for Delimiter<'a> { fn from(s: &'a OsString) -> Self { Self::Slice(os_str_as_bytes(s).unwrap()) } } fn list_to_ranges(list: &str, complement: bool) -> Result, String> { if complement { Range::from_list(list).map(|r| uucore::ranges::complement(&r)) } else { Range::from_list(list) } } fn cut_bytes( reader: R, out: &mut W, ranges: &[Range], opts: &Options, ) -> UResult<()> { let newline_char = opts.line_ending.into(); let mut buf_in = BufReader::new(reader); let out_delim = opts.out_delimiter.unwrap_or(b"\t"); let result = buf_in.for_byte_record(newline_char, |line| { let mut print_delim = false; for &Range { low, high } in ranges { if low > line.len() { break; } if print_delim { out.write_all(out_delim)?; } else if opts.out_delimiter.is_some() { print_delim = true; } // change `low` from 1-indexed value to 0-index value let low = low - 1; let high = high.min(line.len()); out.write_all(&line[low..high])?; } out.write_all(&[newline_char])?; Ok(true) }); if let Err(e) = result { return Err(USimpleError::new(1, e.to_string())); } Ok(()) } /// Output delimiter is explicitly specified fn cut_fields_explicit_out_delim( reader: R, out: &mut W, matcher: &M, ranges: &[Range], only_delimited: bool, newline_char: u8, out_delim: &[u8], ) -> UResult<()> { let mut buf_in = BufReader::new(reader); let result = buf_in.for_byte_record_with_terminator(newline_char, |line| { let mut fields_pos = 1; let mut low_idx = 0; let mut delim_search = Searcher::new(matcher, line).peekable(); let mut print_delim = false; if delim_search.peek().is_none() { if !only_delimited { // Always write the entire line, even if it doesn't end with `newline_char` out.write_all(line)?; if line.is_empty() || line[line.len() - 1] != newline_char { out.write_all(&[newline_char])?; } } return Ok(true); } for &Range { low, high } in ranges { if low - fields_pos > 0 { // current field is not in the range, so jump to the field corresponding to the // beginning of the range if any low_idx = 
match delim_search.nth(low - fields_pos - 1) { Some((_, last)) => last, None => break, }; } // at this point, current field is the first in the range for _ in 0..=high - low { // skip printing delimiter if this is the first matching field for this line if print_delim { out.write_all(out_delim)?; } else { print_delim = true; } match delim_search.next() { // print the current field up to the next field delim Some((first, last)) => { let segment = &line[low_idx..first]; out.write_all(segment)?; low_idx = last; fields_pos = high + 1; } None => { // this is the last field in the line, so print the rest let segment = &line[low_idx..]; out.write_all(segment)?; if line[line.len() - 1] == newline_char { return Ok(true); } break; } } } } out.write_all(&[newline_char])?; Ok(true) }); if let Err(e) = result { return Err(USimpleError::new(1, e.to_string())); } Ok(()) } /// Output delimiter is the same as input delimiter fn cut_fields_implicit_out_delim( reader: R, out: &mut W, matcher: &M, ranges: &[Range], only_delimited: bool, newline_char: u8, ) -> UResult<()> { let mut buf_in = BufReader::new(reader); let result = buf_in.for_byte_record_with_terminator(newline_char, |line| { let mut fields_pos = 1; let mut low_idx = 0; let mut delim_search = Searcher::new(matcher, line).peekable(); let mut print_delim = false; if delim_search.peek().is_none() { if !only_delimited { // Always write the entire line, even if it doesn't end with `newline_char` out.write_all(line)?; if line.is_empty() || line[line.len() - 1] != newline_char { out.write_all(&[newline_char])?; } } return Ok(true); } for &Range { low, high } in ranges { if low - fields_pos > 0 { if let Some((first, last)) = delim_search.nth(low - fields_pos - 1) { low_idx = if print_delim { first } else { last } } else { break; } } match delim_search.nth(high - low) { Some((first, _)) => { let segment = &line[low_idx..first]; out.write_all(segment)?; print_delim = true; low_idx = first; fields_pos = high + 1; } None => { let 
segment = &line[low_idx..line.len()]; out.write_all(segment)?; if line[line.len() - 1] == newline_char { return Ok(true); } break; } } } out.write_all(&[newline_char])?; Ok(true) }); if let Err(e) = result { return Err(USimpleError::new(1, e.to_string())); } Ok(()) } /// The input delimiter is identical to `newline_char` fn cut_fields_newline_char_delim( reader: R, out: &mut W, ranges: &[Range], newline_char: u8, out_delim: &[u8], ) -> UResult<()> { let buf_in = BufReader::new(reader); let segments: Vec<_> = buf_in.split(newline_char).filter_map(|x| x.ok()).collect(); let mut print_delim = false; for &Range { low, high } in ranges { for i in low..=high { // "- 1" is necessary because fields start from 1 whereas a Vec starts from 0 if let Some(segment) = segments.get(i - 1) { if print_delim { out.write_all(out_delim)?; } else { print_delim = true; } out.write_all(segment.as_slice())?; } else { break; } } } out.write_all(&[newline_char])?; Ok(()) } fn cut_fields( reader: R, out: &mut W, ranges: &[Range], opts: &Options, ) -> UResult<()> { let newline_char = opts.line_ending.into(); let field_opts = opts.field_opts.as_ref().unwrap(); // it is safe to unwrap() here - field_opts will always be Some() for cut_fields() call match field_opts.delimiter { Delimiter::Slice(delim) if delim == [newline_char] => { let out_delim = opts.out_delimiter.unwrap_or(delim); cut_fields_newline_char_delim(reader, out, ranges, newline_char, out_delim) } Delimiter::Slice(delim) => { let matcher = ExactMatcher::new(delim); match opts.out_delimiter { Some(out_delim) => cut_fields_explicit_out_delim( reader, out, &matcher, ranges, field_opts.only_delimited, newline_char, out_delim, ), None => cut_fields_implicit_out_delim( reader, out, &matcher, ranges, field_opts.only_delimited, newline_char, ), } } Delimiter::Whitespace => { let matcher = WhitespaceMatcher {}; cut_fields_explicit_out_delim( reader, out, &matcher, ranges, field_opts.only_delimited, newline_char, 
opts.out_delimiter.unwrap_or(b"\t"), ) } } } fn cut_files(mut filenames: Vec, mode: &Mode) { let mut stdin_read = false; if filenames.is_empty() { filenames.push("-".to_owned()); } let mut out: Box = if stdout().is_terminal() { Box::new(stdout()) } else { Box::new(BufWriter::new(stdout())) as Box }; for filename in &filenames { if filename == "-" { if stdin_read { continue; } show_if_err!(match mode { Mode::Bytes(ranges, opts) => cut_bytes(stdin(), &mut out, ranges, opts), Mode::Characters(ranges, opts) => cut_bytes(stdin(), &mut out, ranges, opts), Mode::Fields(ranges, opts) => cut_fields(stdin(), &mut out, ranges, opts), }); stdin_read = true; } else { let path = Path::new(&filename[..]); if path.is_dir() { show_error!( "{}: {}", filename.maybe_quote(), translate!("cut-error-is-directory") ); set_exit_code(1); continue; } show_if_err!( File::open(path) .map_err_context(|| filename.maybe_quote().to_string()) .and_then(|file| { match &mode { Mode::Bytes(ranges, opts) | Mode::Characters(ranges, opts) => { cut_bytes(file, &mut out, ranges, opts) } Mode::Fields(ranges, opts) => cut_fields(file, &mut out, ranges, opts), } }) ); } } show_if_err!( out.flush() .map_err_context(|| translate!("cut-error-write-error")) ); } /// Get delimiter and output delimiter from `-d`/`--delimiter` and `--output-delimiter` options respectively /// Allow either delimiter to have a value that is neither UTF-8 nor ASCII to align with GNU behavior fn get_delimiters(matches: &ArgMatches) -> UResult<(Delimiter, Option<&[u8]>)> { let whitespace_delimited = matches.get_flag(options::WHITESPACE_DELIMITED); let delim_opt = matches.get_one::(options::DELIMITER); let delim = match delim_opt { Some(_) if whitespace_delimited => { return Err(USimpleError::new( 1, translate!("cut-error-delimiter-and-whitespace-conflict"), )); } Some(os_string) => { if os_string == "''" || os_string.is_empty() { // treat `''` as empty delimiter Delimiter::Slice(b"\0") } else { // For delimiter `-d` option value - allow 
both UTF-8 (possibly multi-byte) characters // and Non UTF-8 (and not ASCII) single byte "characters", like `b"\xAD"` to align with GNU behavior let bytes = os_str_as_bytes(os_string)?; if os_string.to_str().is_some_and(|s| s.chars().count() > 1) || os_string.to_str().is_none() && bytes.len() > 1 { return Err(USimpleError::new( 1, translate!("cut-error-delimiter-must-be-single-character"), )); } Delimiter::from(os_string) } } None => { if whitespace_delimited { Delimiter::Whitespace } else { Delimiter::default() } } }; let out_delim = matches .get_one::(options::OUTPUT_DELIMITER) .map(|os_string| { if os_string.is_empty() || os_string == "''" { b"\0" } else { os_str_as_bytes(os_string).unwrap() } }); Ok((delim, out_delim)) } mod options { pub const BYTES: &str = "bytes"; pub const CHARACTERS: &str = "characters"; pub const DELIMITER: &str = "delimiter"; pub const FIELDS: &str = "fields"; pub const ZERO_TERMINATED: &str = "zero-terminated"; pub const ONLY_DELIMITED: &str = "only-delimited"; pub const OUTPUT_DELIMITER: &str = "output-delimiter"; pub const WHITESPACE_DELIMITED: &str = "whitespace-delimited"; pub const COMPLEMENT: &str = "complement"; pub const FILE: &str = "file"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { // GNU's `cut` supports `-d=` to set the delimiter to `=`. // Clap parsing is limited in this situation, see: // https://github.com/uutils/coreutils/issues/2424#issuecomment-863825242 let args: Vec = args .into_iter() .map(|x| { if x == "-d=" { "--delimiter==".into() } else { x } }) .collect(); let matches = uu_app().try_get_matches_from(args)?; let complement = matches.get_flag(options::COMPLEMENT); let only_delimited = matches.get_flag(options::ONLY_DELIMITED); let (delimiter, out_delimiter) = get_delimiters(&matches)?; let line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO_TERMINATED)); // Only one, and only one of cutting mode arguments, i.e. `-b`, `-c`, `-f`, // is expected. 
The number of those arguments is used for parsing a cutting // mode and handling the error cases. let mode_args_count = [ matches.indices_of(options::BYTES), matches.indices_of(options::CHARACTERS), matches.indices_of(options::FIELDS), ] .into_iter() .map(|indices| indices.unwrap_or_default().count()) .sum(); let mode_parse = match ( mode_args_count, matches.get_one::(options::BYTES), matches.get_one::(options::CHARACTERS), matches.get_one::(options::FIELDS), ) { (1, Some(byte_ranges), None, None) => { list_to_ranges(byte_ranges, complement).map(|ranges| { Mode::Bytes( ranges, Options { out_delimiter, line_ending, field_opts: None, }, ) }) } (1, None, Some(char_ranges), None) => { list_to_ranges(char_ranges, complement).map(|ranges| { Mode::Characters( ranges, Options { out_delimiter, line_ending, field_opts: None, }, ) }) } (1, None, None, Some(field_ranges)) => { list_to_ranges(field_ranges, complement).map(|ranges| { Mode::Fields( ranges, Options { out_delimiter, line_ending, field_opts: Some(FieldOptions { delimiter, only_delimited, }), }, ) }) } (2.., _, _, _) => Err(translate!("cut-error-multiple-mode-args")), _ => Err(translate!("cut-error-missing-mode-arg")), }; let mode_parse = match mode_parse { Err(_) => mode_parse, Ok(mode) => match mode { Mode::Bytes(_, _) | Mode::Characters(_, _) if matches.contains_id(options::DELIMITER) => { Err(translate!("cut-error-delimiter-only-with-fields")) } Mode::Bytes(_, _) | Mode::Characters(_, _) if matches.get_flag(options::WHITESPACE_DELIMITED) => { Err(translate!("cut-error-whitespace-only-with-fields")) } Mode::Bytes(_, _) | Mode::Characters(_, _) if matches.get_flag(options::ONLY_DELIMITED) => { Err(translate!("cut-error-only-delimited-only-with-fields")) } _ => Ok(mode), }, }; let files: Vec = matches .get_many::(options::FILE) .unwrap_or_default() .cloned() .collect(); match mode_parse { Ok(mode) => { cut_files(files, &mode); Ok(()) } Err(e) => Err(USimpleError::new(1, e)), } } pub fn uu_app() -> Command { 
Command::new(uucore::util_name()) .version(uucore::crate_version!()) .override_usage(format_usage(&translate!("cut-usage"))) .about(translate!("cut-about")) .after_help(translate!("cut-after-help")) .infer_long_args(true) // While `args_override_self(true)` for some arguments, such as `-d` // and `--output-delimiter`, is consistent to the behavior of GNU cut, // arguments related to cutting mode, i.e. `-b`, `-c`, `-f`, should // cause an error when there is more than one of them, as described in // the manual of GNU cut: "Use one, and only one of -b, -c or -f". // `ArgAction::Append` is used on `-b`, `-c`, `-f` arguments, so that // the occurrences of those could be counted and be handled accordingly. .args_override_self(true) .arg( Arg::new(options::BYTES) .short('b') .long(options::BYTES) .help(translate!("cut-help-bytes")) .allow_hyphen_values(true) .value_name("LIST") .action(ArgAction::Append), ) .arg( Arg::new(options::CHARACTERS) .short('c') .long(options::CHARACTERS) .help(translate!("cut-help-characters")) .allow_hyphen_values(true) .value_name("LIST") .action(ArgAction::Append), ) .arg( Arg::new(options::DELIMITER) .short('d') .long(options::DELIMITER) .value_parser(ValueParser::os_string()) .help(translate!("cut-help-delimiter")) .value_name("DELIM"), ) .arg( Arg::new(options::WHITESPACE_DELIMITED) .short('w') .help(translate!("cut-help-whitespace-delimited")) .value_name("WHITESPACE") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FIELDS) .short('f') .long(options::FIELDS) .help(translate!("cut-help-fields")) .allow_hyphen_values(true) .value_name("LIST") .action(ArgAction::Append), ) .arg( Arg::new(options::COMPLEMENT) .long(options::COMPLEMENT) .help(translate!("cut-help-complement")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ONLY_DELIMITED) .short('s') .long(options::ONLY_DELIMITED) .help(translate!("cut-help-only-delimited")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ZERO_TERMINATED) .short('z') 
.long(options::ZERO_TERMINATED) .help(translate!("cut-help-zero-terminated")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OUTPUT_DELIMITER) .long(options::OUTPUT_DELIMITER) .value_parser(ValueParser::os_string()) .help(translate!("cut-help-output-delimiter")) .value_name("NEW_DELIM"), ) .arg( Arg::new(options::FILE) .hide(true) .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/src/main.rs000066400000000000000000000000261504311601400250360ustar00rootroot00000000000000uucore::bin!(uu_cut); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/src/matcher.rs000066400000000000000000000107231504311601400255420ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use memchr::{memchr, memchr2}; // Find the next matching byte sequence positions // Return (first, last) where haystack[first..last] corresponds to the matched pattern pub trait Matcher { fn next_match(&self, haystack: &[u8]) -> Option<(usize, usize)>; } // Matches for the exact byte sequence pattern pub struct ExactMatcher<'a> { needle: &'a [u8], } impl<'a> ExactMatcher<'a> { pub fn new(needle: &'a [u8]) -> Self { assert!(!needle.is_empty()); Self { needle } } } impl Matcher for ExactMatcher<'_> { fn next_match(&self, haystack: &[u8]) -> Option<(usize, usize)> { let mut pos = 0usize; loop { match memchr(self.needle[0], &haystack[pos..]) { Some(match_idx) => { let match_idx = match_idx + pos; // account for starting from pos if self.needle.len() == 1 || haystack[match_idx + 1..].starts_with(&self.needle[1..]) { return Some((match_idx, match_idx + self.needle.len())); } pos = match_idx + 1; } None => { return None; } } } } } // Matches for any number of SPACE or TAB pub struct WhitespaceMatcher {} impl Matcher for WhitespaceMatcher { fn next_match(&self, 
haystack: &[u8]) -> Option<(usize, usize)> { match memchr2(b' ', b'\t', haystack) { Some(match_idx) => { let mut skip = match_idx + 1; while skip < haystack.len() { match haystack[skip] { b' ' | b'\t' => skip += 1, _ => break, } } Some((match_idx, skip)) } None => None, } } } #[cfg(test)] mod matcher_tests { use super::*; #[test] fn test_exact_matcher_single_byte() { let matcher = ExactMatcher::new(":".as_bytes()); // spell-checker:disable assert_eq!(matcher.next_match("".as_bytes()), None); assert_eq!(matcher.next_match(":".as_bytes()), Some((0, 1))); assert_eq!(matcher.next_match(":abcxyz".as_bytes()), Some((0, 1))); assert_eq!(matcher.next_match("abc:xyz".as_bytes()), Some((3, 4))); assert_eq!(matcher.next_match("abcxyz:".as_bytes()), Some((6, 7))); assert_eq!(matcher.next_match("abcxyz".as_bytes()), None); // spell-checker:enable } #[test] fn test_exact_matcher_multi_bytes() { let matcher = ExactMatcher::new("<>".as_bytes()); // spell-checker:disable assert_eq!(matcher.next_match("".as_bytes()), None); assert_eq!(matcher.next_match("<>".as_bytes()), Some((0, 2))); assert_eq!(matcher.next_match("<>abcxyz".as_bytes()), Some((0, 2))); assert_eq!(matcher.next_match("abc<>xyz".as_bytes()), Some((3, 5))); assert_eq!(matcher.next_match("abcxyz<>".as_bytes()), Some((6, 8))); assert_eq!(matcher.next_match("abcxyz".as_bytes()), None); // spell-checker:enable } #[test] fn test_whitespace_matcher_single_space() { let matcher = WhitespaceMatcher {}; // spell-checker:disable assert_eq!(matcher.next_match("".as_bytes()), None); assert_eq!(matcher.next_match(" ".as_bytes()), Some((0, 1))); assert_eq!(matcher.next_match("\tabcxyz".as_bytes()), Some((0, 1))); assert_eq!(matcher.next_match("abc\txyz".as_bytes()), Some((3, 4))); assert_eq!(matcher.next_match("abcxyz ".as_bytes()), Some((6, 7))); assert_eq!(matcher.next_match("abcxyz".as_bytes()), None); // spell-checker:enable } #[test] fn test_whitespace_matcher_multi_spaces() { let matcher = WhitespaceMatcher {}; // 
spell-checker:disable assert_eq!(matcher.next_match("".as_bytes()), None); assert_eq!(matcher.next_match(" \t ".as_bytes()), Some((0, 3))); assert_eq!(matcher.next_match("\t\tabcxyz".as_bytes()), Some((0, 2))); assert_eq!(matcher.next_match("abc \txyz".as_bytes()), Some((3, 5))); assert_eq!(matcher.next_match("abcxyz ".as_bytes()), Some((6, 8))); assert_eq!(matcher.next_match("abcxyz".as_bytes()), None); // spell-checker:enable } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/cut/src/searcher.rs000066400000000000000000000126171504311601400257170ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore multispace use super::matcher::Matcher; // Generic searcher that relies on a specific matcher pub struct Searcher<'a, 'b, M: Matcher> { matcher: &'a M, haystack: &'b [u8], position: usize, } impl<'a, 'b, M: Matcher> Searcher<'a, 'b, M> { pub fn new(matcher: &'a M, haystack: &'b [u8]) -> Self { Self { matcher, haystack, position: 0, } } } // Iterate over field delimiters // Returns (first, last) positions of each sequence, where `haystack[first..last]` // corresponds to the delimiter. 
impl Iterator for Searcher<'_, '_, M> { type Item = (usize, usize); fn next(&mut self) -> Option { match self.matcher.next_match(&self.haystack[self.position..]) { Some((first, last)) => { let result = (first + self.position, last + self.position); self.position += last; Some(result) } None => None, } } } #[cfg(test)] mod exact_searcher_tests { use super::super::matcher::ExactMatcher; use super::*; #[test] fn test_normal() { let matcher = ExactMatcher::new("a".as_bytes()); let iter = Searcher::new(&matcher, "a.a.a".as_bytes()); let items: Vec<(usize, usize)> = iter.collect(); assert_eq!(vec![(0, 1), (2, 3), (4, 5)], items); } #[test] fn test_empty() { let matcher = ExactMatcher::new("a".as_bytes()); let iter = Searcher::new(&matcher, "".as_bytes()); let items: Vec<(usize, usize)> = iter.collect(); assert!(items.is_empty()); } fn test_multibyte(line: &[u8], expected: &[(usize, usize)]) { let matcher = ExactMatcher::new("ab".as_bytes()); let iter = Searcher::new(&matcher, line); let items: Vec<(usize, usize)> = iter.collect(); assert_eq!(expected, items); } #[test] fn test_multibyte_normal() { test_multibyte("...ab...ab...".as_bytes(), &[(3, 5), (8, 10)]); } #[test] fn test_multibyte_needle_head_at_end() { test_multibyte("a".as_bytes(), &[]); } #[test] fn test_multibyte_starting_needle() { test_multibyte("ab...ab...".as_bytes(), &[(0, 2), (5, 7)]); } #[test] fn test_multibyte_trailing_needle() { test_multibyte("...ab...ab".as_bytes(), &[(3, 5), (8, 10)]); } #[test] fn test_multibyte_first_byte_false_match() { test_multibyte("aA..aCaC..ab..aD".as_bytes(), &[(10, 12)]); } #[test] fn test_searcher_with_exact_matcher() { let matcher = ExactMatcher::new("<>".as_bytes()); let haystack = "<><>a<>b<><>cd<><>".as_bytes(); let mut searcher = Searcher::new(&matcher, haystack); assert_eq!(searcher.next(), Some((0, 2))); assert_eq!(searcher.next(), Some((2, 4))); assert_eq!(searcher.next(), Some((5, 7))); assert_eq!(searcher.next(), Some((8, 10))); assert_eq!(searcher.next(), 
Some((10, 12))); assert_eq!(searcher.next(), Some((14, 16))); assert_eq!(searcher.next(), Some((16, 18))); assert_eq!(searcher.next(), None); assert_eq!(searcher.next(), None); } } #[cfg(test)] mod whitespace_searcher_tests { use super::super::matcher::WhitespaceMatcher; use super::*; #[test] fn test_space() { let matcher = WhitespaceMatcher {}; let iter = Searcher::new(&matcher, " . . ".as_bytes()); let items: Vec<(usize, usize)> = iter.collect(); assert_eq!(vec![(0, 1), (2, 3), (4, 5)], items); } #[test] fn test_tab() { let matcher = WhitespaceMatcher {}; let iter = Searcher::new(&matcher, "\t.\t.\t".as_bytes()); let items: Vec<(usize, usize)> = iter.collect(); assert_eq!(vec![(0, 1), (2, 3), (4, 5)], items); } #[test] fn test_empty() { let matcher = WhitespaceMatcher {}; let iter = Searcher::new(&matcher, "".as_bytes()); let items: Vec<(usize, usize)> = iter.collect(); assert!(items.is_empty()); } fn test_multispace(line: &[u8], expected: &[(usize, usize)]) { let matcher = WhitespaceMatcher {}; let iter = Searcher::new(&matcher, line); let items: Vec<(usize, usize)> = iter.collect(); assert_eq!(expected, items); } #[test] fn test_multispace_normal() { test_multispace( "... ... \t...\t ... 
\t ...".as_bytes(), &[(3, 5), (8, 10), (13, 15), (18, 21)], ); } #[test] fn test_multispace_begin() { test_multispace(" \t\t...".as_bytes(), &[(0, 3)]); } #[test] fn test_multispace_end() { test_multispace("...\t ".as_bytes(), &[(3, 6)]); } #[test] fn test_searcher_with_whitespace_matcher() { let matcher = WhitespaceMatcher {}; let haystack = "\t a b \t cd\t\t".as_bytes(); let mut searcher = Searcher::new(&matcher, haystack); assert_eq!(searcher.next(), Some((0, 2))); assert_eq!(searcher.next(), Some((3, 4))); assert_eq!(searcher.next(), Some((5, 8))); assert_eq!(searcher.next(), Some((10, 12))); assert_eq!(searcher.next(), None); assert_eq!(searcher.next(), None); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/000077500000000000000000000000001504311601400231015ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/Cargo.toml000066400000000000000000000020571504311601400250350ustar00rootroot00000000000000# spell-checker:ignore datetime tzdb zoneinfo [package] name = "uu_date" description = "date ~ (uutils) display or set the current time" repository = "https://github.com/uutils/coreutils/tree/main/src/date" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/date.rs" [dependencies] clap = { workspace = true } chrono = { workspace = true } # TODO: Eventually we'll want to remove this jiff = { workspace = true, features = [ "tzdb-bundle-platform", "tzdb-zoneinfo", "tzdb-concatenated", ] } uucore = { workspace = true, features = ["parser"] } parse_datetime = { workspace = true } fluent = { workspace = true } [target.'cfg(unix)'.dependencies] libc = { workspace = true } [target.'cfg(windows)'.dependencies] windows-sys = { workspace = true, features = [ "Win32_Foundation", "Win32_System_SystemInformation", ] } [[bin]] name 
= "date" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/LICENSE000077700000000000000000000000001504311601400257472../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/locales/000077500000000000000000000000001504311601400245235ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/locales/en-US.ftl000066400000000000000000000201351504311601400261620ustar00rootroot00000000000000date-about = Print or set the system date and time date-usage = date [OPTION]... [+FORMAT]... date [OPTION]... [MMDDhhmm[[CC]YY][.ss]] FORMAT controls the output. Interpreted sequences are: { "| Sequence | Description | Example |" } { "| -------- | -------------------------------------------------------------------- | ---------------------- |" } { "| %% | a literal % | % |" } { "| %a | locale's abbreviated weekday name | Sun |" } { "| %A | locale's full weekday name | Sunday |" } { "| %b | locale's abbreviated month name | Jan |" } { "| %B | locale's full month name | January |" } { "| %c | locale's date and time | Thu Mar 3 23:05:25 2005|" } { "| %C | century; like %Y, except omit last two digits | 20 |" } { "| %d | day of month | 01 |" } { "| %D | date; same as %m/%d/%y | 12/31/99 |" } { "| %e | day of month, space padded; same as %_d | 3 |" } { "| %F | full date; same as %Y-%m-%d | 2005-03-03 |" } { "| %g | last two digits of year of ISO week number (see %G) | 05 |" } { "| %G | year of ISO week number (see %V); normally useful only with %V | 2005 |" } { "| %h | same as %b | Jan |" } { "| %H | hour (00..23) | 23 |" } { "| %I | hour (01..12) | 11 |" } { "| %j | day of year (001..366) | 062 |" } { "| %k | hour, space padded ( 0..23); same as %_H | 3 |" } { "| %l | hour, space padded ( 1..12); same as %_I | 9 |" } { "| %m | month (01..12) | 03 |" } { "| %M | minute (00..59) | 30 |" } { "| %n | a newline | \\n |" } { "| %N | nanoseconds (000000000..999999999) | 
123456789 |" } { "| %p | locale's equivalent of either AM or PM; blank if not known | PM |" } { "| %P | like %p, but lower case | pm |" } { "| %q | quarter of year (1..4) | 1 |" } { "| %r | locale's 12-hour clock time | 11:11:04 PM |" } { "| %R | 24-hour hour and minute; same as %H:%M | 23:30 |" } { "| %s | seconds since 1970-01-01 00:00:00 UTC | 1615432800 |" } { "| %S | second (00..60) | 30 |" } { "| %t | a tab | \\t |" } { "| %T | time; same as %H:%M:%S | 23:30:30 |" } { "| %u | day of week (1..7); 1 is Monday | 4 |" } { "| %U | week number of year, with Sunday as first day of week (00..53) | 10 |" } { "| %V | ISO week number, with Monday as first day of week (01..53) | 12 |" } { "| %w | day of week (0..6); 0 is Sunday | 4 |" } { "| %W | week number of year, with Monday as first day of week (00..53) | 11 |" } { "| %x | locale's date representation | 03/03/2005 |" } { "| %X | locale's time representation | 23:30:30 |" } { "| %y | last two digits of year (00..99) | 05 |" } { "| %Y | year | 2005 |" } { "| %z | +hhmm numeric time zone | -0400 |" } { "| %:z | +hh:mm numeric time zone | -04:00 |" } { "| %::z | +hh:mm:ss numeric time zone | -04:00:00 |" } { "| %:::z | numeric time zone with : to necessary precision | -04, +05:30 |" } { "| %Z | alphabetic time zone abbreviation | EDT |" } By default, date pads numeric fields with zeroes. The following optional flags may follow '%': { "* `-` (hyphen) do not pad the field" } { "* `_` (underscore) pad with spaces" } { "* `0` (zero) pad with zeros" } { "* `^` use upper case if possible" } { "* `#` use opposite case if possible" } After any flags comes an optional field width, as a decimal number; then an optional modifier, which is either { "* `E` to use the locale's alternate representations if available, or" } { "* `O` to use the locale's alternate numeric symbols if available." 
} Examples: Convert seconds since the epoch (1970-01-01 UTC) to a date date --date='@2147483647' Show the time on the west coast of the US (use tzselect(1) to find TZ) TZ='America/Los_Angeles' date date-help-date = display time described by STRING, not 'now' date-help-file = like --date; once for each line of DATEFILE date-help-iso-8601 = output date/time in ISO 8601 format. FMT='date' for date only (the default), 'hours', 'minutes', 'seconds', or 'ns' for date and time to the indicated precision. Example: 2006-08-14T02:34:56-06:00 date-help-rfc-email = output date and time in RFC 5322 format. Example: Mon, 14 Aug 2006 02:34:56 -0600 date-help-rfc-3339 = output date/time in RFC 3339 format. FMT='date', 'seconds', or 'ns' for date and time to the indicated precision. Example: 2006-08-14 02:34:56-06:00 date-help-debug = annotate the parsed date, and warn about questionable usage to stderr date-help-reference = display the last modification time of FILE date-help-set = set time described by STRING date-help-set-macos = set time described by STRING (not available on mac yet) date-help-set-redox = set time described by STRING (not available on redox yet) date-help-universal = print or set Coordinated Universal Time (UTC) date-error-invalid-date = invalid date '{$date}' date-error-invalid-format = invalid format '{$format}' ({$error}) date-error-expected-file-got-directory = expected file, got directory '{$path}' date-error-date-overflow = date overflow '{$date}' date-error-setting-date-not-supported-macos = setting the date is not supported by macOS date-error-setting-date-not-supported-redox = setting the date is not supported by Redox date-error-cannot-set-date = cannot set date coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/locales/fr-FR.ftl000066400000000000000000000201531504311601400261470ustar00rootroot00000000000000date-about = afficher ou définir la date système date-usage = [OPTION]... 
[+FORMAT] date [-u|--utc|--universal] [MMDDhhmm[[CC]YY][.ss]] FORMAT contrôle la sortie. Les séquences interprétées sont : { "| Séquence | Description | Exemple |" } { "| -------- | -------------------------------------------------------------- | ---------------------- |" } { "| %% | un % littéral | % |" } { "| %a | nom abrégé du jour de la semaine selon la locale | dim |" } { "| %A | nom complet du jour de la semaine selon la locale | dimanche |" } { "| %b | nom abrégé du mois selon la locale | jan |" } { "| %B | nom complet du mois selon la locale | janvier |" } { "| %c | date et heure selon la locale | jeu 3 mar 23:05:25 2005|" } { "| %C | siècle ; comme %Y, sauf qu'on omet les deux derniers chiffres | 20 |" } { "| %d | jour du mois | 01 |" } { "| %D | date ; identique à %m/%d/%y | 12/31/99 |" } { "| %e | jour du mois, rempli avec des espaces ; identique à %_d | 3 |" } { "| %F | date complète ; identique à %Y-%m-%d | 2005-03-03 |" } { "| %g | deux derniers chiffres de l'année du numéro de semaine ISO (voir %G) | 05 |" } { "| %G | année du numéro de semaine ISO (voir %V) ; normalement utile seulement avec %V | 2005 |" } { "| %h | identique à %b | jan |" } { "| %H | heure (00..23) | 23 |" } { "| %I | heure (01..12) | 11 |" } { "| %j | jour de l'année (001..366) | 062 |" } { "| %k | heure, remplie avec des espaces ( 0..23) ; identique à %_H | 3 |" } { "| %l | heure, remplie avec des espaces ( 1..12) ; identique à %_I | 9 |" } { "| %m | mois (01..12) | 03 |" } { "| %M | minute (00..59) | 30 |" } { "| %n | une nouvelle ligne | \\n |" } { "| %N | nanosecondes (000000000..999999999) | 123456789 |" } { "| %p | équivalent locale de AM ou PM ; vide si inconnu | PM |" } { "| %P | comme %p, mais en minuscules | pm |" } { "| %q | trimestre de l'année (1..4) | 1 |" } { "| %r | heure sur 12 heures selon la locale | 11:11:04 PM |" } { "| %R | heure sur 24 heures et minute ; identique à %H:%M | 23:30 |" } { "| %s | secondes depuis 1970-01-01 00:00:00 UTC | 1615432800 |" } { "| 
%S | seconde (00..60) | 30 |" } { "| %t | une tabulation | \\t |" } { "| %T | heure ; identique à %H:%M:%S | 23:30:30 |" } { "| %u | jour de la semaine (1..7) ; 1 est lundi | 4 |" } { "| %U | numéro de semaine de l'année, avec dimanche comme premier jour de la semaine (00..53) | 10 |" } { "| %V | numéro de semaine ISO, avec lundi comme premier jour de la semaine (01..53) | 12 |" } { "| %w | jour de la semaine (0..6) ; 0 est dimanche | 4 |" } { "| %W | numéro de semaine de l'année, avec lundi comme premier jour de la semaine (00..53) | 11 |" } { "| %x | représentation de la date selon la locale | 03/03/2005 |" } { "| %X | représentation de l'heure selon la locale | 23:30:30 |" } { "| %y | deux derniers chiffres de l'année (00..99) | 05 |" } { "| %Y | année | 2005 |" } { "| %z | fuseau horaire numérique +hhmm | -0400 |" } { "| %:z | fuseau horaire numérique +hh:mm | -04:00 |" } { "| %::z | fuseau horaire numérique +hh:mm:ss | -04:00:00 |" } { "| %:::z | fuseau horaire numérique avec : à la précision nécessaire | -04, +05:30 |" } { "| %Z | abréviation alphabétique du fuseau horaire | EDT |" } Par défaut, date remplit les champs numériques avec des zéros. Les indicateurs optionnels suivants peuvent suivre '%' : { "* `-` (tiret) ne pas remplir le champ" } { "* `_` (soulignement) remplir avec des espaces" } { "* `0` (zéro) remplir avec des zéros" } { "* `^` utiliser des majuscules si possible" } { "* `#` utiliser l'inverse si possible" } Après tout indicateur vient une largeur de champ optionnelle, comme nombre décimal ; puis un modificateur optionnel, qui est soit { "* `E` pour utiliser les représentations alternatives de la locale si disponibles, ou" } { "* `O` pour utiliser les symboles numériques alternatifs de la locale si disponibles." 
} Exemples : Convertir les secondes depuis l'époque (1970-01-01 UTC) en date date --date='@2147483647' Montrer l'heure sur la côte ouest des États-Unis (utiliser tzselect(1) pour trouver TZ) TZ='America/Los_Angeles' date date-help-date = afficher l'heure décrite par CHAÃŽNE, pas 'maintenant' date-help-file = comme --date ; une fois pour chaque ligne de FICHIER_DATE date-help-iso-8601 = afficher la date/heure au format ISO 8601. FMT='date' pour la date seulement (par défaut), 'hours', 'minutes', 'seconds', ou 'ns' pour la date et l'heure à la précision indiquée. Exemple : 2006-08-14T02:34:56-06:00 date-help-rfc-email = afficher la date et l'heure au format RFC 5322. Exemple : Mon, 14 Aug 2006 02:34:56 -0600 date-help-rfc-3339 = afficher la date/heure au format RFC 3339. FMT='date', 'seconds', ou 'ns' pour la date et l'heure à la précision indiquée. Exemple : 2006-08-14 02:34:56-06:00 date-help-debug = annoter la date analysée et avertir des usages douteux sur stderr date-help-reference = afficher l'heure de dernière modification du FICHIER date-help-set = définir l'heure décrite par CHAÃŽNE date-help-set-macos = définir l'heure décrite par CHAÃŽNE (pas encore disponible sur mac) date-help-set-redox = définir l'heure décrite par CHAÃŽNE (pas encore disponible sur redox) date-help-universal = afficher ou définir le Temps Universel Coordonné (UTC) date-error-invalid-date = date invalide '{$date}' date-error-invalid-format = format invalide '{$format}' ({$error}) date-error-expected-file-got-directory = fichier attendu, répertoire obtenu '{$path}' date-error-date-overflow = débordement de date '{$date}' date-error-setting-date-not-supported-macos = la définition de la date n'est pas prise en charge par macOS date-error-setting-date-not-supported-redox = la définition de la date n'est pas prise en charge par Redox date-error-cannot-set-date = impossible de définir la date 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/src/000077500000000000000000000000001504311601400236705ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/src/date.rs000066400000000000000000000360441504311601400251620ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore strtime ; (format) DATEFILE MMDDhhmm ; (vars) datetime datetimes use clap::{Arg, ArgAction, Command}; use jiff::fmt::strtime; use jiff::tz::TimeZone; use jiff::{SignedDuration, Timestamp, Zoned}; #[cfg(all(unix, not(target_os = "macos"), not(target_os = "redox")))] use libc::{CLOCK_REALTIME, clock_settime, timespec}; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::PathBuf; use uucore::error::FromIo; use uucore::error::{UResult, USimpleError}; use uucore::translate; use uucore::{format_usage, show}; #[cfg(windows)] use windows_sys::Win32::{Foundation::SYSTEMTIME, System::SystemInformation::SetSystemTime}; use uucore::parser::shortcut_value_parser::ShortcutValueParser; // Options const DATE: &str = "date"; const HOURS: &str = "hours"; const MINUTES: &str = "minutes"; const SECONDS: &str = "seconds"; const NS: &str = "ns"; const OPT_DATE: &str = "date"; const OPT_FORMAT: &str = "format"; const OPT_FILE: &str = "file"; const OPT_DEBUG: &str = "debug"; const OPT_ISO_8601: &str = "iso-8601"; const OPT_RFC_EMAIL: &str = "rfc-email"; const OPT_RFC_3339: &str = "rfc-3339"; const OPT_SET: &str = "set"; const OPT_REFERENCE: &str = "reference"; const OPT_UNIVERSAL: &str = "universal"; const OPT_UNIVERSAL_2: &str = "utc"; /// Settings for this program, parsed from the command line struct Settings { utc: bool, format: Format, date_source: DateSource, set_to: Option, } /// Various ways of displaying the date enum Format { Iso8601(Iso8601Format), Rfc5322, 
Rfc3339(Rfc3339Format), Custom(String), Default, } /// Various places that dates can come from enum DateSource { Now, Custom(String), File(PathBuf), Stdin, Human(SignedDuration), } enum Iso8601Format { Date, Hours, Minutes, Seconds, Ns, } impl From<&str> for Iso8601Format { fn from(s: &str) -> Self { match s { HOURS => Self::Hours, MINUTES => Self::Minutes, SECONDS => Self::Seconds, NS => Self::Ns, DATE => Self::Date, // Note: This is caught by clap via `possible_values` _ => unreachable!(), } } } enum Rfc3339Format { Date, Seconds, Ns, } impl From<&str> for Rfc3339Format { fn from(s: &str) -> Self { match s { DATE => Self::Date, SECONDS => Self::Seconds, NS => Self::Ns, // Should be caught by clap _ => panic!("Invalid format: {s}"), } } } #[uucore::main] #[allow(clippy::cognitive_complexity)] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let format = if let Some(form) = matches.get_one::(OPT_FORMAT) { if !form.starts_with('+') { return Err(USimpleError::new( 1, translate!("date-error-invalid-date", "date" => form), )); } let form = form[1..].to_string(); Format::Custom(form) } else if let Some(fmt) = matches .get_many::(OPT_ISO_8601) .map(|mut iter| iter.next().unwrap_or(&DATE.to_string()).as_str().into()) { Format::Iso8601(fmt) } else if matches.get_flag(OPT_RFC_EMAIL) { Format::Rfc5322 } else if let Some(fmt) = matches .get_one::(OPT_RFC_3339) .map(|s| s.as_str().into()) { Format::Rfc3339(fmt) } else { Format::Default }; let date_source = if let Some(date) = matches.get_one::(OPT_DATE) { if let Ok(duration) = parse_offset(date.as_str()) { DateSource::Human(duration) } else { DateSource::Custom(date.into()) } } else if let Some(file) = matches.get_one::(OPT_FILE) { match file.as_ref() { "-" => DateSource::Stdin, _ => DateSource::File(file.into()), } } else { DateSource::Now }; let set_to = match matches.get_one::(OPT_SET).map(parse_date) { None => None, Some(Err((input, _err))) => { return 
Err(USimpleError::new( 1, translate!("date-error-invalid-date", "date" => input), )); } Some(Ok(date)) => Some(date), }; let settings = Settings { utc: matches.get_flag(OPT_UNIVERSAL), format, date_source, set_to, }; if let Some(date) = settings.set_to { // All set time functions expect UTC datetimes. let date = if settings.utc { date.with_time_zone(TimeZone::UTC) } else { date }; return set_system_datetime(date); } // Get the current time, either in the local time zone or UTC. let now = if settings.utc { Timestamp::now().to_zoned(TimeZone::UTC) } else { Zoned::now() }; // Iterate over all dates - whether it's a single date or a file. let dates: Box> = match settings.date_source { DateSource::Custom(ref input) => { let date = parse_date(input); let iter = std::iter::once(date); Box::new(iter) } DateSource::Human(relative_time) => { // Double check the result is overflow or not of the current_time + relative_time // it may cause a panic of chrono::datetime::DateTime add match now.checked_add(relative_time) { Ok(date) => { let iter = std::iter::once(Ok(date)); Box::new(iter) } Err(_) => { return Err(USimpleError::new( 1, translate!("date-error-date-overflow", "date" => relative_time), )); } } } DateSource::Stdin => { let lines = BufReader::new(std::io::stdin()).lines(); let iter = lines.map_while(Result::ok).map(parse_date); Box::new(iter) } DateSource::File(ref path) => { if path.is_dir() { return Err(USimpleError::new( 2, translate!("date-error-expected-file-got-directory", "path" => path.to_string_lossy()), )); } let file = File::open(path) .map_err_context(|| path.as_os_str().to_string_lossy().to_string())?; let lines = BufReader::new(file).lines(); let iter = lines.map_while(Result::ok).map(parse_date); Box::new(iter) } DateSource::Now => { let iter = std::iter::once(Ok(now)); Box::new(iter) } }; let format_string = make_format_string(&settings); // Format all the dates for date in dates { match date { // TODO: Switch to lenient formatting. 
Ok(date) => match strtime::format(format_string, &date) { Ok(s) => println!("{s}"), Err(e) => { return Err(USimpleError::new( 1, translate!("date-error-invalid-format", "format" => format_string, "error" => e), )); } }, Err((input, _err)) => show!(USimpleError::new( 1, translate!("date-error-invalid-date", "date" => input) )), } } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("date-about")) .override_usage(format_usage(&translate!("date-usage"))) .infer_long_args(true) .arg( Arg::new(OPT_DATE) .short('d') .long(OPT_DATE) .value_name("STRING") .allow_hyphen_values(true) .help(translate!("date-help-date")), ) .arg( Arg::new(OPT_FILE) .short('f') .long(OPT_FILE) .value_name("DATEFILE") .value_hint(clap::ValueHint::FilePath) .help(translate!("date-help-file")), ) .arg( Arg::new(OPT_ISO_8601) .short('I') .long(OPT_ISO_8601) .value_name("FMT") .value_parser(ShortcutValueParser::new([ DATE, HOURS, MINUTES, SECONDS, NS, ])) .num_args(0..=1) .default_missing_value(OPT_DATE) .help(translate!("date-help-iso-8601")), ) .arg( Arg::new(OPT_RFC_EMAIL) .short('R') .long(OPT_RFC_EMAIL) .help(translate!("date-help-rfc-email")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_RFC_3339) .long(OPT_RFC_3339) .value_name("FMT") .value_parser(ShortcutValueParser::new([DATE, SECONDS, NS])) .help(translate!("date-help-rfc-3339")), ) .arg( Arg::new(OPT_DEBUG) .long(OPT_DEBUG) .help(translate!("date-help-debug")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_REFERENCE) .short('r') .long(OPT_REFERENCE) .value_name("FILE") .value_hint(clap::ValueHint::AnyPath) .help(translate!("date-help-reference")), ) .arg( Arg::new(OPT_SET) .short('s') .long(OPT_SET) .value_name("STRING") .help({ #[cfg(not(any(target_os = "macos", target_os = "redox")))] { translate!("date-help-set") } #[cfg(target_os = "macos")] { translate!("date-help-set-macos") } #[cfg(target_os = "redox")] { translate!("date-help-set-redox") } }), ) 
.arg( Arg::new(OPT_UNIVERSAL) .short('u') .long(OPT_UNIVERSAL) .alias(OPT_UNIVERSAL_2) .help(translate!("date-help-universal")) .action(ArgAction::SetTrue), ) .arg(Arg::new(OPT_FORMAT)) } /// Return the appropriate format string for the given settings. fn make_format_string(settings: &Settings) -> &str { match settings.format { Format::Iso8601(ref fmt) => match *fmt { Iso8601Format::Date => "%F", Iso8601Format::Hours => "%FT%H%:z", Iso8601Format::Minutes => "%FT%H:%M%:z", Iso8601Format::Seconds => "%FT%T%:z", Iso8601Format::Ns => "%FT%T,%N%:z", }, Format::Rfc5322 => "%a, %d %h %Y %T %z", Format::Rfc3339(ref fmt) => match *fmt { Rfc3339Format::Date => "%F", Rfc3339Format::Seconds => "%F %T%:z", Rfc3339Format::Ns => "%F %T.%N%:z", }, Format::Custom(ref fmt) => fmt, Format::Default => "%a %b %e %X %Z %Y", } } /// Parse a `String` into a `DateTime`. /// If it fails, return a tuple of the `String` along with its `ParseError`. // TODO: Convert `parse_datetime` to jiff and remove wrapper from chrono to jiff structures. fn parse_date + Clone>( s: S, ) -> Result { match parse_datetime::parse_datetime(s.as_ref()) { Ok(date) => { let timestamp = Timestamp::new(date.timestamp(), date.timestamp_subsec_nanos() as i32).unwrap(); Ok(Zoned::new(timestamp, TimeZone::UTC)) } Err(e) => Err((s.as_ref().into(), e)), } } // TODO: Convert `parse_datetime` to jiff and remove wrapper from chrono to jiff structures. // Also, consider whether parse_datetime::parse_datetime_at_date can be renamed to something // like parse_datetime::parse_offset, instead of doing some addition/subtraction. 
fn parse_offset(date: &str) -> Result { let ref_time = chrono::Local::now(); if let Ok(new_time) = parse_datetime::parse_datetime_at_date(ref_time, date) { let duration = new_time.signed_duration_since(ref_time); Ok(SignedDuration::new( duration.num_seconds(), duration.subsec_nanos(), )) } else { Err(()) } } #[cfg(not(any(unix, windows)))] fn set_system_datetime(_date: Zoned) -> UResult<()> { unimplemented!("setting date not implemented (unsupported target)"); } #[cfg(target_os = "macos")] fn set_system_datetime(_date: Zoned) -> UResult<()> { Err(USimpleError::new( 1, translate!("date-error-setting-date-not-supported-macos"), )) } #[cfg(target_os = "redox")] fn set_system_datetime(_date: Zoned) -> UResult<()> { Err(USimpleError::new( 1, translate!("date-error-setting-date-not-supported-redox"), )) } #[cfg(all(unix, not(target_os = "macos"), not(target_os = "redox")))] /// System call to set date (unix). /// See here for more: /// `` /// `` /// `` fn set_system_datetime(date: Zoned) -> UResult<()> { let ts = date.timestamp(); let timespec = timespec { tv_sec: ts.as_second() as _, tv_nsec: ts.subsec_nanosecond() as _, }; let result = unsafe { clock_settime(CLOCK_REALTIME, &raw const timespec) }; if result == 0 { Ok(()) } else { Err(std::io::Error::last_os_error() .map_err_context(|| translate!("date-error-cannot-set-date"))) } } #[cfg(windows)] /// System call to set date (Windows). /// See here for more: /// * /// * fn set_system_datetime(date: Zoned) -> UResult<()> { let system_time = SYSTEMTIME { wYear: date.year() as u16, wMonth: date.month() as u16, // Ignored wDayOfWeek: 0, wDay: date.day() as u16, wHour: date.hour() as u16, wMinute: date.minute() as u16, wSecond: date.second() as u16, // TODO: be careful of leap seconds - valid range is [0, 999] - how to handle? 
wMilliseconds: ((date.subsec_nanosecond() / 1_000_000) % 1000) as u16, }; let result = unsafe { SetSystemTime(&raw const system_time) }; if result == 0 { Err(std::io::Error::last_os_error() .map_err_context(|| translate!("date-error-cannot-set-date"))) } else { Ok(()) } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/date/src/main.rs000066400000000000000000000000271504311601400251610ustar00rootroot00000000000000uucore::bin!(uu_date); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/000077500000000000000000000000001504311601400225535ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/BENCHMARKING.md000066400000000000000000000060721504311601400247320ustar00rootroot00000000000000 # Benchmarking dd `dd` is a utility used for copying and converting files. It is often used for writing directly to devices, such as when writing an `.iso` file directly to a drive. ## Understanding dd At the core, `dd` has a simple loop of operation. It reads in `blocksize` bytes from an input, optionally performs a conversion on the bytes, and then writes `blocksize` bytes to an output file. In typical usage, the performance of `dd` is dominated by the speed at which it can read or write to the filesystem. For those scenarios it is best to optimize the blocksize for the performance of the devices being read/written to. Devices typically have an optimal block size that they work best at, so for maximum performance `dd` should be using a block size, or multiple of the block size, that the underlying devices prefer. For benchmarking `dd` itself we will use fast special files provided by the operating system that work out of RAM, `/dev/zero` and `/dev/null`. This reduces the time taken reading/writing files to a minimum and maximises the percentage time we spend in the `dd` tool itself, but care still needs to be taken to understand where we are benchmarking the `dd` tool and where we are just benchmarking memory performance. 
The main parameter to vary for a `dd` benchmark is the blocksize, but benchmarks testing the conversions that are supported by `dd` could also be interesting. `dd` has a convenient `count` argument, that will copy `count` blocks of data from the input to the output, which is useful for benchmarking. ## Blocksize Benchmarks When measuring the impact of blocksize on the throughput, we want to avoid testing the startup time of `dd`. `dd` itself will give a report on the throughput speed once complete, but it's probably better to use an external tool, such as `hyperfine` to measure the performance. Benchmarks should be sized so that they run for a handful of seconds at a minimum to avoid measuring the startup time unnecessarily. The total time will be roughly equivalent to the total bytes copied (`blocksize` x `count`). Some useful invocations for testing would be the following: ```shell hyperfine "./target/release/dd bs=4k count=1000000 < /dev/zero > /dev/null" hyperfine "./target/release/dd bs=1M count=20000 < /dev/zero > /dev/null" hyperfine "./target/release/dd bs=1G count=10 < /dev/zero > /dev/null" ``` Choosing what to benchmark depends greatly on what you want to measure. Typically you would choose a small blocksize for measuring the performance of `dd`, as that would maximize the overhead introduced by the `dd` tool. `dd` typically does some set amount of work per block which only depends on the size of the block if conversions are used. As an example, made a change to reuse the same buffer between block copies, avoiding the need to reallocate a new block of memory for each copy. The impact of that change mostly had an impact on large block size copies because those are the circumstances where the memory performance dominated the total performance. 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/Cargo.toml000066400000000000000000000015411504311601400245040ustar00rootroot00000000000000[package] name = "uu_dd" description = "dd ~ (uutils) copy and convert files" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/dd" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/dd.rs" [dependencies] clap = { workspace = true } gcd = { workspace = true } libc = { workspace = true } uucore = { workspace = true, features = [ "format", "parser", "quoting-style", "fs", ] } thiserror = { workspace = true } fluent = { workspace = true } [target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies] signal-hook = { workspace = true } nix = { workspace = true, features = ["fs"] } [[bin]] name = "dd" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/LICENSE000077700000000000000000000000001504311601400254212../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/locales/000077500000000000000000000000001504311601400241755ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/locales/en-US.ftl000066400000000000000000000174231504311601400256420ustar00rootroot00000000000000dd-about = Copy, and optionally convert, a file system resource dd-usage = dd [OPERAND]... dd OPTION dd-after-help = ### Operands - bs=BYTES : read and write up to BYTES bytes at a time (default: 512); overwrites ibs and obs. - cbs=BYTES : the 'conversion block size' in bytes. Applies to the conv=block, and conv=unblock operations. - conv=CONVS : a comma-separated list of conversion options or (for legacy reasons) file flags. 
- count=N : stop reading input after N ibs-sized read operations rather than proceeding until EOF. See iflag=count_bytes if stopping after N bytes is preferred - ibs=N : the size of buffer used for reads (default: 512) - if=FILE : the file used for input. When not specified, stdin is used instead - iflag=FLAGS : a comma-separated list of input flags which specify how the input source is treated. FLAGS may be any of the input-flags or general-flags specified below. - skip=N (or iseek=N) : skip N ibs-sized records into input before beginning copy/convert operations. See iflag=seek_bytes if seeking N bytes is preferred. - obs=N : the size of buffer used for writes (default: 512) - of=FILE : the file used for output. When not specified, stdout is used instead - oflag=FLAGS : comma separated list of output flags which specify how the output source is treated. FLAGS may be any of the output flags or general flags specified below - seek=N (or oseek=N) : seeks N obs-sized records into output before beginning copy/convert operations. See oflag=seek_bytes if seeking N bytes is preferred - status=LEVEL : controls whether volume and performance stats are written to stderr. When unspecified, dd will print stats upon completion. An example is below. ```plain 6+0 records in 16+0 records out 8192 bytes (8.2 kB, 8.0 KiB) copied, 0.00057009 s, 14.4 MB/s The first two lines are the 'volume' stats and the final line is the 'performance' stats. The volume stats indicate the number of complete and partial ibs-sized reads, or obs-sized writes that took place during the copy. The format of the volume stats is +. If records have been truncated (see conv=block), the volume stats will contain the number of truncated records. Possible LEVEL values are: - progress : Print periodic performance stats as the copy proceeds. - noxfer : Print final volume stats, but not performance stats. - none : Do not print any stats. 
Printing performance stats is also triggered by the INFO signal (where supported), or the USR1 signal. Setting the POSIXLY_CORRECT environment variable to any value (including an empty value) will cause the USR1 signal to be ignored. ### Conversion Options - ascii : convert from EBCDIC to ASCII. This is the inverse of the ebcdic option. Implies conv=unblock. - ebcdic : convert from ASCII to EBCDIC. This is the inverse of the ascii option. Implies conv=block. - ibm : convert from ASCII to EBCDIC, applying the conventions for [, ] and ~ specified in POSIX. Implies conv=block. - ucase : convert from lower-case to upper-case. - lcase : converts from upper-case to lower-case. - block : for each newline less than the size indicated by cbs=BYTES, remove the newline and pad with spaces up to cbs. Lines longer than cbs are truncated. - unblock : for each block of input of the size indicated by cbs=BYTES, remove right-trailing spaces and replace with a newline character. - sparse : attempts to seek the output when an obs-sized block consists of only zeros. - swab : swaps each adjacent pair of bytes. If an odd number of bytes is present, the final byte is omitted. - sync : pad each ibs-sided block with zeros. If block or unblock is specified, pad with spaces instead. - excl : the output file must be created. Fail if the output file is already present. - nocreat : the output file will not be created. Fail if the output file in not already present. - notrunc : the output file will not be truncated. If this option is not present, output will be truncated when opened. - noerror : all read errors will be ignored. If this option is not present, dd will only ignore Error::Interrupted. - fdatasync : data will be written before finishing. - fsync : data and metadata will be written before finishing. ### Input flags - count_bytes : a value to count=N will be interpreted as bytes. - skip_bytes : a value to skip=N will be interpreted as bytes. 
- fullblock : wait for ibs bytes from each read. zero-length reads are still considered EOF. ### Output flags - append : open file in append mode. Consider setting conv=notrunc as well. - seek_bytes : a value to seek=N will be interpreted as bytes. ### General Flags - direct : use direct I/O for data. - directory : fail unless the given input (if used as an iflag) or output (if used as an oflag) is a directory. - dsync : use synchronized I/O for data. - sync : use synchronized I/O for data and metadata. - nonblock : use non-blocking I/O. - noatime : do not update access time. - nocache : request that OS drop cache. - noctty : do not assign a controlling tty. - nofollow : do not follow system links. # Error messages dd-error-failed-to-open = failed to open { $path } dd-error-write-error = write error dd-error-failed-to-seek = failed to seek in output file dd-error-io-error = IO error dd-error-cannot-skip-offset = '{ $file }': cannot skip to specified offset dd-error-cannot-skip-invalid = '{ $file }': cannot skip: Invalid argument dd-error-cannot-seek-invalid = '{ $output }': cannot seek: Invalid argument dd-error-not-directory = setting flags for '{ $file }': Not a directory dd-error-failed-discard-cache-input = failed to discard cache for: 'standard input' dd-error-failed-discard-cache-output = failed to discard cache for: 'standard output' # Parse errors dd-error-unrecognized-operand = Unrecognized operand '{ $operand }' dd-error-multiple-format-table = Only one of conv=ascii conv=ebcdic or conv=ibm may be specified dd-error-multiple-case = Only one of conv=lcase or conv=ucase may be specified dd-error-multiple-block = Only one of conv=block or conv=unblock may be specified dd-error-multiple-excl = Only one ov conv=excl or conv=nocreat may be specified dd-error-invalid-flag = invalid input flag: ‘{ $flag }’ Try '{ $cmd } --help' for more information. 
dd-error-conv-flag-no-match = Unrecognized conv=CONV -> { $flag } dd-error-multiplier-parse-failure = invalid number: '{ $input }' dd-error-multiplier-overflow = Multiplier string would overflow on current system -> { $input } dd-error-block-without-cbs = conv=block or conv=unblock specified without cbs=N dd-error-status-not-recognized = status=LEVEL not recognized -> { $level } dd-error-unimplemented = feature not implemented on this system -> { $feature } dd-error-bs-out-of-range = { $param }=N cannot fit into memory dd-error-invalid-number = invalid number: ‘{ $input }’ # Progress messages dd-progress-records-in = { $complete }+{ $partial } records in dd-progress-records-out = { $complete }+{ $partial } records out dd-progress-truncated-record = { $count -> [one] { $count } truncated record *[other] { $count } truncated records } dd-progress-byte-copied = { $bytes } byte copied, { $duration } s, { $rate }/s dd-progress-bytes-copied = { $bytes } bytes copied, { $duration } s, { $rate }/s dd-progress-bytes-copied-si = { $bytes } bytes ({ $si }) copied, { $duration } s, { $rate }/s dd-progress-bytes-copied-si-iec = { $bytes } bytes ({ $si }, { $iec }) copied, { $duration } s, { $rate }/s # Warnings dd-warning-zero-multiplier = { $zero } is a zero multiplier; use { $alternative } if that is intended dd-warning-signal-handler = Internal dd Warning: Unable to register signal handler coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/locales/fr-FR.ftl000066400000000000000000000223711504311601400256250ustar00rootroot00000000000000dd-about = Copier, et optionnellement convertir, une ressource du système de fichiers dd-usage = dd [OPÉRANDE]... dd OPTION dd-after-help = ### Opérandes - bs=OCTETS : lire et écrire jusqu'à OCTETS octets à la fois (par défaut : 512) ; remplace ibs et obs. - cbs=OCTETS : la 'taille de bloc de conversion' en octets. S'applique aux opérations conv=block et conv=unblock. 
- conv=CONVS : une liste séparée par des virgules d'options de conversion ou (pour des raisons historiques) d'indicateurs de fichier. - count=N : arrêter la lecture de l'entrée après N opérations de lecture de taille ibs plutôt que de continuer jusqu'à EOF. Voir iflag=count_bytes si l'arrêt après N octets est préféré - ibs=N : la taille du tampon utilisé pour les lectures (par défaut : 512) - if=FICHIER : le fichier utilisé pour l'entrée. Quand non spécifié, stdin est utilisé à la place - iflag=INDICATEURS : une liste séparée par des virgules d'indicateurs d'entrée qui spécifient comment la source d'entrée est traitée. INDICATEURS peut être n'importe lequel des indicateurs d'entrée ou indicateurs généraux spécifiés ci-dessous. - skip=N (ou iseek=N) : ignorer N enregistrements de taille ibs dans l'entrée avant de commencer les opérations de copie/conversion. Voir iflag=seek_bytes si la recherche de N octets est préférée. - obs=N : la taille du tampon utilisé pour les écritures (par défaut : 512) - of=FICHIER : le fichier utilisé pour la sortie. Quand non spécifié, stdout est utilisé à la place - oflag=INDICATEURS : liste séparée par des virgules d'indicateurs de sortie qui spécifient comment la source de sortie est traitée. INDICATEURS peut être n'importe lequel des indicateurs de sortie ou indicateurs généraux spécifiés ci-dessous - seek=N (ou oseek=N) : recherche N enregistrements de taille obs dans la sortie avant de commencer les opérations de copie/conversion. Voir oflag=seek_bytes si la recherche de N octets est préférée - status=NIVEAU : contrôle si les statistiques de volume et de performance sont écrites sur stderr. Quand non spécifié, dd affichera les statistiques à la fin. Un exemple est ci-dessous. ```plain 6+0 enregistrements en entrée 16+0 enregistrements en sortie 8192 octets (8.2 kB, 8.0 KiB) copiés, 0.00057009 s, 14.4 MB/s Les deux premières lignes sont les statistiques de 'volume' et la dernière ligne est les statistiques de 'performance'. 
Les statistiques de volume indiquent le nombre de lectures complètes et partielles de taille ibs, ou d'écritures de taille obs qui ont eu lieu pendant la copie. Le format des statistiques de volume est +. Si des enregistrements ont été tronqués (voir conv=block), les statistiques de volume contiendront le nombre d'enregistrements tronqués. Les valeurs possibles de NIVEAU sont : - progress : Afficher les statistiques de performance périodiques pendant la copie. - noxfer : Afficher les statistiques de volume finales, mais pas les statistiques de performance. - none : N'afficher aucune statistique. L'affichage des statistiques de performance est aussi déclenché par le signal INFO (quand supporté), ou le signal USR1. Définir la variable d'environnement POSIXLY_CORRECT à n'importe quelle valeur (y compris une valeur vide) fera ignorer le signal USR1. ### Options de conversion - ascii : convertir d'EBCDIC vers ASCII. C'est l'inverse de l'option ebcdic. Implique conv=unblock. - ebcdic : convertir d'ASCII vers EBCDIC. C'est l'inverse de l'option ascii. Implique conv=block. - ibm : convertir d'ASCII vers EBCDIC, en appliquant les conventions pour [, ] et ~ spécifiées dans POSIX. Implique conv=block. - ucase : convertir de minuscules vers majuscules. - lcase : convertir de majuscules vers minuscules. - block : pour chaque nouvelle ligne inférieure à la taille indiquée par cbs=OCTETS, supprimer la nouvelle ligne et remplir avec des espaces jusqu'à cbs. Les lignes plus longues que cbs sont tronquées. - unblock : pour chaque bloc d'entrée de la taille indiquée par cbs=OCTETS, supprimer les espaces de fin à droite et remplacer par un caractère de nouvelle ligne. - sparse : tente de rechercher la sortie quand un bloc de taille obs ne contient que des zéros. - swab : échange chaque paire d'octets adjacents. Si un nombre impair d'octets est présent, l'octet final est omis. - sync : remplit chaque bloc de taille ibs avec des zéros. 
Si block ou unblock est spécifié, remplit avec des espaces à la place. - excl : le fichier de sortie doit être créé. Échoue si le fichier de sortie est déjà présent. - nocreat : le fichier de sortie ne sera pas créé. Échoue si le fichier de sortie n'est pas déjà présent. - notrunc : le fichier de sortie ne sera pas tronqué. Si cette option n'est pas présente, la sortie sera tronquée à l'ouverture. - noerror : toutes les erreurs de lecture seront ignorées. Si cette option n'est pas présente, dd n'ignorera que Error::Interrupted. - fdatasync : les données seront écrites avant la fin. - fsync : les données et les métadonnées seront écrites avant la fin. ### Indicateurs d'entrée - count_bytes : une valeur pour count=N sera interprétée comme des octets. - skip_bytes : une valeur pour skip=N sera interprétée comme des octets. - fullblock : attendre ibs octets de chaque lecture. les lectures de longueur zéro sont toujours considérées comme EOF. ### Indicateurs de sortie - append : ouvrir le fichier en mode ajout. Considérez définir conv=notrunc aussi. - seek_bytes : une valeur pour seek=N sera interprétée comme des octets. ### Indicateurs généraux - direct : utiliser les E/S directes pour les données. - directory : échouer sauf si l'entrée donnée (si utilisée comme iflag) ou la sortie (si utilisée comme oflag) est un répertoire. - dsync : utiliser les E/S synchronisées pour les données. - sync : utiliser les E/S synchronisées pour les données et les métadonnées. - nonblock : utiliser les E/S non-bloquantes. - noatime : ne pas mettre à jour l'heure d'accès. - nocache : demander au système d'exploitation de supprimer le cache. - noctty : ne pas assigner un tty de contrôle. - nofollow : ne pas suivre les liens système. 
# Error messages dd-error-failed-to-open = échec de l'ouverture de { $path } dd-error-write-error = erreur d'écriture dd-error-failed-to-seek = échec de la recherche dans le fichier de sortie dd-error-io-error = erreur E/S dd-error-cannot-skip-offset = '{ $file }' : impossible d'ignorer jusqu'au décalage spécifié dd-error-cannot-skip-invalid = '{ $file }' : impossible d'ignorer : Argument invalide dd-error-cannot-seek-invalid = '{ $output }' : impossible de rechercher : Argument invalide dd-error-not-directory = définir les indicateurs pour '{ $file }' : N'est pas un répertoire dd-error-failed-discard-cache-input = échec de la suppression du cache pour : 'entrée standard' dd-error-failed-discard-cache-output = échec de la suppression du cache pour : 'sortie standard' # Parse errors dd-error-unrecognized-operand = Opérande non reconnue '{ $operand }' dd-error-multiple-format-table = Seul un seul de conv=ascii conv=ebcdic ou conv=ibm peut être spécifié dd-error-multiple-case = Seul un seul de conv=lcase ou conv=ucase peut être spécifié dd-error-multiple-block = Seul un seul de conv=block ou conv=unblock peut être spécifié dd-error-multiple-excl = Seul un seul de conv=excl ou conv=nocreat peut être spécifié dd-error-invalid-flag = indicateur d'entrée invalide : '{ $flag }' Essayez '{ $cmd } --help' pour plus d'informations. 
dd-error-conv-flag-no-match = conv=CONV non reconnu -> { $flag } dd-error-multiplier-parse-failure = nombre invalide : ‘{ $input }‘ dd-error-multiplier-overflow = La chaîne de multiplicateur déborderait sur le système actuel -> { $input } dd-error-block-without-cbs = conv=block ou conv=unblock spécifié sans cbs=N dd-error-status-not-recognized = status=NIVEAU non reconnu -> { $level } dd-error-unimplemented = fonctionnalité non implémentée sur ce système -> { $feature } dd-error-bs-out-of-range = { $param }=N ne peut pas tenir en mémoire dd-error-invalid-number = nombre invalide : ‘{ $input }‘ # Progress messages dd-progress-records-in = { $complete }+{ $partial } enregistrements en entrée dd-progress-records-out = { $complete }+{ $partial } enregistrements en sortie dd-progress-truncated-record = { $count -> [one] { $count } enregistrement tronqué *[other] { $count } enregistrements tronqués } dd-progress-byte-copied = { $bytes } octet copié, { $duration } s, { $rate }/s dd-progress-bytes-copied = { $bytes } octets copiés, { $duration } s, { $rate }/s dd-progress-bytes-copied-si = { $bytes } octets ({ $si }) copiés, { $duration } s, { $rate }/s dd-progress-bytes-copied-si-iec = { $bytes } octets ({ $si }, { $iec }) copiés, { $duration } s, { $rate }/s # Warnings dd-warning-zero-multiplier = { $zero } est un multiplicateur zéro ; utilisez { $alternative } si c'est voulu dd-warning-signal-handler = Avertissement dd interne : Impossible d'enregistrer le gestionnaire de signal coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/000077500000000000000000000000001504311601400233425ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/blocks.rs000066400000000000000000000270511504311601400251720ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore datastructures rstat rposition cflags ctable use crate::conversion_tables::ConversionTable; use crate::datastructures::ConversionMode; use crate::progress::ReadStat; const NEWLINE: u8 = b'\n'; const SPACE: u8 = b' '; /// Split a slice into chunks, padding or truncating as necessary. /// /// The slice `buf` is split on newlines, then each block is resized /// to `cbs` bytes, padding with spaces if necessary. This function /// expects the input bytes to be ASCII-encoded. /// /// If `sync` is true and there has been at least one partial record /// read from the input (as indicated in `rstat`), then leave an /// all-spaces block at the end. Otherwise, remove the last block if /// it is all spaces. fn block(buf: &[u8], cbs: usize, sync: bool, rstat: &mut ReadStat) -> Vec> { let mut blocks = buf .split(|&e| e == NEWLINE) .map(|split| split.to_vec()) .fold(Vec::new(), |mut blocks, mut split| { if split.len() > cbs { rstat.records_truncated += 1; } split.resize(cbs, SPACE); blocks.push(split); blocks }); // If `sync` is true and there has been at least one partial // record read from the input, then leave the all-spaces block at // the end. Otherwise, remove it. if let Some(last) = blocks.last() { if (!sync || rstat.reads_partial == 0) && last.iter().all(|&e| e == SPACE) { blocks.pop(); } } blocks } /// Trims padding from each cbs-length partition of buf /// as specified by conv=unblock and cbs=N /// Expects ascii encoded data fn unblock(buf: &[u8], cbs: usize) -> Vec { buf.chunks(cbs).fold(Vec::new(), |mut acc, block| { if let Some(last_char_idx) = block.iter().rposition(|&e| e != SPACE) { // Include text up to last space. acc.extend(&block[..=last_char_idx]); } acc.push(NEWLINE); acc }) } /// Apply the specified conversion, blocking, and/or unblocking in the right order. /// /// The `mode` specifies the combination of conversion, blocking, and /// unblocking to apply and the order in which to apply it. 
This /// function is responsible only for applying the operations. /// /// `buf` is the buffer of input bytes to transform. This function /// mutates this input and also returns a new buffer of bytes /// representing the result of the transformation. /// /// `rstat` maintains a running total of the number of partial and /// complete blocks read before calling this function. In certain /// settings of `mode`, this function will update the number of /// records truncated; that's why `rstat` is borrowed mutably. pub(crate) fn conv_block_unblock_helper( buf: Vec, mode: &ConversionMode, rstat: &mut ReadStat, ) -> Vec { fn apply_conversion(buf: Vec, ct: &ConversionTable) -> impl Iterator + '_ { buf.into_iter().map(|b| ct[b as usize]) } match mode { ConversionMode::ConvertOnly(ct) => apply_conversion(buf, ct).collect(), ConversionMode::BlockThenConvert(ct, cbs, sync) => { let blocks = block(&buf, *cbs, *sync, rstat); blocks .into_iter() .flat_map(|block| apply_conversion(block, ct)) .collect() } ConversionMode::ConvertThenBlock(ct, cbs, sync) => { let buf: Vec<_> = apply_conversion(buf, ct).collect(); block(&buf, *cbs, *sync, rstat) .into_iter() .flatten() .collect() } ConversionMode::BlockOnly(cbs, sync) => block(&buf, *cbs, *sync, rstat) .into_iter() .flatten() .collect(), ConversionMode::UnblockThenConvert(ct, cbs) => { let buf = unblock(&buf, *cbs); apply_conversion(buf, ct).collect() } ConversionMode::ConvertThenUnblock(ct, cbs) => { let buf: Vec<_> = apply_conversion(buf, ct).collect(); unblock(&buf, *cbs) } ConversionMode::UnblockOnly(cbs) => unblock(&buf, *cbs), } } #[cfg(test)] mod tests { use crate::blocks::{block, unblock}; use crate::progress::ReadStat; const NEWLINE: u8 = b'\n'; const SPACE: u8 = b' '; #[test] fn block_test_no_nl() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8]; let res = block(&buf, 4, false, &mut rs); assert_eq!(res, vec![vec![0u8, 1u8, 2u8, 3u8]]); } #[test] fn block_test_no_nl_short_record() { let mut rs = 
ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8]; let res = block(&buf, 8, false, &mut rs); assert_eq!( res, vec![vec![0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE]] ); } #[test] fn block_test_no_nl_trunc() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8, 4u8]; let res = block(&buf, 4, false, &mut rs); // Commented section(s) should be truncated and appear for reference only. assert_eq!(res, vec![vec![0u8, 1u8, 2u8, 3u8 /*, 4u8*/]]); assert_eq!(rs.records_truncated, 1); } #[test] fn block_test_nl_gt_cbs_trunc() { let mut rs = ReadStat::default(); let buf = [ 0u8, 1u8, 2u8, 3u8, 4u8, NEWLINE, 0u8, 1u8, 2u8, 3u8, 4u8, NEWLINE, 5u8, 6u8, 7u8, 8u8, ]; let res = block(&buf, 4, false, &mut rs); assert_eq!( res, vec![ // Commented section(s) should be truncated and appear for reference only. vec![0u8, 1u8, 2u8, 3u8], // vec![4u8, SPACE, SPACE, SPACE], vec![0u8, 1u8, 2u8, 3u8], // vec![4u8, SPACE, SPACE, SPACE], vec![5u8, 6u8, 7u8, 8u8], ] ); assert_eq!(rs.records_truncated, 2); } #[test] fn block_test_surrounded_nl() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8, NEWLINE, 4u8, 5u8, 6u8, 7u8, 8u8]; let res = block(&buf, 8, false, &mut rs); assert_eq!( res, vec![ vec![0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE], vec![4u8, 5u8, 6u8, 7u8, 8u8, SPACE, SPACE, SPACE], ] ); } #[test] fn block_test_multiple_nl_same_cbs_block() { let mut rs = ReadStat::default(); let buf = [ 0u8, 1u8, 2u8, 3u8, NEWLINE, 4u8, NEWLINE, 5u8, 6u8, 7u8, 8u8, 9u8, ]; let res = block(&buf, 8, false, &mut rs); assert_eq!( res, vec![ vec![0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE], vec![4u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE], vec![5u8, 6u8, 7u8, 8u8, 9u8, SPACE, SPACE, SPACE], ] ); } #[test] fn block_test_multiple_nl_diff_cbs_block() { let mut rs = ReadStat::default(); let buf = [ 0u8, 1u8, 2u8, 3u8, NEWLINE, 4u8, 5u8, 6u8, 7u8, NEWLINE, 8u8, 9u8, ]; let res = block(&buf, 8, false, &mut rs); assert_eq!( res, vec![ vec![0u8, 1u8, 2u8, 3u8, 
SPACE, SPACE, SPACE, SPACE], vec![4u8, 5u8, 6u8, 7u8, SPACE, SPACE, SPACE, SPACE], vec![8u8, 9u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE], ] ); } #[test] fn block_test_end_nl_diff_cbs_block() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8, NEWLINE]; let res = block(&buf, 4, false, &mut rs); assert_eq!(res, vec![vec![0u8, 1u8, 2u8, 3u8]]); } #[test] fn block_test_end_nl_same_cbs_block() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, NEWLINE]; let res = block(&buf, 4, false, &mut rs); assert_eq!(res, vec![vec![0u8, 1u8, 2u8, SPACE]]); } #[test] fn block_test_double_end_nl() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, NEWLINE, NEWLINE]; let res = block(&buf, 4, false, &mut rs); assert_eq!( res, vec![vec![0u8, 1u8, 2u8, SPACE], vec![SPACE, SPACE, SPACE, SPACE]] ); } #[test] fn block_test_start_nl() { let mut rs = ReadStat::default(); let buf = [NEWLINE, 0u8, 1u8, 2u8, 3u8]; let res = block(&buf, 4, false, &mut rs); assert_eq!( res, vec![vec![SPACE, SPACE, SPACE, SPACE], vec![0u8, 1u8, 2u8, 3u8]] ); } #[test] fn block_test_double_surrounded_nl_no_trunc() { let mut rs = ReadStat::default(); let buf = [0u8, 1u8, 2u8, 3u8, NEWLINE, NEWLINE, 4u8, 5u8, 6u8, 7u8]; let res = block(&buf, 8, false, &mut rs); assert_eq!( res, vec![ vec![0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE], vec![SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE], vec![4u8, 5u8, 6u8, 7u8, SPACE, SPACE, SPACE, SPACE], ] ); } #[test] fn block_test_double_surrounded_nl_double_trunc() { let mut rs = ReadStat::default(); let buf = [ 0u8, 1u8, 2u8, 3u8, NEWLINE, NEWLINE, 4u8, 5u8, 6u8, 7u8, 8u8, ]; let res = block(&buf, 4, false, &mut rs); assert_eq!( res, vec![ // Commented section(s) should be truncated and appear for reference only. 
vec![0u8, 1u8, 2u8, 3u8], vec![SPACE, SPACE, SPACE, SPACE], vec![4u8, 5u8, 6u8, 7u8 /*, 8u8*/], ] ); assert_eq!(rs.records_truncated, 1); } #[test] fn unblock_test_full_cbs() { let buf = [0u8, 1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8]; let res = unblock(&buf, 8); assert_eq!(res, vec![0u8, 1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8, NEWLINE]); } #[test] fn unblock_test_all_space() { let buf = [SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE]; let res = unblock(&buf, 8); assert_eq!(res, vec![NEWLINE]); } #[test] fn unblock_test_decoy_spaces() { let buf = [0u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, 7u8]; let res = unblock(&buf, 8); assert_eq!( res, vec![0u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, 7u8, NEWLINE], ); } #[test] fn unblock_test_strip_single_cbs() { let buf = [0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE]; let res = unblock(&buf, 8); assert_eq!(res, vec![0u8, 1u8, 2u8, 3u8, NEWLINE]); } #[test] fn unblock_test_strip_multi_cbs() { let buf = vec![ vec![0u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE], vec![0u8, 1u8, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE], vec![0u8, 1u8, 2u8, SPACE, SPACE, SPACE, SPACE, SPACE], vec![0u8, 1u8, 2u8, 3u8, SPACE, SPACE, SPACE, SPACE], ] .into_iter() .flatten() .collect::>(); let res = unblock(&buf, 8); let exp = vec![ vec![0u8, NEWLINE], vec![0u8, 1u8, NEWLINE], vec![0u8, 1u8, 2u8, NEWLINE], vec![0u8, 1u8, 2u8, 3u8, NEWLINE], ] .into_iter() .flatten() .collect::>(); assert_eq!(res, exp); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/bufferedoutput.rs000066400000000000000000000152741504311601400267640ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // // spell-checker:ignore wstat towrite cdefg bufferedoutput //! Buffer partial output blocks until they are completed. //! //! 
Use the [`BufferedOutput`] struct to create a buffered form of the //! [`Output`] writer. use crate::{Output, WriteStat}; /// Buffer partial output blocks until they are completed. /// /// Complete blocks are written immediately to the inner [`Output`], /// but partial blocks are stored in an internal buffer until they are /// completed. pub(crate) struct BufferedOutput<'a> { /// The unbuffered inner block writer. inner: Output<'a>, /// The internal buffer that stores a partial block. /// /// The size of this buffer is always less than the output block /// size (that is, the value of the `obs` command-line option). buf: Vec, } impl<'a> BufferedOutput<'a> { /// Add partial block buffering to the given block writer. /// /// The internal buffer size is at most the value of `obs` as /// defined in `inner`. pub(crate) fn new(inner: Output<'a>) -> Self { let obs = inner.settings.obs; Self { inner, buf: Vec::with_capacity(obs), } } pub(crate) fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) { self.inner.discard_cache(offset, len); } /// Flush the partial block stored in the internal buffer. pub(crate) fn flush(&mut self) -> std::io::Result { let wstat = self.inner.write_blocks(&self.buf)?; let n = wstat.bytes_total.try_into().unwrap(); self.buf.drain(0..n); Ok(wstat) } /// Synchronize the inner block writer. pub(crate) fn sync(&mut self) -> std::io::Result<()> { self.inner.sync() } /// Truncate the underlying file to the current stream position, if possible. pub(crate) fn truncate(&mut self) -> std::io::Result<()> { self.inner.dst.truncate() } /// Write the given bytes one block at a time. /// /// Only complete blocks will be written. Partial blocks will be /// buffered until enough bytes have been provided to complete a /// block. The returned [`WriteStat`] object will include the /// number of blocks written during execution of this function. 
pub(crate) fn write_blocks(&mut self, buf: &[u8]) -> std::io::Result<WriteStat> {
        // Split the incoming buffer into two parts: the bytes to write
        // and the bytes to buffer for next time.
        //
        // If `buf` does not include enough bytes to form a full block,
        // just buffer the whole thing and write zero blocks.
        let n = self.buf.len() + buf.len();
        let rem = n % self.inner.settings.obs;
        let i = buf.len().saturating_sub(rem);
        let (to_write, to_buffer) = buf.split_at(i);

        // Concatenate the old partial block with the new bytes to form
        // some number of complete blocks.
        self.buf.extend_from_slice(to_write);

        // Write all complete blocks to the inner block writer.
        //
        // For example, if the output block size were 3, the buffered
        // partial block were `b"ab"` and the new incoming bytes were
        // `b"cdefg"`, then we would write blocks `b"abc"` and
        // b`"def"` to the inner block writer.
        let wstat = self.inner.write_blocks(&self.buf)?;

        // Buffer any remaining bytes as a partial block.
        //
        // Continuing the example above, the last byte `b"g"` would be
        // buffered as a partial block until the next call to
        // `write_blocks()`.
        self.buf.clear();
        self.buf.extend_from_slice(to_buffer);
        Ok(wstat)
    }
}

#[cfg(unix)]
#[cfg(test)]
mod tests {
    use crate::bufferedoutput::BufferedOutput;
    use crate::{Dest, Output, Settings};

    #[test]
    fn test_buffered_output_write_blocks_empty() {
        let settings = Settings {
            obs: 3,
            ..Default::default()
        };
        let inner = Output {
            dst: Dest::Sink,
            settings: &settings,
        };
        let mut output = BufferedOutput::new(inner);
        let wstat = output.write_blocks(&[]).unwrap();
        assert_eq!(wstat.writes_complete, 0);
        assert_eq!(wstat.writes_partial, 0);
        assert_eq!(wstat.bytes_total, 0);
        assert_eq!(output.buf, vec![]);
    }

    #[test]
    fn test_buffered_output_write_blocks_partial() {
        let settings = Settings {
            obs: 3,
            ..Default::default()
        };
        let inner = Output {
            dst: Dest::Sink,
            settings: &settings,
        };
        let mut output = BufferedOutput::new(inner);
        let wstat = output.write_blocks(b"ab").unwrap();
        assert_eq!(wstat.writes_complete, 0);
        assert_eq!(wstat.writes_partial, 0);
        assert_eq!(wstat.bytes_total, 0);
        assert_eq!(output.buf, b"ab");
    }

    #[test]
    fn test_buffered_output_write_blocks_complete() {
        let settings = Settings {
            obs: 3,
            ..Default::default()
        };
        let inner = Output {
            dst: Dest::Sink,
            settings: &settings,
        };
        let mut output = BufferedOutput::new(inner);
        let wstat = output.write_blocks(b"abcd").unwrap();
        assert_eq!(wstat.writes_complete, 1);
        assert_eq!(wstat.writes_partial, 0);
        assert_eq!(wstat.bytes_total, 3);
        assert_eq!(output.buf, b"d");
    }

    #[test]
    fn test_buffered_output_write_blocks_append() {
        let settings = Settings {
            obs: 3,
            ..Default::default()
        };
        let inner = Output {
            dst: Dest::Sink,
            settings: &settings,
        };
        let mut output = BufferedOutput {
            inner,
            buf: b"ab".to_vec(),
        };
        let wstat = output.write_blocks(b"cdefg").unwrap();
        assert_eq!(wstat.writes_complete, 2);
        assert_eq!(wstat.writes_partial, 0);
        assert_eq!(wstat.bytes_total, 6);
        assert_eq!(output.buf, b"g");
    }

    #[test]
    fn test_buffered_output_flush() {
        let settings = Settings {
            obs: 10,
            ..Default::default()
        };
        let inner = Output {
            dst: Dest::Sink,
settings: &settings, }; let mut output = BufferedOutput { inner, buf: b"abc".to_vec(), }; let wstat = output.flush().unwrap(); assert_eq!(wstat.writes_complete, 0); assert_eq!(wstat.writes_partial, 1); assert_eq!(wstat.bytes_total, 3); assert_eq!(output.buf, vec![]); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/conversion_tables.rs000066400000000000000000000443401504311601400274340ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // Note: Conversion tables are just lookup tables. // eg. The ASCII->EBCDIC table stores the EBCDIC code at the index // obtained by treating the ASCII representation as a number. pub type ConversionTable = [u8; 256]; pub const ASCII_UCASE_TO_LCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_LCASE_TO_UCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 
0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_TO_EBCDIC: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xad, 0xe0, 0xbd, 0x9a, 0x6d, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xc0, 0x4f, 0xd0, 0x5f, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x6a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0x4a, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xa1, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_TO_EBCDIC_UCASE_TO_LCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 
0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xad, 0xe0, 0xbd, 0x9a, 0x6d, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xc0, 0x4f, 0xd0, 0x5f, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x6a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0x4a, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xa1, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_TO_EBCDIC_LCASE_TO_UCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xad, 0xe0, 0xbd, 0x9a, 0x6d, 0x79, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 
0xe7, 0xe8, 0xe9, 0xc0, 0x4f, 0xd0, 0x5f, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x6a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0x4a, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xa1, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_TO_IBM: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xad, 0xe0, 0xbd, 0x5f, 0x6d, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xc0, 0x4f, 0xd0, 0xa1, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 
0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const ASCII_TO_IBM_UCASE_TO_LCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xad, 0xe0, 0xbd, 0x5f, 0x6d, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xc0, 0x4f, 0xd0, 0xa1, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 
]; pub const ASCII_TO_IBM_LCASE_TO_UCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xad, 0xe0, 0xbd, 0x5f, 0x6d, 0x79, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xc0, 0x4f, 0xd0, 0xa1, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const EBCDIC_TO_ASCII: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x9c, 0x09, 0x86, 0x7f, 0x97, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x9d, 0x85, 0x08, 0x87, 0x18, 0x19, 0x92, 0x8f, 0x1c, 0x1d, 0x1e, 0x1f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x0a, 0x17, 0x1b, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04, 0x98, 0x99, 
0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, 0x20, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xd5, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, 0x26, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x7e, 0x2d, 0x2f, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xcb, 0x2c, 0x25, 0x5f, 0x3e, 0x3f, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0x60, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, 0xc3, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x5e, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xe5, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0xd2, 0xd3, 0xd4, 0x5b, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0x5d, 0xe6, 0xe7, 0x7b, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0x7d, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0x5c, 0x9f, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const EBCDIC_TO_ASCII_UCASE_TO_LCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xad, 0xe0, 0xbd, 0x5f, 0x6d, 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xc0, 
0x4f, 0xd0, 0xa1, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub const EBCDIC_TO_ASCII_LCASE_TO_UCASE: ConversionTable = [ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2d, 0x2e, 0x2f, 0x16, 0x05, 0x25, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x3c, 0x3d, 0x32, 0x26, 0x18, 0x19, 0x3f, 0x27, 0x1c, 0x1d, 0x1e, 0x1f, 0x40, 0x5a, 0x7f, 0x7b, 0x5b, 0x6c, 0x50, 0x7d, 0x4d, 0x5d, 0x5c, 0x4e, 0x6b, 0x60, 0x4b, 0x61, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0x7a, 0x5e, 0x4c, 0x7e, 0x6e, 0x6f, 0x7c, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xad, 0xe0, 0xbd, 0x5f, 0x6d, 0x79, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xc0, 0x4f, 0xd0, 0xa1, 0x07, 0x20, 0x21, 0x22, 0x23, 0x24, 0x15, 0x06, 0x17, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x09, 0x0a, 0x1b, 0x30, 0x31, 0x1a, 0x33, 0x34, 0x35, 0x36, 0x08, 0x38, 0x39, 0x3a, 0x3b, 0x04, 0x14, 0x3e, 0xe1, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 
0x74, 0x75, 0x76, 0x77, 0x78, 0x80, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/datastructures.rs000066400000000000000000000046231504311601400267720ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore ctable, outfile, iseek, oseek use crate::conversion_tables::*; type Cbs = usize; /// How to apply conversion, blocking, and/or unblocking. /// /// Certain settings of the `conv` parameter to `dd` require a /// combination of conversion, blocking, or unblocking, applied in a /// certain order. The variants of this enumeration give the different /// ways of combining those three operations. 
#[derive(Debug, PartialEq)] pub(crate) enum ConversionMode { ConvertOnly(&'static ConversionTable), BlockOnly(Cbs, bool), UnblockOnly(Cbs), BlockThenConvert(&'static ConversionTable, Cbs, bool), ConvertThenBlock(&'static ConversionTable, Cbs, bool), UnblockThenConvert(&'static ConversionTable, Cbs), ConvertThenUnblock(&'static ConversionTable, Cbs), } /// Stores all Conv Flags that apply to the input #[derive(Debug, Default, PartialEq)] pub(crate) struct IConvFlags { pub mode: Option, pub swab: bool, pub sync: Option, pub noerror: bool, } /// Stores all Conv Flags that apply to the output #[derive(Debug, Default, PartialEq, Eq)] pub struct OConvFlags { pub sparse: bool, pub excl: bool, pub nocreat: bool, pub notrunc: bool, pub fdatasync: bool, pub fsync: bool, } /// Stores all Flags that apply to the input #[derive(Debug, Default, PartialEq, Eq)] pub struct IFlags { pub cio: bool, pub direct: bool, pub directory: bool, pub dsync: bool, pub sync: bool, pub nocache: bool, pub nonblock: bool, pub noatime: bool, pub noctty: bool, pub nofollow: bool, pub nolinks: bool, pub binary: bool, pub text: bool, pub fullblock: bool, pub count_bytes: bool, pub skip_bytes: bool, } /// Stores all Flags that apply to the output #[derive(Debug, Default, PartialEq, Eq)] pub struct OFlags { pub append: bool, pub cio: bool, pub direct: bool, pub directory: bool, pub dsync: bool, pub sync: bool, pub nocache: bool, pub nonblock: bool, pub noatime: bool, pub noctty: bool, pub nofollow: bool, pub nolinks: bool, pub binary: bool, pub text: bool, pub seek_bytes: bool, } pub mod options { pub const OPERANDS: &str = "operands"; } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/dd.rs000066400000000000000000001455561504311601400243170ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore fname, ftype, tname, fpath, specfile, testfile, unspec, ifile, ofile, outfile, fullblock, urand, fileio, atoe, atoibm, behaviour, bmax, bremain, cflags, creat, ctable, ctty, datastructures, doesnt, etoa, fileout, fname, gnudd, iconvflags, iseek, nocache, noctty, noerror, nofollow, nolinks, nonblock, oconvflags, oseek, outfile, parseargs, rlen, rmax, rremain, rsofar, rstat, sigusr, wlen, wstat seekable oconv canonicalized fadvise Fadvise FADV DONTNEED ESPIPE bufferedoutput, SETFL mod blocks; mod bufferedoutput; mod conversion_tables; mod datastructures; mod numbers; mod parseargs; mod progress; use crate::bufferedoutput::BufferedOutput; use blocks::conv_block_unblock_helper; use datastructures::*; #[cfg(any(target_os = "linux", target_os = "android"))] use nix::fcntl::FcntlArg::F_SETFL; #[cfg(any(target_os = "linux", target_os = "android"))] use nix::fcntl::OFlag; use parseargs::Parser; use progress::ProgUpdateType; use progress::{ProgUpdate, ReadStat, StatusLevel, WriteStat, gen_prog_updater}; use uucore::io::OwnedFileDescriptorOrHandle; use uucore::translate; use std::cmp; use std::env; use std::ffi::OsString; use std::fs::{File, OpenOptions}; use std::io::{self, Read, Seek, SeekFrom, Stdout, Write}; #[cfg(any(target_os = "linux", target_os = "android"))] use std::os::fd::AsFd; #[cfg(any(target_os = "linux", target_os = "android"))] use std::os::unix::fs::OpenOptionsExt; #[cfg(unix)] use std::os::unix::{ fs::FileTypeExt, io::{AsRawFd, FromRawFd}, }; #[cfg(windows)] use std::os::windows::{fs::MetadataExt, io::AsHandle}; use std::path::Path; use std::sync::atomic::AtomicU8; use std::sync::{Arc, atomic::Ordering::Relaxed, mpsc}; use std::thread; use std::time::{Duration, Instant}; use clap::{Arg, Command}; use gcd::Gcd; #[cfg(target_os = "linux")] use nix::{ errno::Errno, fcntl::{PosixFadviseAdvice, posix_fadvise}, }; use uucore::display::Quotable; use uucore::error::{FromIo, UResult}; #[cfg(unix)] use uucore::error::{USimpleError, 
set_exit_code};
#[cfg(target_os = "linux")]
use uucore::show_if_err;
use uucore::{format_usage, show_error};

// Byte used to pre-fill I/O buffers; 0xDD makes uninitialized reads obvious.
const BUF_INIT_BYTE: u8 = 0xDD;

/// Final settings after parsing
#[derive(Default)]
struct Settings {
    infile: Option<String>,
    outfile: Option<String>,
    ibs: usize,
    obs: usize,
    skip: u64,
    seek: u64,
    count: Option<Num>,
    iconv: IConvFlags,
    iflags: IFlags,
    oconv: OConvFlags,
    oflags: OFlags,
    status: Option<StatusLevel>,
    /// Whether the output writer should buffer partial blocks until complete.
    buffered: bool,
}

/// A timer which triggers on a given interval
///
/// After being constructed with [`Alarm::with_interval`], [`Alarm::get_trigger`]
/// will return [`ALARM_TRIGGER_TIMER`] once per the given [`Duration`].
/// Alarm can be manually triggered with closure returned by [`Alarm::manual_trigger_fn`].
/// [`Alarm::get_trigger`] will return [`ALARM_TRIGGER_SIGNAL`] in this case.
///
/// Can be cloned, but the trigger status is shared across all instances so only
/// the first caller each interval will yield true.
///
/// When all instances are dropped the background thread will exit on the next interval.
pub struct Alarm {
    interval: Duration,
    trigger: Arc<AtomicU8>,
}

pub const ALARM_TRIGGER_NONE: u8 = 0;
pub const ALARM_TRIGGER_TIMER: u8 = 1;
pub const ALARM_TRIGGER_SIGNAL: u8 = 2;

impl Alarm {
    /// use to construct alarm timer with duration
    pub fn with_interval(interval: Duration) -> Self {
        let trigger = Arc::new(AtomicU8::default());

        // Hold only a Weak reference in the background thread so that
        // dropping every Alarm lets the thread exit on its next wake-up.
        let weak_trigger = Arc::downgrade(&trigger);
        thread::spawn(move || {
            while let Some(trigger) = weak_trigger.upgrade() {
                thread::sleep(interval);
                trigger.store(ALARM_TRIGGER_TIMER, Relaxed);
            }
        });

        Self { interval, trigger }
    }

    /// Returns a closure that allows to manually trigger the alarm
    ///
    /// This is useful for cases where more than one alarm event source exists
    /// In case of `dd` there is the SIGUSR1/SIGINFO case where we want to
    /// trigger a manual progress report.
pub fn manual_trigger_fn(&self) -> Box { let weak_trigger = Arc::downgrade(&self.trigger); Box::new(move || { if let Some(trigger) = weak_trigger.upgrade() { trigger.store(ALARM_TRIGGER_SIGNAL, Relaxed); } }) } /// Use this function to poll for any pending alarm event /// /// Returns `ALARM_TRIGGER_NONE` for no pending event. /// Returns `ALARM_TRIGGER_TIMER` if the event was triggered by timer /// Returns `ALARM_TRIGGER_SIGNAL` if the event was triggered manually /// by the closure returned from `manual_trigger_fn` pub fn get_trigger(&self) -> u8 { self.trigger.swap(ALARM_TRIGGER_NONE, Relaxed) } // Getter function for the configured interval duration pub fn get_interval(&self) -> Duration { self.interval } } /// A number in blocks or bytes /// /// Some values (seek, skip, iseek, oseek) can have values either in blocks or in bytes. /// We need to remember this because the size of the blocks (ibs) is only known after parsing /// all the arguments. #[derive(Clone, Copy, Debug, PartialEq)] enum Num { Blocks(u64), Bytes(u64), } impl Default for Num { fn default() -> Self { Self::Blocks(0) } } impl Num { fn force_bytes_if(self, force: bool) -> Self { match self { Self::Blocks(n) if force => Self::Bytes(n), count => count, } } fn to_bytes(self, block_size: u64) -> u64 { match self { Self::Blocks(n) => n * block_size, Self::Bytes(n) => n, } } } /// Data sources. /// /// Use [`Source::stdin_as_file`] if available to enable more /// fine-grained access to reading from stdin. enum Source { /// Input from stdin. #[cfg(not(unix))] Stdin(io::Stdin), /// Input from a file. File(File), /// Input from stdin, opened from its file descriptor. #[cfg(unix)] StdinFile(File), /// Input from a named pipe, also known as a FIFO. #[cfg(unix)] Fifo(File), } impl Source { /// Create a source from stdin using its raw file descriptor. 
/// /// This returns an instance of the `Source::StdinFile` variant, /// using the raw file descriptor of [`std::io::Stdin`] to create /// the [`std::fs::File`] parameter. You can use this instead of /// `Source::Stdin` to allow reading from stdin without consuming /// the entire contents of stdin when this process terminates. #[cfg(unix)] fn stdin_as_file() -> Self { let fd = io::stdin().as_raw_fd(); let f = unsafe { File::from_raw_fd(fd) }; Self::StdinFile(f) } /// The length of the data source in number of bytes. /// /// If it cannot be determined, then this function returns 0. fn len(&self) -> io::Result { #[allow(clippy::match_wildcard_for_single_variants)] match self { Self::File(f) => Ok(f.metadata()?.len().try_into().unwrap_or(i64::MAX)), _ => Ok(0), } } fn skip(&mut self, n: u64) -> io::Result { match self { #[cfg(not(unix))] Self::Stdin(stdin) => match io::copy(&mut stdin.take(n), &mut io::sink()) { Ok(m) if m < n => { show_error!( "{}", translate!("dd-error-cannot-skip-offset", "file" => "standard input") ); Ok(m) } Ok(m) => Ok(m), Err(e) => Err(e), }, #[cfg(unix)] Self::StdinFile(f) => { if let Ok(Some(len)) = try_get_len_of_block_device(f) { if len < n { // GNU compatibility: // this case prints the stats but sets the exit code to 1 show_error!( "{}", translate!("dd-error-cannot-skip-invalid", "file" => "standard input") ); set_exit_code(1); return Ok(len); } } match io::copy(&mut f.take(n), &mut io::sink()) { Ok(m) if m < n => { show_error!( "{}", translate!("dd-error-cannot-skip-offset", "file" => "standard input") ); Ok(m) } Ok(m) => Ok(m), Err(e) => Err(e), } } Self::File(f) => f.seek(SeekFrom::Current(n.try_into().unwrap())), #[cfg(unix)] Self::Fifo(f) => io::copy(&mut f.take(n), &mut io::sink()), } } /// Discard the system file cache for the given portion of the data source. /// /// `offset` and `len` specify a contiguous portion of the data /// source. 
This function informs the kernel that the specified /// portion of the source is no longer needed. If not possible, /// then this function returns an error. #[cfg(target_os = "linux")] fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) -> nix::Result<()> { #[allow(clippy::match_wildcard_for_single_variants)] match self { Self::File(f) => { let advice = PosixFadviseAdvice::POSIX_FADV_DONTNEED; posix_fadvise(f.as_fd(), offset, len, advice) } _ => Err(Errno::ESPIPE), // "Illegal seek" } } } impl Read for Source { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self { #[cfg(not(unix))] Self::Stdin(stdin) => stdin.read(buf), Self::File(f) => f.read(buf), #[cfg(unix)] Self::StdinFile(f) => f.read(buf), #[cfg(unix)] Self::Fifo(f) => f.read(buf), } } } /// The source of the data, configured with the given settings. /// /// Use the [`Input::new_stdin`] or [`Input::new_file`] functions to /// construct a new instance of this struct. Then pass the instance to /// the [`dd_copy`] function to execute the main copy operation /// for `dd`. struct Input<'a> { /// The source from which bytes will be read. src: Source, /// Configuration settings for how to read the data. settings: &'a Settings, } impl<'a> Input<'a> { /// Instantiate this struct with stdin as a source. fn new_stdin(settings: &'a Settings) -> UResult { #[cfg(not(unix))] let mut src = { let f = File::from(io::stdin().as_handle().try_clone_to_owned()?); let is_file = if let Ok(metadata) = f.metadata() { // this hack is needed as there is no other way on windows // to differentiate between the case where `seek` works // on a file handle or not. i.e. when the handle is no real // file but a pipe, `seek` is still successful, but following // `read`s are not affected by the seek. 
metadata.creation_time() != 0 } else { false }; if is_file { Source::File(f) } else { Source::Stdin(io::stdin()) } }; #[cfg(unix)] let mut src = Source::stdin_as_file(); #[cfg(unix)] if let Source::StdinFile(f) = &src { if settings.iflags.directory && !f.metadata()?.is_dir() { return Err(USimpleError::new( 1, translate!("dd-error-not-directory", "file" => "standard input"), )); } } if settings.skip > 0 { src.skip(settings.skip)?; } Ok(Self { src, settings }) } /// Instantiate this struct with the named file as a source. fn new_file(filename: &Path, settings: &'a Settings) -> UResult { let src = { let mut opts = OpenOptions::new(); opts.read(true); #[cfg(any(target_os = "linux", target_os = "android"))] if let Some(libc_flags) = make_linux_iflags(&settings.iflags) { opts.custom_flags(libc_flags); } opts.open(filename).map_err_context( || translate!("dd-error-failed-to-open", "path" => filename.quote()), )? }; let mut src = Source::File(src); if settings.skip > 0 { src.skip(settings.skip)?; } Ok(Self { src, settings }) } /// Instantiate this struct with the named pipe as a source. 
#[cfg(unix)] fn new_fifo(filename: &Path, settings: &'a Settings) -> UResult { let mut opts = OpenOptions::new(); opts.read(true); #[cfg(any(target_os = "linux", target_os = "android"))] opts.custom_flags(make_linux_iflags(&settings.iflags).unwrap_or(0)); let mut src = Source::Fifo(opts.open(filename)?); if settings.skip > 0 { src.skip(settings.skip)?; } Ok(Self { src, settings }) } } #[cfg(any(target_os = "linux", target_os = "android"))] fn make_linux_iflags(iflags: &IFlags) -> Option { let mut flag = 0; if iflags.direct { flag |= libc::O_DIRECT; } if iflags.directory { flag |= libc::O_DIRECTORY; } if iflags.dsync { flag |= libc::O_DSYNC; } if iflags.noatime { flag |= libc::O_NOATIME; } if iflags.noctty { flag |= libc::O_NOCTTY; } if iflags.nofollow { flag |= libc::O_NOFOLLOW; } if iflags.nonblock { flag |= libc::O_NONBLOCK; } if iflags.sync { flag |= libc::O_SYNC; } if flag == 0 { None } else { Some(flag) } } impl Read for Input<'_> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let mut base_idx = 0; let target_len = buf.len(); loop { match self.src.read(&mut buf[base_idx..]) { Ok(0) => return Ok(base_idx), Ok(rlen) if self.settings.iflags.fullblock => { base_idx += rlen; if base_idx >= target_len { return Ok(target_len); } } Ok(len) => return Ok(len), Err(e) if e.kind() == io::ErrorKind::Interrupted => (), Err(_) if self.settings.iconv.noerror => return Ok(base_idx), Err(e) => return Err(e), } } } } impl Input<'_> { /// Discard the system file cache for the given portion of the input. /// /// `offset` and `len` specify a contiguous portion of the input. /// This function informs the kernel that the specified portion of /// the input file is no longer needed. If not possible, then this /// function prints an error message to stderr and sets the exit /// status code to 1. 
#[cfg_attr(not(target_os = "linux"), allow(clippy::unused_self, unused_variables))] fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) { #[cfg(target_os = "linux")] { show_if_err!( self.src .discard_cache(offset, len) .map_err_context(|| translate!("dd-error-failed-discard-cache-input")) ); } #[cfg(not(target_os = "linux"))] { // TODO Is there a way to discard filesystem cache on // these other operating systems? } } /// Fills a given buffer. /// Reads in increments of 'self.ibs'. /// The start of each ibs-sized read follows the previous one. fn fill_consecutive(&mut self, buf: &mut Vec) -> io::Result { let mut reads_complete = 0; let mut reads_partial = 0; let mut bytes_total = 0; for chunk in buf.chunks_mut(self.settings.ibs) { match self.read(chunk)? { rlen if rlen == self.settings.ibs => { bytes_total += rlen; reads_complete += 1; } rlen if rlen > 0 => { bytes_total += rlen; reads_partial += 1; } _ => break, } } buf.truncate(bytes_total); Ok(ReadStat { reads_complete, reads_partial, // Records are not truncated when filling. records_truncated: 0, bytes_total: bytes_total.try_into().unwrap(), }) } /// Fills a given buffer. /// Reads in increments of 'self.ibs'. /// The start of each ibs-sized read is aligned to multiples of ibs; remaining space is filled with the 'pad' byte. fn fill_blocks(&mut self, buf: &mut Vec, pad: u8) -> io::Result { let mut reads_complete = 0; let mut reads_partial = 0; let mut base_idx = 0; let mut bytes_total = 0; while base_idx < buf.len() { let next_blk = cmp::min(base_idx + self.settings.ibs, buf.len()); let target_len = next_blk - base_idx; match self.read(&mut buf[base_idx..next_blk])? 
{ 0 => break, rlen if rlen < target_len => { bytes_total += rlen; reads_partial += 1; let padding = vec![pad; target_len - rlen]; buf.splice(base_idx + rlen..next_blk, padding.into_iter()); } rlen => { bytes_total += rlen; reads_complete += 1; } } base_idx += self.settings.ibs; } buf.truncate(base_idx); Ok(ReadStat { reads_complete, reads_partial, records_truncated: 0, bytes_total: bytes_total.try_into().unwrap(), }) } } enum Density { Sparse, Dense, } /// Data destinations. enum Dest { /// Output to stdout. Stdout(Stdout), /// Output to a file. /// /// The [`Density`] component indicates whether to attempt to /// write a sparse file when all-zero blocks are encountered. File(File, Density), /// Output to a named pipe, also known as a FIFO. #[cfg(unix)] Fifo(File), /// Output to nothing, dropping each byte written to the output. #[cfg(unix)] Sink, } impl Dest { fn fsync(&mut self) -> io::Result<()> { match self { Self::Stdout(stdout) => stdout.flush(), Self::File(f, _) => { f.flush()?; f.sync_all() } #[cfg(unix)] Self::Fifo(f) => { f.flush()?; f.sync_all() } #[cfg(unix)] Self::Sink => Ok(()), } } fn fdatasync(&mut self) -> io::Result<()> { match self { Self::Stdout(stdout) => stdout.flush(), Self::File(f, _) => { f.flush()?; f.sync_data() } #[cfg(unix)] Self::Fifo(f) => { f.flush()?; f.sync_data() } #[cfg(unix)] Self::Sink => Ok(()), } } fn seek(&mut self, n: u64) -> io::Result { match self { Self::Stdout(stdout) => io::copy(&mut io::repeat(0).take(n), stdout), Self::File(f, _) => { #[cfg(unix)] if let Ok(Some(len)) = try_get_len_of_block_device(f) { if len < n { // GNU compatibility: // this case prints the stats but sets the exit code to 1 show_error!( "{}", translate!("dd-error-cannot-seek-invalid", "output" => "standard output") ); set_exit_code(1); return Ok(len); } } f.seek(SeekFrom::Current(n.try_into().unwrap())) } #[cfg(unix)] Self::Fifo(f) => { // Seeking in a named pipe means *reading* from the pipe. 
io::copy(&mut f.take(n), &mut io::sink()) } #[cfg(unix)] Self::Sink => Ok(0), } } /// Truncate the underlying file to the current stream position, if possible. fn truncate(&mut self) -> io::Result<()> { #[allow(clippy::match_wildcard_for_single_variants)] match self { Self::File(f, _) => { let pos = f.stream_position()?; f.set_len(pos) } _ => Ok(()), } } /// Discard the system file cache for the given portion of the destination. /// /// `offset` and `len` specify a contiguous portion of the /// destination. This function informs the kernel that the /// specified portion of the destination is no longer needed. If /// not possible, then this function returns an error. #[cfg(target_os = "linux")] fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) -> nix::Result<()> { match self { Self::File(f, _) => { let advice = PosixFadviseAdvice::POSIX_FADV_DONTNEED; posix_fadvise(f.as_fd(), offset, len, advice) } _ => Err(Errno::ESPIPE), // "Illegal seek" } } /// The length of the data destination in number of bytes. /// /// If it cannot be determined, then this function returns 0. fn len(&self) -> io::Result { #[allow(clippy::match_wildcard_for_single_variants)] match self { Self::File(f, _) => Ok(f.metadata()?.len().try_into().unwrap_or(i64::MAX)), _ => Ok(0), } } } /// Decide whether the given buffer is all zeros. 
fn is_sparse(buf: &[u8]) -> bool { buf.iter().all(|&e| e == 0u8) } impl Write for Dest { fn write(&mut self, buf: &[u8]) -> io::Result { match self { Self::File(f, Density::Sparse) if is_sparse(buf) => { let seek_amt: i64 = buf .len() .try_into() .expect("Internal dd Error: Seek amount greater than signed 64-bit integer"); f.seek(SeekFrom::Current(seek_amt))?; Ok(buf.len()) } Self::File(f, _) => f.write(buf), Self::Stdout(stdout) => stdout.write(buf), #[cfg(unix)] Self::Fifo(f) => f.write(buf), #[cfg(unix)] Self::Sink => Ok(buf.len()), } } fn flush(&mut self) -> io::Result<()> { match self { Self::Stdout(stdout) => stdout.flush(), Self::File(f, _) => f.flush(), #[cfg(unix)] Self::Fifo(f) => f.flush(), #[cfg(unix)] Self::Sink => Ok(()), } } } /// The destination of the data, configured with the given settings. /// /// Use the [`Output::new_stdout`] or [`Output::new_file`] functions /// to construct a new instance of this struct. Then use the /// [`dd_copy`] function to execute the main copy operation for /// `dd`. struct Output<'a> { /// The destination to which bytes will be written. dst: Dest, /// Configuration settings for how to read and write the data. settings: &'a Settings, } impl<'a> Output<'a> { /// Instantiate this struct with stdout as a destination. fn new_stdout(settings: &'a Settings) -> UResult { let mut dst = Dest::Stdout(io::stdout()); dst.seek(settings.seek) .map_err_context(|| translate!("dd-error-write-error"))?; Ok(Self { dst, settings }) } /// Instantiate this struct with the named file as a destination. 
fn new_file(filename: &Path, settings: &'a Settings) -> UResult { fn open_dst(path: &Path, cflags: &OConvFlags, oflags: &OFlags) -> Result { let mut opts = OpenOptions::new(); opts.write(true) .create(!cflags.nocreat) .create_new(cflags.excl) .append(oflags.append); #[cfg(any(target_os = "linux", target_os = "android"))] if let Some(libc_flags) = make_linux_oflags(oflags) { opts.custom_flags(libc_flags); } opts.open(path) } let dst = open_dst(filename, &settings.oconv, &settings.oflags).map_err_context( || translate!("dd-error-failed-to-open", "path" => filename.quote()), )?; // Seek to the index in the output file, truncating if requested. // // Calling `set_len()` may result in an error (for example, // when calling it on `/dev/null`), but we don't want to // terminate the process when that happens. Instead, we // suppress the error by calling `Result::ok()`. This matches // the behavior of GNU `dd` when given the command-line // argument `of=/dev/null`. if !settings.oconv.notrunc { dst.set_len(settings.seek).ok(); } Self::prepare_file(dst, settings) } fn prepare_file(dst: File, settings: &'a Settings) -> UResult { let density = if settings.oconv.sparse { Density::Sparse } else { Density::Dense }; let mut dst = Dest::File(dst, density); dst.seek(settings.seek) .map_err_context(|| translate!("dd-error-failed-to-seek"))?; Ok(Self { dst, settings }) } /// Instantiate this struct with file descriptor as a destination. /// /// This is useful e.g. for the case when the file descriptor was /// already opened by the system (stdout) and has a state /// (current position) that shall be used. 
fn new_file_from_stdout(settings: &'a Settings) -> UResult { let fx = OwnedFileDescriptorOrHandle::from(io::stdout())?; #[cfg(any(target_os = "linux", target_os = "android"))] if let Some(libc_flags) = make_linux_oflags(&settings.oflags) { nix::fcntl::fcntl( fx.as_raw().as_fd(), F_SETFL(OFlag::from_bits_retain(libc_flags)), )?; } Self::prepare_file(fx.into_file(), settings) } /// Instantiate this struct with the given named pipe as a destination. #[cfg(unix)] fn new_fifo(filename: &Path, settings: &'a Settings) -> UResult { // We simulate seeking in a FIFO by *reading*, so we open the // file for reading. But then we need to close the file and // re-open it for writing. if settings.seek > 0 { Dest::Fifo(File::open(filename)?).seek(settings.seek)?; } // If `count=0`, then we don't bother opening the file for // writing because that would cause this process to block // indefinitely. if let Some(Num::Blocks(0) | Num::Bytes(0)) = settings.count { let dst = Dest::Sink; return Ok(Self { dst, settings }); } // At this point, we know there is at least one block to write // to the output, so we open the file for writing. let mut opts = OpenOptions::new(); opts.write(true) .create(!settings.oconv.nocreat) .create_new(settings.oconv.excl) .append(settings.oflags.append); #[cfg(any(target_os = "linux", target_os = "android"))] opts.custom_flags(make_linux_oflags(&settings.oflags).unwrap_or(0)); let dst = Dest::Fifo(opts.open(filename)?); Ok(Self { dst, settings }) } /// Discard the system file cache for the given portion of the output. /// /// `offset` and `len` specify a contiguous portion of the output. /// This function informs the kernel that the specified portion of /// the output file is no longer needed. If not possible, then /// this function prints an error message to stderr and sets the /// exit status code to 1. 
#[cfg_attr(not(target_os = "linux"), allow(clippy::unused_self, unused_variables))] fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) { #[cfg(target_os = "linux")] { show_if_err!( self.dst .discard_cache(offset, len) .map_err_context(|| { translate!("dd-error-failed-discard-cache-output") }) ); } #[cfg(not(target_os = "linux"))] { // TODO Is there a way to discard filesystem cache on // these other operating systems? } } /// writes a block of data. optionally retries when first try didn't complete /// /// this is needed by gnu-test: tests/dd/stats.s /// the write can be interrupted by a system signal. /// e.g. SIGUSR1 which is send to report status /// without retry, the data might not be fully written to destination. fn write_block(&mut self, chunk: &[u8]) -> io::Result { let full_len = chunk.len(); let mut base_idx = 0; loop { match self.dst.write(&chunk[base_idx..]) { Ok(wlen) => { base_idx += wlen; // take iflags.fullblock as oflags shall not have this option if (base_idx >= full_len) || !self.settings.iflags.fullblock { return Ok(base_idx); } } Err(e) if e.kind() == io::ErrorKind::Interrupted => (), Err(e) => return Err(e), } } } /// Write the given bytes one block at a time. /// /// This may write partial blocks (for example, if the underlying /// call to [`Write::write`] writes fewer than `buf.len()` /// bytes). The returned [`WriteStat`] object will include the /// number of partial and complete blocks written during execution /// of this function. fn write_blocks(&mut self, buf: &[u8]) -> io::Result { let mut writes_complete = 0; let mut writes_partial = 0; let mut bytes_total = 0; for chunk in buf.chunks(self.settings.obs) { let wlen = self.write_block(chunk)?; if wlen < self.settings.obs { writes_partial += 1; } else { writes_complete += 1; } bytes_total += wlen; } Ok(WriteStat { writes_complete, writes_partial, bytes_total: bytes_total.try_into().unwrap_or(0u128), }) } /// Flush the output to disk, if configured to do so. 
fn sync(&mut self) -> io::Result<()> { if self.settings.oconv.fsync { self.dst.fsync() } else if self.settings.oconv.fdatasync { self.dst.fdatasync() } else { // Intentionally do nothing in this case. Ok(()) } } /// Truncate the underlying file to the current stream position, if possible. fn truncate(&mut self) -> io::Result<()> { self.dst.truncate() } } /// The block writer either with or without partial block buffering. enum BlockWriter<'a> { /// Block writer with partial block buffering. /// /// Partial blocks are buffered until completed. Buffered(BufferedOutput<'a>), /// Block writer without partial block buffering. /// /// Partial blocks are written immediately. Unbuffered(Output<'a>), } impl BlockWriter<'_> { fn discard_cache(&self, offset: libc::off_t, len: libc::off_t) { match self { Self::Unbuffered(o) => o.discard_cache(offset, len), Self::Buffered(o) => o.discard_cache(offset, len), } } fn flush(&mut self) -> io::Result { match self { Self::Unbuffered(_) => Ok(WriteStat::default()), Self::Buffered(o) => o.flush(), } } fn sync(&mut self) -> io::Result<()> { match self { Self::Unbuffered(o) => o.sync(), Self::Buffered(o) => o.sync(), } } /// Truncate the file to the final cursor location. fn truncate(&mut self) { // Calling `set_len()` may result in an error (for example, // when calling it on `/dev/null`), but we don't want to // terminate the process when that happens. Instead, we // suppress the error by calling `Result::ok()`. This matches // the behavior of GNU `dd` when given the command-line // argument `of=/dev/null`. match self { Self::Unbuffered(o) => o.truncate().ok(), Self::Buffered(o) => o.truncate().ok(), }; } fn write_blocks(&mut self, buf: &[u8]) -> io::Result { match self { Self::Unbuffered(o) => o.write_blocks(buf), Self::Buffered(o) => o.write_blocks(buf), } } } /// depending on the command line arguments, this function /// informs the OS to flush/discard the caches for input and/or output file. 
fn flush_caches_full_length(i: &Input, o: &Output) -> io::Result<()> { // TODO Better error handling for overflowing `len`. if i.settings.iflags.nocache { let offset = 0; #[allow(clippy::useless_conversion)] let len = i.src.len()?.try_into().unwrap(); i.discard_cache(offset, len); } // Similarly, discard the system cache for the output file. // // TODO Better error handling for overflowing `len`. if i.settings.oflags.nocache { let offset = 0; #[allow(clippy::useless_conversion)] let len = o.dst.len()?.try_into().unwrap(); o.discard_cache(offset, len); } Ok(()) } /// Copy the given input data to this output, consuming both. /// /// This method contains the main loop for the `dd` program. Bytes /// are read in blocks from `i` and written in blocks to this /// output. Read/write statistics are reported to stderr as /// configured by the `status` command-line argument. /// /// # Errors /// /// If there is a problem reading from the input or writing to /// this output. fn dd_copy(mut i: Input, o: Output) -> io::Result<()> { // The read and write statistics. // // These objects are counters, initialized to zero. After each // iteration of the main loop, each will be incremented by the // number of blocks read and written, respectively. let mut rstat = ReadStat::default(); let mut wstat = WriteStat::default(); // The time at which the main loop starts executing. // // When `status=progress` is given on the command-line, the // `dd` program reports its progress every second or so. Part // of its report includes the throughput in bytes per second, // which requires knowing how long the process has been // running. let start = Instant::now(); // A good buffer size for reading. // // This is an educated guess about a good buffer size based on // the input and output block sizes. let bsize = calc_bsize(i.settings.ibs, o.settings.obs); // Start a thread that reports transfer progress. 
// // The `dd` program reports its progress after every block is written, // at most every 1 second, and only if `status=progress` is given on // the command-line or a SIGUSR1 signal is received. We // perform this reporting in a new thread so as not to take // any CPU time away from the actual reading and writing of // data. We send a `ProgUpdate` from the transmitter `prog_tx` // to the receives `rx`, and the receiver prints the transfer // information. let (prog_tx, rx) = mpsc::channel(); let output_thread = thread::spawn(gen_prog_updater(rx, i.settings.status)); // Whether to truncate the output file after all blocks have been written. let truncate = !o.settings.oconv.notrunc; // Optimization: if no blocks are to be written, then don't // bother allocating any buffers. if let Some(Num::Blocks(0) | Num::Bytes(0)) = i.settings.count { // Even though we are not reading anything from the input // file, we still need to honor the `nocache` flag, which // requests that we inform the system that we no longer // need the contents of the input file in a system cache. // flush_caches_full_length(&i, &o)?; return finalize( BlockWriter::Unbuffered(o), rstat, wstat, start, &prog_tx, output_thread, truncate, ); } // Create a common buffer with a capacity of the block size. // This is the max size needed. let mut buf = vec![BUF_INIT_BYTE; bsize]; // Spawn a timer thread to provide a scheduled signal indicating when we // should send an update of our progress to the reporting thread. // // This avoids the need to query the OS monotonic clock for every block. let alarm = Alarm::with_interval(Duration::from_secs(1)); // The signal handler spawns an own thread that waits for signals. // When the signal is received, it calls a handler function. // We inject a handler function that manually triggers the alarm. 
#[cfg(target_os = "linux")] let signal_handler = progress::SignalHandler::install_signal_handler(alarm.manual_trigger_fn()); #[cfg(target_os = "linux")] if let Err(e) = &signal_handler { if Some(StatusLevel::None) != i.settings.status { eprintln!("{}\n\t{e}", translate!("dd-warning-signal-handler")); } } // Index in the input file where we are reading bytes and in // the output file where we are writing bytes. // // These are updated on each iteration of the main loop. let mut read_offset = 0; let mut write_offset = 0; let input_nocache = i.settings.iflags.nocache; let output_nocache = o.settings.oflags.nocache; // Add partial block buffering, if needed. let mut o = if o.settings.buffered { BlockWriter::Buffered(BufferedOutput::new(o)) } else { BlockWriter::Unbuffered(o) }; // The main read/write loop. // // Each iteration reads blocks from the input and writes // blocks to this output. Read/write statistics are updated on // each iteration and cumulative statistics are reported to // the progress reporting thread. while below_count_limit(i.settings.count, &rstat) { // Read a block from the input then write the block to the output. // // As an optimization, make an educated guess about the // best buffer size for reading based on the number of // blocks already read and the number of blocks remaining. let loop_bsize = calc_loop_bsize(i.settings.count, &rstat, &wstat, i.settings.ibs, bsize); let rstat_update = read_helper(&mut i, &mut buf, loop_bsize)?; if rstat_update.is_empty() { break; } let wstat_update = o.write_blocks(&buf)?; // Discard the system file cache for the read portion of // the input file. // // TODO Better error handling for overflowing `offset` and `len`. let read_len = rstat_update.bytes_total; if input_nocache { let offset = read_offset.try_into().unwrap(); let len = read_len.try_into().unwrap(); i.discard_cache(offset, len); } read_offset += read_len; // Discard the system file cache for the written portion // of the output file. 
// // TODO Better error handling for overflowing `offset` and `len`. let write_len = wstat_update.bytes_total; if output_nocache { let offset = write_offset.try_into().unwrap(); let len = write_len.try_into().unwrap(); o.discard_cache(offset, len); } write_offset += write_len; // Update the read/write stats and inform the progress thread once per second. // // If the receiver is disconnected, `send()` returns an // error. Since it is just reporting progress and is not // crucial to the operation of `dd`, let's just ignore the // error. rstat += rstat_update; wstat += wstat_update; match alarm.get_trigger() { ALARM_TRIGGER_NONE => {} t @ (ALARM_TRIGGER_TIMER | ALARM_TRIGGER_SIGNAL) => { let tp = match t { ALARM_TRIGGER_TIMER => ProgUpdateType::Periodic, _ => ProgUpdateType::Signal, }; let prog_update = ProgUpdate::new(rstat, wstat, start.elapsed(), tp); prog_tx.send(prog_update).unwrap_or(()); } _ => {} } } finalize(o, rstat, wstat, start, &prog_tx, output_thread, truncate) } /// Flush output, print final stats, and join with the progress thread. fn finalize( mut output: BlockWriter, rstat: ReadStat, wstat: WriteStat, start: Instant, prog_tx: &mpsc::Sender, output_thread: thread::JoinHandle, truncate: bool, ) -> io::Result<()> { // Flush the output in case a partial write has been buffered but // not yet written. let wstat_update = output.flush()?; // Sync the output, if configured to do so. output.sync()?; // Truncate the file to the final cursor location. if truncate { output.truncate(); } // Print the final read/write statistics. 
let wstat = wstat + wstat_update; let prog_update = ProgUpdate::new(rstat, wstat, start.elapsed(), ProgUpdateType::Final); prog_tx.send(prog_update).unwrap_or(()); // Wait for the output thread to finish output_thread .join() .expect("Failed to join with the output thread."); Ok(()) } #[cfg(any(target_os = "linux", target_os = "android"))] #[allow(clippy::cognitive_complexity)] fn make_linux_oflags(oflags: &OFlags) -> Option { let mut flag = 0; // oflag=FLAG if oflags.append { flag |= libc::O_APPEND; } if oflags.direct { flag |= libc::O_DIRECT; } if oflags.directory { flag |= libc::O_DIRECTORY; } if oflags.dsync { flag |= libc::O_DSYNC; } if oflags.noatime { flag |= libc::O_NOATIME; } if oflags.noctty { flag |= libc::O_NOCTTY; } if oflags.nofollow { flag |= libc::O_NOFOLLOW; } if oflags.nonblock { flag |= libc::O_NONBLOCK; } if oflags.sync { flag |= libc::O_SYNC; } if flag == 0 { None } else { Some(flag) } } /// Read from an input (that is, a source of bytes) into the given buffer. /// /// This function also performs any conversions as specified by /// `conv=swab` or `conv=block` command-line arguments. This function /// mutates the `buf` argument in-place. The returned [`ReadStat`] /// indicates how many blocks were read. fn read_helper(i: &mut Input, buf: &mut Vec, bsize: usize) -> io::Result { // Local Helper Fns ------------------------------------------------- fn perform_swab(buf: &mut [u8]) { for base in (1..buf.len()).step_by(2) { buf.swap(base, base - 1); } } // ------------------------------------------------------------------ // Read // Resize the buffer to the bsize. Any garbage data in the buffer is overwritten or truncated, so there is no need to fill with BUF_INIT_BYTE first. 
buf.resize(bsize, BUF_INIT_BYTE); let mut rstat = match i.settings.iconv.sync { Some(ch) => i.fill_blocks(buf, ch)?, _ => i.fill_consecutive(buf)?, }; // Return early if no data if rstat.reads_complete == 0 && rstat.reads_partial == 0 { return Ok(rstat); } // Perform any conv=x[,x...] options if i.settings.iconv.swab { perform_swab(buf); } match i.settings.iconv.mode { Some(ref mode) => { *buf = conv_block_unblock_helper(buf.clone(), mode, &mut rstat); Ok(rstat) } None => Ok(rstat), } } // Calculate a 'good' internal buffer size. // For performance of the read/write functions, the buffer should hold // both an integral number of reads and an integral number of writes. For // sane real-world memory use, it should not be too large. I believe // the least common multiple is a good representation of these interests. // https://en.wikipedia.org/wiki/Least_common_multiple#Using_the_greatest_common_divisor fn calc_bsize(ibs: usize, obs: usize) -> usize { let gcd = Gcd::gcd(ibs, obs); // calculate the lcm from gcd (ibs / gcd) * obs } /// Calculate the buffer size appropriate for this loop iteration, respecting /// a `count=N` if present. fn calc_loop_bsize( count: Option, rstat: &ReadStat, wstat: &WriteStat, ibs: usize, ideal_bsize: usize, ) -> usize { match count { Some(Num::Blocks(rmax)) => { let rsofar = rstat.reads_complete + rstat.reads_partial; let rremain = rmax - rsofar; cmp::min(ideal_bsize as u64, rremain * ibs as u64) as usize } Some(Num::Bytes(bmax)) => { let bmax: u128 = bmax.into(); let bremain: u128 = bmax - wstat.bytes_total; cmp::min(ideal_bsize as u128, bremain) as usize } None => ideal_bsize, } } /// Decide if the current progress is below a `count=N` limit or return /// `true` if no such limit is set. 
fn below_count_limit(count: Option<Num>, rstat: &ReadStat) -> bool {
    match count {
        None => true,
        Some(Num::Blocks(limit)) => {
            let blocks_so_far = rstat.reads_complete + rstat.reads_partial;
            blocks_so_far < limit
        }
        Some(Num::Bytes(limit)) => rstat.bytes_total < limit,
    }
}

/// Canonicalized file name of `/dev/stdout`.
///
/// For example, if this process were invoked from the command line as
/// `dd`, then this function returns the [`OsString`] form of
/// `"/dev/stdout"`. However, if this process were invoked as `dd >
/// outfile`, then this function returns the canonicalized path to
/// `outfile`, something like `"/path/to/outfile"`.
fn stdout_canonicalized() -> OsString {
    Path::new("/dev/stdout")
        .canonicalize()
        .map_or_else(|_| OsString::from("/dev/stdout"), |p| p.into_os_string())
}

/// Decide whether stdout is being redirected to a seekable file.
///
/// For example, if this process were invoked from the command line as
///
/// ```sh
/// dd if=/dev/zero bs=1 count=10 seek=5 > /dev/sda1
/// ```
///
/// where `/dev/sda1` is a seekable block device then this function
/// would return true. If invoked as
///
/// ```sh
/// dd if=/dev/zero bs=1 count=10 seek=5
/// ```
///
/// then this function would return false.
fn is_stdout_redirected_to_seekable_file() -> bool {
    let name = stdout_canonicalized();
    let Ok(mut f) = File::open(Path::new(&name)) else {
        return false;
    };
    // Probe: position query, seek to the end, and seek back must all work.
    f.stream_position().is_ok() && f.seek(SeekFrom::End(0)).is_ok() && f.rewind().is_ok()
}

/// Try to get the len if it is a block device
#[cfg(unix)]
fn try_get_len_of_block_device(file: &mut File) -> io::Result<Option<u64>> {
    if !file.metadata()?.file_type().is_block_device() {
        return Ok(None);
    }

    // FIXME: this can be replaced by file.stream_len() when stable.
    let len = file.seek(SeekFrom::End(0))?;
    file.rewind()?;
    Ok(Some(len))
}

/// Decide whether the named file is a named pipe, also known as a FIFO.
#[cfg(unix)]
fn is_fifo(filename: &str) -> bool {
    // A stat() failure (e.g. nonexistent file) simply means "not a FIFO".
    if let Ok(metadata) = std::fs::metadata(filename) {
        if metadata.file_type().is_fifo() {
            return true;
        }
    }
    false
}

/// Entry point: parse the `key=value` operands, open the input and output
/// ends (file, FIFO, or std streams), and run the copy.
#[uucore::main]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
    let matches = uu_app().try_get_matches_from(args)?;

    let settings: Settings = Parser::new().parse(
        matches
            .get_many::<String>(options::OPERANDS)
            .unwrap_or_default(),
    )?;

    // FIFOs get a dedicated constructor on unix; plain files and stdin
    // are handled generically.
    let i = match settings.infile {
        #[cfg(unix)]
        Some(ref infile) if is_fifo(infile) => Input::new_fifo(Path::new(&infile), &settings)?,
        Some(ref infile) => Input::new_file(Path::new(&infile), &settings)?,
        None => Input::new_stdin(&settings)?,
    };
    // When stdout is redirected to a seekable file, treat it as a file so
    // that `seek=N` works on it.
    let o = match settings.outfile {
        #[cfg(unix)]
        Some(ref outfile) if is_fifo(outfile) => Output::new_fifo(Path::new(&outfile), &settings)?,
        Some(ref outfile) => Output::new_file(Path::new(&outfile), &settings)?,
        None if is_stdout_redirected_to_seekable_file() => Output::new_file_from_stdout(&settings)?,
        None => Output::new_stdout(&settings)?,
    };

    dd_copy(i, o).map_err_context(|| translate!("dd-error-io-error"))
}

/// Build the clap command. dd takes free-form `key=value` operands rather
/// than dashed options, so everything is collected under OPERANDS.
pub fn uu_app() -> Command {
    Command::new(uucore::util_name())
        .version(uucore::crate_version!())
        .about(translate!("dd-about"))
        .override_usage(format_usage(&translate!("dd-usage")))
        .after_help(translate!("dd-after-help"))
        .infer_long_args(true)
        .arg(Arg::new(options::OPERANDS).num_args(1..))
}

#[cfg(test)]
mod tests {
    use crate::{Output, Parser, calc_bsize};
    use std::path::Path;

    // For coprime block sizes the lcm is the product.
    #[test]
    fn bsize_test_primes() {
        let (n, m) = (7901, 7919);
        let res = calc_bsize(n, m);
        assert_eq!(res % n, 0);
        assert_eq!(res % m, 0);
        assert_eq!(res, n * m);
    }

    #[test]
    fn bsize_test_rel_prime_obs_greater() {
        let (n, m) = (7 * 5119, 13 * 5119);
        let res = calc_bsize(n, m);
        assert_eq!(res % n, 0);
        assert_eq!(res % m, 0);
        assert_eq!(res, 7 * 13 * 5119);
    }

    #[test]
    fn bsize_test_rel_prime_ibs_greater() {
        let (n, m) = (13 * 5119, 7 * 5119);
        let res = calc_bsize(n, m);
        assert_eq!(res % n, 0);
        assert_eq!(res % m, 0);
        assert_eq!(res, 7 * 13 * 5119);
    }

    #[test]
fn bsize_test_3fac_rel_prime() { let (n, m) = (11 * 13 * 5119, 7 * 11 * 5119); let res = calc_bsize(n, m); assert_eq!(res % n, 0); assert_eq!(res % m, 0); assert_eq!(res, 7 * 11 * 13 * 5119); } #[test] fn bsize_test_ibs_greater() { let (n, m) = (512 * 1024, 256 * 1024); let res = calc_bsize(n, m); assert_eq!(res % n, 0); assert_eq!(res % m, 0); assert_eq!(res, n); } #[test] fn bsize_test_obs_greater() { let (n, m) = (256 * 1024, 512 * 1024); let res = calc_bsize(n, m); assert_eq!(res % n, 0); assert_eq!(res % m, 0); assert_eq!(res, m); } #[test] fn bsize_test_bs_eq() { let (n, m) = (1024, 1024); let res = calc_bsize(n, m); assert_eq!(res % n, 0); assert_eq!(res % m, 0); assert_eq!(res, m); } #[test] fn test_nocreat_causes_failure_when_ofile_doesnt_exist() { let args = &["conv=nocreat", "of=not-a-real.file"]; let settings = Parser::new().parse(args).unwrap(); assert!( Output::new_file(Path::new(settings.outfile.as_ref().unwrap()), &settings).is_err() ); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/main.rs000066400000000000000000000000721504311601400246330ustar00rootroot00000000000000uucore::bin!(uu_dd); // spell-checker:ignore procs uucore coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/numbers.rs000066400000000000000000000124341504311601400253670ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. //! Functions for formatting a number as a magnitude and a unit suffix. /// The first ten powers of 1024. const IEC_BASES: [u128; 10] = [ 1, 1_024, 1_048_576, 1_073_741_824, 1_099_511_627_776, 1_125_899_906_842_624, 1_152_921_504_606_846_976, 1_180_591_620_717_411_303_424, 1_208_925_819_614_629_174_706_176, 1_237_940_039_285_380_274_899_124_224, ]; const IEC_SUFFIXES: [&str; 9] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]; /// The first ten powers of 1000. 
const SI_BASES: [u128; 10] = [ 1, 1_000, 1_000_000, 1_000_000_000, 1_000_000_000_000, 1_000_000_000_000_000, 1_000_000_000_000_000_000, 1_000_000_000_000_000_000_000, 1_000_000_000_000_000_000_000_000, 1_000_000_000_000_000_000_000_000_000, ]; const SI_SUFFIXES: [&str; 9] = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]; /// A `SuffixType` determines whether the suffixes are 1000 or 1024 based. #[derive(Clone, Copy)] pub(crate) enum SuffixType { Iec, Si, } impl SuffixType { fn base_and_suffix(&self, n: u128) -> (u128, &'static str) { let (bases, suffixes) = match self { Self::Iec => (IEC_BASES, IEC_SUFFIXES), Self::Si => (SI_BASES, SI_SUFFIXES), }; let mut i = 0; while bases[i + 1] - bases[i] < n && i < suffixes.len() { i += 1; } (bases[i], suffixes[i]) } } /// Convert a number into a magnitude and a multi-byte unit suffix. /// /// The returned string has a maximum length of 5 chars, for example: "1.1kB", "999kB", "1MB". pub(crate) fn to_magnitude_and_suffix(n: u128, suffix_type: SuffixType) -> String { let (base, suffix) = suffix_type.base_and_suffix(n); // TODO To match dd on my machine, we would need to round like // this: // // 1049 => 1.0 kB // 1050 => 1.0 kB # why is this different? // 1051 => 1.1 kB // ... // 1149 => 1.1 kB // 1150 => 1.2 kB // ... // 1250 => 1.2 kB // 1251 => 1.3 kB // .. 
// 10500 => 10 kB // 10501 => 11 kB // let quotient = (n as f64) / (base as f64); if quotient < 10.0 { format!("{quotient:.1} {suffix}") } else { format!("{} {suffix}", quotient.round()) } } #[cfg(test)] mod tests { use crate::numbers::{SuffixType, to_magnitude_and_suffix}; #[test] fn test_to_magnitude_and_suffix_powers_of_1024() { assert_eq!(to_magnitude_and_suffix(1024, SuffixType::Iec), "1.0 KiB"); assert_eq!(to_magnitude_and_suffix(2048, SuffixType::Iec), "2.0 KiB"); assert_eq!(to_magnitude_and_suffix(4096, SuffixType::Iec), "4.0 KiB"); assert_eq!( to_magnitude_and_suffix(1024 * 1024, SuffixType::Iec), "1.0 MiB" ); assert_eq!( to_magnitude_and_suffix(2 * 1024 * 1024, SuffixType::Iec), "2.0 MiB" ); assert_eq!( to_magnitude_and_suffix(1024 * 1024 * 1024, SuffixType::Iec), "1.0 GiB" ); assert_eq!( to_magnitude_and_suffix(34 * 1024 * 1024 * 1024, SuffixType::Iec), "34 GiB" ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_to_magnitude_and_suffix_not_powers_of_1024() { assert_eq!(to_magnitude_and_suffix(1, SuffixType::Si), "1.0 B"); assert_eq!(to_magnitude_and_suffix(999, SuffixType::Si), "999 B"); assert_eq!(to_magnitude_and_suffix(1000, SuffixType::Si), "1.0 kB"); assert_eq!(to_magnitude_and_suffix(1001, SuffixType::Si), "1.0 kB"); assert_eq!(to_magnitude_and_suffix(1023, SuffixType::Si), "1.0 kB"); assert_eq!(to_magnitude_and_suffix(1025, SuffixType::Si), "1.0 kB"); assert_eq!(to_magnitude_and_suffix(10_001, SuffixType::Si), "10 kB"); assert_eq!(to_magnitude_and_suffix(999_000, SuffixType::Si), "999 kB"); assert_eq!(to_magnitude_and_suffix(999_001, SuffixType::Si), "1.0 MB"); assert_eq!(to_magnitude_and_suffix(999_999, SuffixType::Si), "1.0 MB"); assert_eq!(to_magnitude_and_suffix(1_000_000, SuffixType::Si), "1.0 MB"); assert_eq!(to_magnitude_and_suffix(1_000_001, SuffixType::Si), "1.0 MB"); assert_eq!(to_magnitude_and_suffix(1_100_000, SuffixType::Si), "1.1 MB"); assert_eq!(to_magnitude_and_suffix(1_100_001, SuffixType::Si), "1.1 MB"); 
assert_eq!(to_magnitude_and_suffix(1_900_000, SuffixType::Si), "1.9 MB"); assert_eq!(to_magnitude_and_suffix(1_900_001, SuffixType::Si), "1.9 MB"); assert_eq!(to_magnitude_and_suffix(9_900_000, SuffixType::Si), "9.9 MB"); assert_eq!(to_magnitude_and_suffix(9_900_001, SuffixType::Si), "9.9 MB"); assert_eq!( to_magnitude_and_suffix(999_000_000, SuffixType::Si), "999 MB" ); assert_eq!( to_magnitude_and_suffix(999_000_001, SuffixType::Si), "1.0 GB" ); assert_eq!( to_magnitude_and_suffix(1_000_000_000, SuffixType::Si), "1.0 GB" ); assert_eq!( to_magnitude_and_suffix(1_000_000_001, SuffixType::Si), "1.0 GB" ); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/parseargs.rs000066400000000000000000000546351504311601400257140ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore ctty, ctable, iseek, oseek, iconvflags, oconvflags parseargs outfile oconv #[cfg(test)] mod unit_tests; use super::{ConversionMode, IConvFlags, IFlags, Num, OConvFlags, OFlags, Settings, StatusLevel}; use crate::conversion_tables::ConversionTable; use thiserror::Error; use uucore::display::Quotable; use uucore::error::UError; use uucore::parser::parse_size::{ParseSizeError, Parser as SizeParser}; use uucore::show_warning; use uucore::translate; /// Parser Errors describe errors with parser input #[derive(Debug, PartialEq, Eq, Error)] pub enum ParseError { #[error("{}", translate!("dd-error-unrecognized-operand", "operand" => .0.clone()))] UnrecognizedOperand(String), #[error("{}", translate!("dd-error-multiple-format-table"))] MultipleFmtTable, #[error("{}", translate!("dd-error-multiple-case"))] MultipleUCaseLCase, #[error("{}", translate!("dd-error-multiple-block"))] MultipleBlockUnblock, #[error("{}", translate!("dd-error-multiple-excl"))] MultipleExclNoCreate, #[error("{}", 
translate!("dd-error-invalid-flag", "flag" => .0.clone(), "cmd" => uucore::execution_phrase()))] FlagNoMatch(String), #[error("{}", translate!("dd-error-conv-flag-no-match", "flag" => .0.clone()))] ConvFlagNoMatch(String), #[error("{}", translate!("dd-error-multiplier-parse-failure", "input" => .0.clone()))] MultiplierStringParseFailure(String), #[error("{}", translate!("dd-error-multiplier-overflow", "input" => .0.clone()))] MultiplierStringOverflow(String), #[error("{}", translate!("dd-error-block-without-cbs"))] BlockUnblockWithoutCBS, #[error("{}", translate!("dd-error-status-not-recognized", "level" => .0.clone()))] StatusLevelNotRecognized(String), #[error("{}", translate!("dd-error-unimplemented", "feature" => .0.clone()))] Unimplemented(String), #[error("{}", translate!("dd-error-bs-out-of-range", "param" => .0.clone()))] BsOutOfRange(String), #[error("{}", translate!("dd-error-invalid-number", "input" => .0.clone()))] InvalidNumber(String), } /// Contains a temporary state during parsing of the arguments #[derive(Debug, PartialEq, Default)] pub struct Parser { infile: Option, outfile: Option, /// The block size option specified on the command-line, if any. bs: Option, /// The input block size option specified on the command-line, if any. ibs: Option, /// The output block size option specified on the command-line, if any. obs: Option, cbs: Option, skip: Num, seek: Num, count: Option, conv: ConvFlags, /// Whether a data-transforming `conv` option has been specified. 
is_conv_specified: bool, iflag: IFlags, oflag: OFlags, status: Option, } #[derive(Debug, Default, PartialEq, Eq)] pub struct ConvFlags { ascii: bool, ebcdic: bool, ibm: bool, ucase: bool, lcase: bool, block: bool, unblock: bool, swab: bool, sync: bool, noerror: bool, sparse: bool, excl: bool, nocreat: bool, notrunc: bool, fdatasync: bool, fsync: bool, } #[derive(Clone, Copy, PartialEq)] enum Conversion { Ascii, Ebcdic, Ibm, } #[derive(Clone, Copy)] enum Case { Lower, Upper, } #[derive(Clone, Copy)] enum Block { Block(usize), Unblock(usize), } /// Return an Unimplemented error when the target is not Linux or Android macro_rules! linux_only { ($s: expr, $val: expr) => { if cfg!(any(target_os = "linux", target_os = "android")) { $val } else { return Err(ParseError::Unimplemented($s.to_string()).into()); } }; } impl Parser { pub(crate) fn new() -> Self { Self::default() } pub(crate) fn parse( self, operands: impl IntoIterator>, ) -> Result { self.read(operands)?.validate() } pub(crate) fn read( mut self, operands: impl IntoIterator>, ) -> Result { for operand in operands { self.parse_operand(operand.as_ref())?; } Ok(self) } pub(crate) fn validate(self) -> Result { let conv = self.conv; let conversion = match (conv.ascii, conv.ebcdic, conv.ibm) { (false, false, false) => None, (true, false, false) => Some(Conversion::Ascii), (false, true, false) => Some(Conversion::Ebcdic), (false, false, true) => Some(Conversion::Ibm), _ => return Err(ParseError::MultipleFmtTable), }; let case = match (conv.ucase, conv.lcase) { (false, false) => None, (true, false) => Some(Case::Upper), (false, true) => Some(Case::Lower), (true, true) => return Err(ParseError::MultipleUCaseLCase), }; let non_ascii = matches!(conversion, Some(Conversion::Ascii)); let conversion_table = get_ctable(conversion, case); if conv.nocreat && conv.excl { return Err(ParseError::MultipleExclNoCreate); } // The GNU docs state that // - ascii implies unblock // - ebcdic and ibm imply block // This has a side effect 
in how it's implemented in GNU, because this errors: // conv=block,unblock // but these don't: // conv=ascii,block,unblock // conv=block,ascii,unblock // conv=block,unblock,ascii // conv=block conv=unblock conv=ascii let block = if let Some(cbs) = self.cbs { match conversion { Some(Conversion::Ascii) => Some(Block::Unblock(cbs)), Some(_) => Some(Block::Block(cbs)), None => match (conv.block, conv.unblock) { (false, false) => None, (true, false) => Some(Block::Block(cbs)), (false, true) => Some(Block::Unblock(cbs)), (true, true) => return Err(ParseError::MultipleBlockUnblock), }, } } else if conv.block || conv.unblock { return Err(ParseError::BlockUnblockWithoutCBS); } else { None }; let iconv = IConvFlags { mode: conversion_mode(conversion_table, block, non_ascii, conv.sync), swab: conv.swab, sync: if conv.sync { if block.is_some() { Some(b' ') } else { Some(0u8) } } else { None }, noerror: conv.noerror, }; let oconv = OConvFlags { sparse: conv.sparse, excl: conv.excl, nocreat: conv.nocreat, notrunc: conv.notrunc, fdatasync: conv.fdatasync, fsync: conv.fsync, }; // Input and output block sizes. // // The `bs` option takes precedence. If either is not // provided, `ibs` and `obs` are each 512 bytes by default. let (ibs, obs) = match self.bs { None => (self.ibs.unwrap_or(512), self.obs.unwrap_or(512)), Some(bs) => (bs, bs), }; // Whether to buffer partial output blocks until they are completed. // // From the GNU `dd` documentation for the `bs=BYTES` option: // // > [...] if no data-transforming 'conv' option is specified, // > input is copied to the output as soon as it's read, even if // > it is smaller than the block size. 
// let buffered = self.bs.is_none() || self.is_conv_specified; let skip = self .skip .force_bytes_if(self.iflag.skip_bytes) .to_bytes(ibs as u64); let seek = self .seek .force_bytes_if(self.oflag.seek_bytes) .to_bytes(obs as u64); let count = self.count.map(|c| c.force_bytes_if(self.iflag.count_bytes)); Ok(Settings { skip, seek, count, iconv, oconv, ibs, obs, buffered, infile: self.infile, outfile: self.outfile, iflags: self.iflag, oflags: self.oflag, status: self.status, }) } fn parse_operand(&mut self, operand: &str) -> Result<(), ParseError> { match operand.split_once('=') { None => return Err(ParseError::UnrecognizedOperand(operand.to_string())), Some((k, v)) => match k { "bs" => self.bs = Some(Self::parse_bytes(k, v)?), "cbs" => self.cbs = Some(Self::parse_bytes(k, v)?), "conv" => { self.is_conv_specified = true; self.parse_conv_flags(v)?; } "count" => self.count = Some(Self::parse_n(v)?), "ibs" => self.ibs = Some(Self::parse_bytes(k, v)?), "if" => self.infile = Some(v.to_string()), "iflag" => self.parse_input_flags(v)?, "obs" => self.obs = Some(Self::parse_bytes(k, v)?), "of" => self.outfile = Some(v.to_string()), "oflag" => self.parse_output_flags(v)?, "seek" | "oseek" => self.seek = Self::parse_n(v)?, "skip" | "iseek" => self.skip = Self::parse_n(v)?, "status" => self.status = Some(Self::parse_status_level(v)?), _ => return Err(ParseError::UnrecognizedOperand(operand.to_string())), }, } Ok(()) } fn parse_n(val: &str) -> Result { let n = parse_bytes_with_opt_multiplier(val)?; Ok(if val.contains('B') { Num::Bytes(n) } else { Num::Blocks(n) }) } fn parse_bytes(arg: &str, val: &str) -> Result { parse_bytes_with_opt_multiplier(val)? 
.try_into() .map_err(|_| ParseError::BsOutOfRange(arg.to_string())) } fn parse_status_level(val: &str) -> Result { match val { "none" => Ok(StatusLevel::None), "noxfer" => Ok(StatusLevel::Noxfer), "progress" => Ok(StatusLevel::Progress), _ => Err(ParseError::StatusLevelNotRecognized(val.to_string())), } } #[allow(clippy::cognitive_complexity)] fn parse_input_flags(&mut self, val: &str) -> Result<(), ParseError> { let i = &mut self.iflag; for f in val.split(',') { match f { // Common flags "cio" => return Err(ParseError::Unimplemented(f.to_string())), "direct" => linux_only!(f, i.direct = true), "directory" => linux_only!(f, i.directory = true), "dsync" => linux_only!(f, i.dsync = true), "sync" => linux_only!(f, i.sync = true), "nocache" => linux_only!(f, i.nocache = true), "nonblock" => linux_only!(f, i.nonblock = true), "noatime" => linux_only!(f, i.noatime = true), "noctty" => linux_only!(f, i.noctty = true), "nofollow" => linux_only!(f, i.nofollow = true), "nolinks" => return Err(ParseError::Unimplemented(f.to_string())), "binary" => return Err(ParseError::Unimplemented(f.to_string())), "text" => return Err(ParseError::Unimplemented(f.to_string())), // Input-only flags "fullblock" => i.fullblock = true, "count_bytes" => i.count_bytes = true, "skip_bytes" => i.skip_bytes = true, // GNU silently ignores oflags given as iflag. 
"append" | "seek_bytes" => {} _ => return Err(ParseError::FlagNoMatch(f.to_string())), } } Ok(()) } #[allow(clippy::cognitive_complexity)] fn parse_output_flags(&mut self, val: &str) -> Result<(), ParseError> { let o = &mut self.oflag; for f in val.split(',') { match f { // Common flags "cio" => return Err(ParseError::Unimplemented(val.to_string())), "direct" => linux_only!(f, o.direct = true), "directory" => linux_only!(f, o.directory = true), "dsync" => linux_only!(f, o.dsync = true), "sync" => linux_only!(f, o.sync = true), "nocache" => linux_only!(f, o.nocache = true), "nonblock" => linux_only!(f, o.nonblock = true), "noatime" => linux_only!(f, o.noatime = true), "noctty" => linux_only!(f, o.noctty = true), "nofollow" => linux_only!(f, o.nofollow = true), "nolinks" => return Err(ParseError::Unimplemented(f.to_string())), "binary" => return Err(ParseError::Unimplemented(f.to_string())), "text" => return Err(ParseError::Unimplemented(f.to_string())), // Output-only flags "append" => o.append = true, "seek_bytes" => o.seek_bytes = true, // GNU silently ignores iflags given as oflag. 
"fullblock" | "count_bytes" | "skip_bytes" => {} _ => return Err(ParseError::FlagNoMatch(f.to_string())), } } Ok(()) } fn parse_conv_flags(&mut self, val: &str) -> Result<(), ParseError> { let c = &mut self.conv; for f in val.split(',') { match f { // Conversion "ascii" => c.ascii = true, "ebcdic" => c.ebcdic = true, "ibm" => c.ibm = true, // Case "lcase" => c.lcase = true, "ucase" => c.ucase = true, // Block "block" => c.block = true, "unblock" => c.unblock = true, // Other input "swab" => c.swab = true, "sync" => c.sync = true, "noerror" => c.noerror = true, // Output "sparse" => c.sparse = true, "excl" => c.excl = true, "nocreat" => c.nocreat = true, "notrunc" => c.notrunc = true, "fdatasync" => c.fdatasync = true, "fsync" => c.fsync = true, _ => return Err(ParseError::ConvFlagNoMatch(f.to_string())), } } Ok(()) } } impl UError for ParseError { fn code(&self) -> i32 { 1 } } fn show_zero_multiplier_warning() { show_warning!( "{}", translate!("dd-warning-zero-multiplier", "zero" => "0x".quote(), "alternative" => "00x".quote()) ); } /// Parse bytes using [`str::parse`], then map error if needed. fn parse_bytes_only(s: &str, i: usize) -> Result { s[..i] .parse() .map_err(|_| ParseError::MultiplierStringParseFailure(s.to_string())) } /// Parse a number of bytes from the given string, assuming no `'x'` characters. /// /// The `'x'` character means "multiply the number before the `'x'` by /// the number after the `'x'`". In order to compute the numbers /// before and after the `'x'`, use this function, which assumes there /// are no `'x'` characters in the string. /// /// A suffix `'c'` means multiply by 1, `'w'` by 2, and `'b'` by /// 512. You can also use standard block size suffixes like `'k'` for /// 1024. /// /// If the number would be too large, return [`u64::MAX`] instead. /// /// # Errors /// /// If a number cannot be parsed or if the multiplication would cause /// an overflow. 
///
/// # Examples
///
/// ```rust,ignore
/// assert_eq!(parse_bytes_no_x("123", "123").unwrap(), 123);
/// assert_eq!(parse_bytes_no_x("2c", "2c").unwrap(), 2 * 1);
/// assert_eq!(parse_bytes_no_x("3w", "3w").unwrap(), 3 * 2);
/// assert_eq!(parse_bytes_no_x("2b", "2b").unwrap(), 2 * 512);
/// assert_eq!(parse_bytes_no_x("2k", "2k").unwrap(), 2 * 1024);
/// ```
fn parse_bytes_no_x(full: &str, s: &str) -> Result<u64, ParseError> {
    let parser = SizeParser {
        capital_b_bytes: true,
        no_empty_numeric: true,
        ..Default::default()
    };
    // Look for at most one of the dd-specific suffixes 'c', 'w', 'b'.
    // NOTE(review): 'c' is located with find() while 'w'/'b' use rfind() —
    // asymmetric; confirm this matches the intended suffix grammar.
    let (num, multiplier) = match (s.find('c'), s.rfind('w'), s.rfind('b')) {
        // No dd-specific suffix: defer to the shared size parser, which
        // also handles standard suffixes like "k"/"KiB"/"MB".
        (None, None, None) => match parser.parse_u64(s) {
            Ok(n) => (n, 1),
            // Values too large for u64 saturate instead of erroring.
            Err(ParseSizeError::SizeTooBig(_)) => (u64::MAX, 1),
            Err(_) => return Err(ParseError::InvalidNumber(full.to_string())),
        },
        (Some(i), None, None) => (parse_bytes_only(s, i)?, 1),
        (None, Some(i), None) => (parse_bytes_only(s, i)?, 2),
        (None, None, Some(i)) => (parse_bytes_only(s, i)?, 512),
        // More than one of 'c'/'w'/'b' present: malformed.
        _ => return Err(ParseError::MultiplierStringParseFailure(full.to_string())),
    };
    num.checked_mul(multiplier)
        .ok_or_else(|| ParseError::MultiplierStringOverflow(full.to_string()))
}

/// Parse byte and multiplier like 512, 5KiB, or 1G.
/// Uses [`uucore::parser::parse_size`], and adds the 'w' and 'c' suffixes which are mentioned
/// in dd's info page.
pub fn parse_bytes_with_opt_multiplier(s: &str) -> Result<u64, ParseError> {
    // TODO On my Linux system, there seems to be a maximum block size of 4096 bytes:
    //
    //     $ printf "%0.sa" {1..10000} | dd bs=4095 count=1 status=none | wc -c
    //     4095
    //     $ printf "%0.sa" {1..10000} | dd bs=4k count=1 status=none | wc -c
    //     4096
    //     $ printf "%0.sa" {1..10000} | dd bs=4097 count=1 status=none | wc -c
    //     4096
    //     $ printf "%0.sa" {1..10000} | dd bs=5k count=1 status=none | wc -c
    //     4096
    //

    // Split on the 'x' characters. Each component will be parsed
    // individually, then multiplied together.
    let parts: Vec<&str> = s.split('x').collect();
    if parts.len() == 1 {
        parse_bytes_no_x(s, parts[0])
    } else {
        let mut total: u64 = 1;
        for part in parts {
            // A literal "0" factor is legal but almost certainly a typo for
            // the "00x" prefix form, so warn about it.
            if part == "0" {
                show_zero_multiplier_warning();
            }
            let num = parse_bytes_no_x(s, part)?;
            total = total
                .checked_mul(num)
                .ok_or_else(|| ParseError::InvalidNumber(s.to_string()))?;
        }
        Ok(total)
    }
}

/// Look up the static conversion table implied by the `conv=` conversion
/// (ascii/ebcdic/ibm) and/or case (lcase/ucase) selections, or `None` if
/// neither was given.
fn get_ctable(
    conversion: Option<Conversion>,
    case: Option<Case>,
) -> Option<&'static ConversionTable> {
    use crate::conversion_tables::*;

    Some(match (conversion, case) {
        (None, None) => return None,
        (Some(conv), None) => match conv {
            Conversion::Ascii => &EBCDIC_TO_ASCII,
            Conversion::Ebcdic => &ASCII_TO_EBCDIC,
            Conversion::Ibm => &ASCII_TO_IBM,
        },
        (None, Some(case)) => match case {
            Case::Lower => &ASCII_UCASE_TO_LCASE,
            Case::Upper => &ASCII_LCASE_TO_UCASE,
        },
        (Some(conv), Some(case)) => match (conv, case) {
            (Conversion::Ascii, Case::Upper) => &EBCDIC_TO_ASCII_LCASE_TO_UCASE,
            (Conversion::Ascii, Case::Lower) => &EBCDIC_TO_ASCII_UCASE_TO_LCASE,
            (Conversion::Ebcdic, Case::Upper) => &ASCII_TO_EBCDIC_LCASE_TO_UCASE,
            (Conversion::Ebcdic, Case::Lower) => &ASCII_TO_EBCDIC_UCASE_TO_LCASE,
            // NOTE(review): the Ibm arms pair Upper with UCASE_TO_LCASE,
            // the opposite of the Ebcdic arms above — verify against the
            // table definitions / GNU dd behavior.
            (Conversion::Ibm, Case::Upper) => &ASCII_TO_IBM_UCASE_TO_LCASE,
            (Conversion::Ibm, Case::Lower) => &ASCII_TO_IBM_LCASE_TO_UCASE,
        },
    })
}

/// Given the various command-line parameters, determine the conversion mode.
///
/// The `conv` command-line option can take many different values,
/// each of which may combine with others. For example, `conv=ascii`,
/// `conv=lcase`, `conv=sync`, and so on. The arguments to this
/// function represent the settings of those various command-line
/// parameters. This function translates those settings to a
/// [`ConversionMode`].
fn conversion_mode( ctable: Option<&'static ConversionTable>, block: Option, is_ascii: bool, is_sync: bool, ) -> Option { match (ctable, block) { (Some(ct), None) => Some(ConversionMode::ConvertOnly(ct)), (Some(ct), Some(Block::Block(cbs))) => { if is_ascii { Some(ConversionMode::ConvertThenBlock(ct, cbs, is_sync)) } else { Some(ConversionMode::BlockThenConvert(ct, cbs, is_sync)) } } (Some(ct), Some(Block::Unblock(cbs))) => { if is_ascii { Some(ConversionMode::ConvertThenUnblock(ct, cbs)) } else { Some(ConversionMode::UnblockThenConvert(ct, cbs)) } } (None, Some(Block::Block(cbs))) => Some(ConversionMode::BlockOnly(cbs, is_sync)), (None, Some(Block::Unblock(cbs))) => Some(ConversionMode::UnblockOnly(cbs)), (None, None) => None, } } #[cfg(test)] mod tests { use crate::Num; use crate::parseargs::{Parser, parse_bytes_with_opt_multiplier}; use std::matches; const BIG: &str = "9999999999999999999999999999999999999999999999999999999999999"; #[test] fn test_parse_bytes_with_opt_multiplier_invalid() { assert!(parse_bytes_with_opt_multiplier("123asdf").is_err()); } #[test] fn test_parse_bytes_with_opt_multiplier_without_x() { assert_eq!(parse_bytes_with_opt_multiplier("123").unwrap(), 123); assert_eq!(parse_bytes_with_opt_multiplier("123c").unwrap(), 123); // 123 * 1 assert_eq!(parse_bytes_with_opt_multiplier("123w").unwrap(), 123 * 2); assert_eq!(parse_bytes_with_opt_multiplier("123b").unwrap(), 123 * 512); assert_eq!(parse_bytes_with_opt_multiplier("123k").unwrap(), 123 * 1024); assert_eq!(parse_bytes_with_opt_multiplier(BIG).unwrap(), u64::MAX); } #[test] fn test_parse_bytes_with_opt_multiplier_with_x() { assert_eq!(parse_bytes_with_opt_multiplier("123x3").unwrap(), 123 * 3); assert_eq!(parse_bytes_with_opt_multiplier("1x2x3").unwrap(), 6); // 1 * 2 * 3 assert_eq!( parse_bytes_with_opt_multiplier("1wx2cx3w").unwrap(), 2 * 2 * (3 * 2) // (1 * 2) * (2 * 1) * (3 * 2) ); } #[test] fn test_parse_n() { for arg in ["1x8x4", "1c", "123b", "123w"] { 
assert!(matches!(Parser::parse_n(arg), Ok(Num::Blocks(_)))); } for arg in ["1Bx8x4", "2Bx8", "2Bx8B", "2x8B"] { assert!(matches!(Parser::parse_n(arg), Ok(Num::Bytes(_)))); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/parseargs/000077500000000000000000000000001504311601400253315ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/parseargs/unit_tests.rs000066400000000000000000000310761504311601400301070ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore fname, tname, fpath, specfile, testfile, unspec, ifile, ofile, outfile, fullblock, urand, fileio, atoe, atoibm, behaviour, bmax, bremain, btotal, cflags, creat, ctable, ctty, datastructures, doesnt, etoa, fileout, fname, gnudd, iconvflags, iseek, nocache, noctty, noerror, nofollow, nolinks, nonblock, oconvflags, oseek, outfile, parseargs, rlen, rmax, rposition, rremain, rsofar, rstat, sigusr, sigval, wlen, wstat, oconv use super::*; use crate::StatusLevel; use crate::conversion_tables::{ ASCII_TO_EBCDIC_UCASE_TO_LCASE, ASCII_TO_IBM, EBCDIC_TO_ASCII_LCASE_TO_UCASE, }; use crate::parseargs::Parser; #[cfg(not(any(target_os = "linux", target_os = "android")))] #[allow(clippy::useless_vec)] #[test] fn unimplemented_flags_should_error_non_linux() { let mut succeeded = Vec::new(); // The following flags are only implemented in linux for flag in [ "direct", "directory", "dsync", "sync", "nonblock", "noatime", "noctty", "nofollow", ] { let arg = format!("iflag={flag}"); if Parser::new().parse([&arg]).is_ok() { succeeded.push(arg); } let arg = format!("oflag={flag}"); if Parser::new().parse([&arg]).is_ok() { succeeded.push(arg); } } assert!( succeeded.is_empty(), "The following flags did not panic as expected: {succeeded:?}", ); } #[test] #[allow(clippy::useless_vec)] fn 
unimplemented_flags_should_error() { let mut succeeded = Vec::new(); // The following flags are not implemented for flag in ["cio", "nolinks", "text", "binary"] { let arg = format!("iflag={flag}"); if Parser::new().parse([&arg]).is_ok() { succeeded.push(arg); } let arg = format!("oflag={flag}"); if Parser::new().parse([&arg]).is_ok() { succeeded.push(arg); } } assert!( succeeded.is_empty(), "The following flags did not panic as expected: {succeeded:?}" ); } #[test] fn test_status_level_absent() { let args = ["if=foo.file", "of=bar.file"]; assert_eq!(Parser::new().parse(args).unwrap().status, None); } #[test] fn test_status_level_none() { let args = ["status=none", "if=foo.file", "of=bar.file"]; assert_eq!( Parser::new().parse(args).unwrap().status, Some(StatusLevel::None) ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_all_top_level_args_no_leading_dashes() { let args = [ "if=foo.file", "of=bar.file", "ibs=10", "obs=10", "cbs=1", "bs=100", "count=2", "skip=2", "seek=2", "iseek=2", "oseek=2", "status=progress", "conv=ascii,ucase", "iflag=count_bytes,skip_bytes", "oflag=append,seek_bytes", ]; let settings = Parser::new().parse(args).unwrap(); // ibs=10 and obs=10 are overwritten by bs=100 assert_eq!(settings.ibs, 100); assert_eq!(settings.obs, 100); // count=2 iflag=count_bytes assert_eq!(settings.count, Some(Num::Bytes(2))); // seek=2 oflag=seek_bytes assert_eq!(settings.seek, 2); // skip=2 iflag=skip_bytes assert_eq!(settings.skip, 2); // status=progress assert_eq!(settings.status, Some(StatusLevel::Progress)); // conv=ascii,ucase assert_eq!( settings.iconv, IConvFlags { // ascii implies unblock mode: Some(ConversionMode::ConvertThenUnblock( &EBCDIC_TO_ASCII_LCASE_TO_UCASE, 1 )), ..IConvFlags::default() }, ); // no conv flags apply to output assert_eq!(settings.oconv, OConvFlags::default()); // iconv=count_bytes,skip_bytes assert_eq!( settings.iflags, IFlags { count_bytes: true, skip_bytes: true, ..IFlags::default() }, ); // oconv=append,seek_bytes 
assert_eq!( settings.oflags, OFlags { append: true, seek_bytes: true, ..OFlags::default() }, ); } #[test] fn test_status_level_progress() { let args = ["if=foo.file", "of=bar.file", "status=progress"]; let settings = Parser::new().parse(args).unwrap(); assert_eq!(settings.status, Some(StatusLevel::Progress)); } #[test] fn test_status_level_noxfer() { let args = ["if=foo.file", "status=noxfer", "of=bar.file"]; let settings = Parser::new().parse(args).unwrap(); assert_eq!(settings.status, Some(StatusLevel::Noxfer)); } #[test] fn test_multiple_flags_options() { let args = [ "iflag=fullblock,count_bytes", "iflag=skip_bytes", "oflag=append", "oflag=seek_bytes", "conv=ascii,ucase", "conv=unblock", "cbs=512", ]; let settings = Parser::new().parse(args).unwrap(); // iflag assert_eq!( settings.iflags, IFlags { fullblock: true, count_bytes: true, skip_bytes: true, ..Default::default() } ); // oflag assert_eq!( settings.oflags, OFlags { append: true, seek_bytes: true, ..Default::default() } ); // conv assert_eq!( settings.iconv, IConvFlags { mode: Some(ConversionMode::ConvertThenUnblock( &EBCDIC_TO_ASCII_LCASE_TO_UCASE, 512 )), ..Default::default() } ); } #[test] fn test_override_multiple_options() { let args = [ "if=foo.file", "if=correct.file", "of=bar.file", "of=correct.file", "ibs=256", "ibs=1024", "obs=256", "obs=1024", "cbs=1", "cbs=2", "skip=0", "skip=2", "seek=0", "seek=2", "iseek=0", "iseek=2", "oseek=0", "oseek=2", "status=none", "status=noxfer", "count=512", "count=1024", "iflag=count_bytes", ]; let settings = Parser::new().parse(args).unwrap(); assert_eq!(settings.infile, Some("correct.file".into())); assert_eq!(settings.outfile, Some("correct.file".into())); assert_eq!(settings.ibs, 1024); assert_eq!(settings.obs, 1024); assert_eq!(settings.status, Some(StatusLevel::Noxfer)); assert_eq!(settings.skip, 2048); assert_eq!(settings.seek, 2048); assert_eq!(settings.count, Some(Num::Bytes(1024))); } // // ----- IConvFlags/Output ----- #[test] fn icf_ctable_error() { 
let args = ["conv=ascii,ebcdic,ibm"]; assert!(Parser::new().parse(args).is_err()); } #[test] fn icf_case_error() { let args = ["conv=ucase,lcase"]; assert!(Parser::new().parse(args).is_err()); } #[test] fn icf_block_error() { let args = ["conv=block,unblock"]; assert!(Parser::new().parse(args).is_err()); } #[test] fn icf_creat_error() { let args = ["conv=excl,nocreat"]; assert!(Parser::new().parse(args).is_err()); } #[test] fn parse_icf_token_ibm() { let args = ["conv=ibm"]; let settings = Parser::new().parse(args).unwrap(); assert_eq!( settings.iconv, IConvFlags { mode: Some(ConversionMode::ConvertOnly(&ASCII_TO_IBM)), ..Default::default() } ); } #[test] fn parse_icf_tokens_elu() { let args = ["conv=ebcdic,lcase"]; let settings = Parser::new().parse(args).unwrap(); assert_eq!( settings.iconv, IConvFlags { mode: Some(ConversionMode::ConvertOnly(&ASCII_TO_EBCDIC_UCASE_TO_LCASE)), ..Default::default() } ); } #[test] fn parse_icf_tokens_remaining() { let args = [ "conv=ascii,ucase,block,sparse,swab,sync,noerror,excl,nocreat,notrunc,noerror,fdatasync,fsync", ]; assert_eq!( Parser::new().read(args), Ok(Parser { conv: ConvFlags { ascii: true, ucase: true, block: true, sparse: true, swab: true, sync: true, noerror: true, excl: true, nocreat: true, notrunc: true, fdatasync: true, fsync: true, ..Default::default() }, is_conv_specified: true, ..Default::default() }) ); } #[test] fn parse_iflag_tokens() { let args = ["iflag=fullblock,count_bytes,skip_bytes"]; assert_eq!( Parser::new().read(args), Ok(Parser { iflag: IFlags { fullblock: true, count_bytes: true, skip_bytes: true, ..Default::default() }, ..Default::default() }) ); } #[test] fn parse_oflag_tokens() { let args = ["oflag=append,seek_bytes"]; assert_eq!( Parser::new().read(args), Ok(Parser { oflag: OFlags { append: true, seek_bytes: true, ..Default::default() }, ..Default::default() }) ); } #[cfg(any(target_os = "linux", target_os = "android"))] #[test] fn parse_iflag_tokens_linux() { let args = 
["iflag=direct,directory,dsync,sync,nonblock,noatime,noctty,nofollow"]; assert_eq!( Parser::new().read(args), Ok(Parser { iflag: IFlags { direct: true, directory: true, dsync: true, sync: true, nonblock: true, noatime: true, noctty: true, nofollow: true, ..Default::default() }, ..Default::default() }) ); } #[cfg(any(target_os = "linux", target_os = "android"))] #[test] fn parse_oflag_tokens_linux() { let args = ["oflag=direct,directory,dsync,sync,nonblock,noatime,noctty,nofollow"]; assert_eq!( Parser::new().read(args), Ok(Parser { oflag: OFlags { direct: true, directory: true, dsync: true, sync: true, nonblock: true, noatime: true, noctty: true, nofollow: true, ..Default::default() }, ..Default::default() }) ); } // ----- Multiplier Strings etc. ----- macro_rules! test_byte_parser ( ( $test_name:ident, $bs_str:expr, $bs:expr ) => { #[allow(non_snake_case)] #[test] fn $test_name() { // let bs_str = String::from($bs_str); assert_eq!($bs, parse_bytes_with_opt_multiplier($bs_str).unwrap()) } } ); test_byte_parser!(test_bytes_n, "765", 765); test_byte_parser!(test_bytes_c, "13c", 13); test_byte_parser!(test_bytes_w, "1w", 2); test_byte_parser!(test_bytes_b, "1b", 512); test_byte_parser!(test_bytes_k, "1kB", 1000); test_byte_parser!(test_bytes_K, "1K", 1024); test_byte_parser!(test_bytes_Ki, "1KiB", 1024); test_byte_parser!(test_bytes_MB, "2MB", 2 * 1000 * 1000); test_byte_parser!(test_bytes_M, "2M", 2 * 1024 * 1024); test_byte_parser!(test_bytes_Mi, "2MiB", 2 * 1024 * 1024); test_byte_parser!(test_bytes_GB, "3GB", 3 * 1000 * 1000 * 1000); test_byte_parser!(test_bytes_G, "3G", 3 * 1024 * 1024 * 1024); test_byte_parser!(test_bytes_Gi, "3GiB", 3 * 1024 * 1024 * 1024); #[cfg(target_pointer_width = "64")] #[cfg(test)] mod test_64bit_arch { use super::*; test_byte_parser!(test_bytes_TB, "4TB", 4 * 1000 * 1000 * 1000 * 1000); test_byte_parser!(test_bytes_T, "4T", 4 * 1024 * 1024 * 1024 * 1024); test_byte_parser!(test_bytes_Ti, "4TiB", 4 * 1024 * 1024 * 1024 * 1024); 
test_byte_parser!(test_bytes_PB, "5PB", 5 * 1000 * 1000 * 1000 * 1000 * 1000); test_byte_parser!(test_bytes_P, "5P", 5 * 1024 * 1024 * 1024 * 1024 * 1024); test_byte_parser!(test_bytes_Pi, "5PiB", 5 * 1024 * 1024 * 1024 * 1024 * 1024); test_byte_parser!( test_bytes_EB, "6EB", 6 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000 ); test_byte_parser!( test_bytes_E, "6E", 6 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 ); test_byte_parser!( test_bytes_Ei, "6EiB", 6 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 ); } #[test] #[should_panic] fn test_neg_panic() { let bs_str = format!("{}", -1); parse_bytes_with_opt_multiplier(&bs_str).unwrap(); } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dd/src/progress.rs000066400000000000000000000616551504311601400255710ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore btotal sigval //! Read and write progress tracking for dd. //! //! The [`ProgUpdate`] struct represents summary statistics for the //! read and write progress of a running `dd` process. The //! [`gen_prog_updater`] function can be used to implement a progress //! updater that runs in its own thread. use std::io::Write; use std::sync::mpsc; #[cfg(target_os = "linux")] use std::thread::JoinHandle; use std::time::Duration; #[cfg(target_os = "linux")] use signal_hook::iterator::Handle; use uucore::{ error::UResult, format::num_format::{FloatVariant, Formatter}, locale::setup_localization, translate, }; use crate::numbers::{SuffixType, to_magnitude_and_suffix}; #[derive(PartialEq, Eq)] pub(crate) enum ProgUpdateType { Periodic, Signal, Final, } /// Summary statistics for read and write progress of dd for a given duration. pub(crate) struct ProgUpdate { /// Read statistics. /// /// This contains information about the number of blocks read from /// the data source. 
pub(crate) read_stat: ReadStat, /// Write statistics. /// /// This contains information about the number of blocks and /// number of bytes written to the data sink. pub(crate) write_stat: WriteStat, /// The time period over which the reads and writes were measured. pub(crate) duration: Duration, /// The status of the write. /// /// True if the write is completed, false if still in-progress. pub(crate) update_type: ProgUpdateType, } impl ProgUpdate { /// Instantiate this struct. pub(crate) fn new( read_stat: ReadStat, write_stat: WriteStat, duration: Duration, update_type: ProgUpdateType, ) -> Self { Self { read_stat, write_stat, duration, update_type, } } /// Write the number of complete and partial records both read and written. /// /// The information is written to `w`. /// /// # Examples /// /// ```rust,ignore /// use std::io::Cursor; /// use std::time::Duration; /// use crate::progress::{ProgUpdate, ReadStat, WriteStat}; /// /// let read_stat = ReadStat::new(1, 2, 3, 999); /// let write_stat = WriteStat::new(4, 5, 6); /// let duration = Duration::new(789, 0); /// let prog_update = ProgUpdate { /// read_stat, /// write_stat, /// duration, /// }; /// /// let mut cursor = Cursor::new(vec![]); /// prog_update.write_io_lines(&mut cursor).unwrap(); /// assert_eq!( /// cursor.get_ref(), /// b"1+2 records in\n3 truncated records\n4+5 records out\n" /// ); /// ``` fn write_io_lines(&self, w: &mut impl Write) -> std::io::Result<()> { self.read_stat.report(w)?; self.write_stat.report(w)?; match self.read_stat.records_truncated { 0 => {} count => { let message = translate!("dd-progress-truncated-record", "count" => count); writeln!(w, "{message}")?; } } Ok(()) } /// Write the number of bytes written, duration, and throughput. /// /// The information is written to `w`. If `rewrite` is `true`, /// then a `\r` character is written first and no newline is /// written at the end. 
When writing to `stderr`, this has the /// visual effect of overwriting the previous characters on the /// line. /// /// # Examples /// /// ```rust,ignore /// use std::io::Cursor; /// use std::time::Duration; /// use crate::progress::ProgUpdate; /// /// let prog_update = ProgUpdate { /// read_stat: Default::default(), /// write_stat: Default::default(), /// duration: Duration::new(1, 0), // one second /// }; /// /// let mut cursor = Cursor::new(vec![]); /// let rewrite = false; /// prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); /// assert_eq!(cursor.get_ref(), b"0 bytes copied, 1.0 s, 0.0 B/s\n"); /// ``` fn write_prog_line(&self, w: &mut impl Write, rewrite: bool) -> UResult<()> { // The total number of bytes written as a string, in SI and IEC format. let btotal = self.write_stat.bytes_total; let btotal_metric = to_magnitude_and_suffix(btotal, SuffixType::Si); let btotal_bin = to_magnitude_and_suffix(btotal, SuffixType::Iec); // Compute the throughput (bytes per second) as a string. let duration = self.duration.as_secs_f64(); let safe_millis = std::cmp::max(1, self.duration.as_millis()); let rate = 1000 * (btotal / safe_millis); let transfer_rate = to_magnitude_and_suffix(rate, SuffixType::Si); // If we are rewriting the progress line, do write a carriage // return (`\r`) at the beginning and don't write a newline // (`\n`) at the end. let (carriage_return, newline) = if rewrite { ("\r", "") } else { ("", "\n") }; // The duration should be formatted as in `printf %g`. let mut duration_str = Vec::new(); uucore::format::num_format::Float { variant: FloatVariant::Shortest, ..Default::default() } .fmt(&mut duration_str, &duration.into())?; // We assume that printf will output valid UTF-8 let duration_str = std::str::from_utf8(&duration_str).unwrap(); // If the number of bytes written is sufficiently large, then // print a more concise representation of the number, like // "1.2 kB" and "1.0 KiB". 
let message = match btotal { 1 => { translate!("dd-progress-byte-copied", "bytes" => btotal, "duration" => duration_str, "rate" => transfer_rate) } 0..=999 => { translate!("dd-progress-bytes-copied", "bytes" => btotal, "duration" => duration_str, "rate" => transfer_rate) } 1000..=1023 => { translate!("dd-progress-bytes-copied-si", "bytes" => btotal, "si" => btotal_metric, "duration" => duration_str, "rate" => transfer_rate) } _ => { translate!("dd-progress-bytes-copied-si-iec", "bytes" => btotal, "si" => btotal_metric, "iec" => btotal_bin, "duration" => duration_str, "rate" => transfer_rate) } }; write!(w, "{carriage_return}{message}{newline}")?; Ok(()) } /// Write all summary statistics. /// /// This is a convenience method that calls /// [`ProgUpdate::write_io_lines`] and /// [`ProgUpdate::write_prog_line`] in that order. The information /// is written to `w`. It optionally begins by writing a new line, /// intended to handle the case of an existing progress line. /// /// # Examples /// /// ```rust,ignore /// use std::io::Cursor; /// use std::time::Duration; /// use crate::progress::ProgUpdate; /// /// let prog_update = ProgUpdate { /// read_stat: Default::default(), /// write_stat: Default::default(), /// duration: Duration::new(1, 0), // one second /// }; /// let mut cursor = Cursor::new(vec![]); /// prog_update.write_transfer_stats(&mut cursor, false).unwrap(); /// let mut iter = cursor.get_ref().split(|v| *v == b'\n'); /// assert_eq!(iter.next().unwrap(), b"0+0 records in"); /// assert_eq!(iter.next().unwrap(), b"0+0 records out"); /// assert_eq!(iter.next().unwrap(), b"0 bytes copied, 1.0 s, 0.0 B/s"); /// assert_eq!(iter.next().unwrap(), b""); /// assert!(iter.next().is_none()); /// ``` fn write_transfer_stats(&self, w: &mut impl Write, new_line: bool) -> UResult<()> { if new_line { writeln!(w)?; } self.write_io_lines(w)?; let rewrite = false; self.write_prog_line(w, rewrite)?; Ok(()) } /// Print number of complete and partial records read and written to 
stderr. /// /// See [`ProgUpdate::write_io_lines`] for more information. pub(crate) fn print_io_lines(&self) { let mut stderr = std::io::stderr(); self.write_io_lines(&mut stderr).unwrap(); } /// Re-print the number of bytes written, duration, and throughput. /// /// See [`ProgUpdate::write_prog_line`] for more information. pub(crate) fn reprint_prog_line(&self) { let mut stderr = std::io::stderr(); let rewrite = true; self.write_prog_line(&mut stderr, rewrite).unwrap(); } /// Write all summary statistics. /// /// See [`ProgUpdate::write_transfer_stats`] for more information. pub(crate) fn print_transfer_stats(&self, new_line: bool) { let mut stderr = std::io::stderr(); self.write_transfer_stats(&mut stderr, new_line).unwrap(); } /// Write all the final statistics. pub(crate) fn print_final_stats( &self, print_level: Option, progress_printed: bool, ) { match print_level { Some(StatusLevel::None) => {} Some(StatusLevel::Noxfer) => self.print_io_lines(), Some(StatusLevel::Progress) | None => self.print_transfer_stats(progress_printed), } } } /// Read statistics. /// /// This contains information about the number of blocks read from the /// input file. A block is sometimes referred to as a "record". #[derive(Clone, Copy, Default)] pub(crate) struct ReadStat { /// The number of complete blocks that have been read. pub(crate) reads_complete: u64, /// The number of partial blocks that have been read. /// /// A partial block read can happen if, for example, there are /// fewer bytes in the input file than the specified input block /// size. pub(crate) reads_partial: u64, /// The number of truncated records. /// /// A truncated record can only occur in `conv=block` mode. pub(crate) records_truncated: u32, /// The total number of bytes read. pub(crate) bytes_total: u64, } impl ReadStat { /// Create a new instance. 
#[allow(dead_code)] fn new(complete: u64, partial: u64, truncated: u32, bytes_total: u64) -> Self { Self { reads_complete: complete, reads_partial: partial, records_truncated: truncated, bytes_total, } } /// Whether this counter has zero complete reads and zero partial reads. pub(crate) fn is_empty(&self) -> bool { self.reads_complete == 0 && self.reads_partial == 0 } /// Write the counts in the format required by `dd`. /// /// # Errors /// /// If there is a problem writing to `w`. fn report(&self, w: &mut impl Write) -> std::io::Result<()> { let message = translate!("dd-progress-records-in", "complete" => self.reads_complete, "partial" => self.reads_partial); writeln!(w, "{message}")?; Ok(()) } } impl std::ops::AddAssign for ReadStat { fn add_assign(&mut self, other: Self) { *self = Self { reads_complete: self.reads_complete + other.reads_complete, reads_partial: self.reads_partial + other.reads_partial, records_truncated: self.records_truncated + other.records_truncated, bytes_total: self.bytes_total + other.bytes_total, } } } /// Write statistics. /// /// This contains information about the number of blocks written to /// the output file and the total number of bytes written. #[derive(Clone, Copy, Default)] pub(crate) struct WriteStat { /// The number of complete blocks that have been written. pub(crate) writes_complete: u64, /// The number of partial blocks that have been written. /// /// A partial block write can happen if, for example, there are /// fewer bytes in the input file than the specified output block /// size. pub(crate) writes_partial: u64, /// The total number of bytes written. pub(crate) bytes_total: u128, } impl WriteStat { /// Create a new instance. #[allow(dead_code)] fn new(complete: u64, partial: u64, bytes_total: u128) -> Self { Self { writes_complete: complete, writes_partial: partial, bytes_total, } } /// Write the counts in the format required by `dd`. /// /// # Errors /// /// If there is a problem writing to `w`. 
fn report(&self, w: &mut impl Write) -> std::io::Result<()> { let message = translate!("dd-progress-records-out", "complete" => self.writes_complete, "partial" => self.writes_partial); writeln!(w, "{message}") } } impl std::ops::AddAssign for WriteStat { fn add_assign(&mut self, other: Self) { *self = Self { writes_complete: self.writes_complete + other.writes_complete, writes_partial: self.writes_partial + other.writes_partial, bytes_total: self.bytes_total + other.bytes_total, } } } impl std::ops::Add for WriteStat { type Output = Self; fn add(self, other: Self) -> Self { Self { writes_complete: self.writes_complete + other.writes_complete, writes_partial: self.writes_partial + other.writes_partial, bytes_total: self.bytes_total + other.bytes_total, } } } /// How much detail to report when printing transfer statistics. /// /// This corresponds to the available settings of the `status` /// command-line argument. #[derive(Copy, Clone, Debug, PartialEq)] pub(crate) enum StatusLevel { /// Report number of blocks read and written, throughput, and volume. /// /// This corresponds to `status=progress`. Progress, /// Report number of blocks read and written, but no throughput and volume. /// /// This corresponds to `status=noxfer`. Noxfer, /// Print no status information. None, } /// Return a closure that can be used in its own thread to print progress info. /// /// This function returns a closure that receives [`ProgUpdate`] /// instances sent through `rx`. When a [`ProgUpdate`] instance is /// received, the transfer statistics are re-printed to stderr. #[cfg(not(target_os = "linux"))] pub(crate) fn gen_prog_updater( rx: mpsc::Receiver, print_level: Option, ) -> impl Fn() { move || { // As we are in a thread, we need to set up localization independently. let _ = setup_localization("dd"); let mut progress_printed = false; while let Ok(update) = rx.recv() { // Print the final read/write statistics. 
if update.update_type == ProgUpdateType::Final { update.print_final_stats(print_level, progress_printed); return; } if Some(StatusLevel::Progress) == print_level { update.reprint_prog_line(); progress_printed = true; } } } } /// signal handler listens for SIGUSR1 signal and runs provided closure. #[cfg(target_os = "linux")] pub(crate) struct SignalHandler { handle: Handle, thread: Option>, } #[cfg(target_os = "linux")] impl SignalHandler { pub(crate) fn install_signal_handler( f: Box, ) -> Result { use signal_hook::consts::signal::*; use signal_hook::iterator::Signals; let mut signals = Signals::new([SIGUSR1])?; let handle = signals.handle(); let thread = std::thread::spawn(move || { for signal in &mut signals { match signal { SIGUSR1 => (*f)(), _ => unreachable!(), } } }); Ok(Self { handle, thread: Some(thread), }) } } #[cfg(target_os = "linux")] impl Drop for SignalHandler { fn drop(&mut self) { self.handle.close(); if let Some(thread) = std::mem::take(&mut self.thread) { thread.join().unwrap(); } } } /// Return a closure that can be used in its own thread to print progress info. /// /// This function returns a closure that receives [`ProgUpdate`] /// instances sent through `rx`. When a [`ProgUpdate`] instance is /// received, the transfer statistics are re-printed to stderr. /// /// The closure also registers a signal handler for `SIGUSR1`. When /// the `SIGUSR1` signal is sent to this process, the transfer /// statistics are printed to stderr. #[cfg(target_os = "linux")] pub(crate) fn gen_prog_updater( rx: mpsc::Receiver, print_level: Option, ) -> impl Fn() { // -------------------------------------------------------------- move || { // As we are in a thread, we need to set up localization independently. let _ = setup_localization("dd"); // Holds the state of whether we have printed the current progress. // This is needed so that we know whether or not to print a newline // character before outputting non-progress data. 
let mut progress_printed = false; while let Ok(update) = rx.recv() { match update.update_type { ProgUpdateType::Final => { // Print the final read/write statistics. update.print_final_stats(print_level, progress_printed); return; } ProgUpdateType::Periodic => { // (Re)print status line if progress is requested. if Some(StatusLevel::Progress) == print_level { update.reprint_prog_line(); progress_printed = true; } } ProgUpdateType::Signal => { update.print_transfer_stats(progress_printed); // Reset the progress printed, since print_transfer_stats always prints a newline. progress_printed = false; } } } } } #[cfg(test)] mod tests { use std::env; use std::io::Cursor; use std::time::Duration; use uucore::locale::setup_localization; use super::{ProgUpdate, ReadStat, WriteStat}; fn init() { unsafe { env::set_var("LANG", "C"); } let _ = setup_localization("dd"); } fn prog_update_write(n: u128) -> ProgUpdate { ProgUpdate { read_stat: ReadStat::default(), write_stat: WriteStat { bytes_total: n, ..Default::default() }, duration: Duration::new(1, 0), // one second update_type: super::ProgUpdateType::Periodic, } } fn prog_update_duration(duration: Duration) -> ProgUpdate { ProgUpdate { read_stat: ReadStat::default(), write_stat: WriteStat::default(), duration, update_type: super::ProgUpdateType::Periodic, } } #[test] fn test_read_stat_report() { init(); let read_stat = ReadStat::new(1, 2, 3, 4); let mut cursor = Cursor::new(vec![]); read_stat.report(&mut cursor).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1+2 records in\n" ); } #[test] fn test_write_stat_report() { init(); let write_stat = WriteStat::new(1, 2, 3); let mut cursor = Cursor::new(vec![]); write_stat.report(&mut cursor).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1+2 records out\n" ); } #[test] fn test_prog_update_write_io_lines() { init(); let read_stat = ReadStat::new(1, 2, 3, 4); let write_stat = WriteStat::new(4, 5, 6); let duration = Duration::new(789, 0); 
let update_type = super::ProgUpdateType::Periodic; let prog_update = ProgUpdate { read_stat, write_stat, duration, update_type, }; let mut cursor = Cursor::new(vec![]); prog_update.write_io_lines(&mut cursor).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1+2 records in\n4+5 records out\n3 truncated records\n" ); } #[test] fn test_prog_update_write_prog_line() { init(); let prog_update = ProgUpdate { read_stat: ReadStat::default(), write_stat: WriteStat::default(), duration: Duration::new(1, 0), // one second update_type: super::ProgUpdateType::Periodic, }; let mut cursor = Cursor::new(vec![]); let rewrite = false; prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); // TODO The expected output string below is what our code // produces today, but it does not match GNU dd: // // $ : | dd // 0 bytes copied, 7.9151e-05 s, 0.0 kB/s // // The throughput still does not match GNU dd. assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "0 bytes copied, 1 s, 0.0 B/s\n" ); let prog_update = prog_update_write(1); let mut cursor = Cursor::new(vec![]); prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1 byte copied, 1 s, 0.0 B/s\n" ); let prog_update = prog_update_write(999); let mut cursor = Cursor::new(vec![]); prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "999 bytes copied, 1 s, 0.0 B/s\n" ); let prog_update = prog_update_write(1000); let mut cursor = Cursor::new(vec![]); prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1000 bytes (1.0 kB) copied, 1 s, 1.0 kB/s\n" ); let prog_update = prog_update_write(1023); let mut cursor = Cursor::new(vec![]); prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1023 bytes (1.0 kB) copied, 1 s, 1.0 kB/s\n" ); let 
prog_update = prog_update_write(1024); let mut cursor = Cursor::new(vec![]); prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "1024 bytes (1.0 kB, 1.0 KiB) copied, 1 s, 1.0 kB/s\n" ); } #[test] fn write_transfer_stats() { init(); let prog_update = ProgUpdate { read_stat: ReadStat::default(), write_stat: WriteStat::default(), duration: Duration::new(1, 0), // one second update_type: super::ProgUpdateType::Periodic, }; let mut cursor = Cursor::new(vec![]); prog_update .write_transfer_stats(&mut cursor, false) .unwrap(); let output_str = std::str::from_utf8(cursor.get_ref()).unwrap(); let mut iter = output_str.split('\n'); assert_eq!(iter.next().unwrap(), "0+0 records in"); assert_eq!(iter.next().unwrap(), "0+0 records out"); assert_eq!(iter.next().unwrap(), "0 bytes copied, 1 s, 0.0 B/s"); assert_eq!(iter.next().unwrap(), ""); assert!(iter.next().is_none()); } #[test] fn write_final_transfer_stats() { init(); // Tests the formatting of the final statistics written after a progress line. 
let prog_update = ProgUpdate { read_stat: ReadStat::default(), write_stat: WriteStat::default(), duration: Duration::new(1, 0), // one second update_type: super::ProgUpdateType::Periodic, }; let mut cursor = Cursor::new(vec![]); let rewrite = true; prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); prog_update.write_transfer_stats(&mut cursor, true).unwrap(); let output_str = std::str::from_utf8(cursor.get_ref()).unwrap(); let mut iter = output_str.split('\n'); assert_eq!(iter.next().unwrap(), "\r0 bytes copied, 1 s, 0.0 B/s"); assert_eq!(iter.next().unwrap(), "0+0 records in"); assert_eq!(iter.next().unwrap(), "0+0 records out"); assert_eq!(iter.next().unwrap(), "0 bytes copied, 1 s, 0.0 B/s"); assert_eq!(iter.next().unwrap(), ""); assert!(iter.next().is_none()); } #[test] fn test_duration_precision() { init(); let prog_update = prog_update_duration(Duration::from_nanos(123)); let mut cursor = Cursor::new(vec![]); let rewrite = false; prog_update.write_prog_line(&mut cursor, rewrite).unwrap(); assert_eq!( std::str::from_utf8(cursor.get_ref()).unwrap(), "0 bytes copied, 0.000000123 s, 0.0 B/s\n" ); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/000077500000000000000000000000001504311601400225555ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/Cargo.toml000066400000000000000000000013221504311601400245030ustar00rootroot00000000000000[package] name = "uu_df" description = "df ~ (uutils) display file system information" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/df" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/df.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["libc", "fsext", "parser"] } unicode-width = { workspace = true } thiserror = { 
workspace = true } fluent = { workspace = true } [dev-dependencies] tempfile = { workspace = true } [[bin]] name = "df" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/LICENSE000077700000000000000000000000001504311601400254232../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/locales/000077500000000000000000000000001504311601400241775ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/locales/en-US.ftl000066400000000000000000000054361504311601400256450ustar00rootroot00000000000000df-about = Show information about the file system on which each FILE resides, or all file systems by default. df-usage = df [OPTION]... [FILE]... df-after-help = Display values are in units of the first available SIZE from --block-size, and the DF_BLOCK_SIZE, BLOCK_SIZE and BLOCKSIZE environment variables. Otherwise, units default to 1024 bytes (or 512 if POSIXLY_CORRECT is set). SIZE is an integer and optional unit (example: 10M is 10*1024*1024). Units are K, M, G, T, P, E, Z, Y (powers of 1024) or KB, MB,... (powers of 1000). # Help messages df-help-print-help = Print help information. df-help-all = include dummy file systems df-help-block-size = scale sizes by SIZE before printing them; e.g. '-BM' prints sizes in units of 1,048,576 bytes df-help-total = produce a grand total df-help-human-readable = print sizes in human readable format (e.g., 1K 234M 2G) df-help-si = likewise, but use powers of 1000 not 1024 df-help-inodes = list inode information instead of block usage df-help-kilo = like --block-size=1K df-help-local = limit listing to local file systems df-help-no-sync = do not invoke sync before getting usage info (default) df-help-output = use output format defined by FIELD_LIST, or print all fields if FIELD_LIST is omitted. 
df-help-portability = use the POSIX output format df-help-sync = invoke sync before getting usage info (non-windows only) df-help-type = limit listing to file systems of type TYPE df-help-print-type = print file system type df-help-exclude-type = limit listing to file systems not of type TYPE # Error messages df-error-block-size-too-large = --block-size argument '{ $size }' too large df-error-invalid-block-size = invalid --block-size argument { $size } df-error-invalid-suffix = invalid suffix in --block-size argument { $size } df-error-field-used-more-than-once = option --output: field { $field } used more than once df-error-filesystem-type-both-selected-and-excluded = file system type { $type } both selected and excluded df-error-no-such-file-or-directory = { $path }: No such file or directory df-error-no-file-systems-processed = no file systems processed df-error-cannot-access-over-mounted = cannot access { $path }: over-mounted by another device df-error-cannot-read-table-of-mounted-filesystems = cannot read table of mounted file systems df-error-inodes-not-supported-windows = { $program }: doesn't support -i option # Headers df-header-filesystem = Filesystem df-header-size = Size df-header-used = Used df-header-avail = Avail df-header-available = Available df-header-use-percent = Use% df-header-capacity = Capacity df-header-mounted-on = Mounted on df-header-inodes = Inodes df-header-iused = IUsed df-header-iavail = IFree df-header-iuse-percent = IUse% df-header-file = File df-header-type = Type # Other df-total = total df-blocks-suffix = -blocks coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/locales/fr-FR.ftl000066400000000000000000000064601504311601400256300ustar00rootroot00000000000000df-about = afficher des informations sur le système de fichiers sur lequel chaque FICHIER réside, ou tous les systèmes de fichiers par défaut. df-usage = df [OPTION]... [FICHIER]... 
df-after-help = Les valeurs affichées sont en unités de la première TAILLE disponible de --block-size, et des variables d'environnement DF_BLOCK_SIZE, BLOCK_SIZE et BLOCKSIZE. Sinon, les unités par défaut sont 1024 octets (ou 512 si POSIXLY_CORRECT est défini). TAILLE est un entier et une unité optionnelle (exemple : 10M est 10*1024*1024). Les unités sont K, M, G, T, P, E, Z, Y (puissances de 1024) ou KB, MB,... (puissances de 1000). # Messages d'aide df-help-print-help = afficher les informations d'aide. df-help-all = inclure les systèmes de fichiers factices df-help-block-size = mettre les tailles à l'échelle par TAILLE avant de les afficher ; par ex. '-BM' affiche les tailles en unités de 1 048 576 octets df-help-total = produire un total général df-help-human-readable = afficher les tailles dans un format lisible par l'homme (par ex., 1K 234M 2G) df-help-si = pareillement, mais utiliser les puissances de 1000 pas 1024 df-help-inodes = lister les informations d'inode au lieu de l'utilisation des blocs df-help-kilo = comme --block-size=1K df-help-local = limiter l'affichage aux systèmes de fichiers locaux df-help-no-sync = ne pas invoquer sync avant d'obtenir les informations d'utilisation (par défaut) df-help-output = utiliser le format de sortie défini par LISTE_CHAMPS, ou afficher tous les champs si LISTE_CHAMPS est omise. 
df-help-portability = utiliser le format de sortie POSIX df-help-sync = invoquer sync avant d'obtenir les informations d'utilisation (non-windows seulement) df-help-type = limiter l'affichage aux systèmes de fichiers de type TYPE df-help-print-type = afficher le type de système de fichiers df-help-exclude-type = limiter l'affichage aux systèmes de fichiers pas de type TYPE # Messages d'erreur df-error-block-size-too-large = argument --block-size '{ $size }' trop grand df-error-invalid-block-size = argument --block-size invalide { $size } df-error-invalid-suffix = suffixe invalide dans l'argument --block-size { $size } df-error-field-used-more-than-once = option --output : champ { $field } utilisé plus d'une fois df-error-filesystem-type-both-selected-and-excluded = type de système de fichiers { $type } à la fois sélectionné et exclu df-error-no-such-file-or-directory = { $path } : aucun fichier ou répertoire de ce type df-error-no-file-systems-processed = aucun système de fichiers traité df-error-cannot-access-over-mounted = impossible d'accéder à { $path } : sur-monté par un autre périphérique df-error-cannot-read-table-of-mounted-filesystems = impossible de lire la table des systèmes de fichiers montés df-error-inodes-not-supported-windows = { $program } : ne supporte pas l'option -i # En-têtes du tableau df-header-filesystem = Sys. de fichiers df-header-size = Taille df-header-used = Utilisé df-header-avail = Disp. 
df-header-available = Disponible df-header-use-percent = Util% df-header-capacity = Capacité df-header-mounted-on = Monté sur df-header-inodes = Inodes df-header-iused = IUtil df-header-iavail = ILibre df-header-iuse-percent = IUtil% df-header-file = Fichier df-header-type = Type # Autres messages df-total = total df-blocks-suffix = -blocs coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/000077500000000000000000000000001504311601400233445ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/blocks.rs000066400000000000000000000231651504311601400251760ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. //! Types for representing and displaying block sizes. use crate::{OPT_BLOCKSIZE, OPT_PORTABILITY}; use clap::ArgMatches; use std::{env, fmt}; use uucore::{ display::Quotable, parser::parse_size::{ParseSizeError, parse_size_u64}, }; /// The first ten powers of 1024. const IEC_BASES: [u128; 10] = [ 1, 1_024, 1_048_576, 1_073_741_824, 1_099_511_627_776, 1_125_899_906_842_624, 1_152_921_504_606_846_976, 1_180_591_620_717_411_303_424, 1_208_925_819_614_629_174_706_176, 1_237_940_039_285_380_274_899_124_224, ]; /// The first ten powers of 1000. const SI_BASES: [u128; 10] = [ 1, 1_000, 1_000_000, 1_000_000_000, 1_000_000_000_000, 1_000_000_000_000_000, 1_000_000_000_000_000_000, 1_000_000_000_000_000_000_000, 1_000_000_000_000_000_000_000_000, 1_000_000_000_000_000_000_000_000_000, ]; /// A `SuffixType` determines whether the suffixes are 1000 or 1024 based, and whether they are /// intended for `HumanReadable` mode or not. #[derive(Clone, Copy)] pub(crate) enum SuffixType { Iec, Si, HumanReadable(HumanReadable), } impl SuffixType { /// The first ten powers of 1024 and 1000, respectively. 
fn bases(&self) -> [u128; 10] { match self { Self::Iec | Self::HumanReadable(HumanReadable::Binary) => IEC_BASES, Self::Si | Self::HumanReadable(HumanReadable::Decimal) => SI_BASES, } } /// Suffixes for the first nine multi-byte unit suffixes. fn suffixes(&self) -> [&'static str; 9] { match self { // we use "kB" instead of "KB", same as GNU df Self::Si => ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], Self::Iec => ["B", "K", "M", "G", "T", "P", "E", "Z", "Y"], Self::HumanReadable(HumanReadable::Binary) => { ["", "K", "M", "G", "T", "P", "E", "Z", "Y"] } Self::HumanReadable(HumanReadable::Decimal) => { ["", "k", "M", "G", "T", "P", "E", "Z", "Y"] } } } } /// Convert a number into a magnitude and a multi-byte unit suffix. /// /// The returned string has a maximum length of 5 chars, for example: "1.1kB", "999kB", "1MB". pub(crate) fn to_magnitude_and_suffix(n: u128, suffix_type: SuffixType) -> String { let bases = suffix_type.bases(); let suffixes = suffix_type.suffixes(); let mut i = 0; while bases[i + 1] - bases[i] < n && i < suffixes.len() { i += 1; } let quot = n / bases[i]; let rem = n % bases[i]; let suffix = suffixes[i]; if rem == 0 { format!("{quot}{suffix}") } else { let tenths_place = rem / (bases[i] / 10); if rem % (bases[i] / 10) == 0 { format!("{quot}.{tenths_place}{suffix}") } else if tenths_place + 1 == 10 || quot >= 10 { format!("{}{suffix}", quot + 1) } else { format!("{quot}.{}{suffix}", tenths_place + 1) } } } /// A mode to use in condensing the human readable display of a large number /// of bytes. /// /// The [`HumanReadable::Decimal`] and[`HumanReadable::Binary`] variants /// represent dynamic block sizes: as the number of bytes increases, the /// divisor increases as well (for example, from 1 to 1,000 to 1,000,000 /// and so on in the case of [`HumanReadable::Decimal`]). #[derive(Clone, Copy)] pub(crate) enum HumanReadable { /// Use the largest divisor corresponding to a unit, like B, K, M, G, etc. 
/// /// This variant represents powers of 1,000. Contrast with /// [`HumanReadable::Binary`], which represents powers of /// 1,024. Decimal, /// Use the largest divisor corresponding to a unit, like B, K, M, G, etc. /// /// This variant represents powers of 1,024. Contrast with /// [`HumanReadable::Decimal`], which represents powers /// of 1,000. Binary, } /// A block size to use in condensing the display of a large number of bytes. /// /// The [`BlockSize::Bytes`] variant represents a static block /// size. /// /// The default variant is `Bytes(1024)`. #[derive(Debug, PartialEq)] pub(crate) enum BlockSize { /// A fixed number of bytes. /// /// The number must be positive. Bytes(u64), } impl BlockSize { /// Returns the associated value pub(crate) fn as_u64(&self) -> u64 { match *self { Self::Bytes(n) => n, } } } impl Default for BlockSize { fn default() -> Self { if env::var("POSIXLY_CORRECT").is_ok() { Self::Bytes(512) } else { Self::Bytes(1024) } } } pub(crate) fn read_block_size(matches: &ArgMatches) -> Result { if matches.contains_id(OPT_BLOCKSIZE) { let s = matches.get_one::(OPT_BLOCKSIZE).unwrap(); let bytes = parse_size_u64(s)?; if bytes > 0 { Ok(BlockSize::Bytes(bytes)) } else { Err(ParseSizeError::ParseFailure(format!("{}", s.quote()))) } } else if matches.get_flag(OPT_PORTABILITY) { Ok(BlockSize::default()) } else if let Some(bytes) = block_size_from_env() { Ok(BlockSize::Bytes(bytes)) } else { Ok(BlockSize::default()) } } fn block_size_from_env() -> Option { for env_var in ["DF_BLOCK_SIZE", "BLOCK_SIZE", "BLOCKSIZE"] { if let Ok(env_size) = env::var(env_var) { return parse_size_u64(&env_size).ok(); } } None } impl fmt::Display for BlockSize { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::Bytes(n) => { let s = if n % 1024 == 0 && n % 1000 != 0 { to_magnitude_and_suffix(*n as u128, SuffixType::Iec) } else { to_magnitude_and_suffix(*n as u128, SuffixType::Si) }; write!(f, "{s}") } } } } #[cfg(test)] mod tests { use std::env; use 
crate::blocks::{BlockSize, SuffixType, to_magnitude_and_suffix}; #[test] fn test_to_magnitude_and_suffix_powers_of_1024() { assert_eq!(to_magnitude_and_suffix(1024, SuffixType::Iec), "1K"); assert_eq!(to_magnitude_and_suffix(2048, SuffixType::Iec), "2K"); assert_eq!(to_magnitude_and_suffix(4096, SuffixType::Iec), "4K"); assert_eq!(to_magnitude_and_suffix(1024 * 1024, SuffixType::Iec), "1M"); assert_eq!( to_magnitude_and_suffix(2 * 1024 * 1024, SuffixType::Iec), "2M" ); assert_eq!( to_magnitude_and_suffix(1024 * 1024 * 1024, SuffixType::Iec), "1G" ); assert_eq!( to_magnitude_and_suffix(34 * 1024 * 1024 * 1024, SuffixType::Iec), "34G" ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_to_magnitude_and_suffix_not_powers_of_1024() { assert_eq!(to_magnitude_and_suffix(1, SuffixType::Si), "1B"); assert_eq!(to_magnitude_and_suffix(999, SuffixType::Si), "999B"); assert_eq!(to_magnitude_and_suffix(1000, SuffixType::Si), "1kB"); assert_eq!(to_magnitude_and_suffix(1001, SuffixType::Si), "1.1kB"); assert_eq!(to_magnitude_and_suffix(1023, SuffixType::Si), "1.1kB"); assert_eq!(to_magnitude_and_suffix(1025, SuffixType::Si), "1.1kB"); assert_eq!(to_magnitude_and_suffix(10_001, SuffixType::Si), "11kB"); assert_eq!(to_magnitude_and_suffix(999_000, SuffixType::Si), "999kB"); assert_eq!(to_magnitude_and_suffix(999_001, SuffixType::Si), "1MB"); assert_eq!(to_magnitude_and_suffix(999_999, SuffixType::Si), "1MB"); assert_eq!(to_magnitude_and_suffix(1_000_000, SuffixType::Si), "1MB"); assert_eq!(to_magnitude_and_suffix(1_000_001, SuffixType::Si), "1.1MB"); assert_eq!(to_magnitude_and_suffix(1_100_000, SuffixType::Si), "1.1MB"); assert_eq!(to_magnitude_and_suffix(1_100_001, SuffixType::Si), "1.2MB"); assert_eq!(to_magnitude_and_suffix(1_900_000, SuffixType::Si), "1.9MB"); assert_eq!(to_magnitude_and_suffix(1_900_001, SuffixType::Si), "2MB"); assert_eq!(to_magnitude_and_suffix(9_900_000, SuffixType::Si), "9.9MB"); assert_eq!(to_magnitude_and_suffix(9_900_001, SuffixType::Si), 
"10MB"); assert_eq!( to_magnitude_and_suffix(999_000_000, SuffixType::Si), "999MB" ); assert_eq!(to_magnitude_and_suffix(999_000_001, SuffixType::Si), "1GB"); assert_eq!( to_magnitude_and_suffix(1_000_000_000, SuffixType::Si), "1GB" ); assert_eq!( to_magnitude_and_suffix(1_000_000_001, SuffixType::Si), "1.1GB" ); } #[test] fn test_block_size_display() { assert_eq!(format!("{}", BlockSize::Bytes(1024)), "1K"); assert_eq!(format!("{}", BlockSize::Bytes(2 * 1024)), "2K"); assert_eq!(format!("{}", BlockSize::Bytes(3 * 1024 * 1024)), "3M"); } #[test] fn test_block_size_display_multiples_of_1000_and_1024() { assert_eq!(format!("{}", BlockSize::Bytes(128_000)), "128kB"); assert_eq!(format!("{}", BlockSize::Bytes(1000 * 1024)), "1.1MB"); assert_eq!(format!("{}", BlockSize::Bytes(1_000_000_000_000)), "1TB"); } #[test] fn test_default_block_size() { assert_eq!(BlockSize::Bytes(1024), BlockSize::default()); unsafe { env::set_var("POSIXLY_CORRECT", "1") }; assert_eq!(BlockSize::Bytes(512), BlockSize::default()); unsafe { env::remove_var("POSIXLY_CORRECT") }; } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/columns.rs000066400000000000000000000155301504311601400253760ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore itotal iused iavail ipcent pcent squashfs use crate::{OPT_INODES, OPT_OUTPUT, OPT_PRINT_TYPE}; use clap::{ArgMatches, parser::ValueSource}; use thiserror::Error; use uucore::display::Quotable; /// The columns in the output table produced by `df`. /// /// The [`Row`] struct has a field corresponding to each of the /// variants of this enumeration. /// /// [`Row`]: crate::table::Row #[derive(PartialEq, Copy, Clone)] pub(crate) enum Column { /// The source of the mount point, usually a device. Source, /// Total number of blocks. Size, /// Number of used blocks. 
Used, /// Number of available blocks. Avail, /// Percentage of blocks used out of total number of blocks. Pcent, /// The mount point. Target, /// Total number of inodes. Itotal, /// Number of used inodes. Iused, /// Number of available inodes. Iavail, /// Percentage of inodes used out of total number of inodes. Ipcent, /// The filename given as a command-line argument. File, /// The filesystem type, like "ext4" or "squashfs". Fstype, /// Percentage of bytes available to non-privileged processes. #[cfg(target_os = "macos")] Capacity, } /// An error while defining which columns to display in the output table. #[derive(Debug, Error)] pub(crate) enum ColumnError { /// If a column appears more than once in the `--output` argument. #[error("{}", .0.quote())] MultipleColumns(String), } impl Column { /// Convert from command-line arguments to sequence of columns. /// /// The set of columns that will appear in the output table can be /// specified by command-line arguments. This function converts /// those arguments to a [`Vec`] of [`Column`] variants. /// /// # Errors /// /// This function returns an error if a column is specified more /// than once in the command-line argument. pub(crate) fn from_matches(matches: &ArgMatches) -> Result, ColumnError> { match ( matches.get_flag(OPT_PRINT_TYPE), matches.get_flag(OPT_INODES), matches.value_source(OPT_OUTPUT) == Some(ValueSource::CommandLine), ) { (false, false, false) => Ok(vec![ Self::Source, Self::Size, Self::Used, Self::Avail, #[cfg(target_os = "macos")] Self::Capacity, Self::Pcent, Self::Target, ]), (false, false, true) => { // Unwrapping should not panic because in this arm of // the `match` statement, we know that `OPT_OUTPUT` // is non-empty. 
let names = matches .get_many::(OPT_OUTPUT) .unwrap() .map(|s| s.as_str()); let mut seen: Vec<&str> = vec![]; let mut columns = vec![]; for name in names { if seen.contains(&name) { return Err(ColumnError::MultipleColumns(name.to_string())); } seen.push(name); // Unwrapping here should not panic because the // command-line argument parsing library should be // responsible for ensuring each comma-separated // string is a valid column label. let column = Self::parse(name).unwrap(); columns.push(column); } Ok(columns) } (false, true, false) => Ok(vec![ Self::Source, Self::Itotal, Self::Iused, Self::Iavail, Self::Ipcent, Self::Target, ]), (true, false, false) => Ok(vec![ Self::Source, Self::Fstype, Self::Size, Self::Used, Self::Avail, #[cfg(target_os = "macos")] Self::Capacity, Self::Pcent, Self::Target, ]), (true, true, false) => Ok(vec![ Self::Source, Self::Fstype, Self::Itotal, Self::Iused, Self::Iavail, Self::Ipcent, Self::Target, ]), // The command-line arguments -T and -i are each mutually // exclusive with --output, so the command-line argument // parser should reject those combinations before we get // to this point in the code. _ => unreachable!(), } } /// Convert a column name to the corresponding enumeration variant. /// /// There are twelve valid column names, one for each variant: /// /// - "source" /// - "fstype" /// - "itotal" /// - "iused" /// - "iavail" /// - "ipcent" /// - "size" /// - "used" /// - "avail" /// - "pcent" /// - "file" /// - "target" /// /// # Errors /// /// If the string `s` is not one of the valid column names. 
fn parse(s: &str) -> Result { match s { "source" => Ok(Self::Source), "fstype" => Ok(Self::Fstype), "itotal" => Ok(Self::Itotal), "iused" => Ok(Self::Iused), "iavail" => Ok(Self::Iavail), "ipcent" => Ok(Self::Ipcent), "size" => Ok(Self::Size), "used" => Ok(Self::Used), "avail" => Ok(Self::Avail), "pcent" => Ok(Self::Pcent), "file" => Ok(Self::File), "target" => Ok(Self::Target), _ => Err(()), } } /// Return the alignment of the specified column. pub(crate) fn alignment(column: &Self) -> Alignment { match column { Self::Source | Self::Target | Self::File | Self::Fstype => Alignment::Left, _ => Alignment::Right, } } /// Return the minimum width of the specified column. pub(crate) fn min_width(column: &Self) -> usize { match column { // 14 = length of "Filesystem" plus 4 spaces Self::Source => 14, Self::Used => 5, Self::Size => 5, // the shortest headers have a length of 4 chars so we use that as the minimum width _ => 4, } } } /// A column's alignment. /// /// We define our own `Alignment` enum instead of using `std::fmt::Alignment` because df doesn't /// have centered columns and hence a `Center` variant is not needed. pub(crate) enum Alignment { Left, Right, } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/df.rs000066400000000000000000000701751504311601400243150ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore itotal iused iavail ipcent pcent tmpfs squashfs lofs mod blocks; mod columns; mod filesystem; mod table; use blocks::HumanReadable; use clap::builder::ValueParser; use table::HeaderMode; use uucore::display::Quotable; use uucore::error::{UError, UResult, USimpleError, get_exit_code}; use uucore::fsext::{MountInfo, read_fs_list}; use uucore::parser::parse_size::ParseSizeError; use uucore::translate; use uucore::{format_usage, show}; use clap::{Arg, ArgAction, ArgMatches, Command, parser::ValueSource}; use std::ffi::OsString; use std::io::stdout; use std::path::Path; use thiserror::Error; use crate::blocks::{BlockSize, read_block_size}; use crate::columns::{Column, ColumnError}; use crate::filesystem::Filesystem; use crate::filesystem::FsError; use crate::table::Table; static OPT_HELP: &str = "help"; static OPT_ALL: &str = "all"; static OPT_BLOCKSIZE: &str = "blocksize"; static OPT_TOTAL: &str = "total"; static OPT_HUMAN_READABLE_BINARY: &str = "human-readable-binary"; static OPT_HUMAN_READABLE_DECIMAL: &str = "human-readable-decimal"; static OPT_INODES: &str = "inodes"; static OPT_KILO: &str = "kilo"; static OPT_LOCAL: &str = "local"; static OPT_NO_SYNC: &str = "no-sync"; static OPT_OUTPUT: &str = "output"; static OPT_PATHS: &str = "paths"; static OPT_PORTABILITY: &str = "portability"; static OPT_SYNC: &str = "sync"; static OPT_TYPE: &str = "type"; static OPT_PRINT_TYPE: &str = "print-type"; static OPT_EXCLUDE_TYPE: &str = "exclude-type"; static OUTPUT_FIELD_LIST: [&str; 12] = [ "source", "fstype", "itotal", "iused", "iavail", "ipcent", "size", "used", "avail", "pcent", "file", "target", ]; /// Parameters that control the behavior of `df`. /// /// Most of these parameters control which rows and which columns are /// displayed. The `block_size` determines the units to use when /// displaying numbers of bytes or inodes. 
struct Options { show_local_fs: bool, show_all_fs: bool, human_readable: Option, block_size: BlockSize, header_mode: HeaderMode, /// Optional list of filesystem types to include in the output table. /// /// If this is not `None`, only filesystems that match one of /// these types will be listed. include: Option>, /// Optional list of filesystem types to exclude from the output table. /// /// If this is not `None`, filesystems that match one of these /// types will *not* be listed. exclude: Option>, /// Whether to sync before operating. sync: bool, /// Whether to show a final row comprising the totals for each column. show_total: bool, /// Sequence of columns to display in the output table. columns: Vec, } impl Default for Options { fn default() -> Self { Self { show_local_fs: Default::default(), show_all_fs: Default::default(), block_size: BlockSize::default(), human_readable: Option::default(), header_mode: HeaderMode::default(), include: Option::default(), exclude: Option::default(), sync: Default::default(), show_total: Default::default(), columns: vec![ Column::Source, Column::Size, Column::Used, Column::Avail, Column::Pcent, Column::Target, ], } } } #[derive(Debug, Error)] enum OptionsError { // TODO This needs to vary based on whether `--block-size` // or `-B` were provided. #[error("{}", translate!("df-error-block-size-too-large", "size" => .0.clone()))] BlockSizeTooLarge(String), // TODO This needs to vary based on whether `--block-size` // or `-B` were provided., #[error("{}", translate!("df-error-invalid-block-size", "size" => .0.clone()))] InvalidBlockSize(String), // TODO This needs to vary based on whether `--block-size` // or `-B` were provided. #[error("{}", translate!("df-error-invalid-suffix", "size" => .0.clone()))] InvalidSuffix(String), /// An error getting the columns to display in the output table. 
#[error("{}", translate!("df-error-field-used-more-than-once", "field" => format!("{}", .0)))] ColumnError(ColumnError), #[error( "{}", .0.iter() .map(|t| translate!("df-error-filesystem-type-both-selected-and-excluded", "type" => t.quote())) .collect::>() .join(format!("\n{}: ", uucore::util_name()).as_str()) )] FilesystemTypeBothSelectedAndExcluded(Vec), } impl Options { /// Convert command-line arguments into [`Options`]. fn from(matches: &ArgMatches) -> Result { let include: Option> = matches .get_many::(OPT_TYPE) .map(|v| v.map(|s| s.to_string_lossy().to_string()).collect()); let exclude: Option> = matches .get_many::(OPT_EXCLUDE_TYPE) .map(|v| v.map(|s| s.to_string_lossy().to_string()).collect()); if let (Some(include), Some(exclude)) = (&include, &exclude) { if let Some(types) = Self::get_intersected_types(include, exclude) { return Err(OptionsError::FilesystemTypeBothSelectedAndExcluded(types)); } } Ok(Self { show_local_fs: matches.get_flag(OPT_LOCAL), show_all_fs: matches.get_flag(OPT_ALL), sync: matches.get_flag(OPT_SYNC), block_size: read_block_size(matches).map_err(|e| match e { ParseSizeError::InvalidSuffix(s) => OptionsError::InvalidSuffix(s), ParseSizeError::SizeTooBig(_) => OptionsError::BlockSizeTooLarge( matches .get_one::(OPT_BLOCKSIZE) .unwrap() .to_string(), ), ParseSizeError::ParseFailure(s) => OptionsError::InvalidBlockSize(s), ParseSizeError::PhysicalMem(s) => OptionsError::InvalidBlockSize(s), })?, header_mode: { if matches.get_flag(OPT_HUMAN_READABLE_BINARY) || matches.get_flag(OPT_HUMAN_READABLE_DECIMAL) { HeaderMode::HumanReadable } else if matches.get_flag(OPT_PORTABILITY) { HeaderMode::PosixPortability // get_flag() doesn't work here, it always returns true because OPT_OUTPUT has // default values and hence is always present } else if matches.value_source(OPT_OUTPUT) == Some(ValueSource::CommandLine) { HeaderMode::Output } else { HeaderMode::Default } }, human_readable: { if matches.get_flag(OPT_HUMAN_READABLE_BINARY) { 
Some(HumanReadable::Binary) } else if matches.get_flag(OPT_HUMAN_READABLE_DECIMAL) { Some(HumanReadable::Decimal) } else { None } }, include, exclude, show_total: matches.get_flag(OPT_TOTAL), columns: Column::from_matches(matches).map_err(OptionsError::ColumnError)?, }) } fn get_intersected_types(include: &[String], exclude: &[String]) -> Option> { let mut intersected_types = Vec::new(); for t in include { if exclude.contains(t) { intersected_types.push(t.clone()); } } (!intersected_types.is_empty()).then_some(intersected_types) } } /// Whether to display the mount info given the inclusion settings. fn is_included(mi: &MountInfo, opt: &Options) -> bool { // Don't show remote filesystems if `--local` has been given. if mi.remote && opt.show_local_fs { return false; } // Don't show pseudo filesystems unless `--all` has been given. // The "lofs" filesystem is a loopback // filesystem present on Solaris and FreeBSD systems. It // is similar to a symbolic link. if (mi.dummy || mi.fs_type == "lofs") && !opt.show_all_fs { return false; } // Don't show filesystems if they have been explicitly excluded. if let Some(ref excludes) = opt.exclude { if excludes.contains(&mi.fs_type) { return false; } } if let Some(ref includes) = opt.include { if !includes.contains(&mi.fs_type) { return false; } } true } /// Whether the mount info in `m2` should be prioritized over `m1`. /// /// The "lt" in the function name is in analogy to the /// [`std::cmp::PartialOrd::lt`]. fn mount_info_lt(m1: &MountInfo, m2: &MountInfo) -> bool { // let "real" devices with '/' in the name win. if m1.dev_name.starts_with('/') && !m2.dev_name.starts_with('/') { return false; } let m1_nearer_root = m1.mount_dir.len() < m2.mount_dir.len(); // With bind mounts, prefer items nearer the root of the source let m2_below_root = !m1.mount_root.is_empty() && !m2.mount_root.is_empty() && m1.mount_root.len() > m2.mount_root.len(); // let points towards the root of the device win. 
if m1_nearer_root && !m2_below_root { return false; } // let an entry over-mounted on a new device win, but only when // matching an existing mnt point, to avoid problematic // replacement when given inaccurate mount lists, seen with some // chroot environments for example. if m1.dev_name != m2.dev_name && m1.mount_dir == m2.mount_dir { return false; } true } /// Whether to prioritize given mount info over all others on the same device. /// /// This function decides whether the mount info `mi` is better than /// all others in `previous` that mount the same device as `mi`. fn is_best(previous: &[MountInfo], mi: &MountInfo) -> bool { for seen in previous { if seen.dev_id == mi.dev_id && mount_info_lt(mi, seen) { return false; } } true } /// Get all currently mounted filesystems. /// /// `opt` excludes certain filesystems from consideration and allows for the synchronization of filesystems before running; see /// [`Options`] for more information. fn get_all_filesystems(opt: &Options) -> UResult> { // Run a sync call before any operation if so instructed. if opt.sync { #[cfg(not(any(windows, target_os = "redox")))] unsafe { #[cfg(not(target_os = "android"))] uucore::libc::sync(); #[cfg(target_os = "android")] uucore::libc::syscall(uucore::libc::SYS_sync); } } let mut mounts = vec![]; for mi in read_fs_list()? { // TODO The running time of the `is_best()` function is linear // in the length of `result`. That makes the running time of // this loop quadratic in the length of `vmi`. This could be // improved by a more efficient implementation of `is_best()`, // but `vmi` is probably not very long in practice. if is_included(&mi, opt) && is_best(&mounts, &mi) { mounts.push(mi); } } // Convert each `MountInfo` into a `Filesystem`, which contains // both the mount information and usage information. 
#[cfg(not(windows))] { let maybe_mount = |m| Filesystem::from_mount(&mounts, &m, None).ok(); Ok(mounts .clone() .into_iter() .filter_map(maybe_mount) .filter(|fs| opt.show_all_fs || fs.usage.blocks > 0) .collect()) } #[cfg(windows)] { let maybe_mount = |m| Filesystem::from_mount(&m, None).ok(); Ok(mounts .into_iter() .filter_map(maybe_mount) .filter(|fs| opt.show_all_fs || fs.usage.blocks > 0) .collect()) } } /// For each path, get the filesystem that contains that path. fn get_named_filesystems

(paths: &[P], opt: &Options) -> UResult> where P: AsRef, { // The list of all mounted filesystems. let mounts: Vec = read_fs_list()?; let mut result = vec![]; // Convert each path into a `Filesystem`, which contains // both the mount information and usage information. for path in paths { match Filesystem::from_path(&mounts, path) { Ok(fs) => { if is_included(&fs.mount_info, opt) { result.push(fs); } } Err(FsError::InvalidPath) => { show!(USimpleError::new( 1, translate!("df-error-no-such-file-or-directory", "path" => path.as_ref().display()) )); } Err(FsError::MountMissing) => { show!(USimpleError::new( 1, translate!("df-error-no-file-systems-processed") )); } #[cfg(not(windows))] Err(FsError::OverMounted) => { show!(USimpleError::new( 1, translate!("df-error-cannot-access-over-mounted", "path" => path.as_ref().quote()) )); } } } if get_exit_code() == 0 && result.is_empty() { show!(USimpleError::new( 1, translate!("df-error-no-file-systems-processed") )); return Ok(result); } Ok(result) } #[derive(Debug, Error)] enum DfError { /// A problem while parsing command-line options. #[error("{}", .0)] OptionsError(OptionsError), } impl UError for DfError { fn usage(&self) -> bool { matches!(self, Self::OptionsError(OptionsError::ColumnError(_))) } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; #[cfg(windows)] { if matches.get_flag(OPT_INODES) { println!( "{}", translate!("df-error-inodes-not-supported-windows", "program" => uucore::util_name()) ); return Ok(()); } } let opt = Options::from(&matches).map_err(DfError::OptionsError)?; // Get the list of filesystems to display in the output table. 
let filesystems: Vec = match matches.get_many::(OPT_PATHS) { None => { let filesystems = get_all_filesystems(&opt).map_err(|e| { let context = translate!("df-error-cannot-read-table-of-mounted-filesystems"); USimpleError::new(e.code(), format!("{context}: {e}")) })?; if filesystems.is_empty() { return Err(USimpleError::new( 1, translate!("df-error-no-file-systems-processed"), )); } filesystems } Some(paths) => { let paths: Vec<_> = paths.collect(); let filesystems = get_named_filesystems(&paths, &opt).map_err(|e| { let context = translate!("df-error-cannot-read-table-of-mounted-filesystems"); USimpleError::new(e.code(), format!("{context}: {e}")) })?; // This can happen if paths are given as command-line arguments // but none of the paths exist. if filesystems.is_empty() { return Ok(()); } filesystems } }; Table::new(&opt, filesystems).write_to(&mut stdout())?; Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("df-about")) .override_usage(format_usage(&translate!("df-usage"))) .after_help(translate!("df-after-help")) .infer_long_args(true) .disable_help_flag(true) .arg( Arg::new(OPT_HELP) .long(OPT_HELP) .help(translate!("df-help-print-help")) .action(ArgAction::Help), ) .arg( Arg::new(OPT_ALL) .short('a') .long("all") .overrides_with(OPT_ALL) .help(translate!("df-help-all")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_BLOCKSIZE) .short('B') .long("block-size") .value_name("SIZE") .overrides_with_all([OPT_KILO, OPT_BLOCKSIZE]) .help(translate!("df-help-block-size")), ) .arg( Arg::new(OPT_TOTAL) .long("total") .overrides_with(OPT_TOTAL) .help(translate!("df-help-total")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_HUMAN_READABLE_BINARY) .short('h') .long("human-readable") .overrides_with_all([OPT_HUMAN_READABLE_DECIMAL, OPT_HUMAN_READABLE_BINARY]) .help(translate!("df-help-human-readable")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_HUMAN_READABLE_DECIMAL) .short('H') 
.long("si") .overrides_with_all([OPT_HUMAN_READABLE_BINARY, OPT_HUMAN_READABLE_DECIMAL]) .help(translate!("df-help-si")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_INODES) .short('i') .long("inodes") .overrides_with(OPT_INODES) .help(translate!("df-help-inodes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_KILO) .short('k') .help(translate!("df-help-kilo")) .overrides_with_all([OPT_BLOCKSIZE, OPT_KILO]) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_LOCAL) .short('l') .long("local") .overrides_with(OPT_LOCAL) .help(translate!("df-help-local")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_NO_SYNC) .long("no-sync") .overrides_with_all([OPT_SYNC, OPT_NO_SYNC]) .help(translate!("df-help-no-sync")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_OUTPUT) .long("output") .value_name("FIELD_LIST") .action(ArgAction::Append) .num_args(0..) .require_equals(true) .use_value_delimiter(true) .value_parser(OUTPUT_FIELD_LIST) .default_missing_values(OUTPUT_FIELD_LIST) .default_values(["source", "size", "used", "avail", "pcent", "target"]) .conflicts_with_all([OPT_INODES, OPT_PORTABILITY, OPT_PRINT_TYPE]) .help(translate!("df-help-output")), ) .arg( Arg::new(OPT_PORTABILITY) .short('P') .long("portability") .overrides_with(OPT_PORTABILITY) .help(translate!("df-help-portability")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_SYNC) .long("sync") .overrides_with_all([OPT_NO_SYNC, OPT_SYNC]) .help(translate!("df-help-sync")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_TYPE) .short('t') .long("type") .value_parser(ValueParser::os_string()) .value_name("TYPE") .action(ArgAction::Append) .help(translate!("df-help-type")), ) .arg( Arg::new(OPT_PRINT_TYPE) .short('T') .long("print-type") .overrides_with(OPT_PRINT_TYPE) .help(translate!("df-help-print-type")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_EXCLUDE_TYPE) .short('x') .long("exclude-type") .action(ArgAction::Append) .value_parser(ValueParser::os_string()) .value_name("TYPE") 
.use_value_delimiter(true) .help(translate!("df-help-exclude-type")), ) .arg( Arg::new(OPT_PATHS) .action(ArgAction::Append) .value_parser(ValueParser::os_string()) .value_hint(clap::ValueHint::AnyPath), ) } #[cfg(test)] mod tests { mod mount_info_lt { use crate::mount_info_lt; use uucore::fsext::MountInfo; /// Instantiate a [`MountInfo`] with the given fields. fn mount_info(dev_name: &str, mount_root: &str, mount_dir: &str) -> MountInfo { MountInfo { dev_id: String::new(), dev_name: String::from(dev_name), fs_type: String::new(), mount_dir: mount_dir.into(), mount_option: String::new(), mount_root: mount_root.into(), remote: false, dummy: false, } } #[test] fn test_absolute() { // Prefer device name "/dev/foo" over "dev_foo". let m1 = mount_info("/dev/foo", "/", "/mnt/bar"); let m2 = mount_info("dev_foo", "/", "/mnt/bar"); assert!(!mount_info_lt(&m1, &m2)); } #[test] fn test_shorter() { // Prefer mount directory "/mnt/bar" over "/mnt/bar/baz"... let m1 = mount_info("/dev/foo", "/", "/mnt/bar"); let m2 = mount_info("/dev/foo", "/", "/mnt/bar/baz"); assert!(!mount_info_lt(&m1, &m2)); // ..but prefer mount root "/root" over "/". let m1 = mount_info("/dev/foo", "/root", "/mnt/bar"); let m2 = mount_info("/dev/foo", "/", "/mnt/bar/baz"); assert!(mount_info_lt(&m1, &m2)); } #[test] fn test_over_mounted() { // Prefer the earlier entry if the devices are different but // the mount directory is the same. let m1 = mount_info("/dev/foo", "/", "/mnt/baz"); let m2 = mount_info("/dev/bar", "/", "/mnt/baz"); assert!(!mount_info_lt(&m1, &m2)); } } mod is_best { use crate::is_best; use uucore::fsext::MountInfo; /// Instantiate a [`MountInfo`] with the given fields. 
fn mount_info(dev_id: &str, mount_dir: &str) -> MountInfo { MountInfo { dev_id: String::from(dev_id), dev_name: String::new(), fs_type: String::new(), mount_dir: mount_dir.into(), mount_option: String::new(), mount_root: "/".into(), remote: false, dummy: false, } } #[test] fn test_empty() { let m = mount_info("0", "/mnt/bar"); assert!(is_best(&[], &m)); } #[test] fn test_different_dev_id() { let m1 = mount_info("0", "/mnt/bar"); let m2 = mount_info("1", "/mnt/bar"); assert!(is_best(&[m1.clone()], &m2)); assert!(is_best(&[m2], &m1)); } #[test] fn test_same_dev_id() { // There are several conditions under which a `MountInfo` is // considered "better" than the others, we're just checking // one condition in this test. let m1 = mount_info("0", "/mnt/bar"); let m2 = mount_info("0", "/mnt/bar/baz"); assert!(!is_best(&[m1.clone()], &m2)); assert!(is_best(&[m2], &m1)); } } mod is_included { use crate::{Options, is_included}; use uucore::fsext::MountInfo; /// Instantiate a [`MountInfo`] with the given fields. 
fn mount_info(fs_type: &str, mount_dir: &str, remote: bool, dummy: bool) -> MountInfo { MountInfo { dev_id: String::new(), dev_name: String::new(), fs_type: String::from(fs_type), mount_dir: mount_dir.into(), mount_option: String::new(), mount_root: "/".into(), remote, dummy, } } #[test] fn test_remote_included() { let opt = Options::default(); let m = mount_info("ext4", "/mnt/foo", true, false); assert!(is_included(&m, &opt)); } #[test] fn test_remote_excluded() { let opt = Options { show_local_fs: true, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", true, false); assert!(!is_included(&m, &opt)); } #[test] fn test_dummy_included() { let opt = Options { show_all_fs: true, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, true); assert!(is_included(&m, &opt)); } #[test] fn test_dummy_excluded() { let opt = Options::default(); let m = mount_info("ext4", "/mnt/foo", false, true); assert!(!is_included(&m, &opt)); } #[test] fn test_exclude_match() { let exclude = Some(vec![String::from("ext4")]); let opt = Options { exclude, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(!is_included(&m, &opt)); } #[test] fn test_exclude_no_match() { let exclude = Some(vec![String::from("tmpfs")]); let opt = Options { exclude, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(is_included(&m, &opt)); } #[test] fn test_include_match() { let include = Some(vec![String::from("ext4")]); let opt = Options { include, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(is_included(&m, &opt)); } #[test] fn test_include_no_match() { let include = Some(vec![String::from("tmpfs")]); let opt = Options { include, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(!is_included(&m, &opt)); } #[test] fn test_include_and_exclude_match_neither() { let include = Some(vec![String::from("tmpfs")]); let exclude = 
Some(vec![String::from("squashfs")]); let opt = Options { include, exclude, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(!is_included(&m, &opt)); } #[test] fn test_include_and_exclude_match_exclude() { let include = Some(vec![String::from("tmpfs")]); let exclude = Some(vec![String::from("ext4")]); let opt = Options { include, exclude, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(!is_included(&m, &opt)); } #[test] fn test_include_and_exclude_match_include() { let include = Some(vec![String::from("ext4")]); let exclude = Some(vec![String::from("squashfs")]); let opt = Options { include, exclude, ..Default::default() }; let m = mount_info("ext4", "/mnt/foo", false, false); assert!(is_included(&m, &opt)); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/filesystem.rs000066400000000000000000000303361504311601400261030ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. //! Provides a summary representation of a filesystem. //! //! A [`Filesystem`] struct represents a device containing a //! filesystem mounted at a particular directory. It also includes //! information on amount of space available and amount of space used. // spell-checker:ignore canonicalized use std::{ffi::OsString, path::Path}; #[cfg(unix)] use uucore::fsext::statfs; use uucore::fsext::{FsUsage, MountInfo}; /// Summary representation of a filesystem. /// /// A [`Filesystem`] struct represents a device containing a /// filesystem mounted at a particular directory. The /// [`Filesystem::mount_info`] field exposes that information. The /// [`Filesystem::usage`] field provides information on the amount of /// space available on the filesystem and the amount of space used. 
#[derive(Debug, Clone)] pub(crate) struct Filesystem { /// The file given on the command line if any. /// /// When invoking `df` with a positional argument, it displays /// usage information for the filesystem that contains the given /// file. If given, this field contains that filename. pub file: Option, /// Information about the mounted device, mount directory, and related options. pub mount_info: MountInfo, /// Information about the amount of space used on the filesystem. pub usage: FsUsage, } #[derive(Debug, PartialEq)] pub(crate) enum FsError { #[cfg(not(windows))] OverMounted, InvalidPath, MountMissing, } /// Check whether `mount` has been over-mounted. /// /// `mount` is considered over-mounted if it there is an element in /// `mounts` after mount that has the same `mount_dir`. #[cfg(not(windows))] fn is_over_mounted(mounts: &[MountInfo], mount: &MountInfo) -> bool { let last_mount_for_dir = mounts .iter() .filter(|m| m.mount_dir == mount.mount_dir) .next_back(); if let Some(lmi) = last_mount_for_dir { lmi.dev_name != mount.dev_name } else { // Should be unreachable if `mount` is in `mounts` false } } /// Find the mount info that best matches a given filesystem path. /// /// This function returns the element of `mounts` on which `path` is /// mounted. If there are no matches, this function returns /// [`None`]. If there are two or more matches, then the single /// [`MountInfo`] with the device name corresponding to the entered path. /// /// If `canonicalize` is `true`, then the `path` is canonicalized /// before checking whether it matches any mount directories. /// /// # See also /// /// * [`Path::canonicalize`] /// * [`MountInfo::mount_dir`] fn mount_info_from_path

( mounts: &[MountInfo], path: P, // This is really only used for testing purposes. canonicalize: bool, ) -> Result<&MountInfo, FsError> where P: AsRef, { // TODO Refactor this function with `Stater::find_mount_point()` // in the `stat` crate. let path = if canonicalize { path.as_ref() .canonicalize() .map_err(|_| FsError::InvalidPath)? } else { path.as_ref().to_path_buf() }; // Find the potential mount point that matches entered path let maybe_mount_point = mounts .iter() // Create pair MountInfo, canonicalized device name // TODO Abstract from accessing real filesystem to // make code more testable .map(|m| (m, std::fs::canonicalize(&m.dev_name))) // Ignore non existing paths .filter(|m| m.1.is_ok()) .map(|m| (m.0, m.1.ok().unwrap())) // Try to find canonicalized device name corresponding to entered path .find(|m| m.1.eq(&path)) .map(|m| m.0); maybe_mount_point .or_else(|| { mounts .iter() .filter(|mi| path.starts_with(&mi.mount_dir)) .max_by_key(|mi| mi.mount_dir.len()) }) .ok_or(FsError::MountMissing) } impl Filesystem { // TODO: resolve uuid in `mount_info.dev_name` if exists pub(crate) fn new(mount_info: MountInfo, file: Option) -> Option { let _stat_path = if mount_info.mount_dir.is_empty() { #[cfg(unix)] { mount_info.dev_name.clone().into() } #[cfg(windows)] { // On windows, we expect the volume id mount_info.dev_id.clone().into() } } else { mount_info.mount_dir.clone() }; #[cfg(unix)] let usage = FsUsage::new(statfs(&_stat_path).ok()?); #[cfg(windows)] let usage = FsUsage::new(Path::new(&_stat_path)).ok()?; Some(Self { file, mount_info, usage, }) } /// Find and create the filesystem from the given mount /// after checking that the it hasn't been over-mounted #[cfg(not(windows))] pub(crate) fn from_mount( mounts: &[MountInfo], mount: &MountInfo, file: Option, ) -> Result { if is_over_mounted(mounts, mount) { Err(FsError::OverMounted) } else { Self::new(mount.clone(), file).ok_or(FsError::MountMissing) } } /// Find and create the filesystem from the given 
mount. #[cfg(windows)] pub(crate) fn from_mount(mount: &MountInfo, file: Option) -> Result { Self::new(mount.clone(), file).ok_or(FsError::MountMissing) } /// Find and create the filesystem that best matches a given path. /// /// This function returns a new `Filesystem` derived from the /// element of `mounts` on which `path` is mounted. If there are /// no matches, this function returns [`None`]. If there are two /// or more matches, then the single [`Filesystem`] with the /// longest mount directory is returned. /// /// The `path` is canonicalized before checking whether it matches /// any mount directories. /// /// # See also /// /// * [`Path::canonicalize`] /// * [`MountInfo::mount_dir`] /// pub(crate) fn from_path

(mounts: &[MountInfo], path: P) -> Result where P: AsRef, { let file = path.as_ref().as_os_str().to_owned(); let canonicalize = true; let result = mount_info_from_path(mounts, path, canonicalize); #[cfg(windows)] return result.and_then(|mount_info| Self::from_mount(mount_info, Some(file))); #[cfg(not(windows))] return result.and_then(|mount_info| Self::from_mount(mounts, mount_info, Some(file))); } } #[cfg(test)] mod tests { mod mount_info_from_path { use std::ffi::OsString; use uucore::fsext::MountInfo; use crate::filesystem::{FsError, mount_info_from_path}; /// Create a fake `MountInfo` with the given directory name. fn mount_info(mount_dir: &str) -> MountInfo { MountInfo { dev_id: String::default(), dev_name: String::default(), fs_type: String::default(), mount_dir: OsString::from(mount_dir), mount_option: String::default(), mount_root: OsString::default(), remote: Default::default(), dummy: Default::default(), } } /// Check whether two `MountInfo` instances are equal. fn mount_info_eq(m1: &MountInfo, m2: &MountInfo) -> bool { m1.dev_id == m2.dev_id && m1.dev_name == m2.dev_name && m1.fs_type == m2.fs_type && m1.mount_dir == m2.mount_dir && m1.mount_option == m2.mount_option && m1.mount_root == m2.mount_root && m1.remote == m2.remote && m1.dummy == m2.dummy } #[test] fn test_empty_mounts() { assert_eq!( mount_info_from_path(&[], "/", false).unwrap_err(), FsError::MountMissing ); } #[test] fn test_bad_path() { assert_eq!( // This path better not exist.... 
mount_info_from_path(&[], "/non-existent-path", true).unwrap_err(), FsError::InvalidPath ); } #[test] fn test_exact_match() { let mounts = [mount_info("/foo")]; let actual = mount_info_from_path(&mounts, "/foo", false).unwrap(); assert!(mount_info_eq(actual, &mounts[0])); } #[test] fn test_prefix_match() { let mounts = [mount_info("/foo")]; let actual = mount_info_from_path(&mounts, "/foo/bar", false).unwrap(); assert!(mount_info_eq(actual, &mounts[0])); } #[test] fn test_multiple_matches() { let mounts = [mount_info("/foo"), mount_info("/foo/bar")]; let actual = mount_info_from_path(&mounts, "/foo/bar", false).unwrap(); assert!(mount_info_eq(actual, &mounts[1])); } #[test] fn test_no_match() { let mounts = [mount_info("/foo")]; assert_eq!( mount_info_from_path(&mounts, "/bar", false).unwrap_err(), FsError::MountMissing ); } #[test] fn test_partial_match() { let mounts = [mount_info("/foo/bar")]; assert_eq!( mount_info_from_path(&mounts, "/foo/baz", false).unwrap_err(), FsError::MountMissing ); } #[test] // clippy::assigning_clones added with Rust 1.78 // Rust version = 1.76 on OpenBSD stable/7.5 #[cfg_attr(not(target_os = "openbsd"), allow(clippy::assigning_clones))] fn test_dev_name_match() { let tmp = tempfile::TempDir::new().expect("Failed to create temp dir"); let dev_name = std::fs::canonicalize(tmp.path()) .expect("Failed to canonicalize tmp path") .to_string_lossy() .to_string(); let mut mount_info = mount_info("/foo"); mount_info.dev_name = dev_name.clone(); let mounts = [mount_info]; let actual = mount_info_from_path(&mounts, dev_name, false).unwrap(); assert!(mount_info_eq(actual, &mounts[0])); } } #[cfg(not(windows))] mod over_mount { use std::ffi::OsString; use crate::filesystem::{Filesystem, FsError, is_over_mounted}; use uucore::fsext::MountInfo; fn mount_info_with_dev_name(mount_dir: &str, dev_name: Option<&str>) -> MountInfo { MountInfo { dev_id: String::default(), dev_name: dev_name.map(String::from).unwrap_or_default(), fs_type: 
String::default(), mount_dir: OsString::from(mount_dir), mount_option: String::default(), mount_root: OsString::default(), remote: Default::default(), dummy: Default::default(), } } #[test] fn test_over_mount() { let mount_info1 = mount_info_with_dev_name("/foo", Some("dev_name_1")); let mount_info2 = mount_info_with_dev_name("/foo", Some("dev_name_2")); let mounts = [mount_info1, mount_info2]; assert!(is_over_mounted(&mounts, &mounts[0])); } #[test] fn test_over_mount_not_over_mounted() { let mount_info1 = mount_info_with_dev_name("/foo", Some("dev_name_1")); let mount_info2 = mount_info_with_dev_name("/foo", Some("dev_name_2")); let mounts = [mount_info1, mount_info2]; assert!(!is_over_mounted(&mounts, &mounts[1])); } #[test] fn test_from_mount_over_mounted() { let mount_info1 = mount_info_with_dev_name("/foo", Some("dev_name_1")); let mount_info2 = mount_info_with_dev_name("/foo", Some("dev_name_2")); let mounts = [mount_info1, mount_info2]; assert_eq!( Filesystem::from_mount(&mounts, &mounts[0], None).unwrap_err(), FsError::OverMounted ); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/main.rs000066400000000000000000000000251504311601400246330ustar00rootroot00000000000000uucore::bin!(uu_df); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/df/src/table.rs000066400000000000000000001026761504311601400250150ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore tmpfs Pcent Itotal Iused Iavail Ipcent nosuid nodev //! The filesystem usage data table. //! //! A table ([`Table`]) comprises a header row ([`Header`]) and a //! collection of data rows ([`Row`]), one per filesystem. 
use unicode_width::UnicodeWidthStr; use crate::blocks::{SuffixType, to_magnitude_and_suffix}; use crate::columns::{Alignment, Column}; use crate::filesystem::Filesystem; use crate::{BlockSize, Options}; use uucore::fsext::{FsUsage, MountInfo}; use uucore::translate; use std::ffi::OsString; use std::iter; use std::ops::AddAssign; /// A row in the filesystem usage data table. /// /// A row comprises several pieces of information, including the /// filesystem device, the mountpoint, the number of bytes used, etc. pub(crate) struct Row { /// The filename given on the command-line, if given. file: Option, /// Name of the device on which the filesystem lives. fs_device: String, /// Type of filesystem (for example, `"ext4"`, `"tmpfs"`, etc.). fs_type: String, /// Path at which the filesystem is mounted. fs_mount: OsString, /// Total number of bytes in the filesystem regardless of whether they are used. bytes: u64, /// Number of used bytes. bytes_used: u64, /// Number of available bytes. bytes_avail: u64, /// Percentage of bytes that are used, given as a float between 0 and 1. /// /// If the filesystem has zero bytes, then this is `None`. bytes_usage: Option, /// Percentage of bytes that are available, given as a float between 0 and 1. /// /// These are the bytes that are available to non-privileged processes. /// /// If the filesystem has zero bytes, then this is `None`. #[cfg(target_os = "macos")] bytes_capacity: Option, /// Total number of inodes in the filesystem. inodes: u128, /// Number of used inodes. inodes_used: u128, /// Number of free inodes. inodes_free: u128, /// Percentage of inodes that are used, given as a float between 0 and 1. /// /// If the filesystem has zero bytes, then this is `None`. 
inodes_usage: Option, } impl Row { pub(crate) fn new(source: &str) -> Self { Self { file: None, fs_device: source.into(), fs_type: "-".into(), fs_mount: "-".into(), bytes: 0, bytes_used: 0, bytes_avail: 0, bytes_usage: None, #[cfg(target_os = "macos")] bytes_capacity: None, inodes: 0, inodes_used: 0, inodes_free: 0, inodes_usage: None, } } } impl AddAssign for Row { /// Sum the numeric values of two rows. /// /// The `Row::fs_device` field is set to `"total"` and the /// remaining `String` fields are set to `"-"`. fn add_assign(&mut self, rhs: Self) { let bytes = self.bytes + rhs.bytes; let bytes_used = self.bytes_used + rhs.bytes_used; let bytes_avail = self.bytes_avail + rhs.bytes_avail; let inodes = self.inodes + rhs.inodes; let inodes_used = self.inodes_used + rhs.inodes_used; *self = Self { file: None, fs_device: translate!("df-total"), fs_type: "-".into(), fs_mount: "-".into(), bytes, bytes_used, bytes_avail, bytes_usage: if bytes == 0 { None } else { // We use "(bytes_used + bytes_avail)" instead of "bytes" because on some filesystems (e.g. // ext4) "bytes" also includes reserved blocks we ignore for the usage calculation. // https://www.gnu.org/software/coreutils/faq/coreutils-faq.html#df-Size-and-Used-and-Available-do-not-add-up Some(bytes_used as f64 / (bytes_used + bytes_avail) as f64) }, // TODO Figure out how to compute this. #[cfg(target_os = "macos")] bytes_capacity: None, inodes, inodes_used, inodes_free: self.inodes_free + rhs.inodes_free, inodes_usage: if inodes == 0 { None } else { Some(inodes_used as f64 / inodes as f64) }, } } } impl From for Row { fn from(fs: Filesystem) -> Self { let MountInfo { dev_name, fs_type, mount_dir, .. } = fs.mount_info; let FsUsage { blocksize, blocks, bfree, bavail, files, ffree, .. } = fs.usage; // On Windows WSL, files can be less than ffree. Protect such cases via saturating_sub. 
let bused = blocks.saturating_sub(bfree); let fused = files.saturating_sub(ffree); Self { file: fs.file, fs_device: dev_name, fs_type, fs_mount: mount_dir, bytes: blocksize * blocks, bytes_used: blocksize * bused, bytes_avail: blocksize * bavail, bytes_usage: if blocks == 0 { None } else { // We use "(bused + bavail)" instead of "blocks" because on some filesystems (e.g. // ext4) "blocks" also includes reserved blocks we ignore for the usage calculation. // https://www.gnu.org/software/coreutils/faq/coreutils-faq.html#df-Size-and-Used-and-Available-do-not-add-up Some(bused as f64 / (bused + bavail) as f64) }, #[cfg(target_os = "macos")] bytes_capacity: if bavail == 0 { None } else { Some(bavail as f64 / ((bused + bavail) as f64)) }, inodes: files as u128, inodes_used: fused as u128, inodes_free: ffree as u128, inodes_usage: if files == 0 { None } else { Some(fused as f64 / files as f64) }, } } } /// A `Cell` in the table. We store raw `bytes` as the data (e.g. directory name /// may be non-Unicode). We also record the printed `width` for alignment purpose, /// as it is easier to compute on the original string. struct Cell { bytes: Vec, width: usize, } impl Cell { /// Create a cell, knowing that s contains only 1-length chars fn from_ascii_string>(s: T) -> Cell { let s = s.as_ref(); Cell { bytes: s.as_bytes().into(), width: s.len(), } } /// Create a cell from an unknown origin string that may contain /// wide characters. fn from_string>(s: T) -> Cell { let s = s.as_ref(); Cell { bytes: s.as_bytes().into(), width: UnicodeWidthStr::width(s), } } /// Create a cell from an `OsString` fn from_os_string(os: &OsString) -> Cell { Cell { bytes: uucore::os_str_as_bytes(os).unwrap().to_vec(), width: UnicodeWidthStr::width(os.to_string_lossy().as_ref()), } } } /// A formatter for [`Row`]. /// /// The `options` control how the information in the row gets formatted. pub(crate) struct RowFormatter<'a> { /// The data in this row. 
row: &'a Row, /// Options that control how to format the data. options: &'a Options, // TODO We don't need all of the command-line options here. Some // of the command-line options indicate which rows to include or // exclude. Other command-line options indicate which columns to // include or exclude. Still other options indicate how to format // numbers. We could split the options up into those groups to // reduce the coupling between this `table.rs` module and the main // `df.rs` module. /// Whether to use the special rules for displaying the total row. is_total_row: bool, } impl<'a> RowFormatter<'a> { /// Instantiate this struct. pub(crate) fn new(row: &'a Row, options: &'a Options, is_total_row: bool) -> Self { Self { row, options, is_total_row, } } /// Get a string giving the scaled version of the input number. /// /// The scaling factor is defined in the `options` field. fn scaled_bytes(&self, size: u64) -> Cell { let s = if let Some(h) = self.options.human_readable { to_magnitude_and_suffix(size.into(), SuffixType::HumanReadable(h)) } else { let BlockSize::Bytes(d) = self.options.block_size; (size as f64 / d as f64).ceil().to_string() }; Cell::from_ascii_string(s) } /// Get a string giving the scaled version of the input number. /// /// The scaling factor is defined in the `options` field. fn scaled_inodes(&self, size: u128) -> Cell { let s = if let Some(h) = self.options.human_readable { to_magnitude_and_suffix(size, SuffixType::HumanReadable(h)) } else { size.to_string() }; Cell::from_ascii_string(s) } /// Convert a float between 0 and 1 into a percentage string. /// /// If `None`, return the string `"-"` instead. fn percentage(fraction: Option) -> Cell { let s = match fraction { None => "-".to_string(), Some(x) => format!("{:.0}%", (100.0 * x).ceil()), }; Cell::from_ascii_string(s) } /// Returns formatted row data. 
fn get_cells(&self) -> Vec { let mut cells = Vec::new(); for column in &self.options.columns { let cell = match column { Column::Source => { if self.is_total_row { Cell::from_string(translate!("df-total")) } else { Cell::from_string(&self.row.fs_device) } } Column::Size => self.scaled_bytes(self.row.bytes), Column::Used => self.scaled_bytes(self.row.bytes_used), Column::Avail => self.scaled_bytes(self.row.bytes_avail), Column::Pcent => Self::percentage(self.row.bytes_usage), Column::Target => { if self.is_total_row && !self.options.columns.contains(&Column::Source) { Cell::from_string(translate!("df-total")) } else { Cell::from_os_string(&self.row.fs_mount) } } Column::Itotal => self.scaled_inodes(self.row.inodes), Column::Iused => self.scaled_inodes(self.row.inodes_used), Column::Iavail => self.scaled_inodes(self.row.inodes_free), Column::Ipcent => Self::percentage(self.row.inodes_usage), Column::File => self .row .file .as_ref() .map_or(Cell::from_ascii_string("-"), Cell::from_os_string), Column::Fstype => Cell::from_string(&self.row.fs_type), #[cfg(target_os = "macos")] Column::Capacity => Self::percentage(self.row.bytes_capacity), }; cells.push(cell); } cells } } /// A `HeaderMode` defines what header labels should be shown. pub(crate) enum HeaderMode { Default, // the user used -h or -H HumanReadable, // the user used -P PosixPortability, // the user used --output Output, } impl Default for HeaderMode { fn default() -> Self { Self::Default } } /// The data of the header row. struct Header {} impl Header { /// Return the headers for the specified columns. /// /// The `options` control which column headers are returned. 
fn get_headers(options: &Options) -> Vec { let mut headers = Vec::new(); for column in &options.columns { let header = match column { Column::Source => translate!("df-header-filesystem"), Column::Size => match options.header_mode { HeaderMode::HumanReadable => translate!("df-header-size"), HeaderMode::PosixPortability => { format!( "{}{}", options.block_size.as_u64(), translate!("df-blocks-suffix") ) } _ => format!("{}{}", options.block_size, translate!("df-blocks-suffix")), }, Column::Used => translate!("df-header-used"), Column::Avail => match options.header_mode { HeaderMode::HumanReadable | HeaderMode::Output => { translate!("df-header-avail") } _ => translate!("df-header-available"), }, Column::Pcent => match options.header_mode { HeaderMode::PosixPortability => translate!("df-header-capacity"), _ => translate!("df-header-use-percent"), }, Column::Target => translate!("df-header-mounted-on"), Column::Itotal => translate!("df-header-inodes"), Column::Iused => translate!("df-header-iused"), Column::Iavail => translate!("df-header-iavail"), Column::Ipcent => translate!("df-header-iuse-percent"), Column::File => translate!("df-header-file"), Column::Fstype => translate!("df-header-type"), #[cfg(target_os = "macos")] Column::Capacity => translate!("df-header-capacity"), }; headers.push(header); } headers } } /// The output table. pub(crate) struct Table { alignments: Vec, rows: Vec>, widths: Vec, } impl Table { pub(crate) fn new(options: &Options, filesystems: Vec) -> Self { let headers = Header::get_headers(options); let mut widths: Vec<_> = options .columns .iter() .enumerate() .map(|(i, col)| Column::min_width(col).max(headers[i].len())) .collect(); let mut rows = vec![headers.iter().map(Cell::from_string).collect()]; // The running total of filesystem sizes and usage. // // This accumulator is computed in case we need to display the // total counts in the last row of the table. 
let mut total = Row::new(&translate!("df-total")); for filesystem in filesystems { // If the filesystem is not empty, or if the options require // showing all filesystems, then print the data as a row in // the output table. if options.show_all_fs || filesystem.usage.blocks > 0 { let row = Row::from(filesystem); let fmt = RowFormatter::new(&row, options, false); let values = fmt.get_cells(); total += row; rows.push(values); } } if options.show_total { let total_row = RowFormatter::new(&total, options, true); rows.push(total_row.get_cells()); } // extend the column widths (in chars) for long values in rows // do it here, after total row was added to the list of rows for row in &rows { for (i, value) in row.iter().enumerate() { if value.width > widths[i] { widths[i] = value.width; } } } Self { rows, widths, alignments: Self::get_alignments(&options.columns), } } fn get_alignments(columns: &Vec) -> Vec { let mut alignments = Vec::new(); for column in columns { alignments.push(Column::alignment(column)); } alignments } pub(crate) fn write_to(&self, writer: &mut dyn std::io::Write) -> std::io::Result<()> { for row in &self.rows { let mut col_iter = row.iter().enumerate().peekable(); while let Some((i, elem)) = col_iter.next() { let is_last_col = col_iter.peek().is_none(); let pad_width = self.widths[i].saturating_sub(elem.width); match self.alignments.get(i) { Some(Alignment::Left) => { writer.write_all(&elem.bytes)?; // no trailing spaces in last column if !is_last_col { writer .write_all(&iter::repeat_n(b' ', pad_width).collect::>())?; } } Some(Alignment::Right) => { writer.write_all(&iter::repeat_n(b' ', pad_width).collect::>())?; writer.write_all(&elem.bytes)?; } None => break, } if !is_last_col { // column separator writer.write_all(b" ")?; } } writeln!(writer)?; } Ok(()) } } #[cfg(test)] mod tests { use std::vec; use uucore::locale::setup_localization; use crate::blocks::HumanReadable; use crate::columns::Column; use crate::table::{Cell, Header, HeaderMode, Row, 
RowFormatter, Table}; use crate::{BlockSize, Options}; fn init() { unsafe { std::env::set_var("LANG", "C"); } let _ = setup_localization("df"); } const COLUMNS_WITH_FS_TYPE: [Column; 7] = [ Column::Source, Column::Fstype, Column::Size, Column::Used, Column::Avail, Column::Pcent, Column::Target, ]; const COLUMNS_WITH_INODES: [Column; 6] = [ Column::Source, Column::Itotal, Column::Iused, Column::Iavail, Column::Ipcent, Column::Target, ]; impl Default for Row { fn default() -> Self { Self { file: Some("/path/to/file".into()), fs_device: "my_device".to_string(), fs_type: "my_type".to_string(), fs_mount: "my_mount".into(), bytes: 100, bytes_used: 25, bytes_avail: 75, bytes_usage: Some(0.25), #[cfg(target_os = "macos")] bytes_capacity: Some(0.5), inodes: 10, inodes_used: 2, inodes_free: 8, inodes_usage: Some(0.2), } } } #[test] fn test_default_header() { init(); let options = Options::default(); assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "1K-blocks", "Used", "Available", "Use%", "Mounted on" ) ); } #[test] fn test_header_with_fs_type() { init(); let options = Options { columns: COLUMNS_WITH_FS_TYPE.to_vec(), ..Default::default() }; assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "Type", "1K-blocks", "Used", "Available", "Use%", "Mounted on" ) ); } #[test] fn test_header_with_inodes() { init(); let options = Options { columns: COLUMNS_WITH_INODES.to_vec(), ..Default::default() }; assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "Inodes", "IUsed", "IFree", "IUse%", "Mounted on" ) ); } #[test] fn test_header_with_block_size_1024() { init(); let options = Options { block_size: BlockSize::Bytes(3 * 1024), ..Default::default() }; assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "3K-blocks", "Used", "Available", "Use%", "Mounted on" ) ); } #[test] fn test_human_readable_header() { init(); let options = Options { header_mode: HeaderMode::HumanReadable, ..Default::default() }; assert_eq!( 
Header::get_headers(&options), vec!("Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on") ); } #[test] fn test_posix_portability_header() { init(); let options = Options { header_mode: HeaderMode::PosixPortability, ..Default::default() }; assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "1024-blocks", "Used", "Available", "Capacity", "Mounted on" ) ); } #[test] fn test_output_header() { init(); let options = Options { header_mode: HeaderMode::Output, ..Default::default() }; assert_eq!( Header::get_headers(&options), vec!( "Filesystem", "1K-blocks", "Used", "Avail", "Use%", "Mounted on" ) ); } fn compare_cell_content(cells: Vec, expected: Vec<&str>) -> bool { cells .into_iter() .zip(expected) .all(|(c, s)| c.bytes == s.as_bytes()) } #[test] fn test_row_formatter() { init(); let options = Options { block_size: BlockSize::Bytes(1), ..Default::default() }; let row = Row { fs_device: "my_device".to_string(), fs_mount: "my_mount".into(), bytes: 100, bytes_used: 25, bytes_avail: 75, bytes_usage: Some(0.25), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content( fmt.get_cells(), vec!("my_device", "100", "25", "75", "25%", "my_mount") )); } #[test] fn test_row_formatter_with_fs_type() { init(); let options = Options { columns: COLUMNS_WITH_FS_TYPE.to_vec(), block_size: BlockSize::Bytes(1), ..Default::default() }; let row = Row { fs_device: "my_device".to_string(), fs_type: "my_type".to_string(), fs_mount: "my_mount".into(), bytes: 100, bytes_used: 25, bytes_avail: 75, bytes_usage: Some(0.25), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content( fmt.get_cells(), vec!("my_device", "my_type", "100", "25", "75", "25%", "my_mount") )); } #[test] fn test_row_formatter_with_inodes() { init(); let options = Options { columns: COLUMNS_WITH_INODES.to_vec(), block_size: BlockSize::Bytes(1), ..Default::default() }; let row = Row { fs_device: 
"my_device".to_string(), fs_mount: "my_mount".into(), inodes: 10, inodes_used: 2, inodes_free: 8, inodes_usage: Some(0.2), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content( fmt.get_cells(), vec!("my_device", "10", "2", "8", "20%", "my_mount") )); } #[test] fn test_row_formatter_with_bytes_and_inodes() { init(); let options = Options { columns: vec![Column::Size, Column::Itotal], block_size: BlockSize::Bytes(100), ..Default::default() }; let row = Row { bytes: 100, inodes: 10, ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content(fmt.get_cells(), vec!("1", "10"))); } #[test] fn test_row_formatter_with_human_readable_si() { init(); let options = Options { human_readable: Some(HumanReadable::Decimal), columns: COLUMNS_WITH_FS_TYPE.to_vec(), ..Default::default() }; let row = Row { fs_device: "my_device".to_string(), fs_type: "my_type".to_string(), fs_mount: "my_mount".into(), bytes: 4000, bytes_used: 1000, bytes_avail: 3000, bytes_usage: Some(0.25), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content( fmt.get_cells(), vec!("my_device", "my_type", "4k", "1k", "3k", "25%", "my_mount") )); } #[test] fn test_row_formatter_with_human_readable_binary() { init(); let options = Options { human_readable: Some(HumanReadable::Binary), columns: COLUMNS_WITH_FS_TYPE.to_vec(), ..Default::default() }; let row = Row { fs_device: "my_device".to_string(), fs_type: "my_type".to_string(), fs_mount: "my_mount".into(), bytes: 4096, bytes_used: 1024, bytes_avail: 3072, bytes_usage: Some(0.25), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content( fmt.get_cells(), vec!("my_device", "my_type", "4K", "1K", "3K", "25%", "my_mount") )); } #[test] fn test_row_formatter_with_round_up_usage() { init(); let options = Options { columns: vec![Column::Pcent], ..Default::default() }; let row = 
Row { bytes_usage: Some(0.251), ..Default::default() }; let fmt = RowFormatter::new(&row, &options, false); assert!(compare_cell_content(fmt.get_cells(), vec!("26%"))); } #[test] fn test_row_formatter_with_round_up_byte_values() { init(); fn get_formatted_values(bytes: u64, bytes_used: u64, bytes_avail: u64) -> Vec { let options = Options { block_size: BlockSize::Bytes(1000), columns: vec![Column::Size, Column::Used, Column::Avail], ..Default::default() }; let row = Row { bytes, bytes_used, bytes_avail, ..Default::default() }; RowFormatter::new(&row, &options, false).get_cells() } assert!(compare_cell_content( get_formatted_values(100, 100, 0), vec!("1", "1", "0") )); assert!(compare_cell_content( get_formatted_values(100, 99, 1), vec!("1", "1", "1") )); assert!(compare_cell_content( get_formatted_values(1000, 1000, 0), vec!("1", "1", "0") )); assert!(compare_cell_content( get_formatted_values(1001, 1000, 1), vec!("2", "1", "1") )); } #[test] fn test_row_converter_with_invalid_numbers() { init(); // copy from wsl linux let d = crate::Filesystem { file: None, mount_info: crate::MountInfo { dev_id: "28".to_string(), dev_name: "none".to_string(), fs_type: "9p".to_string(), mount_dir: "/usr/lib/wsl/drivers".into(), mount_option: "ro,nosuid,nodev,noatime".to_string(), mount_root: "/".into(), remote: false, dummy: false, }, usage: crate::table::FsUsage { blocksize: 4096, blocks: 244_029_695, bfree: 125_085_030, bavail: 125_085_030, bavail_top_bit_set: false, files: 999, ffree: 1_000_000, }, }; let row = Row::from(d); assert_eq!(row.inodes_used, 0); } #[test] fn test_table_column_width_computation_include_total_row() { init(); let d1 = crate::Filesystem { file: None, mount_info: crate::MountInfo { dev_id: "28".to_string(), dev_name: "none".to_string(), fs_type: "9p".to_string(), mount_dir: "/usr/lib/wsl/drivers".into(), mount_option: "ro,nosuid,nodev,noatime".to_string(), mount_root: "/".into(), remote: false, dummy: false, }, usage: crate::table::FsUsage { blocksize: 
4096, blocks: 244_029_695, bfree: 125_085_030, bavail: 125_085_030, bavail_top_bit_set: false, files: 99_999_999_999, ffree: 999_999, }, }; let filesystems = vec![d1.clone(), d1]; let mut options = Options { show_total: true, columns: vec![ Column::Source, Column::Itotal, Column::Iused, Column::Iavail, ], ..Default::default() }; let table_w_total = Table::new(&options, filesystems.clone()); let mut data_w_total: Vec = vec![]; table_w_total .write_to(&mut data_w_total) .expect("Write error."); assert_eq!( String::from_utf8_lossy(&data_w_total), "Filesystem Inodes IUsed IFree\n\ none 99999999999 99999000000 999999\n\ none 99999999999 99999000000 999999\n\ total 199999999998 199998000000 1999998\n" ); options.show_total = false; let table_w_o_total = Table::new(&options, filesystems); let mut data_w_o_total: Vec = vec![]; table_w_o_total .write_to(&mut data_w_o_total) .expect("Write error."); assert_eq!( String::from_utf8_lossy(&data_w_o_total), "Filesystem Inodes IUsed IFree\n\ none 99999999999 99999000000 999999\n\ none 99999999999 99999000000 999999\n" ); } #[cfg(unix)] #[test] fn test_table_column_width_non_unicode() { init(); let bad_unicode_os_str = uucore::os_str_from_bytes(b"/usr/lib/w\xf3l/drivers") .expect("Only unix platforms can test non-unicode names") .to_os_string(); let d1 = crate::Filesystem { file: None, mount_info: crate::MountInfo { dev_id: "28".to_string(), dev_name: "none".to_string(), fs_type: "9p".to_string(), mount_dir: bad_unicode_os_str, mount_option: "ro,nosuid,nodev,noatime".to_string(), mount_root: "/".into(), remote: false, dummy: false, }, usage: crate::table::FsUsage { blocksize: 4096, blocks: 244_029_695, bfree: 125_085_030, bavail: 125_085_030, bavail_top_bit_set: false, files: 99_999_999_999, ffree: 999_999, }, }; let filesystems = vec![d1]; let options = Options { show_total: false, columns: vec![Column::Source, Column::Target, Column::Itotal], ..Default::default() }; let table = Table::new(&options, filesystems.clone()); let mut 
data: Vec = vec![]; table.write_to(&mut data).expect("Write error."); assert_eq!( data, b"Filesystem Mounted on Inodes\n\ none /usr/lib/w\xf3l/drivers 99999999999\n", "Comparison failed, lossy data for reference:\n{}\n", String::from_utf8_lossy(&data) ); } #[test] fn test_row_accumulation_u64_overflow() { init(); let total = u64::MAX as u128; let used1 = 3000u128; let used2 = 50000u128; let mut row1 = Row { inodes: total, inodes_used: used1, inodes_free: total - used1, ..Default::default() }; let row2 = Row { inodes: total, inodes_used: used2, inodes_free: total - used2, ..Default::default() }; row1 += row2; assert_eq!(row1.inodes, total * 2); assert_eq!(row1.inodes_used, used1 + used2); assert_eq!(row1.inodes_free, total * 2 - used1 - used2); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/000077500000000000000000000000001504311601400227425ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/Cargo.toml000066400000000000000000000011341504311601400246710ustar00rootroot00000000000000[package] name = "uu_dir" description = "shortcut to ls -C -b" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/ls" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/dir.rs" [dependencies] clap = { workspace = true, features = ["env"] } uucore = { workspace = true, features = ["entries", "fs", "quoting-style"] } uu_ls = { workspace = true } [[bin]] name = "dir" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/LICENSE000077700000000000000000000000001504311601400256102../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/src/000077500000000000000000000000001504311601400235315ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/src/dir.rs000066400000000000000000000042651504311601400246640ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::Command; use std::ffi::OsString; use std::path::Path; use uu_ls::{Config, Format, options}; use uucore::error::UResult; use uucore::quoting_style::QuotingStyle; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let command = uu_app(); let matches = command.get_matches_from(args); let mut default_quoting_style = false; let mut default_format_style = false; // We check if any options on formatting or quoting style have been given. 
// If not, we will use dir default formatting and quoting style options if !matches.contains_id(options::QUOTING_STYLE) && !matches.get_flag(options::quoting::C) && !matches.get_flag(options::quoting::ESCAPE) && !matches.get_flag(options::quoting::LITERAL) { default_quoting_style = true; } if !matches.contains_id(options::FORMAT) && !matches.get_flag(options::format::ACROSS) && !matches.get_flag(options::format::COLUMNS) && !matches.get_flag(options::format::COMMAS) && !matches.get_flag(options::format::LONG) && !matches.get_flag(options::format::LONG_NO_GROUP) && !matches.get_flag(options::format::LONG_NO_OWNER) && !matches.get_flag(options::format::LONG_NUMERIC_UID_GID) && !matches.get_flag(options::format::ONE_LINE) { default_format_style = true; } let mut config = Config::from(&matches)?; if default_quoting_style { config.quoting_style = QuotingStyle::C_NO_QUOTES; } if default_format_style { config.format = Format::Columns; } let locs = matches .get_many::(options::PATHS) .map_or_else(|| vec![Path::new(".")], |v| v.map(Path::new).collect()); uu_ls::list(locs, &config) } // To avoid code duplication, we reuse ls uu_app function which has the same // arguments. However, coreutils won't compile if one of the utils is missing // an uu_app function, so we return the `ls` app. 
pub fn uu_app() -> Command { uu_ls::uu_app() } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dir/src/main.rs000066400000000000000000000000261504311601400250210ustar00rootroot00000000000000uucore::bin!(uu_dir); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/000077500000000000000000000000001504311601400241645ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/Cargo.toml000066400000000000000000000011661504311601400261200ustar00rootroot00000000000000[package] name = "uu_dircolors" description = "dircolors ~ (uutils) display commands to set LS_COLORS" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/dircolors" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/dircolors.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["colors", "parser"] } fluent = { workspace = true } [[bin]] name = "dircolors" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/LICENSE000077700000000000000000000000001504311601400270322../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/README.md000066400000000000000000000012241504311601400254420ustar00rootroot00000000000000# How to update the internal database Create the test fixtures by writing the output of the GNU dircolors commands to the fixtures folder: ```shell dircolors --print-database > /PATH_TO_COREUTILS/tests/fixtures/dircolors/internal.expected dircolors --print-ls-colors > /PATH_TO_COREUTILS/tests/fixtures/dircolors/ls_colors.expected dircolors -b > /PATH_TO_COREUTILS/tests/fixtures/dircolors/bash_def.expected dircolors -c > /PATH_TO_COREUTILS/tests/fixtures/dircolors/csh_def.expected ``` Run the tests: ```shell 
cargo test --features "dircolors" --no-default-features ``` Edit `/PATH_TO_COREUTILS/src/uu/dircolors/src/dircolors.rs` until the tests pass. coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/locales/000077500000000000000000000000001504311601400256065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/locales/en-US.ftl000066400000000000000000000027171504311601400272530ustar00rootroot00000000000000dircolors-about = Output commands to set the LS_COLORS environment variable. dircolors-usage = dircolors [OPTION]... [FILE] dircolors-after-help = If FILE is specified, read it to determine which colors to use for which file types and extensions. Otherwise, a precompiled database is used. For details on the format of these files, run 'dircolors --print-database' # Help messages dircolors-help-bourne-shell = output Bourne shell code to set LS_COLORS dircolors-help-c-shell = output C shell code to set LS_COLORS dircolors-help-print-database = print the byte counts dircolors-help-print-ls-colors = output fully escaped colors for display # Error messages dircolors-error-shell-and-output-exclusive = the options to output non shell syntax, and to select a shell syntax are mutually exclusive dircolors-error-print-database-and-ls-colors-exclusive = options --print-database and --print-ls-colors are mutually exclusive dircolors-error-extra-operand-print-database = extra operand { $operand } file operands cannot be combined with --print-database (-p) dircolors-error-no-shell-environment = no SHELL environment variable, and no shell type option given dircolors-error-extra-operand = extra operand { $operand } dircolors-error-expected-file-got-directory = expected file, got directory { $path } dircolors-error-invalid-line-missing-token = { $file }:{ $line }: invalid line; missing second token dircolors-error-unrecognized-keyword = unrecognized keyword { $keyword } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/locales/fr-FR.ftl000066400000000000000000000033131504311601400272310ustar00rootroot00000000000000dircolors-about = Afficher les commandes pour définir la variable d'environnement LS_COLORS. dircolors-usage = dircolors [OPTION]... [FICHIER] dircolors-after-help = Si FICHIER est spécifié, le lire pour déterminer quelles couleurs utiliser pour quels types de fichiers et extensions. Sinon, une base de données précompilée est utilisée. Pour les détails sur le format de ces fichiers, exécutez 'dircolors --print-database' # Messages d'aide dircolors-help-bourne-shell = afficher le code Bourne shell pour définir LS_COLORS dircolors-help-c-shell = afficher le code C shell pour définir LS_COLORS dircolors-help-print-database = afficher la base de données de configuration dircolors-help-print-ls-colors = afficher les couleurs entièrement échappées pour l'affichage # Messages d'erreur dircolors-error-shell-and-output-exclusive = les options pour afficher une syntaxe non-shell et pour sélectionner une syntaxe shell sont mutuellement exclusives dircolors-error-print-database-and-ls-colors-exclusive = les options --print-database et --print-ls-colors sont mutuellement exclusives dircolors-error-extra-operand-print-database = opérande supplémentaire { $operand } les opérandes de fichier ne peuvent pas être combinées avec --print-database (-p) dircolors-error-no-shell-environment = aucune variable d'environnement SHELL, et aucune option de type de shell donnée dircolors-error-extra-operand = opérande supplémentaire { $operand } dircolors-error-expected-file-got-directory = fichier attendu, répertoire obtenu { $path } dircolors-error-invalid-line-missing-token = { $file }:{ $line } : ligne invalide ; jeton manquant dircolors-error-unrecognized-keyword = mot-clé non reconnu { $keyword } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/src/000077500000000000000000000000001504311601400247535ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/src/dircolors.rs000066400000000000000000000425431504311601400273310ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) clrtoeol dircolors eightbit endcode fnmatch leftcode multihardlink rightcode setenv sgid suid colorterm disp use std::borrow::Borrow; use std::env; use std::fmt::Write as _; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; use clap::{Arg, ArgAction, Command}; use uucore::colors::{FILE_ATTRIBUTE_CODES, FILE_COLORS, FILE_TYPES, TERMS}; use uucore::display::Quotable; use uucore::error::{UResult, USimpleError, UUsageError}; use uucore::translate; use uucore::{format_usage, parser::parse_glob}; mod options { pub const BOURNE_SHELL: &str = "bourne-shell"; pub const C_SHELL: &str = "c-shell"; pub const PRINT_DATABASE: &str = "print-database"; pub const PRINT_LS_COLORS: &str = "print-ls-colors"; pub const FILE: &str = "FILE"; } #[derive(PartialEq, Eq, Debug)] pub enum OutputFmt { Shell, CShell, Display, Unknown, } pub fn guess_syntax() -> OutputFmt { match env::var("SHELL") { Ok(ref s) if !s.is_empty() => { let shell_path: &Path = s.as_ref(); if let Some(name) = shell_path.file_name() { if name == "csh" || name == "tcsh" { OutputFmt::CShell } else { OutputFmt::Shell } } else { OutputFmt::Shell } } _ => OutputFmt::Unknown, } } fn get_colors_format_strings(fmt: &OutputFmt) -> (String, String) { let prefix = match fmt { OutputFmt::Shell => "LS_COLORS='".to_string(), OutputFmt::CShell => "setenv LS_COLORS '".to_string(), OutputFmt::Display => String::new(), OutputFmt::Unknown => unreachable!(), }; let suffix = match fmt { 
OutputFmt::Shell => "';\nexport LS_COLORS".to_string(), OutputFmt::CShell => "'".to_string(), OutputFmt::Display => String::new(), OutputFmt::Unknown => unreachable!(), }; (prefix, suffix) } pub fn generate_type_output(fmt: &OutputFmt) -> String { match fmt { OutputFmt::Display => FILE_TYPES .iter() .map(|&(_, key, val)| format!("\x1b[{val}m{key}\t{val}\x1b[0m")) .collect::>() .join("\n"), _ => { // Existing logic for other formats FILE_TYPES .iter() .map(|&(_, v1, v2)| format!("{v1}={v2}")) .collect::>() .join(":") } } } fn generate_ls_colors(fmt: &OutputFmt, sep: &str) -> String { match fmt { OutputFmt::Display => { let mut display_parts = vec![]; let type_output = generate_type_output(fmt); display_parts.push(type_output); for &(extension, code) in FILE_COLORS { let prefix = if extension.starts_with('*') { "" } else { "*" }; let formatted_extension = format!("\x1b[{code}m{prefix}{extension}\t{code}\x1b[0m"); display_parts.push(formatted_extension); } display_parts.join("\n") } _ => { // existing logic for other formats let mut parts = vec![]; for &(extension, code) in FILE_COLORS { let prefix = if extension.starts_with('*') { "" } else { "*" }; let formatted_extension = format!("{prefix}{extension}"); parts.push(format!("{formatted_extension}={code}")); } let (prefix, suffix) = get_colors_format_strings(fmt); let ls_colors = parts.join(sep); format!("{prefix}{}:{ls_colors}:{suffix}", generate_type_output(fmt),) } } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let files = matches .get_many::(options::FILE) .map_or(vec![], |file_values| file_values.collect()); // clap provides .conflicts_with / .conflicts_with_all, but we want to // manually handle conflicts so we can match the output of GNU coreutils if (matches.get_flag(options::C_SHELL) || matches.get_flag(options::BOURNE_SHELL)) && (matches.get_flag(options::PRINT_DATABASE) || matches.get_flag(options::PRINT_LS_COLORS)) { return 
Err(UUsageError::new( 1, translate!("dircolors-error-shell-and-output-exclusive"), )); } if matches.get_flag(options::PRINT_DATABASE) && matches.get_flag(options::PRINT_LS_COLORS) { return Err(UUsageError::new( 1, translate!("dircolors-error-print-database-and-ls-colors-exclusive"), )); } if matches.get_flag(options::PRINT_DATABASE) { if !files.is_empty() { return Err(UUsageError::new( 1, translate!("dircolors-error-extra-operand-print-database", "operand" => files[0].quote()), )); } println!("{}", generate_dircolors_config()); return Ok(()); } let mut out_format = if matches.get_flag(options::C_SHELL) { OutputFmt::CShell } else if matches.get_flag(options::BOURNE_SHELL) { OutputFmt::Shell } else if matches.get_flag(options::PRINT_LS_COLORS) { OutputFmt::Display } else { OutputFmt::Unknown }; if out_format == OutputFmt::Unknown { match guess_syntax() { OutputFmt::Unknown => { return Err(USimpleError::new( 1, translate!("dircolors-error-no-shell-environment"), )); } fmt => out_format = fmt, } } let result; if files.is_empty() { println!("{}", generate_ls_colors(&out_format, ":")); return Ok(()); /* // Check if data is being piped into the program if std::io::stdin().is_terminal() { // No data piped, use default behavior println!("{}", generate_ls_colors(&out_format, ":")); return Ok(()); } else { // Data is piped, process the input from stdin let fin = BufReader::new(std::io::stdin()); result = parse(fin.lines().map_while(Result::ok), &out_format, "-"); } */ } else if files.len() > 1 { return Err(UUsageError::new( 1, translate!("dircolors-error-extra-operand", "operand" => files[1].quote()), )); } else if files[0].eq("-") { let fin = BufReader::new(std::io::stdin()); // For example, for echo "owt 40;33"|dircolors -b - result = parse(fin.lines().map_while(Result::ok), &out_format, files[0]); } else { let path = Path::new(files[0]); if path.is_dir() { return Err(USimpleError::new( 2, translate!("dircolors-error-expected-file-got-directory", "path" => path.quote()), 
)); } match File::open(path) { Ok(f) => { let fin = BufReader::new(f); result = parse( fin.lines().map_while(Result::ok), &out_format, &path.to_string_lossy(), ); } Err(e) => { return Err(USimpleError::new(1, format!("{}: {e}", path.maybe_quote()))); } } } match result { Ok(s) => { println!("{s}"); Ok(()) } Err(s) => Err(USimpleError::new(1, s)), } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("dircolors-about")) .after_help(translate!("dircolors-after-help")) .override_usage(format_usage(&translate!("dircolors-usage"))) .args_override_self(true) .infer_long_args(true) .arg( Arg::new(options::BOURNE_SHELL) .long("sh") .short('b') .visible_alias("bourne-shell") .overrides_with(options::C_SHELL) .help(translate!("dircolors-help-bourne-shell")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::C_SHELL) .long("csh") .short('c') .visible_alias("c-shell") .overrides_with(options::BOURNE_SHELL) .help(translate!("dircolors-help-c-shell")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRINT_DATABASE) .long("print-database") .short('p') .help(translate!("dircolors-help-print-database")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRINT_LS_COLORS) .long("print-ls-colors") .help(translate!("dircolors-help-print-ls-colors")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILE) .hide(true) .value_hint(clap::ValueHint::FilePath) .action(ArgAction::Append), ) } pub trait StrUtils { /// Remove comments and trim whitespace fn purify(&self) -> &Self; /// Like `split_whitespace()` but only produce 2 parts fn split_two(&self) -> (&str, &str); fn fnmatch(&self, pattern: &str) -> bool; } impl StrUtils for str { fn purify(&self) -> &Self { let mut line = self; for (n, _) in self .as_bytes() .iter() .enumerate() .filter(|(_, c)| **c == b'#') { // Ignore the content after '#' // only if it is preceded by at least one whitespace match self[..n].chars().last() { Some(c) if 
c.is_whitespace() => { line = &self[..n - c.len_utf8()]; break; } None => { // n == 0 line = &self[..0]; break; } _ => (), } } line.trim() } fn split_two(&self) -> (&str, &str) { if let Some(b) = self.find(char::is_whitespace) { let key = &self[..b]; if let Some(e) = self[b..].find(|c: char| !c.is_whitespace()) { (key, &self[b + e..]) } else { (key, "") } } else { ("", "") } } fn fnmatch(&self, pat: &str) -> bool { parse_glob::from_str(pat).unwrap().matches(self) } } #[derive(PartialEq)] enum ParseState { Global, Matched, Continue, Pass, } fn parse(user_input: T, fmt: &OutputFmt, fp: &str) -> Result where T: IntoIterator, T::Item: Borrow, { let mut result = String::with_capacity(1790); let (prefix, suffix) = get_colors_format_strings(fmt); result.push_str(&prefix); // Get environment variables once at the start let term = env::var("TERM").unwrap_or_else(|_| "none".to_owned()); let colorterm = env::var("COLORTERM").unwrap_or_default(); let mut state = ParseState::Global; let mut saw_colorterm_match = false; for (num, line) in user_input.into_iter().enumerate() { let num = num + 1; let line = line.borrow().purify(); if line.is_empty() { continue; } let line = escape(line); let (key, val) = line.split_two(); if val.is_empty() { return Err( translate!("dircolors-error-invalid-line-missing-token", "file" => fp.maybe_quote(), "line" => num), ); } let lower = key.to_lowercase(); match lower.as_str() { "term" => { if term.fnmatch(val) { state = ParseState::Matched; } else if state == ParseState::Global { state = ParseState::Pass; } } "colorterm" => { // For COLORTERM ?*, only match if COLORTERM is non-empty let matches = if val == "?*" { !colorterm.is_empty() } else { colorterm.fnmatch(val) }; if matches { state = ParseState::Matched; saw_colorterm_match = true; } else if !saw_colorterm_match && state == ParseState::Global { state = ParseState::Pass; } } _ => { if state == ParseState::Matched { // prevent subsequent mismatched TERM from // cancelling the input state = 
ParseState::Continue; } if state != ParseState::Pass { append_entry(&mut result, fmt, key, &lower, val)?; } } } } if fmt == &OutputFmt::Display { // remove latest "\n" result.pop(); } result.push_str(&suffix); Ok(result) } fn append_entry( result: &mut String, fmt: &OutputFmt, key: &str, lower: &str, val: &str, ) -> Result<(), String> { if key.starts_with(['.', '*']) { let entry = if key.starts_with('.') { format!("*{key}") } else { key.to_string() }; let disp = if *fmt == OutputFmt::Display { format!("\x1b[{val}m{entry}\t{val}\x1b[0m\n") } else { format!("{entry}={val}:") }; result.push_str(&disp); return Ok(()); } match lower { "options" | "color" | "eightbit" => Ok(()), // Slackware only, ignore _ => { if let Some((_, s)) = FILE_ATTRIBUTE_CODES.iter().find(|&&(key, _)| key == lower) { let disp = if *fmt == OutputFmt::Display { format!("\x1b[{val}m{s}\t{val}\x1b[0m\n") } else { format!("{s}={val}:") }; result.push_str(&disp); Ok(()) } else { Err(translate!("dircolors-error-unrecognized-keyword", "keyword" => key)) } } } } /// Escape single quotes because they are not allowed between single quotes in shell code, and code /// enclosed by single quotes is what is returned by `parse()`. 
/// /// We also escape ":" to make the "quote" test pass in the GNU test suite: /// fn escape(s: &str) -> String { let mut result = String::new(); let mut previous = ' '; for c in s.chars() { match c { '\'' => result.push_str("'\\''"), ':' if previous != '\\' => result.push_str("\\:"), _ => result.push(c), } previous = c; } result } pub fn generate_dircolors_config() -> String { let mut config = String::new(); config.push_str( "\ # Configuration file for dircolors, a utility to help you set the\n\ # LS_COLORS environment variable used by GNU ls with the --color option.\n\ # The keywords COLOR, OPTIONS, and EIGHTBIT (honored by the\n\ # slackware version of dircolors) are recognized but ignored.\n\ # Global config options can be specified before TERM or COLORTERM entries\n\ # Below are TERM or COLORTERM entries, which can be glob patterns, which\n\ # restrict following config to systems with matching environment variables.\n\ ", ); config.push_str("COLORTERM ?*\n"); for term in TERMS { let _ = writeln!(config, "TERM {term}"); } config.push_str( "\ # Below are the color init strings for the basic file types.\n\ # One can use codes for 256 or more colors supported by modern terminals.\n\ # The default color codes use the capabilities of an 8 color terminal\n\ # with some additional attributes as per the following codes:\n\ # Attribute codes:\n\ # 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed\n\ # Text color codes:\n\ # 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white\n\ # Background color codes:\n\ # 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white\n\ #NORMAL 00 # no color code at all\n\ #FILE 00 # regular file: use no color at all\n\ ", ); for (name, _, code) in FILE_TYPES { let _ = writeln!(config, "{name} {code}"); } config.push_str("# List any file extensions like '.gz' or '.tar' that you would like ls\n"); config.push_str("# to color below. 
Put the extension, a space, and the color init string.\n"); for (ext, color) in FILE_COLORS { let _ = writeln!(config, "{ext} {color}"); } config.push_str("# Subsequent TERM or COLORTERM entries, can be used to add / override\n"); config.push_str("# config specific to those matching environment variables."); config } #[cfg(test)] mod tests { use super::escape; #[test] fn test_escape() { assert_eq!("", escape("")); assert_eq!("'\\''", escape("'")); assert_eq!("\\:", escape(":")); assert_eq!("\\:", escape("\\:")); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dircolors/src/main.rs000066400000000000000000000000341504311601400262420ustar00rootroot00000000000000uucore::bin!(uu_dircolors); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/000077500000000000000000000000001504311601400236035ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/Cargo.toml000066400000000000000000000011161504311601400255320ustar00rootroot00000000000000[package] name = "uu_dirname" description = "dirname ~ (uutils) display parent directory of PATHNAME" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/dirname" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/dirname.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "dirname" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/LICENSE000077700000000000000000000000001504311601400264512../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/locales/000077500000000000000000000000001504311601400252255ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/locales/en-US.ftl000066400000000000000000000005651504311601400266710ustar00rootroot00000000000000dirname-about = Strip last component from file name dirname-usage = dirname [OPTION] NAME... dirname-after-help = Output each NAME with its last non-slash component and trailing slashes removed; if NAME contains no /'s, output '.' (meaning the current directory). dirname-missing-operand = missing operand dirname-zero-help = separate output with NUL rather than newline coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/locales/fr-FR.ftl000066400000000000000000000006541504311601400266550ustar00rootroot00000000000000dirname-about = Supprimer le dernier composant du nom de fichier dirname-usage = dirname [OPTION] NOM... dirname-after-help = Afficher chaque NOM avec son dernier composant non-slash et les slashes finaux supprimés ; si NOM ne contient pas de '/', afficher '.' (signifiant le répertoire courant). dirname-missing-operand = opérande manquant dirname-zero-help = séparer la sortie avec NUL plutôt qu'avec un saut de ligne coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/src/000077500000000000000000000000001504311601400243725ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/src/dirname.rs000066400000000000000000000043761504311601400263710ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
use clap::{Arg, ArgAction, Command}; use std::path::Path; use uucore::display::print_verbatim; use uucore::error::{UResult, UUsageError}; use uucore::format_usage; use uucore::line_ending::LineEnding; use uucore::translate; mod options { pub const ZERO: &str = "zero"; pub const DIR: &str = "dir"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app() .after_help(translate!("dirname-after-help")) .try_get_matches_from(args)?; let line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO)); let dirnames: Vec = matches .get_many::(options::DIR) .unwrap_or_default() .cloned() .collect(); if dirnames.is_empty() { return Err(UUsageError::new(1, translate!("dirname-missing-operand"))); } for path in &dirnames { let p = Path::new(path); match p.parent() { Some(d) => { if d.components().next().is_none() { print!("."); } else { print_verbatim(d).unwrap(); } } None => { if p.is_absolute() || path == "/" { print!("/"); } else { print!("."); } } } print!("{line_ending}"); } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .about(translate!("dirname-about")) .version(uucore::crate_version!()) .override_usage(format_usage(&translate!("dirname-usage"))) .args_override_self(true) .infer_long_args(true) .arg( Arg::new(options::ZERO) .long(options::ZERO) .short('z') .help(translate!("dirname-zero-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DIR) .hide(true) .action(ArgAction::Append) .value_hint(clap::ValueHint::AnyPath), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/dirname/src/main.rs000066400000000000000000000000321504311601400256570ustar00rootroot00000000000000uucore::bin!(uu_dirname); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/000077500000000000000000000000001504311601400225745ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/Cargo.toml000066400000000000000000000015321504311601400245250ustar00rootroot00000000000000[package] name = "uu_du" description = "du ~ (uutils) display disk usage" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/du" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/du.rs" [dependencies] # For the --exclude & --exclude-from options glob = { workspace = true } clap = { workspace = true } uucore = { workspace = true, features = ["format", "fsext", "parser", "time"] } thiserror = { workspace = true } fluent = { workspace = true } [target.'cfg(target_os = "windows")'.dependencies] windows-sys = { workspace = true, features = [ "Win32_Storage_FileSystem", "Win32_Foundation", ] } [[bin]] name = "du" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/LICENSE000077700000000000000000000000001504311601400254422../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/locales/000077500000000000000000000000001504311601400242165ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/locales/en-US.ftl000066400000000000000000000114561504311601400256630ustar00rootroot00000000000000du-about = Estimate file space usage du-usage = du [OPTION]... [FILE]... du [OPTION]... --files0-from=F du-after-help = Display values are in units of the first available SIZE from --block-size, and the DU_BLOCK_SIZE, BLOCK_SIZE and BLOCKSIZE environment variables. Otherwise, units default to 1024 bytes (or 512 if POSIXLY_CORRECT is set). 
SIZE is an integer and optional unit (example: 10M is 10*1024*1024). Units are K, M, G, T, P, E, Z, Y (powers of 1024) or KB, MB,... (powers of 1000). PATTERN allows some advanced exclusions. For example, the following syntaxes are supported: ? will match only one character { "*" } will match zero or more characters {"{"}a,b{"}"} will match a or b # Help messages du-help-print-help = Print help information. du-help-all = write counts for all files, not just directories du-help-apparent-size = print apparent sizes, rather than disk usage although the apparent size is usually smaller, it may be larger due to holes in ('sparse') files, internal fragmentation, indirect blocks, and the like du-help-block-size = scale sizes by SIZE before printing them. E.g., '-BM' prints sizes in units of 1,048,576 bytes. See SIZE format below. du-help-bytes = equivalent to '--apparent-size --block-size=1' du-help-total = produce a grand total du-help-max-depth = print the total for a directory (or file, with --all) only if it is N or fewer levels below the command line argument; --max-depth=0 is the same as --summarize du-help-human-readable = print sizes in human readable format (e.g., 1K 234M 2G) du-help-inodes = list inode usage information instead of block usage like --block-size=1K du-help-block-size-1k = like --block-size=1K du-help-count-links = count sizes many times if hard linked du-help-dereference = follow all symbolic links du-help-dereference-args = follow only symlinks that are listed on the command line du-help-no-dereference = don't follow any symbolic links (this is the default) du-help-block-size-1m = like --block-size=1M du-help-null = end each output line with 0 byte rather than newline du-help-separate-dirs = do not include size of subdirectories du-help-summarize = display only a total for each argument du-help-si = like -h, but use powers of 1000 not 1024 du-help-one-file-system = skip directories on different file systems du-help-threshold = exclude entries 
smaller than SIZE if positive, or entries greater than SIZE if negative du-help-verbose = verbose mode (option not present in GNU/Coreutils) du-help-exclude = exclude files that match PATTERN du-help-exclude-from = exclude files that match any pattern in FILE du-help-files0-from = summarize device usage of the NUL-terminated file names specified in file F; if F is -, then read names from standard input du-help-time = show time of the last modification of any file in the directory, or any of its subdirectories. If WORD is given, show time as WORD instead of modification time: atime, access, use, ctime, status, birth or creation du-help-time-style = show times using style STYLE: full-iso, long-iso, iso, +FORMAT FORMAT is interpreted like 'date' # Error messages du-error-invalid-max-depth = invalid maximum depth { $depth } du-error-summarize-depth-conflict = summarizing conflicts with --max-depth={ $depth } du-error-invalid-time-style = invalid argument { $style } for 'time style' Valid arguments are: - 'full-iso' - 'long-iso' - 'iso' - +FORMAT (e.g., +%H:%M) for a 'date'-style format Try '{ $help }' for more information. du-error-invalid-time-arg = 'birth' and 'creation' arguments for --time are not supported on this platform. 
du-error-invalid-glob = Invalid exclude syntax: { $error } du-error-cannot-read-directory = cannot read directory { $path } du-error-cannot-access = cannot access { $path } du-error-read-error-is-directory = { $file }: read error: Is a directory du-error-cannot-open-for-reading = cannot open '{ $file }' for reading: No such file or directory du-error-invalid-zero-length-file-name = { $file }:{ $line }: invalid zero-length file name du-error-extra-operand-with-files0-from = extra operand { $file } file operands cannot be combined with --files0-from du-error-invalid-block-size-argument = invalid --{ $option } argument { $value } du-error-cannot-access-no-such-file = cannot access { $path }: No such file or directory du-error-printing-thread-panicked = Printing thread panicked. du-error-invalid-suffix = invalid suffix in --{ $option } argument { $value } du-error-invalid-argument = invalid --{ $option } argument { $value } du-error-argument-too-large = --{ $option } argument { $value } too large # Verbose/status messages du-verbose-ignored = { $path } ignored du-verbose-adding-to-exclude-list = adding { $pattern } to the exclude list du-total = total du-warning-apparent-size-ineffective-with-inodes = options --apparent-size and -b are ineffective with --inodes coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/locales/fr-FR.ftl000066400000000000000000000133011504311601400256370ustar00rootroot00000000000000du-about = Estimer l'utilisation de l'espace disque des fichiers du-usage = du [OPTION]... [FICHIER]... du [OPTION]... --files0-from=F du-after-help = Les valeurs affichées sont en unités de la première TAILLE disponible de --block-size, et des variables d'environnement DU_BLOCK_SIZE, BLOCK_SIZE et BLOCKSIZE. Sinon, les unités par défaut sont 1024 octets (ou 512 si POSIXLY_CORRECT est défini). TAILLE est un entier et une unité optionnelle (exemple : 10M est 10*1024*1024). Les unités sont K, M, G, T, P, E, Z, Y (puissances de 1024) ou KB, MB,... 
(puissances de 1000). MOTIF permet des exclusions avancées. Par exemple, les syntaxes suivantes sont supportées : ? correspondra à un seul caractère { "*" } correspondra à zéro ou plusieurs caractères {"{"}a,b{"}"} correspondra à a ou b # Messages d'aide du-help-print-help = Afficher les informations d'aide. du-help-all = afficher les comptes pour tous les fichiers, pas seulement les répertoires du-help-apparent-size = afficher les tailles apparentes, plutôt que l'utilisation du disque bien que la taille apparente soit généralement plus petite, elle peut être plus grande en raison de trous dans les fichiers ('sparse'), la fragmentation interne, les blocs indirects, etc. du-help-block-size = mettre à l'échelle les tailles par TAILLE avant de les afficher. Par ex., '-BM' affiche les tailles en unités de 1 048 576 octets. Voir le format TAILLE ci-dessous. du-help-bytes = équivalent à '--apparent-size --block-size=1' du-help-total = produire un total général du-help-max-depth = afficher le total pour un répertoire (ou fichier, avec --all) seulement s'il est à N niveaux ou moins sous l'argument de ligne de commande ; --max-depth=0 est identique à --summarize du-help-human-readable = afficher les tailles dans un format lisible par l'homme (p. 
ex., 1K 234M 2G) du-help-inodes = lister les informations d'utilisation des inodes au lieu de l'utilisation des blocs comme --block-size=1K du-help-block-size-1k = comme --block-size=1K du-help-count-links = compter les tailles plusieurs fois si liées en dur du-help-dereference = suivre tous les liens symboliques du-help-dereference-args = suivre seulement les liens symboliques qui sont listés sur la ligne de commande du-help-no-dereference = ne pas suivre les liens symboliques (c'est le défaut) du-help-block-size-1m = comme --block-size=1M du-help-null = terminer chaque ligne de sortie avec un octet 0 plutôt qu'une nouvelle ligne du-help-separate-dirs = ne pas inclure la taille des sous-répertoires du-help-summarize = afficher seulement un total pour chaque argument du-help-si = comme -h, mais utiliser les puissances de 1000 et non 1024 du-help-one-file-system = ignorer les répertoires sur des systèmes de fichiers différents du-help-threshold = exclure les entrées plus petites que TAILLE si positive, ou les entrées plus grandes que TAILLE si négative du-help-verbose = mode verbeux (option non présente dans GNU/Coreutils) du-help-exclude = exclure les fichiers qui correspondent au MOTIF du-help-exclude-from = exclure les fichiers qui correspondent à n'importe quel motif dans FICHIER du-help-files0-from = résumer l'utilisation du périphérique des noms de fichiers terminés par NUL spécifiés dans le fichier F ; si F est -, alors lire les noms depuis l'entrée standard du-help-time = montrer l'heure de la dernière modification de n'importe quel fichier dans le répertoire, ou n'importe lequel de ses sous-répertoires. 
Si MOT est donné, montrer l'heure comme MOT au lieu de l'heure de modification : atime, access, use, ctime, status, birth ou creation du-help-time-style = montrer les heures en utilisant le style STYLE : full-iso, long-iso, iso, +FORMAT FORMAT est interprété comme 'date' # Messages d'erreur du-error-invalid-max-depth = profondeur maximale invalide { $depth } du-error-summarize-depth-conflict = la synthèse entre en conflit avec --max-depth={ $depth } du-error-invalid-time-style = argument invalide { $style } pour 'style de temps' Les arguments valides sont : - 'full-iso' - 'long-iso' - 'iso' - +FORMAT (e.g., +%H:%M) pour un format de type 'date' Essayez '{ $help }' pour plus d'informations. du-error-invalid-time-arg = les arguments 'birth' et 'creation' pour --time ne sont pas supportés sur cette plateforme. du-error-invalid-glob = Syntaxe d'exclusion invalide : { $error } du-error-cannot-read-directory = impossible de lire le répertoire { $path } du-error-cannot-access = impossible d'accéder à { $path } du-error-read-error-is-directory = { $file } : erreur de lecture : C'est un répertoire du-error-cannot-open-for-reading = impossible d'ouvrir '{ $file }' en lecture : Aucun fichier ou répertoire de ce type du-error-invalid-zero-length-file-name = { $file }:{ $line } : nom de fichier de longueur zéro invalide du-error-extra-operand-with-files0-from = opérande supplémentaire { $file } les opérandes de fichier ne peuvent pas être combinées avec --files0-from du-error-invalid-block-size-argument = argument --{ $option } invalide { $value } du-error-cannot-access-no-such-file = impossible d'accéder à { $path } : Aucun fichier ou répertoire de ce type du-error-printing-thread-panicked = Le thread d'affichage a paniqué. 
du-error-invalid-suffix = suffixe invalide dans l'argument --{ $option } { $value } du-error-invalid-argument = argument --{ $option } invalide { $value } du-error-argument-too-large = argument --{ $option } { $value } trop grand # Messages verbeux/de statut du-verbose-ignored = { $path } ignoré du-verbose-adding-to-exclude-list = ajout de { $pattern } à la liste d'exclusion du-total = total du-warning-apparent-size-ineffective-with-inodes = les options --apparent-size et -b sont inefficaces avec --inodes coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/src/000077500000000000000000000000001504311601400233635ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/src/du.rs000066400000000000000000001104321504311601400243420ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::{Arg, ArgAction, ArgMatches, Command, builder::PossibleValue}; use glob::Pattern; use std::collections::HashSet; use std::env; use std::fs::Metadata; use std::fs::{self, DirEntry, File}; use std::io::{BufRead, BufReader, stdout}; #[cfg(not(windows))] use std::os::unix::fs::MetadataExt; #[cfg(windows)] use std::os::windows::io::AsRawHandle; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::mpsc; use std::thread; use thiserror::Error; use uucore::display::{Quotable, print_verbatim}; use uucore::error::{FromIo, UError, UResult, USimpleError, set_exit_code}; use uucore::fsext::{MetadataTimeField, metadata_get_time}; use uucore::line_ending::LineEnding; use uucore::translate; use uucore::parser::parse_glob; use uucore::parser::parse_size::{ParseSizeError, parse_size_u64}; use uucore::parser::shortcut_value_parser::ShortcutValueParser; use uucore::time::{FormatSystemTimeFallback, format, format_system_time}; use uucore::{format_usage, show, show_error, show_warning}; 
#[cfg(windows)] use windows_sys::Win32::Foundation::HANDLE; #[cfg(windows)] use windows_sys::Win32::Storage::FileSystem::{ FILE_ID_128, FILE_ID_INFO, FILE_STANDARD_INFO, FileIdInfo, FileStandardInfo, GetFileInformationByHandleEx, }; mod options { pub const HELP: &str = "help"; pub const NULL: &str = "0"; pub const ALL: &str = "all"; pub const APPARENT_SIZE: &str = "apparent-size"; pub const BLOCK_SIZE: &str = "block-size"; pub const BYTES: &str = "b"; pub const TOTAL: &str = "c"; pub const MAX_DEPTH: &str = "d"; pub const HUMAN_READABLE: &str = "h"; pub const BLOCK_SIZE_1K: &str = "k"; pub const COUNT_LINKS: &str = "l"; pub const BLOCK_SIZE_1M: &str = "m"; pub const SEPARATE_DIRS: &str = "S"; pub const SUMMARIZE: &str = "s"; pub const THRESHOLD: &str = "threshold"; pub const SI: &str = "si"; pub const TIME: &str = "time"; pub const TIME_STYLE: &str = "time-style"; pub const ONE_FILE_SYSTEM: &str = "one-file-system"; pub const DEREFERENCE: &str = "dereference"; pub const DEREFERENCE_ARGS: &str = "dereference-args"; pub const NO_DEREFERENCE: &str = "no-dereference"; pub const INODES: &str = "inodes"; pub const EXCLUDE: &str = "exclude"; pub const EXCLUDE_FROM: &str = "exclude-from"; pub const FILES0_FROM: &str = "files0-from"; pub const VERBOSE: &str = "verbose"; pub const FILE: &str = "FILE"; } struct TraversalOptions { all: bool, separate_dirs: bool, one_file_system: bool, dereference: Deref, count_links: bool, verbose: bool, excludes: Vec, } struct StatPrinter { total: bool, inodes: bool, max_depth: Option, threshold: Option, apparent_size: bool, size_format: SizeFormat, time: Option, time_format: String, line_ending: LineEnding, summarize: bool, total_text: String, } #[derive(PartialEq, Clone)] enum Deref { All, Args(Vec), None, } #[derive(Clone)] enum SizeFormat { HumanDecimal, HumanBinary, BlockSize(u64), } #[derive(PartialEq, Eq, Hash, Clone, Copy)] struct FileInfo { file_id: u128, dev_id: u64, } struct Stat { path: PathBuf, size: u64, blocks: u64, inodes: 
u64, inode: Option, metadata: Metadata, } impl Stat { fn new( path: &Path, dir_entry: Option<&DirEntry>, options: &TraversalOptions, ) -> std::io::Result { // Determine whether to dereference (follow) the symbolic link let should_dereference = match &options.dereference { Deref::All => true, Deref::Args(paths) => paths.contains(&path.to_path_buf()), Deref::None => false, }; let metadata = if should_dereference { // Get metadata, following symbolic links if necessary fs::metadata(path) } else if let Some(dir_entry) = dir_entry { // Get metadata directly from the DirEntry, which is faster on Windows dir_entry.metadata() } else { // Get metadata from the filesystem without following symbolic links fs::symlink_metadata(path) }?; let file_info = get_file_info(path, &metadata); let blocks = get_blocks(path, &metadata); Ok(Self { path: path.to_path_buf(), size: if metadata.is_dir() { 0 } else { metadata.len() }, blocks, inodes: 1, inode: file_info, metadata, }) } } #[cfg(not(windows))] fn get_blocks(_path: &Path, metadata: &Metadata) -> u64 { metadata.blocks() } #[cfg(windows)] fn get_blocks(path: &Path, _metadata: &Metadata) -> u64 { let mut size_on_disk = 0; // bind file so it stays in scope until end of function // if it goes out of scope the handle below becomes invalid let Ok(file) = File::open(path) else { return size_on_disk; // opening directories will fail }; unsafe { let mut file_info: FILE_STANDARD_INFO = core::mem::zeroed(); let file_info_ptr: *mut FILE_STANDARD_INFO = &raw mut file_info; let success = GetFileInformationByHandleEx( file.as_raw_handle() as HANDLE, FileStandardInfo, file_info_ptr.cast(), size_of::() as u32, ); if success != 0 { size_on_disk = file_info.AllocationSize as u64; } } size_on_disk / 1024 * 2 } #[cfg(not(windows))] fn get_file_info(_path: &Path, metadata: &Metadata) -> Option { Some(FileInfo { file_id: metadata.ino() as u128, dev_id: metadata.dev(), }) } #[cfg(windows)] fn get_file_info(path: &Path, _metadata: &Metadata) -> Option { 
let mut result = None; let Ok(file) = File::open(path) else { return result; }; unsafe { let mut file_info: FILE_ID_INFO = core::mem::zeroed(); let file_info_ptr: *mut FILE_ID_INFO = &raw mut file_info; let success = GetFileInformationByHandleEx( file.as_raw_handle() as HANDLE, FileIdInfo, file_info_ptr.cast(), size_of::() as u32, ); if success != 0 { result = Some(FileInfo { file_id: std::mem::transmute::(file_info.FileId), dev_id: file_info.VolumeSerialNumber, }); } } result } fn read_block_size(s: Option<&str>) -> UResult { if let Some(s) = s { parse_size_u64(s) .map_err(|e| USimpleError::new(1, format_error_message(&e, s, options::BLOCK_SIZE))) } else { for env_var in ["DU_BLOCK_SIZE", "BLOCK_SIZE", "BLOCKSIZE"] { if let Ok(env_size) = env::var(env_var) { if let Ok(v) = parse_size_u64(&env_size) { return Ok(v); } } } if env::var("POSIXLY_CORRECT").is_ok() { Ok(512) } else { Ok(1024) } } } // this takes `my_stat` to avoid having to stat files multiple times. #[allow(clippy::cognitive_complexity)] fn du( mut my_stat: Stat, options: &TraversalOptions, depth: usize, seen_inodes: &mut HashSet, print_tx: &mpsc::Sender>, ) -> Result>>> { if my_stat.metadata.is_dir() { let read = match fs::read_dir(&my_stat.path) { Ok(read) => read, Err(e) => { print_tx.send(Err(e.map_err_context( || translate!("du-error-cannot-read-directory", "path" => my_stat.path.quote()), )))?; return Ok(my_stat); } }; 'file_loop: for f in read { match f { Ok(entry) => { match Stat::new(&entry.path(), Some(&entry), options) { Ok(this_stat) => { // We have an exclude list for pattern in &options.excludes { // Look at all patterns with both short and long paths // if we have 'du foo' but search to exclude 'foo/bar' // we need the full path if pattern.matches(&this_stat.path.to_string_lossy()) || pattern.matches(&entry.file_name().into_string().unwrap()) { // if the directory is ignored, leave early if options.verbose { println!( "{}", translate!("du-verbose-ignored", "path" => 
this_stat.path.quote()) ); } // Go to the next file continue 'file_loop; } } if let Some(inode) = this_stat.inode { // Check if the inode has been seen before and if we should skip it if seen_inodes.contains(&inode) && (!options.count_links || !options.all) { // If `count_links` is enabled and `all` is not, increment the inode count if options.count_links && !options.all { my_stat.inodes += 1; } // Skip further processing for this inode continue; } // Mark this inode as seen seen_inodes.insert(inode); } if this_stat.metadata.is_dir() { if options.one_file_system { if let (Some(this_inode), Some(my_inode)) = (this_stat.inode, my_stat.inode) { if this_inode.dev_id != my_inode.dev_id { continue; } } } let this_stat = du(this_stat, options, depth + 1, seen_inodes, print_tx)?; if !options.separate_dirs { my_stat.size += this_stat.size; my_stat.blocks += this_stat.blocks; my_stat.inodes += this_stat.inodes; } print_tx.send(Ok(StatPrintInfo { stat: this_stat, depth: depth + 1, }))?; } else { my_stat.size += this_stat.size; my_stat.blocks += this_stat.blocks; my_stat.inodes += 1; if options.all { print_tx.send(Ok(StatPrintInfo { stat: this_stat, depth: depth + 1, }))?; } } } Err(e) => print_tx.send(Err(e.map_err_context( || translate!("du-error-cannot-access", "path" => entry.path().quote()), )))?, } } Err(error) => print_tx.send(Err(error.into()))?, } } } Ok(my_stat) } #[derive(Debug, Error)] enum DuError { #[error("{}", translate!("du-error-invalid-max-depth", "depth" => _0.quote()))] InvalidMaxDepthArg(String), #[error("{}", translate!("du-error-summarize-depth-conflict", "depth" => _0.maybe_quote()))] SummarizeDepthConflict(String), #[error("{}", translate!("du-error-invalid-time-style", "style" => _0.quote(), "help" => uucore::execution_phrase()))] InvalidTimeStyleArg(String), #[error("{}", translate!("du-error-invalid-glob", "error" => _0))] InvalidGlob(String), } impl UError for DuError { fn code(&self) -> i32 { match self { Self::InvalidMaxDepthArg(_) | 
Self::SummarizeDepthConflict(_) | Self::InvalidTimeStyleArg(_) | Self::InvalidGlob(_) => 1, } } } /// Read a file and return each line in a vector of String fn file_as_vec(filename: impl AsRef) -> Vec { let file = File::open(filename).expect("no such file"); let buf = BufReader::new(file); buf.lines() .map(|l| l.expect("Could not parse line")) .collect() } /// Given the `--exclude-from` and/or `--exclude` arguments, returns the globset lists /// to ignore the files fn build_exclude_patterns(matches: &ArgMatches) -> UResult> { let exclude_from_iterator = matches .get_many::(options::EXCLUDE_FROM) .unwrap_or_default() .flat_map(file_as_vec); let excludes_iterator = matches .get_many::(options::EXCLUDE) .unwrap_or_default() .cloned(); let mut exclude_patterns = Vec::new(); for f in excludes_iterator.chain(exclude_from_iterator) { if matches.get_flag(options::VERBOSE) { println!( "{}", translate!("du-verbose-adding-to-exclude-list", "pattern" => f.clone()) ); } match parse_glob::from_str(&f) { Ok(glob) => exclude_patterns.push(glob), Err(err) => return Err(DuError::InvalidGlob(err.to_string()).into()), } } Ok(exclude_patterns) } struct StatPrintInfo { stat: Stat, depth: usize, } impl StatPrinter { fn choose_size(&self, stat: &Stat) -> u64 { if self.inodes { stat.inodes } else if self.apparent_size { stat.size } else { // The st_blocks field indicates the number of blocks allocated to the file, 512-byte units. 
// See: http://linux.die.net/man/2/stat stat.blocks * 512 } } fn print_stats(&self, rx: &mpsc::Receiver>) -> UResult<()> { let mut grand_total = 0; loop { let received = rx.recv(); match received { Ok(message) => match message { Ok(stat_info) => { let size = self.choose_size(&stat_info.stat); if stat_info.depth == 0 { grand_total += size; } if !self .threshold .is_some_and(|threshold| threshold.should_exclude(size)) && self .max_depth .is_none_or(|max_depth| stat_info.depth <= max_depth) && (!self.summarize || stat_info.depth == 0) { self.print_stat(&stat_info.stat, size)?; } } Err(e) => show!(e), }, Err(_) => break, } } if self.total { print!("{}\t{}", self.convert_size(grand_total), self.total_text); print!("{}", self.line_ending); } Ok(()) } fn convert_size(&self, size: u64) -> String { match self.size_format { SizeFormat::HumanDecimal => uucore::format::human::human_readable( size, uucore::format::human::SizeFormat::Decimal, ), SizeFormat::HumanBinary => uucore::format::human::human_readable( size, uucore::format::human::SizeFormat::Binary, ), SizeFormat::BlockSize(block_size) => { if self.inodes { // we ignore block size (-B) with --inodes size.to_string() } else { size.div_ceil(block_size).to_string() } } } } fn print_stat(&self, stat: &Stat, size: u64) -> UResult<()> { print!("{}\t", self.convert_size(size)); if let Some(md_time) = &self.time { if let Some(time) = metadata_get_time(&stat.metadata, *md_time) { format_system_time( &mut stdout(), time, &self.time_format, FormatSystemTimeFallback::IntegerError, )?; print!("\t"); } else { print!("???\t"); } } print_verbatim(&stat.path).unwrap(); print!("{}", self.line_ending); Ok(()) } } /// Read file paths from the specified file, separated by null characters fn read_files_from(file_name: &str) -> Result, std::io::Error> { let reader: Box = if file_name == "-" { // Read from standard input Box::new(BufReader::new(std::io::stdin())) } else { // First, check if the file_name is a directory let path = 
PathBuf::from(file_name); if path.is_dir() { return Err(std::io::Error::other( translate!("du-error-read-error-is-directory", "file" => file_name), )); } // Attempt to open the file and handle the error if it does not exist match File::open(file_name) { Ok(file) => Box::new(BufReader::new(file)), Err(e) if e.kind() == std::io::ErrorKind::NotFound => { return Err(std::io::Error::other( translate!("du-error-cannot-open-for-reading", "file" => file_name), )); } Err(e) => return Err(e), } }; let mut paths = Vec::new(); for (i, line) in reader.split(b'\0').enumerate() { let path = line?; if path.is_empty() { let line_number = i + 1; show_error!( "{}", translate!("du-error-invalid-zero-length-file-name", "file" => file_name, "line" => line_number) ); set_exit_code(1); } else { let p = PathBuf::from(String::from_utf8_lossy(&path).to_string()); if !paths.contains(&p) { paths.push(p); } } } Ok(paths) } #[uucore::main] #[allow(clippy::cognitive_complexity)] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let summarize = matches.get_flag(options::SUMMARIZE); let count_links = matches.get_flag(options::COUNT_LINKS); let max_depth = parse_depth( matches .get_one::(options::MAX_DEPTH) .map(|s| s.as_str()), summarize, )?; let files = if let Some(file_from) = matches.get_one::(options::FILES0_FROM) { if file_from == "-" && matches.get_one::(options::FILE).is_some() { return Err(std::io::Error::other( translate!("du-error-extra-operand-with-files0-from", "file" => matches .get_one::(options::FILE) .unwrap() .quote() ), ) .into()); } read_files_from(file_from)? 
} else if let Some(files) = matches.get_many::(options::FILE) { let files = files.map(PathBuf::from); if count_links { files.collect() } else { // Deduplicate while preserving order let mut seen = HashSet::new(); files .filter(|path| seen.insert(path.clone())) .collect::>() } } else { vec![PathBuf::from(".")] }; let time = matches.contains_id(options::TIME).then(|| { matches .get_one::(options::TIME) .map_or(MetadataTimeField::Modification, |s| s.as_str().into()) }); let size_format = if matches.get_flag(options::HUMAN_READABLE) { SizeFormat::HumanBinary } else if matches.get_flag(options::SI) { SizeFormat::HumanDecimal } else if matches.get_flag(options::BYTES) { SizeFormat::BlockSize(1) } else if matches.get_flag(options::BLOCK_SIZE_1K) { SizeFormat::BlockSize(1024) } else if matches.get_flag(options::BLOCK_SIZE_1M) { SizeFormat::BlockSize(1024 * 1024) } else { let block_size_str = matches.get_one::(options::BLOCK_SIZE); let block_size = read_block_size(block_size_str.map(AsRef::as_ref))?; if block_size == 0 { return Err(std::io::Error::other(translate!("du-error-invalid-block-size-argument", "option" => options::BLOCK_SIZE, "value" => block_size_str.map_or("???BUG", |v| v).quote())) .into()); } SizeFormat::BlockSize(block_size) }; let traversal_options = TraversalOptions { all: matches.get_flag(options::ALL), separate_dirs: matches.get_flag(options::SEPARATE_DIRS), one_file_system: matches.get_flag(options::ONE_FILE_SYSTEM), dereference: if matches.get_flag(options::DEREFERENCE) { Deref::All } else if matches.get_flag(options::DEREFERENCE_ARGS) { // We don't care about the cost of cloning as it is rarely used Deref::Args(files.clone()) } else { Deref::None }, count_links, verbose: matches.get_flag(options::VERBOSE), excludes: build_exclude_patterns(&matches)?, }; let time_format = if time.is_some() { parse_time_style(matches.get_one::("time-style"))? 
} else { format::LONG_ISO.to_string() }; let stat_printer = StatPrinter { max_depth, size_format, summarize, total: matches.get_flag(options::TOTAL), inodes: matches.get_flag(options::INODES), threshold: matches .get_one::(options::THRESHOLD) .map(|s| { Threshold::from_str(s).map_err(|e| { USimpleError::new(1, format_error_message(&e, s, options::THRESHOLD)) }) }) .transpose()?, apparent_size: matches.get_flag(options::APPARENT_SIZE) || matches.get_flag(options::BYTES), time, time_format, line_ending: LineEnding::from_zero_flag(matches.get_flag(options::NULL)), total_text: translate!("du-total"), }; if stat_printer.inodes && (matches.get_flag(options::APPARENT_SIZE) || matches.get_flag(options::BYTES)) { show_warning!( "{}", translate!("du-warning-apparent-size-ineffective-with-inodes") ); } // Use separate thread to print output, so we can print finished results while computation is still running let (print_tx, rx) = mpsc::channel::>(); let printing_thread = thread::spawn(move || stat_printer.print_stats(&rx)); 'loop_file: for path in files { // Skip if we don't want to ignore anything if !&traversal_options.excludes.is_empty() { let path_string = path.to_string_lossy(); for pattern in &traversal_options.excludes { if pattern.matches(&path_string) { // if the directory is ignored, leave early if traversal_options.verbose { println!( "{}", translate!("du-verbose-ignored", "path" => path_string.quote()) ); } continue 'loop_file; } } } // Check existence of path provided in argument if let Ok(stat) = Stat::new(&path, None, &traversal_options) { // Kick off the computation of disk usage from the initial path let mut seen_inodes: HashSet = HashSet::new(); if let Some(inode) = stat.inode { seen_inodes.insert(inode); } let stat = du(stat, &traversal_options, 0, &mut seen_inodes, &print_tx) .map_err(|e| USimpleError::new(1, e.to_string()))?; print_tx .send(Ok(StatPrintInfo { stat, depth: 0 })) .map_err(|e| USimpleError::new(1, e.to_string()))?; } else { print_tx 
.send(Err(USimpleError::new( 1, translate!("du-error-cannot-access-no-such-file", "path" => path.to_string_lossy().quote()), ))) .map_err(|e| USimpleError::new(1, e.to_string()))?; } } drop(print_tx); printing_thread .join() .map_err(|_| USimpleError::new(1, translate!("du-error-printing-thread-panicked")))??; Ok(()) } // Parse --time-style argument, falling back to environment variable if necessary. fn parse_time_style(s: Option<&String>) -> UResult { let s = match s { Some(s) => Some(s.into()), None => { match env::var("TIME_STYLE") { // Per GNU manual, strip `posix-` if present, ignore anything after a newline if // the string starts with +, and ignore "locale". Ok(s) => { let s = s.strip_prefix("posix-").unwrap_or(s.as_str()); let s = match s.chars().next().unwrap() { '+' => s.split('\n').next().unwrap(), _ => s, }; match s { "locale" => None, _ => Some(s.to_string()), } } Err(_) => None, } } }; match s { Some(s) => match s.as_ref() { "full-iso" => Ok(format::FULL_ISO.to_string()), "long-iso" => Ok(format::LONG_ISO.to_string()), "iso" => Ok(format::ISO.to_string()), _ => match s.chars().next().unwrap() { '+' => Ok(s[1..].to_string()), _ => Err(DuError::InvalidTimeStyleArg(s).into()), }, }, None => Ok(format::LONG_ISO.to_string()), } } fn parse_depth(max_depth_str: Option<&str>, summarize: bool) -> UResult> { let max_depth = max_depth_str.as_ref().and_then(|s| s.parse::().ok()); match (max_depth_str, max_depth) { (Some(s), _) if summarize => Err(DuError::SummarizeDepthConflict(s.into()).into()), (Some(s), None) => Err(DuError::InvalidMaxDepthArg(s.into()).into()), (Some(_), Some(_)) | (None, _) => Ok(max_depth), } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("du-about")) .after_help(translate!("du-after-help")) .override_usage(format_usage(&translate!("du-usage"))) .infer_long_args(true) .disable_help_flag(true) .arg( Arg::new(options::HELP) .long(options::HELP) 
.help(translate!("du-help-print-help")) .action(ArgAction::Help), ) .arg( Arg::new(options::ALL) .short('a') .long(options::ALL) .help(translate!("du-help-all")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::APPARENT_SIZE) .long(options::APPARENT_SIZE) .help(translate!("du-help-apparent-size")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::BLOCK_SIZE) .short('B') .long(options::BLOCK_SIZE) .value_name("SIZE") .help(translate!("du-help-block-size")), ) .arg( Arg::new(options::BYTES) .short('b') .long("bytes") .help(translate!("du-help-bytes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::TOTAL) .long("total") .short('c') .help(translate!("du-help-total")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::MAX_DEPTH) .short('d') .long("max-depth") .value_name("N") .help(translate!("du-help-max-depth")), ) .arg( Arg::new(options::HUMAN_READABLE) .long("human-readable") .short('h') .help(translate!("du-help-human-readable")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::INODES) .long(options::INODES) .help(translate!("du-help-inodes")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::BLOCK_SIZE_1K) .short('k') .help(translate!("du-help-block-size-1k")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::COUNT_LINKS) .short('l') .long("count-links") .help(translate!("du-help-count-links")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DEREFERENCE) .short('L') .long(options::DEREFERENCE) .help(translate!("du-help-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::DEREFERENCE_ARGS) .short('D') .visible_short_alias('H') .long(options::DEREFERENCE_ARGS) .help(translate!("du-help-dereference-args")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_DEREFERENCE) .short('P') .long(options::NO_DEREFERENCE) .help(translate!("du-help-no-dereference")) .overrides_with(options::DEREFERENCE) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::BLOCK_SIZE_1M) .short('m') 
.help(translate!("du-help-block-size-1m")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NULL) .short('0') .long("null") .help(translate!("du-help-null")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SEPARATE_DIRS) .short('S') .long("separate-dirs") .help(translate!("du-help-separate-dirs")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SUMMARIZE) .short('s') .long("summarize") .help(translate!("du-help-summarize")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SI) .long(options::SI) .help(translate!("du-help-si")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ONE_FILE_SYSTEM) .short('x') .long(options::ONE_FILE_SYSTEM) .help(translate!("du-help-one-file-system")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::THRESHOLD) .short('t') .long(options::THRESHOLD) .value_name("SIZE") .num_args(1) .allow_hyphen_values(true) .help(translate!("du-help-threshold")), ) .arg( Arg::new(options::VERBOSE) .short('v') .long("verbose") .help(translate!("du-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::EXCLUDE) .long(options::EXCLUDE) .value_name("PATTERN") .help(translate!("du-help-exclude")) .action(ArgAction::Append), ) .arg( Arg::new(options::EXCLUDE_FROM) .short('X') .long("exclude-from") .value_name("FILE") .value_hint(clap::ValueHint::FilePath) .help(translate!("du-help-exclude-from")) .action(ArgAction::Append), ) .arg( Arg::new(options::FILES0_FROM) .long("files0-from") .value_name("FILE") .value_hint(clap::ValueHint::FilePath) .help(translate!("du-help-files0-from")) .action(ArgAction::Append), ) .arg( Arg::new(options::TIME) .long(options::TIME) .value_name("WORD") .require_equals(true) .num_args(0..) 
.value_parser(ShortcutValueParser::new([ PossibleValue::new("atime").alias("access").alias("use"), PossibleValue::new("ctime").alias("status"), PossibleValue::new("creation").alias("birth"), ])) .help(translate!("du-help-time")), ) .arg( Arg::new(options::TIME_STYLE) .long(options::TIME_STYLE) .value_name("STYLE") .help(translate!("du-help-time-style")), ) .arg( Arg::new(options::FILE) .hide(true) .value_hint(clap::ValueHint::AnyPath) .action(ArgAction::Append), ) } #[derive(Clone, Copy)] enum Threshold { Lower(u64), Upper(u64), } impl FromStr for Threshold { type Err = ParseSizeError; fn from_str(s: &str) -> Result { let offset = usize::from(s.starts_with(&['-', '+'][..])); let size = parse_size_u64(&s[offset..])?; if s.starts_with('-') { // Threshold of '-0' excludes everything besides 0 sized entries // GNU's du treats '-0' as an invalid argument if size == 0 { return Err(ParseSizeError::ParseFailure(s.to_string())); } Ok(Self::Upper(size)) } else { Ok(Self::Lower(size)) } } } impl Threshold { fn should_exclude(&self, size: u64) -> bool { match *self { Self::Upper(threshold) => size > threshold, Self::Lower(threshold) => size < threshold, } } } fn format_error_message(error: &ParseSizeError, s: &str, option: &str) -> String { // NOTE: // GNU's du echos affected flag, -B or --block-size (-t or --threshold), depending user's selection match error { ParseSizeError::InvalidSuffix(_) => { translate!("du-error-invalid-suffix", "option" => option, "value" => s.quote()) } ParseSizeError::ParseFailure(_) | ParseSizeError::PhysicalMem(_) => { translate!("du-error-invalid-argument", "option" => option, "value" => s.quote()) } ParseSizeError::SizeTooBig(_) => { translate!("du-error-argument-too-large", "option" => option, "value" => s.quote()) } } } #[cfg(test)] mod test_du { #[allow(unused_imports)] use super::*; #[test] fn test_read_block_size() { let test_data = [Some("1024".to_string()), Some("K".to_string()), None]; for it in &test_data { 
assert!(matches!(read_block_size(it.as_deref()), Ok(1024))); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/du/src/main.rs000066400000000000000000000000251504311601400246520ustar00rootroot00000000000000uucore::bin!(uu_du); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/000077500000000000000000000000001504311601400231025ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/Cargo.toml000066400000000000000000000010761504311601400250360ustar00rootroot00000000000000[package] name = "uu_echo" description = "echo ~ (uutils) display TEXT" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/echo" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/echo.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["format"] } fluent = { workspace = true } [[bin]] name = "echo" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/LICENSE000077700000000000000000000000001504311601400257502../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/locales/000077500000000000000000000000001504311601400245245ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/locales/en-US.ftl000066400000000000000000000014511504311601400261630ustar00rootroot00000000000000echo-about = Display a line of text echo-usage = echo [OPTIONS]... [STRING]... echo-after-help = Echo the STRING(s) to standard output. 
If -e is in effect, the following sequences are recognized: - \ backslash - \a alert (BEL) - \b backspace - \c produce no further output - \e escape - \f form feed - \n new line - \r carriage return - \t horizontal tab - \v vertical tab - \0NNN byte with octal value NNN (1 to 3 digits) - \xHH byte with hexadecimal value HH (1 to 2 digits) echo-help-no-newline = do not output the trailing newline echo-help-enable-escapes = enable interpretation of backslash escapes echo-help-disable-escapes = disable interpretation of backslash escapes (default) echo-error-non-utf8 = Non-UTF-8 arguments provided, but this platform does not support them coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/locales/fr-FR.ftl000066400000000000000000000016671504311601400261610ustar00rootroot00000000000000echo-about = Affiche une ligne de texte echo-usage = echo [OPTIONS]... [CHAÃŽNE]... echo-after-help = Affiche la ou les CHAÃŽNE(s) sur la sortie standard. Si -e est activé, les séquences suivantes sont reconnues : - \ barre oblique inverse - \a alerte (BEL) - \b retour arrière - \c ne produit aucune sortie supplémentaire - \e échappement - \f saut de page - \n nouvelle ligne - \r retour chariot - \t tabulation horizontale - \v tabulation verticale - \0NNN octet avec valeur octale NNN (1 à 3 chiffres) - \xHH octet avec valeur hexadécimale HH (1 à 2 chiffres) echo-help-no-newline = ne pas afficher la nouvelle ligne finale echo-help-enable-escapes = activer l'interprétation des séquences d'échappement echo-help-disable-escapes = désactiver l'interprétation des séquences d'échappement (par défaut) echo-error-non-utf8 = Arguments non-UTF-8 fournis, mais cette plateforme ne les prend pas en charge 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/src/000077500000000000000000000000001504311601400236715ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/src/echo.rs000066400000000000000000000204031504311601400251540ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::builder::ValueParser; use clap::{Arg, ArgAction, Command}; use std::env; use std::ffi::{OsStr, OsString}; use std::io::{self, StdoutLock, Write}; use uucore::error::UResult; use uucore::format::{FormatChar, OctalParsing, parse_escape_only}; use uucore::{format_usage, os_str_as_bytes}; use uucore::translate; mod options { pub const STRING: &str = "STRING"; pub const NO_NEWLINE: &str = "no_newline"; pub const ENABLE_BACKSLASH_ESCAPE: &str = "enable_backslash_escape"; pub const DISABLE_BACKSLASH_ESCAPE: &str = "disable_backslash_escape"; } /// Options for the echo command. #[derive(Debug, Clone, Copy)] struct Options { /// Whether the output should have a trailing newline. /// /// True by default. `-n` disables it. pub trailing_newline: bool, /// Whether given string literals should be parsed for /// escape characters. /// /// False by default, can be enabled with `-e`. Always true if /// `POSIXLY_CORRECT` (cannot be disabled with `-E`). pub escape: bool, } impl Default for Options { fn default() -> Self { Self { trailing_newline: true, escape: false, } } } impl Options { fn posixly_correct_default() -> Self { Self { trailing_newline: true, escape: true, } } } /// Checks if an argument is a valid echo flag, and if /// it is records the changes in [`Options`]. fn is_flag(arg: &OsStr, options: &mut Options) -> bool { let arg = arg.as_encoded_bytes(); if arg.first() != Some(&b'-') || arg == b"-" { // Argument doesn't start with '-' or is '-' => not a flag. 
return false; } // We don't modify the given options until after // the loop because there is a chance the flag isn't // valid after all & shouldn't affect the options. let mut options_: Options = *options; // Skip the '-' when processing characters. for c in &arg[1..] { match c { b'e' => options_.escape = true, b'E' => options_.escape = false, b'n' => options_.trailing_newline = false, // If there is any character in an supposed flag // that is not a valid flag character, it is not // a flag. // // "-eeEnEe" => is a flag. // "-eeBne" => not a flag, short circuit at the B. _ => return false, } } // We are now sure that the argument is a // flag, and can apply the modified options. *options = options_; true } /// Processes command line arguments, separating flags from normal arguments. /// /// # Returns /// /// - Vector of non-flag arguments. /// - [`Options`], describing how teh arguments should be interpreted. fn filter_flags(mut args: impl Iterator) -> (Vec, Options) { let mut arguments = Vec::with_capacity(args.size_hint().0); let mut options = Options::default(); // Process arguments until first non-flag is found. for arg in &mut args { // We parse flags and aggregate the options in `options`. // First call to `is_echo_flag` to return false will break the loop. if !is_flag(&arg, &mut options) { // Not a flag. Can break out of flag-processing loop. // Don't forget to push it to the arguments too. arguments.push(arg); break; } } // Collect remaining non-flag arguments. arguments.extend(args); (arguments, options) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { // args[0] is the name of the binary. let args: Vec = args.skip(1).collect(); // Check POSIX compatibility mode // // From the GNU manual, on what it should do: // // > If the POSIXLY_CORRECT environment variable is set, then when // > echo’s first argument is not -n it outputs option-like arguments // > instead of treating them as options. 
For example, echo -ne hello // > outputs ‘-ne hello’ instead of plain ‘hello’. Also backslash // > escapes are always enabled. To echo the string ‘-n’, one of the // > characters can be escaped in either octal or hexadecimal // > representation. For example, echo -e '\x2dn'. let is_posixly_correct = env::var_os("POSIXLY_CORRECT").is_some(); let (args, options) = if is_posixly_correct { if args.first().is_some_and(|arg| arg == "-n") { // if POSIXLY_CORRECT is set and the first argument is the "-n" flag // we filter flags normally but 'escaped' is activated nonetheless. let (args, _) = filter_flags(args.into_iter()); ( args, Options { trailing_newline: false, ..Options::posixly_correct_default() }, ) } else { // if POSIXLY_CORRECT is set and the first argument is not the "-n" flag // we just collect all arguments as no arguments are interpreted as flags. (args, Options::posixly_correct_default()) } } else if args.len() == 1 && args[0] == "--help" { // If POSIXLY_CORRECT is not set and the first argument // is `--help`, GNU coreutils prints the help message. // // Verify this using: // // POSIXLY_CORRECT=1 echo --help // echo --help uu_app().print_help()?; return Ok(()); } else if args.len() == 1 && args[0] == "--version" { print!("{}", uu_app().render_version()); return Ok(()); } else { // if POSIXLY_CORRECT is not set we filter the flags normally filter_flags(args.into_iter()) }; execute(&mut io::stdout().lock(), args, options)?; Ok(()) } pub fn uu_app() -> Command { // Note: echo is different from the other utils in that it should **not** // have `infer_long_args(true)`, because, for example, `--ver` should be // printed as `--ver` and not show the version text. Command::new(uucore::util_name()) // TrailingVarArg specifies the final positional argument is a VarArg // and it doesn't attempts the parse any further args. // Final argument must have multiple(true) or the usage string equivalent. 
.trailing_var_arg(true) .allow_hyphen_values(true) .version(uucore::crate_version!()) .about(translate!("echo-about")) .after_help(translate!("echo-after-help")) .override_usage(format_usage(&translate!("echo-usage"))) .arg( Arg::new(options::NO_NEWLINE) .short('n') .help(translate!("echo-help-no-newline")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ENABLE_BACKSLASH_ESCAPE) .short('e') .help(translate!("echo-help-enable-escapes")) .action(ArgAction::SetTrue) .overrides_with(options::DISABLE_BACKSLASH_ESCAPE), ) .arg( Arg::new(options::DISABLE_BACKSLASH_ESCAPE) .short('E') .help(translate!("echo-help-disable-escapes")) .action(ArgAction::SetTrue) .overrides_with(options::ENABLE_BACKSLASH_ESCAPE), ) .arg( Arg::new(options::STRING) .action(ArgAction::Append) .value_parser(ValueParser::os_string()), ) } fn execute(stdout: &mut StdoutLock, args: Vec, options: Options) -> UResult<()> { for (i, arg) in args.into_iter().enumerate() { let bytes = os_str_as_bytes(&arg)?; // Don't print a space before the first argument if i > 0 { stdout.write_all(b" ")?; } if options.escape { for item in parse_escape_only(bytes, OctalParsing::ThreeDigits) { if item.write(&mut *stdout)?.is_break() { return Ok(()); } } } else { stdout.write_all(bytes)?; } } if options.trailing_newline { stdout.write_all(b"\n")?; } Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/echo/src/main.rs000066400000000000000000000000271504311601400251620ustar00rootroot00000000000000uucore::bin!(uu_echo); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/000077500000000000000000000000001504311601400227545ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/Cargo.toml000066400000000000000000000013761504311601400247130ustar00rootroot00000000000000[package] name = "uu_env" description = "env ~ (uutils) set each NAME to VALUE in the environment and run COMMAND" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/env" 
version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/env.rs" [dependencies] clap = { workspace = true } rust-ini = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = ["signals"] } fluent = { workspace = true } [target.'cfg(unix)'.dependencies] nix = { workspace = true, features = ["signal"] } [[bin]] name = "env" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/LICENSE000077700000000000000000000000001504311601400256222../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/locales/000077500000000000000000000000001504311601400243765ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/locales/en-US.ftl000066400000000000000000000061661504311601400260450ustar00rootroot00000000000000env-about = Set each NAME to VALUE in the environment and run COMMAND env-usage = env [OPTION]... [-] [NAME=VALUE]... [COMMAND [ARG]...] env-after-help = A mere - implies -i. If no COMMAND, print the resulting environment. # Help messages env-help-ignore-environment = start with an empty environment env-help-chdir = change working directory to DIR env-help-null = end each output line with a 0 byte rather than a newline (only valid when printing the environment) env-help-file = read and set variables from a ".env"-style configuration file (prior to any unset and/or set) env-help-unset = remove variable from the environment env-help-debug = print verbose information for each processing step env-help-split-string = process and split S into separate arguments; used to pass multiple arguments on shebang lines env-help-argv0 = Override the zeroth argument passed to the command being executed. 
Without this option a default value of `command` is used. env-help-ignore-signal = set handling of SIG signal(s) to do nothing # Error messages env-error-missing-closing-quote = no terminating quote in -S string at position { $position } for quote '{ $quote }' env-error-invalid-backslash-at-end = invalid backslash at end of string in -S at position { $position } in context { $context } env-error-backslash-c-not-allowed = '\c' must not appear in double-quoted -S string at position { $position } env-error-invalid-sequence = invalid sequence '\{ $char }' in -S at position { $position } env-error-missing-closing-brace = Missing closing brace at position { $position } env-error-missing-variable = Missing variable name at position { $position } env-error-missing-closing-brace-after-value = Missing closing brace after default value at position { $position } env-error-unexpected-number = Unexpected character: '{ $char }', expected variable name must not start with 0..9 at position { $position } env-error-expected-brace-or-colon = Unexpected character: '{ $char }', expected a closing brace ('{"}"}') or colon (':') at position { $position } env-error-cannot-specify-null-with-command = cannot specify --null (-0) with command env-error-invalid-signal = { $signal }: invalid signal env-error-config-file = { $file }: { $error } env-error-variable-name-issue = variable name issue (at { $position }): { $error } env-error-generic = Error: { $error } env-error-no-such-file = { $program }: No such file or directory env-error-use-s-shebang = use -[v]S to pass options in shebang lines env-error-cannot-unset = cannot unset '{ $name }': Invalid argument env-error-cannot-unset-invalid = cannot unset { $name }: Invalid argument env-error-must-specify-command-with-chdir = must specify command with --chdir (-C) env-error-cannot-change-directory = cannot change directory to { $directory }: { $error } env-error-argv0-not-supported = --argv0 is currently not supported on this platform 
env-error-permission-denied = { $program }: Permission denied env-error-unknown = unknown error: { $error } env-error-failed-set-signal-action = failed to set signal action for signal { $signal }: { $error } # Warning messages env-warning-no-name-specified = no name specified for value { $value } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/locales/fr-FR.ftl000066400000000000000000000072131504311601400260240ustar00rootroot00000000000000env-about = Définir chaque NOM à VALEUR dans l'environnement et exécuter COMMANDE env-usage = env [OPTION]... [-] [NOM=VALEUR]... [COMMANDE [ARG]...] env-after-help = Un simple - implique -i. Si aucune COMMANDE, afficher l'environnement résultant. # Messages d'aide env-help-ignore-environment = commencer avec un environnement vide env-help-chdir = changer le répertoire de travail vers RÉP env-help-null = terminer chaque ligne de sortie avec un octet 0 plutôt qu'un retour à la ligne (valide uniquement lors de l'affichage de l'environnement) env-help-file = lire et définir les variables à partir d'un fichier de configuration de style ".env" (avant toute suppression et/ou définition) env-help-unset = supprimer la variable de l'environnement env-help-debug = afficher des informations détaillées pour chaque étape de traitement env-help-split-string = traiter et diviser S en arguments séparés ; utilisé pour passer plusieurs arguments sur les lignes shebang env-help-argv0 = Remplacer le zéroième argument passé à la commande en cours d'exécution. Sans cette option, une valeur par défaut de `command` est utilisée. 
env-help-ignore-signal = définir la gestion du/des signal/signaux SIG pour ne rien faire # Messages d'erreur env-error-missing-closing-quote = aucune guillemet de fermeture dans la chaîne -S à la position { $position } pour la guillemet '{ $quote }' env-error-invalid-backslash-at-end = barre oblique inverse invalide à la fin de la chaîne dans -S à la position { $position } dans le contexte { $context } env-error-backslash-c-not-allowed = '\\c' ne doit pas apparaître dans une chaîne -S entre guillemets doubles à la position { $position } env-error-invalid-sequence = séquence invalide '\\{ $char }' dans -S à la position { $position } env-error-missing-closing-brace = Accolade fermante manquante à la position { $position } env-error-missing-variable = Nom de variable manquant à la position { $position } env-error-missing-closing-brace-after-value = Accolade fermante manquante après la valeur par défaut à la position { $position } env-error-unexpected-number = Caractère inattendu : '{ $char }', le nom de variable attendu ne doit pas commencer par 0..9 à la position { $position } env-error-expected-brace-or-colon = Caractère inattendu : '{ $char }', accolade fermante ('}') ou deux-points (':') attendu à la position { $position } env-error-cannot-specify-null-with-command = impossible de spécifier --null (-0) avec une commande env-error-invalid-signal = { $signal } : signal invalide env-error-config-file = { $file } : { $error } env-error-variable-name-issue = problème de nom de variable (à { $position }) : { $error } env-error-generic = Erreur : { $error } env-error-no-such-file = { $program } : Aucun fichier ou répertoire de ce type env-error-use-s-shebang = utilisez -[v]S pour passer des options dans les lignes shebang env-error-cannot-unset = impossible de supprimer '{ $name }' : Argument invalide env-error-cannot-unset-invalid = impossible de supprimer { $name } : Argument invalide env-error-must-specify-command-with-chdir = doit spécifier une commande avec --chdir 
(-C) env-error-cannot-change-directory = impossible de changer de répertoire vers { $directory } : { $error } env-error-argv0-not-supported = --argv0 n'est actuellement pas supporté sur cette plateforme env-error-permission-denied = { $program } : Permission refusée env-error-unknown = erreur inconnue : { $error } env-error-failed-set-signal-action = échec de la définition de l'action du signal pour le signal { $signal } : { $error } # Messages d'avertissement env-warning-no-name-specified = aucun nom spécifié pour la valeur { $value } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/000077500000000000000000000000001504311601400235435ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/env.rs000066400000000000000000001036111504311601400247030ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) chdir execvp progname subcommand subcommands unsets setenv putenv spawnp SIGSEGV SIGBUS sigaction pub mod native_int_str; pub mod split_iterator; pub mod string_expander; pub mod string_parser; pub mod variable_parser; use clap::builder::ValueParser; use clap::{Arg, ArgAction, Command, crate_name}; use ini::Ini; use native_int_str::{ Convert, NCvt, NativeIntStr, NativeIntString, NativeStr, from_native_int_representation_owned, }; #[cfg(unix)] use nix::libc; #[cfg(unix)] use nix::sys::signal::{SigHandler::SigIgn, Signal, signal}; #[cfg(unix)] use nix::unistd::execvp; use std::borrow::Cow; use std::env; #[cfg(unix)] use std::ffi::CString; use std::ffi::{OsStr, OsString}; use std::io::{self, Write}; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; use uucore::display::Quotable; use uucore::error::{ExitCode, UError, UResult, USimpleError, UUsageError}; use uucore::line_ending::LineEnding; #[cfg(unix)] use uucore::signals::signal_by_name_or_value; use uucore::translate; use uucore::{format_usage, show_warning}; use thiserror::Error; #[derive(Debug, Error, PartialEq)] pub enum EnvError { #[error("{}", translate!("env-error-missing-closing-quote", "position" => .0, "quote" => .1))] EnvMissingClosingQuote(usize, char), #[error("{}", translate!("env-error-invalid-backslash-at-end", "position" => .0, "context" => .1.clone()))] EnvInvalidBackslashAtEndOfStringInMinusS(usize, String), #[error("{}", translate!("env-error-backslash-c-not-allowed", "position" => .0))] EnvBackslashCNotAllowedInDoubleQuotes(usize), #[error("{}", translate!("env-error-invalid-sequence", "position" => .0, "char" => .1))] EnvInvalidSequenceBackslashXInMinusS(usize, char), #[error("{}", translate!("env-error-missing-closing-brace", "position" => .0))] EnvParsingOfVariableMissingClosingBrace(usize), #[error("{}", translate!("env-error-missing-variable", "position" => .0))] EnvParsingOfMissingVariable(usize), #[error("{}", 
translate!("env-error-missing-closing-brace-after-value", "position" => .0))] EnvParsingOfVariableMissingClosingBraceAfterValue(usize), #[error("{}", translate!("env-error-unexpected-number", "position" => .0, "char" => .1.clone()))] EnvParsingOfVariableUnexpectedNumber(usize, String), #[error("{}", translate!("env-error-expected-brace-or-colon", "position" => .0, "char" => .1.clone()))] EnvParsingOfVariableExceptedBraceOrColon(usize, String), #[error("")] EnvReachedEnd, #[error("")] EnvContinueWithDelimiter, #[error("{}{:?}",.0,.1)] EnvInternalError(usize, string_parser::Error), } impl From for EnvError { fn from(value: string_parser::Error) -> Self { EnvError::EnvInternalError(value.peek_position, value) } } mod options { pub const IGNORE_ENVIRONMENT: &str = "ignore-environment"; pub const CHDIR: &str = "chdir"; pub const NULL: &str = "null"; pub const FILE: &str = "file"; pub const UNSET: &str = "unset"; pub const DEBUG: &str = "debug"; pub const SPLIT_STRING: &str = "split-string"; pub const ARGV0: &str = "argv0"; pub const IGNORE_SIGNAL: &str = "ignore-signal"; } struct Options<'a> { ignore_env: bool, line_ending: LineEnding, running_directory: Option<&'a OsStr>, files: Vec<&'a OsStr>, unsets: Vec<&'a OsStr>, sets: Vec<(Cow<'a, OsStr>, Cow<'a, OsStr>)>, program: Vec<&'a OsStr>, argv0: Option<&'a OsStr>, #[cfg(unix)] ignore_signal: Vec, } /// print `name=value` env pairs on screen /// if null is true, separate pairs with a \0, \n otherwise fn print_env(line_ending: LineEnding) { let stdout_raw = io::stdout(); let mut stdout = stdout_raw.lock(); for (n, v) in env::vars() { write!(stdout, "{n}={v}{line_ending}").unwrap(); } } fn parse_name_value_opt<'a>(opts: &mut Options<'a>, opt: &'a OsStr) -> UResult { // is it a NAME=VALUE like opt ? 
let wrap = NativeStr::<'a>::new(opt); let split_o = wrap.split_once(&'='); if let Some((name, value)) = split_o { // yes, so push name, value pair opts.sets.push((name, value)); Ok(false) } else { // no, it's a program-like opt parse_program_opt(opts, opt).map(|_| true) } } fn parse_program_opt<'a>(opts: &mut Options<'a>, opt: &'a OsStr) -> UResult<()> { if opts.line_ending == LineEnding::Nul { Err(UUsageError::new( 125, translate!("env-error-cannot-specify-null-with-command"), )) } else { opts.program.push(opt); Ok(()) } } #[cfg(unix)] fn parse_signal_value(signal_name: &str) -> UResult { let signal_name_upcase = signal_name.to_uppercase(); let optional_signal_value = signal_by_name_or_value(&signal_name_upcase); let error = USimpleError::new( 125, translate!("env-error-invalid-signal", "signal" => signal_name.quote()), ); match optional_signal_value { Some(sig_val) => { if sig_val == 0 { Err(error) } else { Ok(sig_val) } } None => Err(error), } } #[cfg(unix)] fn parse_signal_opt<'a>(opts: &mut Options<'a>, opt: &'a OsStr) -> UResult<()> { if opt.is_empty() { return Ok(()); } let signals: Vec<&'a OsStr> = opt .as_bytes() .split(|&b| b == b',') .map(OsStr::from_bytes) .collect(); let mut sig_vec = Vec::with_capacity(signals.len()); for sig in signals { if !sig.is_empty() { sig_vec.push(sig); } } for sig in sig_vec { let Some(sig_str) = sig.to_str() else { return Err(USimpleError::new( 1, translate!("env-error-invalid-signal", "signal" => sig.quote()), )); }; let sig_val = parse_signal_value(sig_str)?; if !opts.ignore_signal.contains(&sig_val) { opts.ignore_signal.push(sig_val); } } Ok(()) } fn load_config_file(opts: &mut Options) -> UResult<()> { // NOTE: config files are parsed using an INI parser b/c it's available and compatible with ".env"-style files // ... 
* but support for actual INI files, although working, is not intended, nor claimed for &file in &opts.files { let conf = if file == "-" { let stdin = io::stdin(); let mut stdin_locked = stdin.lock(); Ini::read_from(&mut stdin_locked) } else { Ini::load_from_file(file) }; let conf = conf.map_err(|e| { USimpleError::new( 1, translate!("env-error-config-file", "file" => file.maybe_quote(), "error" => e), ) })?; for (_, prop) in &conf { // ignore all INI section lines (treat them as comments) for (key, value) in prop { unsafe { env::set_var(key, value); } } } } Ok(()) } pub fn uu_app() -> Command { Command::new(crate_name!()) .version(uucore::crate_version!()) .about(translate!("env-about")) .override_usage(format_usage(&translate!("env-usage"))) .after_help(translate!("env-after-help")) .infer_long_args(true) .trailing_var_arg(true) .arg( Arg::new(options::IGNORE_ENVIRONMENT) .short('i') .long(options::IGNORE_ENVIRONMENT) .help(translate!("env-help-ignore-environment")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CHDIR) .short('C') // GNU env compatibility .long(options::CHDIR) .number_of_values(1) .value_name("DIR") .value_parser(ValueParser::os_string()) .value_hint(clap::ValueHint::DirPath) .help(translate!("env-help-chdir")), ) .arg( Arg::new(options::NULL) .short('0') .long(options::NULL) .help(translate!("env-help-null")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILE) .short('f') .long(options::FILE) .value_name("PATH") .value_hint(clap::ValueHint::FilePath) .value_parser(ValueParser::os_string()) .action(ArgAction::Append) .help(translate!("env-help-file")), ) .arg( Arg::new(options::UNSET) .short('u') .long(options::UNSET) .value_name("NAME") .action(ArgAction::Append) .value_parser(ValueParser::os_string()) .help(translate!("env-help-unset")), ) .arg( Arg::new(options::DEBUG) .short('v') .long(options::DEBUG) .action(ArgAction::Count) .help(translate!("env-help-debug")), ) .arg( Arg::new(options::SPLIT_STRING) // split string handling 
is implemented directly, not using CLAP. But this entry here is needed for the help information output. .short('S') .long(options::SPLIT_STRING) .value_name("S") .action(ArgAction::Set) .value_parser(ValueParser::os_string()) .help(translate!("env-help-split-string")), ) .arg( Arg::new(options::ARGV0) .overrides_with(options::ARGV0) .short('a') .long(options::ARGV0) .value_name("a") .action(ArgAction::Set) .value_parser(ValueParser::os_string()) .help(translate!("env-help-argv0")), ) .arg( Arg::new("vars") .action(ArgAction::Append) .value_parser(ValueParser::os_string()), ) .arg( Arg::new(options::IGNORE_SIGNAL) .long(options::IGNORE_SIGNAL) .value_name("SIG") .action(ArgAction::Append) .value_parser(ValueParser::os_string()) .help(translate!("env-help-ignore-signal")), ) } pub fn parse_args_from_str(text: &NativeIntStr) -> UResult> { split_iterator::split(text).map_err(|e| match e { EnvError::EnvBackslashCNotAllowedInDoubleQuotes(_) => USimpleError::new(125, e.to_string()), EnvError::EnvInvalidBackslashAtEndOfStringInMinusS(_, _) => { USimpleError::new(125, e.to_string()) } EnvError::EnvInvalidSequenceBackslashXInMinusS(_, _) => { USimpleError::new(125, e.to_string()) } EnvError::EnvMissingClosingQuote(_, _) => USimpleError::new(125, e.to_string()), EnvError::EnvParsingOfVariableMissingClosingBrace(pos) => USimpleError::new( 125, translate!("env-error-variable-name-issue", "position" => pos, "error" => e), ), EnvError::EnvParsingOfMissingVariable(pos) => USimpleError::new( 125, translate!("env-error-variable-name-issue", "position" => pos, "error" => e), ), EnvError::EnvParsingOfVariableMissingClosingBraceAfterValue(pos) => USimpleError::new( 125, translate!("env-error-variable-name-issue", "position" => pos, "error" => e), ), EnvError::EnvParsingOfVariableUnexpectedNumber(pos, _) => USimpleError::new( 125, translate!("env-error-variable-name-issue", "position" => pos, "error" => e), ), EnvError::EnvParsingOfVariableExceptedBraceOrColon(pos, _) => 
USimpleError::new( 125, translate!("env-error-variable-name-issue", "position" => pos, "error" => e), ), _ => USimpleError::new( 125, translate!("env-error-generic", "error" => format!("{e:?}")), ), }) } fn debug_print_args(args: &[OsString]) { eprintln!("input args:"); for (i, arg) in args.iter().enumerate() { eprintln!("arg[{i}]: {}", arg.quote()); } } fn check_and_handle_string_args( arg: &OsString, prefix_to_test: &str, all_args: &mut Vec, do_debug_print_args: Option<&Vec>, ) -> UResult { let native_arg = NCvt::convert(arg); if let Some(remaining_arg) = native_arg.strip_prefix(&*NCvt::convert(prefix_to_test)) { if let Some(input_args) = do_debug_print_args { debug_print_args(input_args); // do it here, such that its also printed when we get an error/panic during parsing } let arg_strings = parse_args_from_str(remaining_arg)?; all_args.extend( arg_strings .into_iter() .map(from_native_int_representation_owned), ); Ok(true) } else { Ok(false) } } #[derive(Default)] struct EnvAppData { do_debug_printing: bool, do_input_debug_printing: Option, had_string_argument: bool, } impl EnvAppData { fn make_error_no_such_file_or_dir(&self, prog: &OsStr) -> Box { uucore::show_error!( "{}", translate!("env-error-no-such-file", "program" => prog.quote()) ); if !self.had_string_argument { uucore::show_error!("{}", translate!("env-error-use-s-shebang")); } ExitCode::new(127) } fn process_all_string_arguments( &mut self, original_args: &Vec, ) -> UResult> { let mut all_args: Vec = Vec::new(); let mut process_flags = true; let mut expecting_arg = false; // Leave out split-string since it's a special case below let flags_with_args = [ options::ARGV0, options::CHDIR, options::FILE, options::IGNORE_SIGNAL, options::UNSET, ]; let short_flags_with_args = ['a', 'C', 'f', 'u']; for (n, arg) in original_args.iter().enumerate() { let arg_str = arg.to_string_lossy(); // Stop processing env flags once we reach the command or -- argument if 0 < n && !expecting_arg && (arg == "--" || 
!(arg_str.starts_with('-') || arg_str.contains('='))) { process_flags = false; } if !process_flags { all_args.push(arg.clone()); continue; } expecting_arg = false; match arg { b if check_and_handle_string_args(b, "--split-string", &mut all_args, None)? => { self.had_string_argument = true; } b if check_and_handle_string_args(b, "-S", &mut all_args, None)? => { self.had_string_argument = true; } b if check_and_handle_string_args(b, "-vS", &mut all_args, None)? => { self.do_debug_printing = true; self.had_string_argument = true; } b if check_and_handle_string_args( b, "-vvS", &mut all_args, Some(original_args), )? => { self.do_debug_printing = true; self.do_input_debug_printing = Some(false); // already done self.had_string_argument = true; } _ => { if let Some(flag) = arg_str.strip_prefix("--") { if flags_with_args.contains(&flag) { expecting_arg = true; } } else if let Some(flag) = arg_str.strip_prefix("-") { for c in flag.chars() { expecting_arg = short_flags_with_args.contains(&c); } } // Short unset option (-u) is not allowed to contain '=' if arg_str.contains('=') && arg_str.starts_with("-u") && !arg_str.starts_with("--") { let name = &arg_str[arg_str.find('=').unwrap()..]; return Err(USimpleError::new( 125, translate!("env-error-cannot-unset", "name" => name), )); } all_args.push(arg.clone()); } } } Ok(all_args) } fn parse_arguments( &mut self, original_args: impl uucore::Args, ) -> Result<(Vec, clap::ArgMatches), Box> { let original_args: Vec = original_args.collect(); let args = self.process_all_string_arguments(&original_args)?; let app = uu_app(); let matches = app .try_get_matches_from(args) .map_err(|e| -> Box { match e.kind() { clap::error::ErrorKind::DisplayHelp | clap::error::ErrorKind::DisplayVersion => e.into(), _ => { // extent any real issue with parameter parsing by the ERROR_MSG_S_SHEBANG let s = format!("{e}"); if !s.is_empty() { let s = s.trim_end(); uucore::show_error!("{s}"); } uucore::show_error!("{}", 
translate!("env-error-use-s-shebang")); ExitCode::new(125) } } })?; Ok((original_args, matches)) } fn run_env(&mut self, original_args: impl uucore::Args) -> UResult<()> { let (original_args, matches) = self.parse_arguments(original_args)?; self.do_debug_printing = self.do_debug_printing || (0 != matches.get_count("debug")); self.do_input_debug_printing = self .do_input_debug_printing .or(Some(matches.get_count("debug") >= 2)); if let Some(value) = self.do_input_debug_printing { if value { debug_print_args(&original_args); self.do_input_debug_printing = Some(false); } } let mut opts = make_options(&matches)?; apply_change_directory(&opts)?; // NOTE: we manually set and unset the env vars below rather than using Command::env() to more // easily handle the case where no command is given apply_removal_of_all_env_vars(&opts); // load .env-style config file prior to those given on the command-line load_config_file(&mut opts)?; apply_unset_env_vars(&opts)?; apply_specified_env_vars(&opts); #[cfg(unix)] apply_ignore_signal(&opts)?; if opts.program.is_empty() { // no program provided, so just dump all env vars to stdout print_env(opts.line_ending); } else { return self.run_program(&opts, self.do_debug_printing); } Ok(()) } /// Run the program specified by the options. /// /// Note that the env command must exec the program, not spawn it. See /// for more information. 
/// /// Exit status: /// - 125: if the env command itself fails /// - 126: if the program is found but cannot be invoked /// - 127: if the program cannot be found fn run_program( &mut self, opts: &Options<'_>, do_debug_printing: bool, ) -> Result<(), Box> { let prog = Cow::from(opts.program[0]); #[cfg(unix)] let mut arg0 = prog.clone(); #[cfg(not(unix))] let arg0 = prog.clone(); let args = &opts.program[1..]; if let Some(_argv0) = opts.argv0 { #[cfg(unix)] { arg0 = Cow::Borrowed(_argv0); if do_debug_printing { eprintln!("argv0: {}", arg0.quote()); } } #[cfg(not(unix))] return Err(USimpleError::new( 2, translate!("env-error-argv0-not-supported"), )); } if do_debug_printing { eprintln!("executing: {}", prog.maybe_quote()); let arg_prefix = " arg"; eprintln!("{arg_prefix}[{}]= {}", 0, arg0.quote()); for (i, arg) in args.iter().enumerate() { eprintln!("{arg_prefix}[{}]= {}", i + 1, arg.quote()); } } #[cfg(unix)] { // Convert program name to CString. let Ok(prog_cstring) = CString::new(prog.as_bytes()) else { return Err(self.make_error_no_such_file_or_dir(&prog)); }; // Prepare arguments for execvp. let mut argv = Vec::new(); // Convert arg0 to CString. let Ok(arg0_cstring) = CString::new(arg0.as_bytes()) else { return Err(self.make_error_no_such_file_or_dir(&prog)); }; argv.push(arg0_cstring); // Convert remaining arguments to CString. for arg in args { let Ok(arg_cstring) = CString::new(arg.as_bytes()) else { return Err(self.make_error_no_such_file_or_dir(&prog)); }; argv.push(arg_cstring); } // Execute the program using execvp. this replaces the current // process. The execvp function takes care of appending a NULL // argument to the argument list so that we don't have to. 
match execvp(&prog_cstring, &argv) { Err(nix::errno::Errno::ENOENT) => Err(self.make_error_no_such_file_or_dir(&prog)), Err(nix::errno::Errno::EACCES) => { uucore::show_error!( "{}", translate!( "env-error-permission-denied", "program" => prog.quote() ) ); Err(126.into()) } Err(_) => { uucore::show_error!( "{}", translate!( "env-error-unknown", "error" => "execvp failed" ) ); Err(126.into()) } Ok(_) => { unreachable!("execvp should never return on success") } } } #[cfg(not(unix))] { // Fallback to Command::status for non-Unix systems let mut cmd = std::process::Command::new(&*prog); cmd.args(args); match cmd.status() { Ok(exit) if !exit.success() => Err(exit.code().unwrap_or(1).into()), Err(ref err) => match err.kind() { io::ErrorKind::NotFound | io::ErrorKind::InvalidInput => { Err(self.make_error_no_such_file_or_dir(&prog)) } io::ErrorKind::PermissionDenied => { uucore::show_error!( "{}", translate!("env-error-permission-denied", "program" => prog.quote()) ); Err(126.into()) } _ => { uucore::show_error!( "{}", translate!("env-error-unknown", "error" => format!("{err:?}")) ); Err(126.into()) } }, Ok(_) => Ok(()), } } } } fn apply_removal_of_all_env_vars(opts: &Options<'_>) { // remove all env vars if told to ignore presets if opts.ignore_env { for (ref name, _) in env::vars_os() { unsafe { env::remove_var(name); } } } } fn make_options(matches: &clap::ArgMatches) -> UResult> { let ignore_env = matches.get_flag("ignore-environment"); let line_ending = LineEnding::from_zero_flag(matches.get_flag("null")); let running_directory = matches.get_one::("chdir").map(|s| s.as_os_str()); let files = match matches.get_many::("file") { Some(v) => v.map(|s| s.as_os_str()).collect(), None => Vec::with_capacity(0), }; let unsets = match matches.get_many::("unset") { Some(v) => v.map(|s| s.as_os_str()).collect(), None => Vec::with_capacity(0), }; let argv0 = matches.get_one::("argv0").map(|s| s.as_os_str()); let mut opts = Options { ignore_env, line_ending, running_directory, 
files, unsets, sets: vec![], program: vec![], argv0, #[cfg(unix)] ignore_signal: vec![], }; #[cfg(unix)] if let Some(iter) = matches.get_many::("ignore-signal") { for opt in iter { parse_signal_opt(&mut opts, opt)?; } } let mut begin_prog_opts = false; if let Some(mut iter) = matches.get_many::("vars") { // read NAME=VALUE arguments (and up to a single program argument) while !begin_prog_opts { if let Some(opt) = iter.next() { if opt == "-" { opts.ignore_env = true; } else { begin_prog_opts = parse_name_value_opt(&mut opts, opt)?; } } else { break; } } // read any leftover program arguments for opt in iter { parse_program_opt(&mut opts, opt)?; } } Ok(opts) } fn apply_unset_env_vars(opts: &Options<'_>) -> Result<(), Box> { for name in &opts.unsets { let native_name = NativeStr::new(name); if name.is_empty() || native_name.contains(&'\0').unwrap() || native_name.contains(&'=').unwrap() { return Err(USimpleError::new( 125, translate!("env-error-cannot-unset-invalid", "name" => name.quote()), )); } unsafe { env::remove_var(name); } } Ok(()) } fn apply_change_directory(opts: &Options<'_>) -> Result<(), Box> { // GNU env tests this behavior if opts.program.is_empty() && opts.running_directory.is_some() { return Err(UUsageError::new( 125, translate!("env-error-must-specify-command-with-chdir"), )); } if let Some(d) = opts.running_directory { match env::set_current_dir(d) { Ok(()) => d, Err(error) => { return Err(USimpleError::new( 125, translate!("env-error-cannot-change-directory", "directory" => d.quote(), "error" => error), )); } }; } Ok(()) } fn apply_specified_env_vars(opts: &Options<'_>) { // set specified env vars for (name, val) in &opts.sets { /* * set_var panics if name is an empty string * set_var internally calls setenv (on unix at least), while GNU env calls putenv instead. 
* * putenv returns successfully if provided with something like "=a" and modifies the environ * variable to contain "=a" inside it, effectively modifying the process' current environment * to contain a malformed string in it. Using GNU's implementation, the command `env =a` * prints out the malformed string and even invokes the child process with that environment. * This can be seen by using `env -i =a env` or `env -i =a cat /proc/self/environ` * * POSIX.1-2017 doesn't seem to mention what to do if the string is malformed (at least * not in "Chapter 8, Environment Variables" or in the definition for environ and various * exec*'s or in the description of env in the "Shell & Utilities" volume). * * It also doesn't specify any checks for putenv before modifying the environ variable, which * is likely why glibc doesn't do so. However, the first set_var argument cannot point to * an empty string or a string containing '='. * * There is no benefit in replicating GNU's env behavior, since it will only modify the * environment in weird ways */ if name.is_empty() { show_warning!( "{}", translate!("env-warning-no-name-specified", "value" => val.quote()) ); continue; } unsafe { env::set_var(name, val); } } } #[cfg(unix)] fn apply_ignore_signal(opts: &Options<'_>) -> UResult<()> { for &sig_value in &opts.ignore_signal { let sig: Signal = (sig_value as i32) .try_into() .map_err(|e| io::Error::from_raw_os_error(e as i32))?; ignore_signal(sig)?; } Ok(()) } #[cfg(unix)] fn ignore_signal(sig: Signal) -> UResult<()> { // SAFETY: This is safe because we write the handler for each signal only once, and therefore "the current handler is the default", as the documentation requires it. 
let result = unsafe { signal(sig, SigIgn) }; if let Err(err) = result { return Err(USimpleError::new( 125, translate!("env-error-failed-set-signal-action", "signal" => (sig as i32), "error" => err.desc()), )); } Ok(()) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { // Rust ignores SIGPIPE (see https://github.com/rust-lang/rust/issues/62569). // We restore its default action here. #[cfg(unix)] unsafe { libc::signal(libc::SIGPIPE, libc::SIG_DFL); } EnvAppData::default().run_env(args) } #[cfg(test)] mod tests { use super::*; use uucore::locale; #[test] fn test_split_string_environment_vars_test() { unsafe { env::set_var("FOO", "BAR") }; assert_eq!( NCvt::convert(vec!["FOO=bar", "sh", "-c", "echo xBARx =$FOO="]), parse_args_from_str(&NCvt::convert(r#"FOO=bar sh -c "echo x${FOO}x =\$FOO=""#)) .unwrap(), ); } #[test] fn test_split_string_misc() { assert_eq!( NCvt::convert(vec!["A=B", "FOO=AR", "sh", "-c", "echo $A$FOO"]), parse_args_from_str(&NCvt::convert(r#"A=B FOO=AR sh -c "echo \$A\$FOO""#)).unwrap(), ); assert_eq!( NCvt::convert(vec!["A=B", "FOO=AR", "sh", "-c", "echo $A$FOO"]), parse_args_from_str(&NCvt::convert(r"A=B FOO=AR sh -c 'echo $A$FOO'")).unwrap() ); assert_eq!( NCvt::convert(vec!["A=B", "FOO=AR", "sh", "-c", "echo $A$FOO"]), parse_args_from_str(&NCvt::convert(r"A=B FOO=AR sh -c 'echo $A$FOO'")).unwrap() ); assert_eq!( NCvt::convert(vec!["-i", "A=B ' C"]), parse_args_from_str(&NCvt::convert(r"-i A='B \' C'")).unwrap() ); } #[test] fn test_error_cases() { let _ = locale::setup_localization("env"); // Test EnvBackslashCNotAllowedInDoubleQuotes let result = parse_args_from_str(&NCvt::convert(r#"sh -c "echo \c""#)); assert!(result.is_err()); assert_eq!( result.unwrap_err().to_string(), "'\\c' must not appear in double-quoted -S string at position 13" ); // Test EnvInvalidBackslashAtEndOfStringInMinusS let result = parse_args_from_str(&NCvt::convert(r#"sh -c "echo \"#)); assert!(result.is_err()); assert_eq!( 
result.unwrap_err().to_string(), "no terminating quote in -S string at position 13 for quote '\"'" ); // Test EnvInvalidSequenceBackslashXInMinusS let result = parse_args_from_str(&NCvt::convert(r#"sh -c "echo \x""#)); assert!(result.is_err()); assert!( result .unwrap_err() .to_string() .contains("invalid sequence '\\x' in -S") ); // Test EnvMissingClosingQuote let result = parse_args_from_str(&NCvt::convert(r#"sh -c "echo "#)); assert!(result.is_err()); assert_eq!( result.unwrap_err().to_string(), "no terminating quote in -S string at position 12 for quote '\"'" ); // Test variable-related errors let result = parse_args_from_str(&NCvt::convert(r"echo ${FOO")); assert!(result.is_err()); assert!( result .unwrap_err() .to_string() .contains("variable name issue (at 10): Missing closing brace") ); let result = parse_args_from_str(&NCvt::convert(r"echo ${FOO:-value")); assert!(result.is_err()); assert!( result .unwrap_err() .to_string() .contains("variable name issue (at 17): Missing closing brace after default value") ); let result = parse_args_from_str(&NCvt::convert(r"echo ${1FOO}")); assert!(result.is_err()); assert!(result.unwrap_err().to_string().contains("variable name issue (at 7): Unexpected character: '1', expected variable name must not start with 0..9")); let result = parse_args_from_str(&NCvt::convert(r"echo ${FOO?}")); assert!(result.is_err()); assert!(result.unwrap_err().to_string().contains("variable name issue (at 10): Unexpected character: '?', expected a closing brace ('}') or colon (':')")); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/main.rs000066400000000000000000000000261504311601400250330ustar00rootroot00000000000000uucore::bin!(uu_env); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/native_int_str.rs000066400000000000000000000220541504311601400271440ustar00rootroot00000000000000// This file is part of the uutils coreutils package. 
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // This module contains classes and functions for dealing with the differences // between operating systems regarding the lossless processing of OsStr/OsString. // In contrast to existing crates with similar purpose, this module does not use any // `unsafe` features or functions. // Due to a suboptimal design aspect of OsStr/OsString on windows, we need to // encode/decode to wide chars on windows operating system. // This prevents borrowing from OsStr on windows. Anyway, if optimally used,# // this conversion needs to be done only once in the beginning and at the end. use std::ffi::OsString; #[cfg(not(target_os = "windows"))] use std::os::unix::ffi::{OsStrExt, OsStringExt}; #[cfg(target_os = "windows")] use std::os::windows::prelude::*; use std::{borrow::Cow, ffi::OsStr}; #[cfg(not(target_os = "windows"))] use u8 as NativeIntCharU; #[cfg(target_os = "windows")] use u16 as NativeIntCharU; pub type NativeCharInt = NativeIntCharU; pub type NativeIntStr = [NativeCharInt]; pub type NativeIntString = Vec; pub struct NCvt; pub trait Convert { fn convert(f: From) -> To; } // ================ str/String ================= impl<'a> Convert<&'a str, Cow<'a, NativeIntStr>> for NCvt { fn convert(f: &'a str) -> Cow<'a, NativeIntStr> { #[cfg(target_os = "windows")] { Cow::Owned(f.encode_utf16().collect()) } #[cfg(not(target_os = "windows"))] { Cow::Borrowed(f.as_bytes()) } } } impl<'a> Convert<&'a String, Cow<'a, NativeIntStr>> for NCvt { fn convert(f: &'a String) -> Cow<'a, NativeIntStr> { #[cfg(target_os = "windows")] { Cow::Owned(f.encode_utf16().collect()) } #[cfg(not(target_os = "windows"))] { Cow::Borrowed(f.as_bytes()) } } } impl<'a> Convert> for NCvt { fn convert(f: String) -> Cow<'a, NativeIntStr> { #[cfg(target_os = "windows")] { Cow::Owned(f.encode_utf16().collect()) } #[cfg(not(target_os = "windows"))] { Cow::Owned(f.into_bytes()) } } } // 
================ OsStr/OsString ================= impl<'a> Convert<&'a OsStr, Cow<'a, NativeIntStr>> for NCvt { fn convert(f: &'a OsStr) -> Cow<'a, NativeIntStr> { to_native_int_representation(f) } } impl<'a> Convert<&'a OsString, Cow<'a, NativeIntStr>> for NCvt { fn convert(f: &'a OsString) -> Cow<'a, NativeIntStr> { to_native_int_representation(f) } } impl<'a> Convert> for NCvt { fn convert(f: OsString) -> Cow<'a, NativeIntStr> { #[cfg(target_os = "windows")] { Cow::Owned(f.encode_wide().collect()) } #[cfg(not(target_os = "windows"))] { Cow::Owned(f.into_vec()) } } } // ================ Vec ================= impl<'a> Convert<&'a Vec<&'a str>, Vec>> for NCvt { fn convert(f: &'a Vec<&'a str>) -> Vec> { f.iter().map(|x| Self::convert(*x)).collect() } } impl<'a> Convert, Vec>> for NCvt { fn convert(f: Vec<&'a str>) -> Vec> { f.iter().map(|x| Self::convert(*x)).collect() } } impl<'a> Convert<&'a Vec, Vec>> for NCvt { fn convert(f: &'a Vec) -> Vec> { f.iter().map(Self::convert).collect() } } impl<'a> Convert, Vec>> for NCvt { fn convert(f: Vec) -> Vec> { f.into_iter().map(Self::convert).collect() } } pub fn to_native_int_representation(input: &OsStr) -> Cow<'_, NativeIntStr> { #[cfg(target_os = "windows")] { Cow::Owned(input.encode_wide().collect()) } #[cfg(not(target_os = "windows"))] { Cow::Borrowed(input.as_bytes()) } } #[allow(clippy::needless_pass_by_value)] // needed on windows pub fn from_native_int_representation(input: Cow<'_, NativeIntStr>) -> Cow<'_, OsStr> { #[cfg(target_os = "windows")] { Cow::Owned(OsString::from_wide(&input)) } #[cfg(not(target_os = "windows"))] { match input { Cow::Borrowed(borrow) => Cow::Borrowed(OsStr::from_bytes(borrow)), Cow::Owned(own) => Cow::Owned(OsString::from_vec(own)), } } } #[allow(clippy::needless_pass_by_value)] // needed on windows pub fn from_native_int_representation_owned(input: NativeIntString) -> OsString { #[cfg(target_os = "windows")] { OsString::from_wide(&input) } #[cfg(not(target_os = "windows"))] { 
OsString::from_vec(input) } } pub fn get_single_native_int_value(c: &char) -> Option { #[cfg(target_os = "windows")] { let mut buf = [0u16, 0]; let s = c.encode_utf16(&mut buf); if s.len() == 1 { Some(buf[0]) } else { None } } #[cfg(not(target_os = "windows"))] { let mut buf = [0u8, 0, 0, 0]; let s = c.encode_utf8(&mut buf); if s.len() == 1 { Some(buf[0]) } else { None } } } pub fn get_char_from_native_int(ni: NativeCharInt) -> Option<(char, NativeCharInt)> { let c_opt; #[cfg(target_os = "windows")] { c_opt = char::decode_utf16([ni; 1]).next().unwrap().ok(); }; #[cfg(not(target_os = "windows"))] { c_opt = std::str::from_utf8(&[ni; 1]) .ok() .map(|x| x.chars().next().unwrap()); }; if let Some(c) = c_opt { return Some((c, ni)); } None } pub struct NativeStr<'a> { native: Cow<'a, NativeIntStr>, } impl<'a> NativeStr<'a> { pub fn new(str: &'a OsStr) -> Self { Self { native: to_native_int_representation(str), } } pub fn native(&self) -> Cow<'a, NativeIntStr> { self.native.clone() } pub fn into_native(self) -> Cow<'a, NativeIntStr> { self.native } pub fn contains(&self, x: &char) -> Option { let n_c = get_single_native_int_value(x)?; Some(self.native.contains(&n_c)) } pub fn slice(&self, from: usize, to: usize) -> Cow<'a, OsStr> { let result = self.match_cow(|b| Ok::<_, ()>(&b[from..to]), |o| Ok(o[from..to].to_vec())); result.unwrap() } pub fn split_once(&self, pred: &char) -> Option<(Cow<'a, OsStr>, Cow<'a, OsStr>)> { let n_c = get_single_native_int_value(pred)?; let p = self.native.iter().position(|&x| x == n_c)?; let before = self.slice(0, p); let after = self.slice(p + 1, self.native.len()); Some((before, after)) } pub fn split_at(&self, pos: usize) -> (Cow<'a, OsStr>, Cow<'a, OsStr>) { let before = self.slice(0, pos); let after = self.slice(pos, self.native.len()); (before, after) } pub fn strip_prefix(&self, prefix: &OsStr) -> Option> { let n_prefix = to_native_int_representation(prefix); let result = self.match_cow( |b| b.strip_prefix(&*n_prefix).ok_or(()), |o| 
o.strip_prefix(&*n_prefix).map(|x| x.to_vec()).ok_or(()), ); result.ok() } pub fn strip_prefix_native(&self, prefix: &OsStr) -> Option> { let n_prefix = to_native_int_representation(prefix); let result = self.match_cow_native( |b| b.strip_prefix(&*n_prefix).ok_or(()), |o| o.strip_prefix(&*n_prefix).map(|x| x.to_vec()).ok_or(()), ); result.ok() } fn match_cow( &self, f_borrow: FnBorrow, f_owned: FnOwned, ) -> Result, Err> where FnBorrow: FnOnce(&'a [NativeCharInt]) -> Result<&'a [NativeCharInt], Err>, FnOwned: FnOnce(&Vec) -> Result, Err>, { match &self.native { Cow::Borrowed(b) => { let slice = f_borrow(b); slice.map(|x| from_native_int_representation(Cow::Borrowed(x))) } Cow::Owned(o) => { let slice = f_owned(o); let os_str = slice.map(from_native_int_representation_owned); os_str.map(Cow::Owned) } } } fn match_cow_native( &self, f_borrow: FnBorrow, f_owned: FnOwned, ) -> Result, Err> where FnBorrow: FnOnce(&'a [NativeCharInt]) -> Result<&'a [NativeCharInt], Err>, FnOwned: FnOnce(&Vec) -> Result, Err>, { match &self.native { Cow::Borrowed(b) => { let slice = f_borrow(b); slice.map(Cow::Borrowed) } Cow::Owned(o) => { let slice = f_owned(o); slice.map(Cow::Owned) } } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/split_iterator.rs000066400000000000000000000300571504311601400271620ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // // This file is based on work from Tomasz MiÄ…sko who published it as "shell_words" crate, // licensed under the Apache License, Version 2.0 // or the MIT license , at your option. // //! Process command line according to parsing rules of the original GNU env. //! Even though it looks quite like a POSIX syntax, the original //! `shell_words` implementation had to be adapted significantly. //! //! 
Apart from the grammar differences, there is a new feature integrated: $VARIABLE expansion. //! //! [GNU env] // spell-checker:ignore (words) Tomasz MiÄ…sko rntfv FFFD varname #![forbid(unsafe_code)] use std::borrow::Cow; use crate::EnvError; use crate::native_int_str::NativeCharInt; use crate::native_int_str::NativeIntStr; use crate::native_int_str::NativeIntString; use crate::native_int_str::from_native_int_representation; use crate::string_expander::StringExpander; use crate::string_parser::StringParser; use crate::variable_parser::VariableParser; const BACKSLASH: char = '\\'; const DOUBLE_QUOTES: char = '\"'; const SINGLE_QUOTES: char = '\''; const NEW_LINE: char = '\n'; const DOLLAR: char = '$'; const REPLACEMENTS: [(char, char); 9] = [ ('r', '\r'), ('n', '\n'), ('t', '\t'), ('f', '\x0C'), ('v', '\x0B'), ('_', ' '), ('#', '#'), ('$', '$'), ('"', '"'), ]; const ASCII_WHITESPACE_CHARS: [char; 6] = [' ', '\t', '\r', '\n', '\x0B', '\x0C']; pub struct SplitIterator<'a> { expander: StringExpander<'a>, words: Vec>, } impl<'a> SplitIterator<'a> { pub fn new(s: &'a NativeIntStr) -> Self { Self { expander: StringExpander::new(s), words: Vec::new(), } } fn skip_one(&mut self) -> Result<(), EnvError> { self.expander .get_parser_mut() .consume_one_ascii_or_all_non_ascii()?; Ok(()) } fn take_one(&mut self) -> Result<(), EnvError> { Ok(self.expander.take_one()?) 
} fn get_current_char(&self) -> Option { self.expander.peek().ok() } fn push_char_to_word(&mut self, c: char) { self.expander.put_one_char(c); } fn push_word_to_words(&mut self) { let word = self.expander.take_collected_output(); self.words.push(word); } fn get_parser(&self) -> &StringParser<'a> { self.expander.get_parser() } fn get_parser_mut(&mut self) -> &mut StringParser<'a> { self.expander.get_parser_mut() } fn substitute_variable<'x>(&'x mut self) -> Result<(), EnvError> { let mut var_parse = VariableParser::<'a, '_> { parser: self.get_parser_mut(), }; let (name, default) = var_parse.parse_variable()?; let varname_os_str_cow = from_native_int_representation(Cow::Borrowed(name)); let value = std::env::var_os(varname_os_str_cow); match (&value, default) { (None, None) => {} // do nothing, just replace it with "" (Some(value), _) => { self.expander.put_string(value); } (None, Some(default)) => { self.expander.put_native_string(default); } } Ok(()) } fn check_and_replace_ascii_escape_code(&mut self, c: char) -> Result { if let Some(replace) = REPLACEMENTS.iter().find(|&x| x.0 == c) { self.skip_one()?; self.push_char_to_word(replace.1); return Ok(true); } Ok(false) } fn make_invalid_sequence_backslash_xin_minus_s(&self, c: char) -> EnvError { EnvError::EnvInvalidSequenceBackslashXInMinusS( self.expander.get_parser().get_peek_position(), c, ) } fn state_root(&mut self) -> Result<(), EnvError> { loop { match self.state_delimiter() { Err(EnvError::EnvContinueWithDelimiter) => {} Err(EnvError::EnvReachedEnd) => return Ok(()), result => return result, } } } fn state_delimiter(&mut self) -> Result<(), EnvError> { loop { match self.get_current_char() { None => return Ok(()), Some('#') => { self.skip_one()?; self.state_comment()?; } Some(BACKSLASH) => { self.skip_one()?; self.state_delimiter_backslash()?; } Some(c) if ASCII_WHITESPACE_CHARS.contains(&c) => { self.skip_one()?; } Some(_) => { // Don't consume char. Will be done in unquoted state. 
self.state_unquoted()?; } } } } fn state_delimiter_backslash(&mut self) -> Result<(), EnvError> { match self.get_current_char() { None => Err(EnvError::EnvInvalidBackslashAtEndOfStringInMinusS( self.get_parser().get_peek_position(), "Delimiter".into(), )), Some('_' | NEW_LINE) => { self.skip_one()?; Ok(()) } Some(DOLLAR | BACKSLASH | '#' | SINGLE_QUOTES | DOUBLE_QUOTES) => { self.take_one()?; self.state_unquoted() } Some('c') => Err(EnvError::EnvReachedEnd), Some(c) if self.check_and_replace_ascii_escape_code(c)? => self.state_unquoted(), Some(c) => Err(self.make_invalid_sequence_backslash_xin_minus_s(c)), } } fn state_unquoted(&mut self) -> Result<(), EnvError> { loop { match self.get_current_char() { None => { self.push_word_to_words(); return Err(EnvError::EnvReachedEnd); } Some(DOLLAR) => { self.substitute_variable()?; } Some(SINGLE_QUOTES) => { self.skip_one()?; self.state_single_quoted()?; } Some(DOUBLE_QUOTES) => { self.skip_one()?; self.state_double_quoted()?; } Some(BACKSLASH) => { self.skip_one()?; self.state_unquoted_backslash()?; } Some(c) if ASCII_WHITESPACE_CHARS.contains(&c) => { self.push_word_to_words(); self.skip_one()?; return Ok(()); } Some(_) => { self.take_one()?; } } } } fn state_unquoted_backslash(&mut self) -> Result<(), EnvError> { match self.get_current_char() { None => Err(EnvError::EnvInvalidBackslashAtEndOfStringInMinusS( self.get_parser().get_peek_position(), "Unquoted".into(), )), Some(NEW_LINE) => { self.skip_one()?; Ok(()) } Some('_') => { self.skip_one()?; self.push_word_to_words(); Err(EnvError::EnvContinueWithDelimiter) } Some('c') => { self.push_word_to_words(); Err(EnvError::EnvReachedEnd) } Some(DOLLAR | BACKSLASH | SINGLE_QUOTES | DOUBLE_QUOTES) => { self.take_one()?; Ok(()) } Some(c) if self.check_and_replace_ascii_escape_code(c)? 
=> Ok(()), Some(c) => Err(self.make_invalid_sequence_backslash_xin_minus_s(c)), } } fn state_single_quoted(&mut self) -> Result<(), EnvError> { loop { match self.get_current_char() { None => { return Err(EnvError::EnvMissingClosingQuote( self.get_parser().get_peek_position(), '\'', )); } Some(SINGLE_QUOTES) => { self.skip_one()?; return Ok(()); } Some(BACKSLASH) => { self.skip_one()?; self.split_single_quoted_backslash()?; } Some(_) => { self.take_one()?; } } } } fn split_single_quoted_backslash(&mut self) -> Result<(), EnvError> { match self.get_current_char() { None => Err(EnvError::EnvMissingClosingQuote( self.get_parser().get_peek_position(), '\'', )), Some(NEW_LINE) => { self.skip_one()?; Ok(()) } Some(SINGLE_QUOTES | BACKSLASH) => { self.take_one()?; Ok(()) } Some(c) if REPLACEMENTS.iter().any(|&x| x.0 == c) => { // See GNU test-suite e11: In single quotes, \t remains as it is. // Comparing with GNU behavior: \a is not accepted and issues an error. // So apparently only known sequences are allowed, even though they are not expanded.... bug of GNU? 
self.push_char_to_word(BACKSLASH); self.take_one()?; Ok(()) } Some(c) => Err(self.make_invalid_sequence_backslash_xin_minus_s(c)), } } fn state_double_quoted(&mut self) -> Result<(), EnvError> { loop { match self.get_current_char() { None => { return Err(EnvError::EnvMissingClosingQuote( self.get_parser().get_peek_position(), '"', )); } Some(DOLLAR) => { self.substitute_variable()?; } Some(DOUBLE_QUOTES) => { self.skip_one()?; return Ok(()); } Some(BACKSLASH) => { self.skip_one()?; self.state_double_quoted_backslash()?; } Some(_) => { self.take_one()?; } } } } fn state_double_quoted_backslash(&mut self) -> Result<(), EnvError> { match self.get_current_char() { None => Err(EnvError::EnvMissingClosingQuote( self.get_parser().get_peek_position(), '"', )), Some(NEW_LINE) => { self.skip_one()?; Ok(()) } Some(DOUBLE_QUOTES | DOLLAR | BACKSLASH) => { self.take_one()?; Ok(()) } Some('c') => Err(EnvError::EnvBackslashCNotAllowedInDoubleQuotes( self.get_parser().get_peek_position(), )), Some(c) if self.check_and_replace_ascii_escape_code(c)? => Ok(()), Some(c) => Err(self.make_invalid_sequence_backslash_xin_minus_s(c)), } } fn state_comment(&mut self) -> Result<(), EnvError> { loop { match self.get_current_char() { None => return Err(EnvError::EnvReachedEnd), Some(NEW_LINE) => { self.skip_one()?; return Ok(()); } Some(_) => { self.get_parser_mut().skip_until_char_or_end(NEW_LINE); } } } } pub fn split(mut self) -> Result, EnvError> { self.state_root()?; Ok(self.words) } } pub fn split(s: &NativeIntStr) -> Result, EnvError> { let split_args = SplitIterator::new(s).split()?; Ok(split_args) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/string_expander.rs000066400000000000000000000050361504311601400273110ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
use std::{ ffi::{OsStr, OsString}, mem, }; use crate::{ native_int_str::{NativeCharInt, NativeIntStr, to_native_int_representation}, string_parser::{Chunk, Error, StringParser}, }; /// This class makes parsing and word collection more convenient. /// /// It manages an "output" buffer that is automatically filled. /// It provides `skip_one` and `take_one` that focus on /// working with ASCII separators. Thus, they will skip or take /// all consecutive non-ascii char sequences at once. pub struct StringExpander<'a> { parser: StringParser<'a>, output: Vec, } impl<'a> StringExpander<'a> { pub fn new(input: &'a NativeIntStr) -> Self { Self { parser: StringParser::new(input), output: Vec::default(), } } pub fn new_at(input: &'a NativeIntStr, pos: usize) -> Self { Self { parser: StringParser::new_at(input, pos), output: Vec::default(), } } pub fn get_parser(&self) -> &StringParser<'a> { &self.parser } pub fn get_parser_mut(&mut self) -> &mut StringParser<'a> { &mut self.parser } pub fn peek(&self) -> Result { self.parser.peek() } pub fn skip_one(&mut self) -> Result<(), Error> { self.get_parser_mut().consume_one_ascii_or_all_non_ascii()?; Ok(()) } pub fn get_peek_position(&self) -> usize { self.get_parser().get_peek_position() } pub fn take_one(&mut self) -> Result<(), Error> { let chunks = self.parser.consume_one_ascii_or_all_non_ascii()?; for chunk in chunks { match chunk { Chunk::InvalidEncoding(invalid) => self.output.extend(invalid), Chunk::ValidSingleIntChar((_c, ni)) => self.output.push(ni), } } Ok(()) } pub fn put_one_char(&mut self, c: char) { let os_str = OsString::from(c.to_string()); self.put_string(os_str); } pub fn put_string>(&mut self, os_str: S) { let native = to_native_int_representation(os_str.as_ref()); self.output.extend(&*native); } pub fn put_native_string(&mut self, n_str: &NativeIntStr) { self.output.extend(n_str); } pub fn take_collected_output(&mut self) -> Vec { mem::take(&mut self.output) } } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/string_parser.rs000066400000000000000000000123611504311601400267760ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // // spell-checker:ignore (words) FFFD #![forbid(unsafe_code)] use std::{borrow::Cow, ffi::OsStr}; use crate::native_int_str::{ NativeCharInt, NativeIntStr, from_native_int_representation, get_char_from_native_int, get_single_native_int_value, }; #[derive(Clone, Debug, Eq, PartialEq)] pub struct Error { pub peek_position: usize, pub err_type: ErrorType, } #[derive(Clone, Debug, Eq, PartialEq)] pub enum ErrorType { EndOfInput, InternalError, } /// Provides a valid char or an invalid sequence of bytes. /// /// Invalid byte sequences can't be split in any meaningful way. /// Thus, they need to be consumed as one piece. pub enum Chunk<'a> { InvalidEncoding(&'a NativeIntStr), ValidSingleIntChar((char, NativeCharInt)), } /// This class makes parsing a [`std::ffi::OsString`] char by char more convenient. /// /// It also allows capturing the intermediate positions for later splitting. 
pub struct StringParser<'a> { input: &'a NativeIntStr, pointer: usize, remaining: &'a NativeIntStr, } impl<'a> StringParser<'a> { pub fn new(input: &'a NativeIntStr) -> Self { let mut instance = Self { input, pointer: 0, remaining: input, }; instance.set_pointer(0); instance } pub fn new_at(input: &'a NativeIntStr, pos: usize) -> Self { let mut instance = Self::new(input); instance.set_pointer(pos); instance } pub fn get_input(&self) -> &'a NativeIntStr { self.input } pub fn get_peek_position(&self) -> usize { self.pointer } pub fn peek(&self) -> Result { self.peek_char_at_pointer(self.pointer) } fn make_err(&self, err_type: ErrorType) -> Error { Error { peek_position: self.get_peek_position(), err_type, } } pub fn peek_char_at_pointer(&self, at_pointer: usize) -> Result { let split = self.input.split_at(at_pointer).1; if split.is_empty() { return Err(self.make_err(ErrorType::EndOfInput)); } if let Some((c, _ni)) = get_char_from_native_int(split[0]) { Ok(c) } else { Ok('\u{FFFD}') } } fn get_chunk_with_length_at(&self, pointer: usize) -> Result<(Chunk<'a>, usize), Error> { let (_before, after) = self.input.split_at(pointer); if after.is_empty() { return Err(self.make_err(ErrorType::EndOfInput)); } if let Some(c_ni) = get_char_from_native_int(after[0]) { Ok((Chunk::ValidSingleIntChar(c_ni), 1)) } else { let mut i = 1; while i < after.len() { if let Some(_c) = get_char_from_native_int(after[i]) { break; } i += 1; } let chunk = &after[0..i]; Ok((Chunk::InvalidEncoding(chunk), chunk.len())) } } pub fn peek_chunk(&self) -> Option> { self.get_chunk_with_length_at(self.pointer) .ok() .map(|(chunk, _)| chunk) } pub fn consume_chunk(&mut self) -> Result, Error> { let (chunk, len) = self.get_chunk_with_length_at(self.pointer)?; self.set_pointer(self.pointer + len); Ok(chunk) } pub fn consume_one_ascii_or_all_non_ascii(&mut self) -> Result>, Error> { let mut result = Vec::>::new(); loop { let data = self.consume_chunk()?; let was_ascii = if let Chunk::ValidSingleIntChar((c, 
_ni)) = &data { c.is_ascii() } else { false }; result.push(data); if was_ascii { return Ok(result); } match self.peek_chunk() { Some(Chunk::ValidSingleIntChar((c, _ni))) if c.is_ascii() => return Ok(result), None => return Ok(result), _ => {} } } } pub fn skip_multiple(&mut self, skip_byte_count: usize) { let end_ptr = self.pointer + skip_byte_count; self.set_pointer(end_ptr); } pub fn skip_until_char_or_end(&mut self, c: char) { let native_rep = get_single_native_int_value(&c).unwrap(); let pos = self.remaining.iter().position(|x| *x == native_rep); if let Some(pos) = pos { self.set_pointer(self.pointer + pos); } else { self.set_pointer(self.input.len()); } } pub fn substring(&self, range: &std::ops::Range) -> &'a NativeIntStr { let (_before1, after1) = self.input.split_at(range.start); let (middle, _after2) = after1.split_at(range.end - range.start); middle } pub fn peek_remaining(&self) -> Cow<'a, OsStr> { from_native_int_representation(Cow::Borrowed(self.remaining)) } pub fn set_pointer(&mut self, new_pointer: usize) { self.pointer = new_pointer; let (_before, after) = self.input.split_at(self.pointer); self.remaining = after; } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/env/src/variable_parser.rs000066400000000000000000000116161504311601400272570ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
use std::ops::Range; use crate::EnvError; use crate::{native_int_str::NativeIntStr, string_parser::StringParser}; pub struct VariableParser<'a, 'b> { pub parser: &'b mut StringParser<'a>, } impl<'a> VariableParser<'a, '_> { fn get_current_char(&self) -> Option { self.parser.peek().ok() } fn check_variable_name_start(&self) -> Result<(), EnvError> { if let Some(c) = self.get_current_char() { if c.is_ascii_digit() { return Err(EnvError::EnvParsingOfVariableUnexpectedNumber( self.parser.get_peek_position(), c.to_string(), )); } } Ok(()) } fn skip_one(&mut self) -> Result<(), EnvError> { self.parser.consume_chunk()?; Ok(()) } fn parse_braced_variable_name( &mut self, ) -> Result<(&'a NativeIntStr, Option<&'a NativeIntStr>), EnvError> { let pos_start = self.parser.get_peek_position(); self.check_variable_name_start()?; let (varname_end, default_end); loop { match self.get_current_char() { None => { return Err(EnvError::EnvParsingOfVariableMissingClosingBrace( self.parser.get_peek_position(), )); } Some(c) if !c.is_ascii() || c.is_ascii_alphanumeric() || c == '_' => { self.skip_one()?; } Some(':') => { varname_end = self.parser.get_peek_position(); loop { match self.get_current_char() { None => { return Err( EnvError::EnvParsingOfVariableMissingClosingBraceAfterValue( self.parser.get_peek_position(), ), ); } Some('}') => { default_end = Some(self.parser.get_peek_position()); self.skip_one()?; break; } Some(_) => { self.skip_one()?; } } } break; } Some('}') => { varname_end = self.parser.get_peek_position(); default_end = None; self.skip_one()?; break; } Some(c) => { return Err(EnvError::EnvParsingOfVariableExceptedBraceOrColon( self.parser.get_peek_position(), c.to_string(), )); } } } let default_opt = if let Some(default_end) = default_end { Some(self.parser.substring(&Range { start: varname_end + 1, end: default_end, })) } else { None }; let varname = self.parser.substring(&Range { start: pos_start, end: varname_end, }); Ok((varname, default_opt)) } fn 
parse_unbraced_variable_name(&mut self) -> Result<&'a NativeIntStr, EnvError> { let pos_start = self.parser.get_peek_position(); self.check_variable_name_start()?; loop { match self.get_current_char() { None => break, Some(c) if c.is_ascii_alphanumeric() || c == '_' => { self.skip_one()?; } Some(_) => break, } } let pos_end = self.parser.get_peek_position(); if pos_end == pos_start { return Err(EnvError::EnvParsingOfMissingVariable(pos_start)); } let varname = self.parser.substring(&Range { start: pos_start, end: pos_end, }); Ok(varname) } pub fn parse_variable( &mut self, ) -> Result<(&'a NativeIntStr, Option<&'a NativeIntStr>), EnvError> { self.skip_one()?; let (name, default) = match self.get_current_char() { None => { return Err(EnvError::EnvParsingOfMissingVariable( self.parser.get_peek_position(), )); } Some('{') => { self.skip_one()?; self.parse_braced_variable_name()? } Some(_) => (self.parse_unbraced_variable_name()?, None), }; Ok((name, default)) } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/000077500000000000000000000000001504311601400234435ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/Cargo.toml000066400000000000000000000012071504311601400253730ustar00rootroot00000000000000[package] name = "uu_expand" description = "expand ~ (uutils) convert input tabs to spaces" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/expand" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/expand.rs" [dependencies] clap = { workspace = true } unicode-width = { workspace = true } uucore = { workspace = true } thiserror = { workspace = true } fluent = { workspace = true } [[bin]] name = "expand" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/LICENSE000077700000000000000000000000001504311601400263112../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/locales/000077500000000000000000000000001504311601400250655ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/locales/en-US.ftl000066400000000000000000000020431504311601400265220ustar00rootroot00000000000000expand-about = Convert tabs in each FILE to spaces, writing to standard output. With no FILE, or when FILE is -, read standard input. expand-usage = expand [OPTION]... [FILE]... # Help messages expand-help-initial = do not convert tabs after non blanks expand-help-tabs = have tabs N characters apart, not 8 or use comma separated list of explicit tab positions expand-help-no-utf8 = interpret input file as 8-bit ASCII rather than UTF-8 # Error messages expand-error-invalid-character = tab size contains invalid character(s): { $char } expand-error-specifier-not-at-start = { $specifier } specifier not at start of number: { $number } expand-error-specifier-only-allowed-with-last = { $specifier } specifier only allowed with the last value expand-error-tab-size-cannot-be-zero = tab size cannot be 0 expand-error-tab-size-too-large = tab stop is too large { $size } expand-error-tab-sizes-must-be-ascending = tab sizes must be ascending expand-error-is-directory = { $file }: Is a directory expand-error-failed-to-write-output = failed to write output coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/locales/fr-FR.ftl000066400000000000000000000024751504311601400265200ustar00rootroot00000000000000expand-about = Convertir les tabulations de chaque FICHIER en espaces, en écrivant vers la sortie standard. Sans FICHIER, ou quand FICHIER est -, lire l'entrée standard. expand-usage = expand [OPTION]... [FICHIER]... 
# Messages d'aide expand-help-initial = ne pas convertir les tabulations après les caractères non-blancs expand-help-tabs = avoir des tabulations espacées de N caractères, pas 8 ou utiliser une liste séparée par des virgules de positions de tabulation explicites expand-help-no-utf8 = interpréter le fichier d'entrée comme ASCII 8 bits plutôt que UTF-8 # Messages d'erreur expand-error-invalid-character = la taille de tabulation contient des caractères invalides : { $char } expand-error-specifier-not-at-start = le spécificateur { $specifier } n'est pas au début du nombre : { $number } expand-error-specifier-only-allowed-with-last = le spécificateur { $specifier } n'est autorisé qu'avec la dernière valeur expand-error-tab-size-cannot-be-zero = la taille de tabulation ne peut pas être 0 expand-error-tab-size-too-large = l'arrêt de tabulation est trop grand { $size } expand-error-tab-sizes-must-be-ascending = les tailles de tabulation doivent être croissantes expand-error-is-directory = { $file } : Est un répertoire expand-error-failed-to-write-output = échec de l'écriture de la sortie coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/src/000077500000000000000000000000001504311601400242325ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/src/expand.rs000066400000000000000000000420211504311601400260560ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) ctype cwidth iflag nbytes nspaces nums tspaces uflag Preprocess use clap::{Arg, ArgAction, ArgMatches, Command}; use std::ffi::OsString; use std::fs::File; use std::io::{BufRead, BufReader, BufWriter, Read, Write, stdin, stdout}; use std::num::IntErrorKind; use std::path::Path; use std::str::from_utf8; use thiserror::Error; use unicode_width::UnicodeWidthChar; use uucore::display::Quotable; use uucore::error::{FromIo, UError, UResult, set_exit_code}; use uucore::translate; use uucore::{format_usage, show_error}; pub mod options { pub static TABS: &str = "tabs"; pub static INITIAL: &str = "initial"; pub static NO_UTF8: &str = "no-utf8"; pub static FILES: &str = "FILES"; } static LONG_HELP: &str = ""; static DEFAULT_TABSTOP: usize = 8; /// The mode to use when replacing tabs beyond the last one specified in /// the `--tabs` argument. #[derive(PartialEq)] enum RemainingMode { None, Slash, Plus, } /// Decide whether the character is either a space or a comma. /// /// # Examples /// /// ```rust,ignore /// assert!(is_space_or_comma(' ')) /// assert!(is_space_or_comma(',')) /// assert!(!is_space_or_comma('a')) /// ``` fn is_space_or_comma(c: char) -> bool { c == ' ' || c == ',' } /// Decide whether the character is either a digit or a comma. fn is_digit_or_comma(c: char) -> bool { c.is_ascii_digit() || c == ',' } /// Errors that can occur when parsing a `--tabs` argument. 
#[derive(Debug, Error)] enum ParseError { #[error("{}", translate!("expand-error-invalid-character", "char" => .0.quote()))] InvalidCharacter(String), #[error("{}", translate!("expand-error-specifier-not-at-start", "specifier" => .0.quote(), "number" => .1.quote()))] SpecifierNotAtStartOfNumber(String, String), #[error("{}", translate!("expand-error-specifier-only-allowed-with-last", "specifier" => .0.quote()))] SpecifierOnlyAllowedWithLastValue(String), #[error("{}", translate!("expand-error-tab-size-cannot-be-zero"))] TabSizeCannotBeZero, #[error("{}", translate!("expand-error-tab-size-too-large", "size" => .0.quote()))] TabSizeTooLarge(String), #[error("{}", translate!("expand-error-tab-sizes-must-be-ascending"))] TabSizesMustBeAscending, } impl UError for ParseError {} /// Parse a list of tabstops from a `--tabs` argument. /// /// This function returns both the vector of numbers appearing in the /// comma- or space-separated list, and also an optional mode, specified /// by either a "/" or a "+" character appearing before the final number /// in the list. This mode defines the strategy to use for computing the /// number of spaces to use for columns beyond the end of the tab stop /// list specified here. fn tabstops_parse(s: &str) -> Result<(RemainingMode, Vec), ParseError> { // Leading commas and spaces are ignored. let s = s.trim_start_matches(is_space_or_comma); // If there were only commas and spaces in the string, just use the // default tabstops. if s.is_empty() { return Ok((RemainingMode::None, vec![DEFAULT_TABSTOP])); } let mut nums = vec![]; let mut remaining_mode = RemainingMode::None; let mut is_specifier_already_used = false; for word in s.split(is_space_or_comma) { let bytes = word.as_bytes(); for i in 0..bytes.len() { match bytes[i] { b'+' => remaining_mode = RemainingMode::Plus, b'/' => remaining_mode = RemainingMode::Slash, _ => { // Parse a number from the byte sequence. 
let s = from_utf8(&bytes[i..]).unwrap(); match s.parse::() { Ok(num) => { // Tab size must be positive. if num == 0 { return Err(ParseError::TabSizeCannotBeZero); } // Tab sizes must be ascending. if let Some(last_stop) = nums.last() { if *last_stop >= num { return Err(ParseError::TabSizesMustBeAscending); } } if is_specifier_already_used { let specifier = if remaining_mode == RemainingMode::Slash { "/".to_string() } else { "+".to_string() }; return Err(ParseError::SpecifierOnlyAllowedWithLastValue( specifier, )); } else if remaining_mode != RemainingMode::None { is_specifier_already_used = true; } // Append this tab stop to the list of all tabstops. nums.push(num); break; } Err(e) => { if *e.kind() == IntErrorKind::PosOverflow { return Err(ParseError::TabSizeTooLarge(s.to_string())); } let s = s.trim_start_matches(char::is_numeric); return if s.starts_with('/') || s.starts_with('+') { Err(ParseError::SpecifierNotAtStartOfNumber( s[0..1].to_string(), s.to_string(), )) } else { Err(ParseError::InvalidCharacter(s.to_string())) }; } } } } } } // If no numbers could be parsed (for example, if `s` were "+,+,+"), // then just use the default tabstops. if nums.is_empty() { nums = vec![DEFAULT_TABSTOP]; } if nums.len() < 2 { remaining_mode = RemainingMode::None; } Ok((remaining_mode, nums)) } struct Options { files: Vec, tabstops: Vec, tspaces: String, iflag: bool, uflag: bool, /// Strategy for expanding tabs for columns beyond those specified /// in `tabstops`. 
remaining_mode: RemainingMode, } impl Options { fn new(matches: &ArgMatches) -> Result { let (remaining_mode, tabstops) = match matches.get_many::(options::TABS) { Some(s) => tabstops_parse(&s.map(|s| s.as_str()).collect::>().join(","))?, None => (RemainingMode::None, vec![DEFAULT_TABSTOP]), }; let iflag = matches.get_flag(options::INITIAL); let uflag = !matches.get_flag(options::NO_UTF8); // avoid allocations when dumping out long sequences of spaces // by precomputing the longest string of spaces we will ever need let nspaces = tabstops .iter() .scan(0, |pr, &it| { let ret = Some(it - *pr); *pr = it; ret }) .max() .unwrap(); // length of tabstops is guaranteed >= 1 let tspaces = " ".repeat(nspaces); let files: Vec = match matches.get_many::(options::FILES) { Some(s) => s.map(|v| v.to_string()).collect(), None => vec!["-".to_owned()], }; Ok(Self { files, tabstops, tspaces, iflag, uflag, remaining_mode, }) } } /// Preprocess command line arguments and expand shortcuts. For example, "-7" is expanded to /// "--tabs=7" and "-1,3" to "--tabs=1 --tabs=3". fn expand_shortcuts(args: Vec) -> Vec { let mut processed_args = Vec::with_capacity(args.len()); for arg in args { if let Some(arg) = arg.to_str() { if arg.starts_with('-') && arg[1..].chars().all(is_digit_or_comma) { arg[1..] .split(',') .filter(|s| !s.is_empty()) .for_each(|s| processed_args.push(OsString::from(format!("--tabs={s}")))); continue; } } processed_args.push(arg); } processed_args } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(expand_shortcuts(args.collect()))?; expand(&Options::new(&matches)?) 
} pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("expand-about")) .after_help(LONG_HELP) .override_usage(format_usage(&translate!("expand-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::INITIAL) .long(options::INITIAL) .short('i') .help(translate!("expand-help-initial")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::TABS) .long(options::TABS) .short('t') .value_name("N, LIST") .action(ArgAction::Append) .help(translate!("expand-help-tabs")), ) .arg( Arg::new(options::NO_UTF8) .long(options::NO_UTF8) .short('U') .help(translate!("expand-help-no-utf8")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILES) .action(ArgAction::Append) .hide(true) .value_hint(clap::ValueHint::FilePath), ) } fn open(path: &str) -> UResult>> { let file_buf; if path == "-" { Ok(BufReader::new(Box::new(stdin()) as Box)) } else { file_buf = File::open(path).map_err_context(|| path.to_string())?; Ok(BufReader::new(Box::new(file_buf) as Box)) } } /// Compute the number of spaces to the next tabstop. /// /// `tabstops` is the sequence of tabstop locations. /// /// `col` is the index of the current cursor in the line being written. /// /// If `remaining_mode` is [`RemainingMode::Plus`], then the last entry /// in the `tabstops` slice is interpreted as a relative number of /// spaces, which this function will return for every input value of /// `col` beyond the end of the second-to-last element of `tabstops`. 
fn next_tabstop(tabstops: &[usize], col: usize, remaining_mode: &RemainingMode) -> usize { let num_tabstops = tabstops.len(); match remaining_mode { RemainingMode::Plus => match tabstops[0..num_tabstops - 1].iter().find(|&&t| t > col) { Some(t) => t - col, None => { let step_size = tabstops[num_tabstops - 1]; let last_fixed_tabstop = tabstops[num_tabstops - 2]; let characters_since_last_tabstop = col - last_fixed_tabstop; let steps_required = 1 + characters_since_last_tabstop / step_size; steps_required * step_size - characters_since_last_tabstop } }, RemainingMode::Slash => match tabstops[0..num_tabstops - 1].iter().find(|&&t| t > col) { Some(t) => t - col, None => tabstops[num_tabstops - 1] - col % tabstops[num_tabstops - 1], }, RemainingMode::None => { if num_tabstops == 1 { tabstops[0] - col % tabstops[0] } else { match tabstops.iter().find(|&&t| t > col) { Some(t) => t - col, None => 1, } } } } } #[derive(PartialEq, Eq, Debug)] enum CharType { Backspace, Tab, Other, } #[allow(clippy::cognitive_complexity)] fn expand_line( buf: &mut Vec, output: &mut BufWriter, tabstops: &[usize], options: &Options, ) -> std::io::Result<()> { use self::CharType::{Backspace, Other, Tab}; let mut col = 0; let mut byte = 0; let mut init = true; while byte < buf.len() { let (ctype, cwidth, nbytes) = if options.uflag { let nbytes = char::from(buf[byte]).len_utf8(); if byte + nbytes > buf.len() { // don't overrun buffer because of invalid UTF-8 (Other, 1, 1) } else if let Ok(t) = from_utf8(&buf[byte..byte + nbytes]) { match t.chars().next() { Some('\t') => (Tab, 0, nbytes), Some('\x08') => (Backspace, 0, nbytes), Some(c) => (Other, UnicodeWidthChar::width(c).unwrap_or(0), nbytes), None => { // no valid char at start of t, so take 1 byte (Other, 1, 1) } } } else { (Other, 1, 1) // implicit assumption: non-UTF-8 char is 1 col wide } } else { ( match buf.get(byte) { // always take exactly 1 byte in strict ASCII mode Some(0x09) => Tab, Some(0x08) => Backspace, _ => Other, }, 1, 1, ) }; 
// figure out how many columns this char takes up match ctype { Tab => { // figure out how many spaces to the next tabstop let nts = next_tabstop(tabstops, col, &options.remaining_mode); col += nts; // now dump out either spaces if we're expanding, or a literal tab if we're not if init || !options.iflag { if nts <= options.tspaces.len() { output.write_all(&options.tspaces.as_bytes()[..nts])?; } else { output.write_all(" ".repeat(nts).as_bytes())?; } } else { output.write_all(&buf[byte..byte + nbytes])?; } } _ => { col = if ctype == Other { col + cwidth } else if col > 0 { col - 1 } else { 0 }; // if we're writing anything other than a space, then we're // done with the line's leading spaces if buf[byte] != 0x20 { init = false; } output.write_all(&buf[byte..byte + nbytes])?; } } byte += nbytes; // advance the pointer } output.flush()?; buf.truncate(0); // clear the buffer Ok(()) } fn expand(options: &Options) -> UResult<()> { let mut output = BufWriter::new(stdout()); let ts = options.tabstops.as_ref(); let mut buf = Vec::new(); for file in &options.files { if Path::new(file).is_dir() { show_error!( "{}", translate!("expand-error-is-directory", "file" => file) ); set_exit_code(1); continue; } match open(file) { Ok(mut fh) => { while match fh.read_until(b'\n', &mut buf) { Ok(s) => s > 0, Err(_) => buf.is_empty(), } { expand_line(&mut buf, &mut output, ts, options) .map_err_context(|| translate!("expand-error-failed-to-write-output"))?; } } Err(e) => { show_error!("{e}"); set_exit_code(1); } } } Ok(()) } #[cfg(test)] mod tests { use crate::is_digit_or_comma; use super::RemainingMode; use super::next_tabstop; #[test] fn test_next_tabstop_remaining_mode_none() { assert_eq!(next_tabstop(&[1, 5], 0, &RemainingMode::None), 1); assert_eq!(next_tabstop(&[1, 5], 3, &RemainingMode::None), 2); assert_eq!(next_tabstop(&[1, 5], 6, &RemainingMode::None), 1); } #[test] fn test_next_tabstop_remaining_mode_plus() { assert_eq!(next_tabstop(&[1, 5], 0, &RemainingMode::Plus), 1); 
assert_eq!(next_tabstop(&[1, 5], 3, &RemainingMode::Plus), 3); assert_eq!(next_tabstop(&[1, 5], 6, &RemainingMode::Plus), 5); } #[test] fn test_next_tabstop_remaining_mode_slash() { assert_eq!(next_tabstop(&[1, 5], 0, &RemainingMode::Slash), 1); assert_eq!(next_tabstop(&[1, 5], 3, &RemainingMode::Slash), 2); assert_eq!(next_tabstop(&[1, 5], 6, &RemainingMode::Slash), 4); } #[test] fn test_is_digit_or_comma() { assert!(is_digit_or_comma('1')); assert!(is_digit_or_comma(',')); assert!(!is_digit_or_comma('a')); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expand/src/main.rs000066400000000000000000000000311504311601400255160ustar00rootroot00000000000000uucore::bin!(uu_expand); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/000077500000000000000000000000001504311601400231425ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/Cargo.toml000066400000000000000000000013311504311601400250700ustar00rootroot00000000000000[package] name = "uu_expr" description = "expr ~ (uutils) display the value of EXPRESSION" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/expr" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/expr.rs" [dependencies] clap = { workspace = true } num-bigint = { workspace = true } num-traits = { workspace = true } onig = { workspace = true } uucore = { workspace = true, features = ["i18n-collator"] } thiserror = { workspace = true } fluent = { workspace = true } [[bin]] name = "expr" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/LICENSE000077700000000000000000000000001504311601400260102../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/locales/000077500000000000000000000000001504311601400245645ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/locales/en-US.ftl000066400000000000000000000062521504311601400262270ustar00rootroot00000000000000expr-about = Print the value of EXPRESSION to standard output expr-usage = expr [EXPRESSION] expr [OPTIONS] expr-after-help = Print the value of EXPRESSION to standard output. A blank line below separates increasing precedence groups. EXPRESSION may be: - ARG1 | ARG2: ARG1 if it is neither null nor 0, otherwise ARG2 - ARG1 & ARG2: ARG1 if neither argument is null or 0, otherwise 0 - ARG1 < ARG2: ARG1 is less than ARG2 - ARG1 <= ARG2: ARG1 is less than or equal to ARG2 - ARG1 = ARG2: ARG1 is equal to ARG2 - ARG1 != ARG2: ARG1 is unequal to ARG2 - ARG1 >= ARG2: ARG1 is greater than or equal to ARG2 - ARG1 > ARG2: ARG1 is greater than ARG2 - ARG1 + ARG2: arithmetic sum of ARG1 and ARG2 - ARG1 - ARG2: arithmetic difference of ARG1 and ARG2 - ARG1 * ARG2: arithmetic product of ARG1 and ARG2 - ARG1 / ARG2: arithmetic quotient of ARG1 divided by ARG2 - ARG1 % ARG2: arithmetic remainder of ARG1 divided by ARG2 - STRING : REGEXP: anchored pattern match of REGEXP in STRING - match STRING REGEXP: same as STRING : REGEXP - substr STRING POS LENGTH: substring of STRING, POS counted from 1 - index STRING CHARS: index in STRING where any CHARS is found, or 0 - length STRING: length of STRING - + TOKEN: interpret TOKEN as a string, even if it is a keyword like match or an operator like / - ( EXPRESSION ): value of EXPRESSION Beware that many operators need to be escaped or quoted for shells. Comparisons are arithmetic if both ARGs are numbers, else lexicographical. 
Pattern matches return the string matched between \( and \) or null; if \( and \) are not used, they return the number of characters matched or 0. Exit status is 0 if EXPRESSION is neither null nor 0, 1 if EXPRESSION is null or 0, 2 if EXPRESSION is syntactically invalid, and 3 if an error occurred. Environment variables: - EXPR_DEBUG_TOKENS=1: dump expression's tokens - EXPR_DEBUG_RPN=1: dump expression represented in reverse polish notation - EXPR_DEBUG_SYA_STEP=1: dump each parser step - EXPR_DEBUG_AST=1: dump expression represented abstract syntax tree # Help messages expr-help-version = output version information and exit expr-help-help = display this help and exit # Error messages expr-error-unexpected-argument = syntax error: unexpected argument { $arg } expr-error-missing-argument = syntax error: missing argument after { $arg } expr-error-non-integer-argument = non-integer argument expr-error-missing-operand = missing operand expr-error-division-by-zero = division by zero expr-error-invalid-regex-expression = Invalid regex expression expr-error-expected-closing-brace-after = syntax error: expecting ')' after { $arg } expr-error-expected-closing-brace-instead-of = syntax error: expecting ')' instead of { $arg } expr-error-unmatched-opening-parenthesis = Unmatched ( or \( expr-error-unmatched-closing-parenthesis = Unmatched ) or \) expr-error-unmatched-opening-brace = Unmatched {"\\{"} expr-error-invalid-bracket-content = Invalid content of {"\\{\\}"} expr-error-trailing-backslash = Trailing backslash expr-error-too-big-range-quantifier-index = Regular expression too big expr-error-match-utf8 = match does not support invalid UTF-8 encoding in { $arg } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/locales/fr-FR.ftl000066400000000000000000000071131504311601400262110ustar00rootroot00000000000000expr-about = Afficher la valeur de EXPRESSION sur la sortie standard expr-usage = expr [EXPRESSION] expr [OPTIONS] expr-after-help = Afficher la valeur 
de EXPRESSION sur la sortie standard. Une ligne vide ci-dessous sépare les groupes de précédence croissante. EXPRESSION peut être : - ARG1 | ARG2: ARG1 s'il n'est ni nul ni 0, sinon ARG2 - ARG1 & ARG2: ARG1 si aucun argument n'est nul ou 0, sinon 0 - ARG1 < ARG2: ARG1 est inférieur à ARG2 - ARG1 <= ARG2: ARG1 est inférieur ou égal à ARG2 - ARG1 = ARG2: ARG1 est égal à ARG2 - ARG1 != ARG2: ARG1 est différent de ARG2 - ARG1 >= ARG2: ARG1 est supérieur ou égal à ARG2 - ARG1 > ARG2: ARG1 est supérieur à ARG2 - ARG1 + ARG2: somme arithmétique de ARG1 et ARG2 - ARG1 - ARG2: différence arithmétique de ARG1 et ARG2 - ARG1 * ARG2: produit arithmétique de ARG1 et ARG2 - ARG1 / ARG2: quotient arithmétique de ARG1 divisé par ARG2 - ARG1 % ARG2: reste arithmétique de ARG1 divisé par ARG2 - STRING : REGEXP: correspondance de motif ancré de REGEXP dans STRING - match STRING REGEXP: identique à STRING : REGEXP - substr STRING POS LENGTH: sous-chaîne de STRING, POS compté à partir de 1 - index STRING CHARS: index dans STRING où l'un des CHARS est trouvé, ou 0 - length STRING: longueur de STRING - + TOKEN: interpréter TOKEN comme une chaîne, même si c'est un mot-clé comme match ou un opérateur comme / - ( EXPRESSION ): valeur de EXPRESSION Attention : de nombreux opérateurs doivent être échappés ou mis entre guillemets pour les shells. Les comparaisons sont arithmétiques si les deux ARG sont des nombres, sinon lexicographiques. Les correspondances de motifs retournent la chaîne correspondant entre \( et \) ou null ; si \( et \) ne sont pas utilisés, elles retournent le nombre de caractères correspondants ou 0. Le statut de sortie est 0 si EXPRESSION n'est ni nulle ni 0, 1 si EXPRESSION est nulle ou 0, 2 si EXPRESSION est syntaxiquement invalide, et 3 si une erreur s'est produite. 
Variables d'environnement : - EXPR_DEBUG_TOKENS=1: afficher les jetons de l'expression - EXPR_DEBUG_RPN=1: afficher l'expression représentée en notation polonaise inverse - EXPR_DEBUG_SYA_STEP=1: afficher chaque étape de l'analyseur - EXPR_DEBUG_AST=1: afficher l'arbre de syntaxe abstraite représentant l'expression # Messages d'aide expr-help-version = afficher les informations de version et quitter expr-help-help = afficher cette aide et quitter # Messages d'erreur expr-error-unexpected-argument = erreur de syntaxe : argument inattendu { $arg } expr-error-missing-argument = erreur de syntaxe : argument manquant après { $arg } expr-error-non-integer-argument = argument non entier expr-error-missing-operand = opérande manquant expr-error-division-by-zero = division par zéro expr-error-invalid-regex-expression = Expression regex invalide expr-error-expected-closing-brace-after = erreur de syntaxe : ')' attendu après { $arg } expr-error-expected-closing-brace-instead-of = erreur de syntaxe : ')' attendu au lieu de { $arg } expr-error-unmatched-opening-parenthesis = Parenthèse ouvrante ( ou \( non appariée expr-error-unmatched-closing-parenthesis = Parenthèse fermante ) ou \) non appariée expr-error-unmatched-opening-brace = Accolade ouvrante {"\\{"} non appariée expr-error-invalid-bracket-content = Contenu invalide de {"\\{\\}"} expr-error-trailing-backslash = Barre oblique inverse en fin expr-error-too-big-range-quantifier-index = Expression régulière trop grande expr-error-match-utf8 = match ne supporte pas l'encodage UTF-8 invalide dans { $arg } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/src/000077500000000000000000000000001504311601400237315ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/src/expr.rs000066400000000000000000000105241504311601400252570ustar00rootroot00000000000000// This file is part of the uutils coreutils package. 
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::{Arg, ArgAction, Command}; use std::io::Write; use syntax_tree::{AstNode, is_truthy}; use thiserror::Error; use uucore::os_string_to_vec; use uucore::translate; use uucore::{ display::Quotable, error::{UError, UResult}, format_usage, }; mod locale_aware; mod syntax_tree; mod options { pub const VERSION: &str = "version"; pub const HELP: &str = "help"; pub const EXPRESSION: &str = "expression"; } pub type ExprResult = Result; #[derive(Error, Clone, Debug, PartialEq, Eq)] pub enum ExprError { #[error("{}", translate!("expr-error-unexpected-argument", "arg" => _0.quote()))] UnexpectedArgument(String), #[error("{}", translate!("expr-error-missing-argument", "arg" => _0.quote()))] MissingArgument(String), #[error("{}", translate!("expr-error-non-integer-argument"))] NonIntegerArgument, #[error("{}", translate!("expr-error-missing-operand"))] MissingOperand, #[error("{}", translate!("expr-error-division-by-zero"))] DivisionByZero, #[error("{}", translate!("expr-error-invalid-regex-expression"))] InvalidRegexExpression, #[error("{}", translate!("expr-error-expected-closing-brace-after", "arg" => _0.quote()))] ExpectedClosingBraceAfter(String), #[error("{}", translate!("expr-error-expected-closing-brace-instead-of", "arg" => _0.quote()))] ExpectedClosingBraceInsteadOf(String), #[error("{}", translate!("expr-error-unmatched-opening-parenthesis"))] UnmatchedOpeningParenthesis, #[error("{}", translate!("expr-error-unmatched-closing-parenthesis"))] UnmatchedClosingParenthesis, #[error("{}", translate!("expr-error-unmatched-opening-brace"))] UnmatchedOpeningBrace, #[error("{}", translate!("expr-error-invalid-bracket-content"))] InvalidBracketContent, #[error("{}", translate!("expr-error-trailing-backslash"))] TrailingBackslash, #[error("{}", translate!("expr-error-too-big-range-quantifier-index"))] TooBigRangeQuantifierIndex, #[error("{}", 
/// Integration with uucore's error machinery.
impl UError for ExprError {
    // GNU expr exits with status 2 for syntactically invalid expressions;
    // every variant of this error type maps to that code.
    fn code(&self) -> i32 {
        2
    }

    // Only a missing operand should trigger the usage/help hint.
    fn usage(&self) -> bool {
        *self == Self::MissingOperand
    }
}
// // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use std::cmp::Ordering; use uucore::{ CharByte, IntoCharByteIterator, i18n::{ UEncoding, collator::{AlternateHandling, CollatorOptions, locale_cmp, try_init_collator}, get_locale_encoding, }, }; use crate::syntax_tree::{MaybeNonUtf8Str, MaybeNonUtf8String}; /// Perform a locale-aware string comparison using the current locale's /// collator. pub(crate) fn locale_comparison(a: &MaybeNonUtf8Str, b: &MaybeNonUtf8Str) -> Ordering { // Initialize the collator let mut opts = CollatorOptions::default(); opts.alternate_handling = Some(AlternateHandling::Shifted); // This is black magic let _ = try_init_collator(opts); locale_cmp(a, b) } /// Perform an index search with an approach that differs with regard to the /// given locale. fn index_with_locale( left: &MaybeNonUtf8Str, right: &MaybeNonUtf8Str, encoding: UEncoding, ) -> usize { match encoding { UEncoding::Utf8 => { // In the UTF-8 case, we try to decode the strings on the fly. We // compare UTf-8 characters as long as the stream is valid, and // switch to byte comparison when the byte is an invalid sequence. left.iter_char_bytes() .position(|ch_h| right.iter_char_bytes().any(|ch_n| ch_n == ch_h)) .map_or(0, |idx| idx + 1) } UEncoding::Ascii => { // In the default case, we just perform byte-wise comparison on the // arrays. left.iter() .position(|ch_h| right.iter().any(|ch_n| ch_n == ch_h)) .map_or(0, |idx| idx + 1) } } } /// Perform an index search with an approach that differs with regard to the /// current locale. pub(crate) fn locale_aware_index(left: &MaybeNonUtf8Str, right: &MaybeNonUtf8Str) -> usize { index_with_locale(left, right, get_locale_encoding()) } /// Perform a string length calculation depending on the current locale. In /// UTF-8 locale, it will count valid UTF-8 chars, and fallback to counting /// bytes otherwise. In Non UTF-8 locale, directly return input byte length. 
pub(crate) fn locale_aware_length(input: &MaybeNonUtf8Str) -> usize { match get_locale_encoding() { UEncoding::Utf8 => std::str::from_utf8(input).map_or(input.len(), |s| s.chars().count()), UEncoding::Ascii => input.len(), } } fn substr_with_locale( s: MaybeNonUtf8String, pos: usize, len: usize, encoding: UEncoding, ) -> MaybeNonUtf8String { match encoding { UEncoding::Utf8 => { // Create a buffer with the heuristic that all the chars are ASCII // and are 1-byte long. let mut string = MaybeNonUtf8String::with_capacity(len); let mut buf = [0; 4]; // Iterate on char-bytes, and skip them accordingly. // For each character (or byte) in the right range, // push it to the string. for cb in s.iter_char_bytes().skip(pos).take(len) { match cb { CharByte::Char(c) => { let len = c.encode_utf8(&mut buf).len(); string.extend(&buf[..len]); } CharByte::Byte(b) => string.push(b), } } string } UEncoding::Ascii => s.into_iter().skip(pos).take(len).collect(), } } /// Given a byte sequence, a position and a length, return the corresponding /// substring depending on the current locale. pub(crate) fn locale_aware_substr( s: MaybeNonUtf8String, pos: usize, len: usize, ) -> MaybeNonUtf8String { substr_with_locale(s, pos, len, get_locale_encoding()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/src/main.rs000066400000000000000000000000271504311601400252220ustar00rootroot00000000000000uucore::bin!(uu_expr); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/expr/src/syntax_tree.rs000066400000000000000000001020051504311601400266420ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) ints paren prec multibytes use std::{cell::Cell, collections::BTreeMap}; use num_bigint::BigInt; use num_traits::ToPrimitive; use onig::{Regex, RegexOptions, Syntax}; use crate::{ ExprError, ExprResult, locale_aware::{ locale_aware_index, locale_aware_length, locale_aware_substr, locale_comparison, }, }; pub(crate) type MaybeNonUtf8String = Vec; pub(crate) type MaybeNonUtf8Str = [u8]; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BinOp { Relation(RelationOp), Numeric(NumericOp), String(StringOp), } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RelationOp { Lt, Leq, Eq, Neq, Gt, Geq, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum NumericOp { Add, Sub, Mul, Div, Mod, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum StringOp { Match, Index, And, Or, } impl BinOp { fn eval( &self, left: ExprResult, right: ExprResult, ) -> ExprResult { match self { Self::Relation(op) => op.eval(left, right), Self::Numeric(op) => op.eval(left, right), Self::String(op) => op.eval(left, right), } } } impl RelationOp { fn eval(&self, a: ExprResult, b: ExprResult) -> ExprResult { // Make sure that the given comparison validates the relational operator. 
let check_cmp = |cmp| { use RelationOp::{Eq, Geq, Gt, Leq, Lt, Neq}; use std::cmp::Ordering::{Equal, Greater, Less}; matches!( (self, cmp), (Lt | Leq | Neq, Less) | (Leq | Eq | Geq, Equal) | (Gt | Geq | Neq, Greater) ) }; let a = a?; let b = b?; let b = if let (Some(a), Some(b)) = (&a.to_bigint(), &b.to_bigint()) { check_cmp(a.cmp(b)) } else { // These comparisons should be using locale settings let a = a.eval_as_string(); let b = b.eval_as_string(); check_cmp(locale_comparison(&a, &b)) }; if b { Ok(1.into()) } else { Ok(0.into()) } } } impl NumericOp { fn eval( &self, left: ExprResult, right: ExprResult, ) -> ExprResult { let a = left?.eval_as_bigint()?; let b = right?.eval_as_bigint()?; Ok(NumOrStr::Num(match self { Self::Add => a + b, Self::Sub => a - b, Self::Mul => a * b, Self::Div => match a.checked_div(&b) { Some(x) => x, None => return Err(ExprError::DivisionByZero), }, Self::Mod => { if a.checked_div(&b).is_none() { return Err(ExprError::DivisionByZero); } a % b } })) } } impl StringOp { fn eval( &self, left: ExprResult, right: ExprResult, ) -> ExprResult { match self { Self::Or => { let left = left?; if is_truthy(&left) { return Ok(left); } let right = right?; if is_truthy(&right) { return Ok(right); } Ok(0.into()) } Self::And => { let left = left?; if !is_truthy(&left) { return Ok(0.into()); } let right = right?; if !is_truthy(&right) { return Ok(0.into()); } Ok(left) } Self::Match => { let left = String::from_utf8(left?.eval_as_string()).map_err(|u| { ExprError::UnsupportedNonUtf8Match( String::from_utf8_lossy(u.as_bytes()).into_owned(), ) })?; let right = String::from_utf8(right?.eval_as_string()).map_err(|u| { ExprError::UnsupportedNonUtf8Match( String::from_utf8_lossy(u.as_bytes()).into_owned(), ) })?; check_posix_regex_errors(&right)?; // Transpile the input pattern from BRE syntax to `onig` crate's `Syntax::grep` let mut re_string = String::with_capacity(right.len() + 1); let mut pattern_chars = right.chars().peekable(); let mut prev = '\0'; let 
mut prev_is_escaped = false; let mut is_start_of_expression = true; // All patterns are anchored so they begin with a caret (^) if pattern_chars.peek() != Some(&'^') { re_string.push('^'); } while let Some(curr) = pattern_chars.next() { let curr_is_escaped = prev == '\\' && !prev_is_escaped; let is_first_character = prev == '\0'; match curr { // Character class negation "[^a]" // Explicitly escaped caret "\^" '^' if !is_start_of_expression && !matches!(prev, '[' | '\\') => { re_string.push_str(r"\^"); } '$' if !curr_is_escaped && !is_end_of_expression(&pattern_chars) => { re_string.push_str(r"\$"); } '\\' if !curr_is_escaped && pattern_chars.peek().is_none() => { return Err(ExprError::TrailingBackslash); } '{' if curr_is_escaped => { // Handle '{' literally at the start of an expression if is_start_of_expression { if re_string.ends_with('\\') { let _ = re_string.pop(); } re_string.push(curr); } else { // Check if the following section is a valid range quantifier verify_range_quantifier(&pattern_chars)?; re_string.push(curr); // Set the lower bound of range quantifier to 0 if it is missing if pattern_chars.peek() == Some(&',') { re_string.push('0'); } } } _ => re_string.push(curr), } // Capturing group "\(abc\)" // Alternative pattern "a\|b" is_start_of_expression = curr == '\\' && is_first_character || curr_is_escaped && matches!(curr, '(' | '|') || curr == '\\' && prev_is_escaped && matches!(prev, '(' | '|'); prev_is_escaped = curr_is_escaped; prev = curr; } let re = Regex::with_options( &re_string, RegexOptions::REGEX_OPTION_SINGLELINE, Syntax::grep(), ) .map_err(|error| match error.code() { // "invalid repeat range {lower,upper}" -123 => ExprError::InvalidBracketContent, // "too big number for repeat range" -201 => ExprError::TooBigRangeQuantifierIndex, _ => ExprError::InvalidRegexExpression, })?; Ok(if re.captures_len() > 0 { re.captures(&left) .and_then(|captures| captures.at(1)) .unwrap_or("") .to_string() } else { re.find(&left) .map_or("0".to_string(), 
|(start, end)| (end - start).to_string()) } .into()) } Self::Index => { let left = left?.eval_as_string(); let right = right?.eval_as_string(); Ok(locale_aware_index(&left, &right).into()) } } } } /// Check if regex pattern character iterator is at the end of a regex expression or subexpression fn is_end_of_expression(pattern_chars: &I) -> bool where I: Iterator + Clone, { let mut pattern_chars_clone = pattern_chars.clone(); match pattern_chars_clone.next() { Some('\\') => matches!(pattern_chars_clone.next(), Some(')' | '|')), None => true, // No characters left _ => false, } } /// Check if regex pattern character iterator is at the start of a valid range quantifier. /// The iterator's start position is expected to be after the opening brace. /// Range quantifier ends to closing brace. /// /// # Examples of valid range quantifiers /// /// - `r"\{3\}"` /// - `r"\{3,\}"` /// - `r"\{,6\}"` /// - `r"\{3,6\}"` /// - `r"\{,\}"` fn verify_range_quantifier(pattern_chars: &I) -> Result<(), ExprError> where I: Iterator + Clone, { let mut pattern_chars_clone = pattern_chars.clone().peekable(); if pattern_chars_clone.peek().is_none() { return Err(ExprError::UnmatchedOpeningBrace); } // Parse the string between braces let mut quantifier = String::new(); let mut prev = '\0'; let mut curr_is_escaped = false; while let Some(curr) = pattern_chars_clone.next() { curr_is_escaped = prev == '\\' && !curr_is_escaped; if curr_is_escaped && curr == '}' { break; } if pattern_chars_clone.peek().is_none() { return Err(ExprError::UnmatchedOpeningBrace); } if prev != '\0' { quantifier.push(prev); } prev = curr; } // Check if parsed quantifier is valid let re = Regex::new(r"^([0-9]*,[0-9]*|[0-9]+)$").expect("valid regular expression"); if let Some(captures) = re.captures(&quantifier) { let matched = captures.at(0).unwrap_or_default(); match matched.split_once(',') { Some(("", "")) => Ok(()), Some((x, "") | ("", x)) if x.parse::().is_ok() => Ok(()), Some((_, "") | ("", _)) => 
Err(ExprError::TooBigRangeQuantifierIndex), Some((f, l)) => match (f.parse::(), l.parse::()) { (Ok(f), Ok(l)) if f > l => Err(ExprError::InvalidBracketContent), (Ok(_), Ok(_)) => Ok(()), _ => Err(ExprError::TooBigRangeQuantifierIndex), }, None if matched.parse::().is_ok() => Ok(()), None => Err(ExprError::TooBigRangeQuantifierIndex), } } else { Err(ExprError::InvalidBracketContent) } } /// Check for errors in a supplied regular expression /// /// GNU coreutils shows messages for invalid regular expressions /// differently from the oniguruma library used by the regex crate. /// This method attempts to do these checks manually in one pass /// through the regular expression. /// /// This method is not comprehensively checking all cases in which /// a regular expression could be invalid; any cases not caught will /// result in a [`ExprError::InvalidRegexExpression`] when passing the /// regular expression through the Oniguruma bindings. This method is /// intended to just identify a few situations for which GNU coreutils /// has specific error messages. 
fn check_posix_regex_errors(pattern: &str) -> ExprResult<()> { let mut escaped_parens: u64 = 0; let mut prev = '\0'; let mut curr_is_escaped = false; for curr in pattern.chars() { curr_is_escaped = prev == '\\' && !curr_is_escaped; match (curr_is_escaped, curr) { (true, '(') => escaped_parens += 1, (true, ')') => { escaped_parens = escaped_parens .checked_sub(1) .ok_or(ExprError::UnmatchedClosingParenthesis)?; } _ => {} } prev = curr; } match escaped_parens { 0 => Ok(()), _ => Err(ExprError::UnmatchedOpeningParenthesis), } } /// Precedence for infix binary operators const PRECEDENCE: &[&[(&MaybeNonUtf8Str, BinOp)]] = &[ &[(b"|", BinOp::String(StringOp::Or))], &[(b"&", BinOp::String(StringOp::And))], &[ (b"<", BinOp::Relation(RelationOp::Lt)), (b"<=", BinOp::Relation(RelationOp::Leq)), (b"=", BinOp::Relation(RelationOp::Eq)), (b"!=", BinOp::Relation(RelationOp::Neq)), (b">=", BinOp::Relation(RelationOp::Geq)), (b">", BinOp::Relation(RelationOp::Gt)), ], &[ (b"+", BinOp::Numeric(NumericOp::Add)), (b"-", BinOp::Numeric(NumericOp::Sub)), ], &[ (b"*", BinOp::Numeric(NumericOp::Mul)), (b"/", BinOp::Numeric(NumericOp::Div)), (b"%", BinOp::Numeric(NumericOp::Mod)), ], &[(b":", BinOp::String(StringOp::Match))], ]; #[derive(Debug, Clone, PartialEq, Eq)] pub enum NumOrStr { Num(BigInt), Str(MaybeNonUtf8String), } impl From for NumOrStr { fn from(num: usize) -> Self { Self::Num(BigInt::from(num)) } } impl From for NumOrStr { fn from(num: BigInt) -> Self { Self::Num(num) } } impl From for NumOrStr { fn from(str: String) -> Self { Self::Str(str.into()) } } impl From for NumOrStr { fn from(str: MaybeNonUtf8String) -> Self { Self::Str(str) } } impl NumOrStr { pub fn to_bigint(&self) -> Option { match self { Self::Num(num) => Some(num.clone()), Self::Str(str) => std::str::from_utf8(str).ok()?.parse::().ok(), } } pub fn eval_as_bigint(self) -> ExprResult { match self { Self::Num(num) => Ok(num), Self::Str(str) => String::from_utf8(str) .map_err(|_| ExprError::NonIntegerArgument)? 
.parse::() .map_err(|_| ExprError::NonIntegerArgument), } } pub fn eval_as_string(self) -> MaybeNonUtf8String { match self { Self::Num(num) => num.to_string().into(), Self::Str(str) => str, } } } #[derive(Debug, Clone)] pub struct AstNode { id: u32, inner: AstNodeInner, } // We derive Eq and PartialEq only for tests because we want to ignore the id field. #[derive(Debug, Clone)] #[cfg_attr(test, derive(Eq, PartialEq))] pub enum AstNodeInner { Evaluated { value: NumOrStr, }, Leaf { value: MaybeNonUtf8String, }, BinOp { op_type: BinOp, left: Box, right: Box, }, Substr { string: Box, pos: Box, length: Box, }, Length { string: Box, }, } impl AstNode { pub fn parse(input: &[impl AsRef]) -> ExprResult { Parser::new(input).parse() } pub fn evaluated(self) -> ExprResult { Ok(Self { id: get_next_id(), inner: AstNodeInner::Evaluated { value: self.eval()?, }, }) } pub fn eval(&self) -> ExprResult { // This function implements a recursive tree-walking algorithm, but uses an explicit // stack approach instead of native recursion to avoid potential stack overflow // on deeply nested expressions. let mut stack = vec![self]; let mut result_stack = BTreeMap::new(); while let Some(node) = stack.pop() { match &node.inner { AstNodeInner::Evaluated { value, .. } => { result_stack.insert(node.id, Ok(value.clone())); } AstNodeInner::Leaf { value, .. 
} => { result_stack.insert(node.id, Ok(value.to_owned().into())); } AstNodeInner::BinOp { op_type, left, right, } => { let (Some(right), Some(left)) = ( result_stack.remove(&right.id), result_stack.remove(&left.id), ) else { stack.push(node); stack.push(right); stack.push(left); continue; }; let result = op_type.eval(left, right); result_stack.insert(node.id, result); } AstNodeInner::Substr { string, pos, length, } => { let (Some(string), Some(pos), Some(length)) = ( result_stack.remove(&string.id), result_stack.remove(&pos.id), result_stack.remove(&length.id), ) else { stack.push(node); stack.push(string); stack.push(pos); stack.push(length); continue; }; let string: MaybeNonUtf8String = string?.eval_as_string(); // The GNU docs say: // // > If either position or length is negative, zero, or // > non-numeric, returns the null string. // // So we coerce errors into 0 to make that the only case we // have to care about. let pos = pos? .eval_as_bigint() .ok() .and_then(|n| n.to_usize()) .unwrap_or(0); let length = length? .eval_as_bigint() .ok() .and_then(|n| n.to_usize()) .unwrap_or(0); if let (Some(pos), Some(_)) = (pos.checked_sub(1), length.checked_sub(1)) { let result = locale_aware_substr(string, pos, length); result_stack.insert(node.id, Ok(result.into())); } else { result_stack.insert(node.id, Ok(String::new().into())); } } AstNodeInner::Length { string } => { // Push onto the stack let Some(string) = result_stack.remove(&string.id) else { stack.push(node); stack.push(string); continue; }; let length = locale_aware_length(&string?.eval_as_string()); result_stack.insert(node.id, Ok(length.into())); } } } // The final result should be the only one left on the result stack result_stack.remove(&self.id).unwrap() } } thread_local! { static NODE_ID: Cell = const { Cell::new(1) }; } /// We create unique identifiers for each node in the AST. /// This is used to transform the recursive algorithm into an iterative one. 
/// It is used to store the result of each node's evaluation in a `BtreeMap`. fn get_next_id() -> u32 { NODE_ID.with(|id| { let current = id.get(); id.set(current + 1); current }) } struct Parser<'a, S: AsRef> { input: &'a [S], index: usize, } impl<'a, S: AsRef> Parser<'a, S> { fn new(input: &'a [S]) -> Self { Self { input, index: 0 } } fn next(&mut self) -> ExprResult<&'a MaybeNonUtf8Str> { let next = self.input.get(self.index); if let Some(next) = next { self.index += 1; Ok(next.as_ref()) } else { // The indexing won't panic, because we know that the input size // is greater than zero. Err(ExprError::MissingArgument( String::from_utf8_lossy(self.input[self.index - 1].as_ref()).into_owned(), )) } } fn accept(&mut self, f: impl Fn(&MaybeNonUtf8Str) -> Option) -> Option { let next = self.input.get(self.index)?; let tok = f(next.as_ref()); if let Some(tok) = tok { self.index += 1; Some(tok) } else { None } } fn parse(&mut self) -> ExprResult { if self.input.is_empty() { return Err(ExprError::MissingOperand); } let res = self.parse_expression()?; if let Some(arg) = self.input.get(self.index) { return Err(ExprError::UnexpectedArgument( String::from_utf8_lossy(arg.as_ref()).into_owned(), )); } Ok(res) } fn parse_expression(&mut self) -> ExprResult { self.parse_precedence(0) } fn parse_op(&mut self, precedence: usize) -> Option { self.accept(|s| { for (op_string, op) in PRECEDENCE[precedence] { if s == *op_string { return Some(*op); } } None }) } fn parse_precedence(&mut self, precedence: usize) -> ExprResult { if precedence >= PRECEDENCE.len() { return self.parse_simple_expression(); } let mut left = self.parse_precedence(precedence + 1)?; while let Some(op) = self.parse_op(precedence) { let right = self.parse_precedence(precedence + 1)?; left = AstNode { id: get_next_id(), inner: AstNodeInner::BinOp { op_type: op, left: Box::new(left), right: Box::new(right), }, }; } Ok(left) } fn parse_simple_expression(&mut self) -> ExprResult { let first = self.next()?; let inner = 
match first { b"match" => { let left = self.parse_simple_expression()?; let right = self.parse_simple_expression()?; AstNodeInner::BinOp { op_type: BinOp::String(StringOp::Match), left: Box::new(left), right: Box::new(right), } } b"substr" => { let string = self.parse_simple_expression()?; let pos = self.parse_simple_expression()?; let length = self.parse_simple_expression()?; AstNodeInner::Substr { string: Box::new(string), pos: Box::new(pos), length: Box::new(length), } } b"index" => { let left = self.parse_simple_expression()?; let right = self.parse_simple_expression()?; AstNodeInner::BinOp { op_type: BinOp::String(StringOp::Index), left: Box::new(left), right: Box::new(right), } } b"length" => { let string = self.parse_simple_expression()?; AstNodeInner::Length { string: Box::new(string), } } b"+" => AstNodeInner::Leaf { value: self.next()?.into(), }, b"(" => { // Evaluate the node just after parsing to we detect arithmetic // errors before checking for the closing parenthesis. let s = self.parse_expression()?.evaluated()?; match self.next() { Ok(b")") => {} // Since we have parsed at least a '(', there will be a token // at `self.index - 1`. So this indexing won't panic. Ok(_) => { return Err(ExprError::ExpectedClosingBraceInsteadOf( String::from_utf8_lossy(self.input[self.index - 1].as_ref()).into(), )); } Err(ExprError::MissingArgument(_)) => { return Err(ExprError::ExpectedClosingBraceAfter( String::from_utf8_lossy(self.input[self.index - 1].as_ref()).into(), )); } Err(e) => return Err(e), } s.inner } s => AstNodeInner::Leaf { value: s.into() }, }; Ok(AstNode { id: get_next_id(), inner, }) } } /// Determine whether `expr` should evaluate the string as "truthy" /// /// Truthy strings are either empty or match the regex "-?0+". 
pub fn is_truthy(s: &NumOrStr) -> bool { match s { NumOrStr::Num(num) => num != &BigInt::from(0), NumOrStr::Str(str) => { // Edge case: `-` followed by nothing is truthy if str == b"-" { return true; } let mut bytes = str.iter().copied(); // Empty string is falsy let Some(first) = bytes.next() else { return false; }; let is_zero = (first == b'-' || first == b'0') && bytes.all(|b| b == b'0'); !is_zero } } } #[cfg(test)] mod test { use crate::ExprError; use crate::syntax_tree::verify_range_quantifier; use super::{ AstNode, AstNodeInner, BinOp, NumericOp, RelationOp, StringOp, check_posix_regex_errors, get_next_id, }; impl PartialEq for AstNode { fn eq(&self, other: &Self) -> bool { self.inner == other.inner } } impl Eq for AstNode {} impl From<&str> for AstNode { fn from(value: &str) -> Self { Self { id: get_next_id(), inner: AstNodeInner::Leaf { value: value.into(), }, } } } fn op(op_type: BinOp, left: impl Into, right: impl Into) -> AstNode { AstNode { id: get_next_id(), inner: AstNodeInner::BinOp { op_type, left: Box::new(left.into()), right: Box::new(right.into()), }, } } fn length(string: impl Into) -> AstNode { AstNode { id: get_next_id(), inner: AstNodeInner::Length { string: Box::new(string.into()), }, } } fn substr( string: impl Into, pos: impl Into, length: impl Into, ) -> AstNode { AstNode { id: get_next_id(), inner: AstNodeInner::Substr { string: Box::new(string.into()), pos: Box::new(pos.into()), length: Box::new(length.into()), }, } } #[test] fn infix_operators() { let cases = [ ("|", BinOp::String(StringOp::Or)), ("&", BinOp::String(StringOp::And)), ("<", BinOp::Relation(RelationOp::Lt)), ("<=", BinOp::Relation(RelationOp::Leq)), ("=", BinOp::Relation(RelationOp::Eq)), ("!=", BinOp::Relation(RelationOp::Neq)), (">=", BinOp::Relation(RelationOp::Geq)), (">", BinOp::Relation(RelationOp::Gt)), ("+", BinOp::Numeric(NumericOp::Add)), ("-", BinOp::Numeric(NumericOp::Sub)), ("*", BinOp::Numeric(NumericOp::Mul)), ("/", BinOp::Numeric(NumericOp::Div)), ("%", 
BinOp::Numeric(NumericOp::Mod)), (":", BinOp::String(StringOp::Match)), ]; for (string, value) in cases { assert_eq!(AstNode::parse(&["1", string, "2"]), Ok(op(value, "1", "2"))); } } #[test] fn other_operators() { assert_eq!( AstNode::parse(&["match", "1", "2"]), Ok(op(BinOp::String(StringOp::Match), "1", "2")), ); assert_eq!( AstNode::parse(&["index", "1", "2"]), Ok(op(BinOp::String(StringOp::Index), "1", "2")), ); assert_eq!(AstNode::parse(&["length", "1"]), Ok(length("1"))); assert_eq!( AstNode::parse(&["substr", "1", "2", "3"]), Ok(substr("1", "2", "3")), ); } #[test] fn precedence() { assert_eq!( AstNode::parse(&["1", "+", "2", "*", "3"]), Ok(op( BinOp::Numeric(NumericOp::Add), "1", op(BinOp::Numeric(NumericOp::Mul), "2", "3") )) ); assert_eq!( AstNode::parse(&["(", "1", "+", "2", ")", "*", "3"]), Ok(op( BinOp::Numeric(NumericOp::Mul), op(BinOp::Numeric(NumericOp::Add), "1", "2") .evaluated() .unwrap(), "3" )) ); assert_eq!( AstNode::parse(&["1", "*", "2", "+", "3"]), Ok(op( BinOp::Numeric(NumericOp::Add), op(BinOp::Numeric(NumericOp::Mul), "1", "2"), "3" )), ); } #[test] fn missing_closing_parenthesis() { assert_eq!( AstNode::parse(&["(", "42"]), Err(ExprError::ExpectedClosingBraceAfter("42".to_string())) ); assert_eq!( AstNode::parse(&["(", "42", "a"]), Err(ExprError::ExpectedClosingBraceInsteadOf("a".to_string())) ); } #[test] fn empty_substitution() { // causes a panic in 0.0.25 let result = AstNode::parse(&["a", ":", r"\(b\)*"]) .unwrap() .eval() .unwrap(); assert_eq!(result.eval_as_string(), b""); } #[test] fn starting_stars_become_escaped() { let result = AstNode::parse(&["cats", ":", r"*cats"]) .unwrap() .eval() .unwrap(); assert_eq!(result.eval_as_string(), b"0"); let result = AstNode::parse(&["*cats", ":", r"*cats"]) .unwrap() .eval() .unwrap(); assert_eq!(result.eval_as_string(), b"5"); } #[test] fn only_match_in_beginning() { let result = AstNode::parse(&["budget", ":", r"get"]) .unwrap() .eval() .unwrap(); assert_eq!(result.eval_as_string(), 
b"0"); } #[test] fn check_regex_valid() { assert!(check_posix_regex_errors(r"(a+b) \(a* b\)").is_ok()); } #[test] fn check_regex_simple_repeating_pattern() { assert!(check_posix_regex_errors(r"\(a+b\)\{4\}").is_ok()); } #[test] fn check_regex_missing_closing() { assert_eq!( check_posix_regex_errors(r"\(abc"), Err(ExprError::UnmatchedOpeningParenthesis) ); } #[test] fn check_regex_missing_opening() { assert_eq!( check_posix_regex_errors(r"abc\)"), Err(ExprError::UnmatchedClosingParenthesis) ); } #[test] fn test_is_valid_range_quantifier() { assert!(verify_range_quantifier(&"3\\}".chars()).is_ok()); assert!(verify_range_quantifier(&"3,\\}".chars()).is_ok()); assert!(verify_range_quantifier(&",6\\}".chars()).is_ok()); assert!(verify_range_quantifier(&"3,6\\}".chars()).is_ok()); assert!(verify_range_quantifier(&",\\}".chars()).is_ok()); assert!(verify_range_quantifier(&"32767\\}anything".chars()).is_ok()); assert_eq!( verify_range_quantifier(&"\\{3,6\\}".chars()), Err(ExprError::InvalidBracketContent) ); assert_eq!( verify_range_quantifier(&"\\}".chars()), Err(ExprError::InvalidBracketContent) ); assert_eq!( verify_range_quantifier(&"".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&"3".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&"3,".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&",6".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&"3,6".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&",".chars()), Err(ExprError::UnmatchedOpeningBrace) ); assert_eq!( verify_range_quantifier(&"32768\\}".chars()), Err(ExprError::TooBigRangeQuantifierIndex) ); } } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/000077500000000000000000000000001504311601400234425ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/BENCHMARKING.md000066400000000000000000000120441504311601400256150ustar00rootroot00000000000000# Benchmarking `factor` The benchmarks for `factor` are located under `tests/benches/factor` and can be invoked with `cargo bench` in that directory. They are located outside the `uu_factor` crate, as they do not comply with the project's minimum supported Rust version, *i.e.* may require a newer version of `rustc`. ## Microbenchmarking deterministic functions We currently use [`criterion`] to benchmark deterministic functions, such as `gcd` and `table::factor`. However, microbenchmarks are by nature unstable: not only are they specific to the hardware, operating system version, etc., but they are noisy and affected by other tasks on the system (browser, compile jobs, etc.), which can cause `criterion` to report spurious performance improvements and regressions. This can be mitigated by getting as close to [idealized conditions][lemire] as possible: - minimize the amount of computation and I/O running concurrently to the benchmark, *i.e.* close your browser and IM clients, don't compile at the same time, etc. ; - ensure the CPU's [frequency stays constant] during the benchmark ; - [isolate a **physical** core], set it to `nohz_full`, and pin the benchmark to it, so it won't be preempted in the middle of a measurement ; - disable ASLR by running `setarch -R cargo bench`, so we can compare results across multiple executions. [`criterion`]: https://bheisler.github.io/criterion.rs/book/index.html [lemire]: https://lemire.me/blog/2018/01/16/microbenchmarking-calls-for-idealized-conditions/ [isolate a **physical** core]: https://pyperf.readthedocs.io/en/latest/system.html#isolate-cpus-on-linux [frequency stays constant]: ... 
### Guidance for designing microbenchmarks *Note:* this guidance is specific to `factor` and takes its application domain into account; do not expect it to generalize to other projects. It is based on Daniel Lemire's [*Microbenchmarking calls for idealized conditions*][lemire], which I recommend reading if you want to add benchmarks to `factor`. 1. Select a small, self-contained, deterministic component (`gcd` and `table::factor` are good examples): - no I/O or access to external data structures ; - no call into other components ; - behavior is deterministic: no RNG, no concurrency, ... ; - the test's body is *fast* (~100ns for `gcd`, ~10µs for `factor::table`), so each sample takes a very short time, minimizing variability and maximizing the numbers of samples we can take in a given time. 1. Benchmarks are immutable (once merged in `uutils`) Modifying a benchmark means previously-collected values cannot meaningfully be compared, silently giving nonsensical results. If you must modify an existing benchmark, rename it. 1. Test common cases We are interested in overall performance, rather than specific edge-cases; use **reproducibly-randomized inputs**, sampling from either all possible input values or some subset of interest. 1. Use [`criterion`], `criterion::black_box`, ... `criterion` isn't perfect, but it is also much better than ad-hoc solutions in each benchmark. ## Wishlist ### Configurable statistical estimators `criterion` always uses the arithmetic average as estimator; in microbenchmarks, where the code under test is fully deterministic and the measurements are subject to additive, positive noise, [the minimum is more appropriate][lemire]. ### CI & reproducible performance testing Measuring performance on real hardware is important, as it relates directly to what users of `factor` experience; however, such measurements are subject to the constraints of the real-world, and aren't perfectly reproducible. 
Moreover, the mitigation for it (described above) isn't achievable in virtualized, multi-tenant environments such as CI. Instead, we could run the microbenchmarks in a simulated CPU with [`cachegrind`], measure execution “time†in that model (in CI), and use it to detect and report performance improvements and regressions. [`iai`] is an implementation of this idea for Rust. [`cachegrind`]: https://www.valgrind.org/docs/manual/cg-manual.html [`iai`]: https://bheisler.github.io/criterion.rs/book/iai/iai.html ### Comparing randomized implementations across multiple inputs `factor` is a challenging target for system benchmarks as it combines two characteristics: 1. integer factoring algorithms are randomized, with large variance in execution time ; 1. various inputs also have large differences in factoring time, that corresponds to no natural, linear ordering of the inputs. If (1) was untrue (i.e. if execution time wasn't random), we could faithfully compare 2 implementations (2 successive versions, or `uutils` and GNU) using a scatter plot, where each axis corresponds to the perf. of one implementation. Similarly, without (2) we could plot numbers on the X axis and their factoring time on the Y axis, using multiple lines for various quantiles. The large differences in factoring times for successive numbers, mean that such a plot would be unreadable. 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/Cargo.toml000066400000000000000000000014351504311601400253750ustar00rootroot00000000000000[package] name = "uu_factor" description = "factor ~ (uutils) display the prime factors of each NUMBER" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [build-dependencies] num-traits = { workspace = true } # used in src/numerics.rs, which is included by build.rs [dependencies] clap = { workspace = true } num-traits = { workspace = true } uucore = { workspace = true } num-bigint = { workspace = true } num-prime = { workspace = true } fluent = { workspace = true } [[bin]] name = "factor" path = "src/main.rs" [lib] path = "src/factor.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/LICENSE000077700000000000000000000000001504311601400263102../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/locales/000077500000000000000000000000001504311601400250645ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/locales/en-US.ftl000066400000000000000000000007351504311601400265270ustar00rootroot00000000000000factor-about = Print the prime factors of the given NUMBER(s). If none are specified, read from standard input. factor-usage = factor [OPTION]... [NUMBER]... # Help messages factor-help-exponents = Print factors in the form p^e factor-help-help = Print help information. # Error messages factor-error-factorization-incomplete = Factorization incomplete. Remainders exists. 
factor-error-write-error = write error factor-error-reading-input = error reading input: { $error } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/locales/fr-FR.ftl000066400000000000000000000010411504311601400265030ustar00rootroot00000000000000factor-about = Afficher les facteurs premiers du/des NOMBRE(s) donné(s). Si aucun n'est spécifié, lire depuis l'entrée standard. factor-usage = factor [OPTION]... [NOMBRE]... # Messages d'aide factor-help-exponents = Afficher les facteurs sous la forme p^e factor-help-help = Afficher les informations d'aide. # Messages d'erreur factor-error-factorization-incomplete = Factorisation incomplète. Des restes existent. factor-error-write-error = erreur d'écriture factor-error-reading-input = erreur de lecture de l'entrée : { $error } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/src/000077500000000000000000000000001504311601400242315ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/src/factor.rs000066400000000000000000000103751504311601400260630ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore funcs use std::collections::BTreeMap; use std::io::BufRead; use std::io::{self, Write, stdin, stdout}; use clap::{Arg, ArgAction, Command}; use num_bigint::BigUint; use num_traits::FromPrimitive; use uucore::display::Quotable; use uucore::error::{FromIo, UResult, USimpleError, set_exit_code}; use uucore::translate; use uucore::{format_usage, show_error, show_warning}; mod options { pub static EXPONENTS: &str = "exponents"; pub static HELP: &str = "help"; pub static NUMBER: &str = "NUMBER"; } fn print_factors_str( num_str: &str, w: &mut io::BufWriter, print_exponents: bool, ) -> UResult<()> { let rx = num_str.trim().parse::(); let Ok(x) = rx else { // return Ok(). 
it's non-fatal and we should try the next number. show_warning!("{}: {}", num_str.maybe_quote(), rx.unwrap_err()); set_exit_code(1); return Ok(()); }; let (factorization, remaining) = if x > BigUint::from_u32(1).unwrap() { num_prime::nt_funcs::factors(x.clone(), None) } else { (BTreeMap::new(), None) }; if let Some(_remaining) = remaining { return Err(USimpleError::new( 1, translate!("factor-error-factorization-incomplete"), )); } write_result(w, &x, factorization, print_exponents) .map_err_context(|| translate!("factor-error-write-error"))?; Ok(()) } fn write_result( w: &mut io::BufWriter, x: &BigUint, factorization: BTreeMap, print_exponents: bool, ) -> io::Result<()> { write!(w, "{x}:")?; for (factor, n) in factorization { if print_exponents { if n > 1 { write!(w, " {factor}^{n}")?; } else { write!(w, " {factor}")?; } } else { w.write_all(format!(" {factor}").repeat(n).as_bytes())?; } } writeln!(w)?; w.flush() } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; // If matches find --exponents flag than variable print_exponents is true and p^e output format will be used. let print_exponents = matches.get_flag(options::EXPONENTS); let stdout = stdout(); // We use a smaller buffer here to pass a gnu test. 4KiB appears to be the default pipe size for bash. 
let mut w = io::BufWriter::with_capacity(4 * 1024, stdout.lock()); if let Some(values) = matches.get_many::(options::NUMBER) { for number in values { print_factors_str(number, &mut w, print_exponents)?; } } else { let stdin = stdin(); let lines = stdin.lock().lines(); for line in lines { match line { Ok(line) => { for number in line.split_whitespace() { print_factors_str(number, &mut w, print_exponents)?; } } Err(e) => { set_exit_code(1); show_error!("{}", translate!("factor-error-reading-input", "error" => e)); return Ok(()); } } } } if let Err(e) = w.flush() { show_error!("{e}"); } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("factor-about")) .override_usage(format_usage(&translate!("factor-usage"))) .infer_long_args(true) .disable_help_flag(true) .args_override_self(true) .arg(Arg::new(options::NUMBER).action(ArgAction::Append)) .arg( Arg::new(options::EXPONENTS) .short('h') .long(options::EXPONENTS) .help(translate!("factor-help-exponents")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::HELP) .long(options::HELP) .help(translate!("factor-help-help")) .action(ArgAction::Help), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/factor/src/main.rs000066400000000000000000000000311504311601400255150ustar00rootroot00000000000000uucore::bin!(uu_factor); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/000077500000000000000000000000001504311601400232565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/Cargo.toml000066400000000000000000000010641504311601400252070ustar00rootroot00000000000000[package] name = "uu_false" description = "false ~ (uutils) do nothing and fail" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/false" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = 
true readme.workspace = true [lints] workspace = true [lib] path = "src/false.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "false" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/LICENSE000077700000000000000000000000001504311601400261242../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/locales/000077500000000000000000000000001504311601400247005ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/locales/en-US.ftl000066400000000000000000000005721504311601400263420ustar00rootroot00000000000000false-about = Returns false, an unsuccessful exit status. Immediately returns with the exit status 1. When invoked with one of the recognized options it will try to write the help or version text. Any IO error during this operation is diagnosed, yet the program will also return 1. false-help-text = Print help information false-version-text = Print version information coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/locales/fr-FR.ftl000066400000000000000000000006531504311601400263270ustar00rootroot00000000000000false-about = Renvoie false, un code de sortie indiquant un échec. Retourne immédiatement avec le code de sortie 1. Lorsqu'il est invoqué avec l'une des options reconnues, il tente d'afficher l'aide ou la version. Toute erreur d'entrée/sortie pendant cette opération est signalée, mais le programme retourne également 1. 
false-help-text = Afficher l'aide false-version-text = Afficher les informations de version coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/src/000077500000000000000000000000001504311601400240455ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/src/false.rs000066400000000000000000000043331504311601400255100ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::{Arg, ArgAction, Command}; use std::{ffi::OsString, io::Write}; use uucore::error::{UResult, set_exit_code}; use uucore::translate; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let mut command = uu_app(); // Mirror GNU options, always return `1`. In particular even the 'successful' cases of no-op, // and the interrupted display of help and version should return `1`. Also, we return Ok in all // paths to avoid the allocation of an error object, an operation that could, in theory, fail // and unwind through the standard library allocation handling machinery. set_exit_code(1); let args: Vec = args.collect(); if args.len() > 2 { return Ok(()); } if let Err(e) = command.try_get_matches_from_mut(args) { let error = match e.kind() { clap::error::ErrorKind::DisplayHelp => command.print_help(), clap::error::ErrorKind::DisplayVersion => { write!(std::io::stdout(), "{}", command.render_version()) } _ => Ok(()), }; // Try to display this error. if let Err(print_fail) = error { // Completely ignore any error here, no more failover and we will fail in any case. let _ = writeln!(std::io::stderr(), "{}: {print_fail}", uucore::util_name()); } } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("false-about")) // We provide our own help and version options, to ensure maximum compatibility with GNU. 
.disable_help_flag(true) .disable_version_flag(true) .arg( Arg::new("help") .long("help") .help(translate!("false-help-text")) .action(ArgAction::Help), ) .arg( Arg::new("version") .long("version") .help(translate!("false-version-text")) .action(ArgAction::Version), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/false/src/main.rs000066400000000000000000000000301504311601400253300ustar00rootroot00000000000000uucore::bin!(uu_false); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/000077500000000000000000000000001504311601400227525ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/Cargo.toml000066400000000000000000000011741504311601400247050ustar00rootroot00000000000000[package] name = "uu_fmt" description = "fmt ~ (uutils) reformat each paragraph of input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/fmt" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/fmt.rs" [dependencies] clap = { workspace = true } unicode-width = { workspace = true } uucore = { workspace = true } thiserror = { workspace = true } fluent = { workspace = true } [[bin]] name = "fmt" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/LICENSE000077700000000000000000000000001504311601400256202../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/locales/000077500000000000000000000000001504311601400243745ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/locales/en-US.ftl000066400000000000000000000053411504311601400260350ustar00rootroot00000000000000fmt-about = Reformat paragraphs from input (or standard input) to stdout. fmt-usage = [OPTION]... [FILE]... 
# Help messages fmt-crown-margin-help = First and second line of paragraph may have different indentations, in which case the first line's indentation is preserved, and each subsequent line's indentation matches the second line. fmt-tagged-paragraph-help = Like -c, except that the first and second line of a paragraph *must* have different indentation or they are treated as separate paragraphs. fmt-preserve-headers-help = Attempt to detect and preserve mail headers in the input. Be careful when combining this flag with -p. fmt-split-only-help = Split lines only, do not reflow. fmt-uniform-spacing-help = Insert exactly one space between words, and two between sentences. Sentence breaks in the input are detected as [?!.] followed by two spaces or a newline; other punctuation is not interpreted as a sentence break. fmt-prefix-help = Reformat only lines beginning with PREFIX, reattaching PREFIX to reformatted lines. Unless -x is specified, leading whitespace will be ignored when matching PREFIX. fmt-skip-prefix-help = Do not reformat lines beginning with PSKIP. Unless -X is specified, leading whitespace will be ignored when matching PSKIP fmt-exact-prefix-help = PREFIX must match at the beginning of the line with no preceding whitespace. fmt-exact-skip-prefix-help = PSKIP must match at the beginning of the line with no preceding whitespace. fmt-width-help = Fill output lines up to a maximum of WIDTH columns, default 75. This can be specified as a negative number in the first argument. fmt-goal-help = Goal width, default of 93% of WIDTH. Must be less than or equal to WIDTH. fmt-quick-help = Break lines more quickly at the expense of a potentially more ragged appearance. fmt-tab-width-help = Treat tabs as TABWIDTH spaces for determining line length, default 8. Note that this is used only for calculating line lengths; tabs are preserved in the output. 
# Error messages fmt-error-invalid-goal = invalid goal: {$goal} fmt-error-goal-greater-than-width = GOAL cannot be greater than WIDTH. fmt-error-invalid-width = invalid width: {$width} fmt-error-width-out-of-range = invalid width: '{$width}': Numerical result out of range fmt-error-invalid-tabwidth = Invalid TABWIDTH specification: {$tabwidth} fmt-error-first-option-width = invalid option -- {$option}; -WIDTH is recognized only when it is the first option; use -w N instead Try 'fmt --help' for more information. fmt-error-read = read error fmt-error-invalid-width-malformed = invalid width: {$width} fmt-error-cannot-open-for-reading = cannot open {$file} for reading fmt-error-cannot-get-metadata = cannot get metadata for {$file} fmt-error-failed-to-write-output = failed to write output coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/locales/fr-FR.ftl000066400000000000000000000063251504311601400260250ustar00rootroot00000000000000fmt-about = Reformate les paragraphes depuis l'entrée (ou l'entrée standard) vers la sortie standard. fmt-usage = [OPTION]... [FICHIER]... # Messages d'aide fmt-crown-margin-help = La première et la deuxième ligne d'un paragraphe peuvent avoir des indentations différentes, auquel cas l'indentation de la première ligne est préservée, et chaque ligne suivante correspond à l'indentation de la deuxième ligne. fmt-tagged-paragraph-help = Comme -c, sauf que la première et la deuxième ligne d'un paragraphe *doivent* avoir des indentations différentes ou elles sont traitées comme des paragraphes séparés. fmt-preserve-headers-help = Tente de détecter et préserver les en-têtes de courrier dans l'entrée. Attention en combinant ce drapeau avec -p. fmt-split-only-help = Divise les lignes seulement, ne les reformate pas. fmt-uniform-spacing-help = Insère exactement un espace entre les mots, et deux entre les phrases. Les fins de phrase dans l'entrée sont détectées comme [?!.] 
suivies de deux espaces ou d'une nouvelle ligne ; les autres ponctuations ne sont pas interprétées comme des fins de phrase. fmt-prefix-help = Reformate seulement les lignes commençant par PRÉFIXE, en rattachant PRÉFIXE aux lignes reformatées. À moins que -x soit spécifié, les espaces de début seront ignorés lors de la correspondance avec PRÉFIXE. fmt-skip-prefix-help = Ne reformate pas les lignes commençant par PSKIP. À moins que -X soit spécifié, les espaces de début seront ignorés lors de la correspondance avec PSKIP fmt-exact-prefix-help = PRÉFIXE doit correspondre au début de la ligne sans espace précédent. fmt-exact-skip-prefix-help = PSKIP doit correspondre au début de la ligne sans espace précédent. fmt-width-help = Remplit les lignes de sortie jusqu'à un maximum de WIDTH colonnes, par défaut 75. Cela peut être spécifié comme un nombre négatif dans le premier argument. fmt-goal-help = Largeur objectif, par défaut 93% de WIDTH. Doit être inférieur ou égal à WIDTH. fmt-quick-help = Divise les lignes plus rapidement au détriment d'un aspect potentiellement plus irrégulier. fmt-tab-width-help = Traite les tabulations comme TABWIDTH espaces pour déterminer la longueur de ligne, par défaut 8. Notez que ceci n'est utilisé que pour calculer les longueurs de ligne ; les tabulations sont préservées dans la sortie. # Messages d'erreur fmt-error-invalid-goal = objectif invalide : {$goal} fmt-error-goal-greater-than-width = GOAL ne peut pas être supérieur à WIDTH. fmt-error-invalid-width = largeur invalide : {$width} fmt-error-width-out-of-range = largeur invalide : '{$width}' : Résultat numérique hors limites fmt-error-invalid-tabwidth = Spécification TABWIDTH invalide : {$tabwidth} fmt-error-first-option-width = option invalide -- {$option} ; -WIDTH n'est reconnu que lorsqu'il est la première option ; utilisez -w N à la place Essayez 'fmt --help' pour plus d'informations. 
fmt-error-read = erreur de lecture fmt-error-invalid-width-malformed = largeur invalide : {$width} fmt-error-cannot-open-for-reading = impossible d'ouvrir {$file} en lecture fmt-error-cannot-get-metadata = impossible d'obtenir les métadonnées pour {$file} fmt-error-failed-to-write-output = échec de l'écriture de sortie coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/src/000077500000000000000000000000001504311601400235415ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/src/fmt.rs000066400000000000000000000406671504311601400247120ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) PSKIP linebreak ostream parasplit tabwidth xanti xprefix use clap::{Arg, ArgAction, ArgMatches, Command}; use std::fs::File; use std::io::{BufReader, BufWriter, Read, Stdout, Write, stdin, stdout}; use uucore::display::Quotable; use uucore::error::{FromIo, UResult, USimpleError}; use uucore::translate; use uucore::format_usage; use linebreak::break_lines; use parasplit::ParagraphStream; use thiserror::Error; mod linebreak; mod parasplit; #[derive(Debug, Error)] enum FmtError { #[error("{}", translate!("fmt-error-invalid-goal", "goal" => .0.quote()))] InvalidGoal(String), #[error("{}", translate!("fmt-error-goal-greater-than-width"))] GoalGreaterThanWidth, #[error("{}", translate!("fmt-error-invalid-width", "width" => .0.quote()))] InvalidWidth(String), #[error("{}", translate!("fmt-error-width-out-of-range", "width" => .0))] WidthOutOfRange(usize), #[error("{}", translate!("fmt-error-invalid-tabwidth", "tabwidth" => .0.quote()))] InvalidTabWidth(String), #[error("{}", translate!("fmt-error-first-option-width", "option" => .0))] FirstOptionWidth(char), #[error("{}", translate!("fmt-error-read"))] ReadError, #[error("{}", 
translate!("fmt-error-invalid-width-malformed", "width" => .0.quote()))] InvalidWidthMalformed(String), } impl From for Box { fn from(err: FmtError) -> Self { USimpleError::new(1, err.to_string()) } } const MAX_WIDTH: usize = 2500; const DEFAULT_GOAL: usize = 70; const DEFAULT_WIDTH: usize = 75; // by default, goal is 93% of width const DEFAULT_GOAL_TO_WIDTH_RATIO: usize = 93; mod options { pub const CROWN_MARGIN: &str = "crown-margin"; pub const TAGGED_PARAGRAPH: &str = "tagged-paragraph"; pub const PRESERVE_HEADERS: &str = "preserve-headers"; pub const SPLIT_ONLY: &str = "split-only"; pub const UNIFORM_SPACING: &str = "uniform-spacing"; pub const PREFIX: &str = "prefix"; pub const SKIP_PREFIX: &str = "skip-prefix"; pub const EXACT_PREFIX: &str = "exact-prefix"; pub const EXACT_SKIP_PREFIX: &str = "exact-skip-prefix"; pub const WIDTH: &str = "width"; pub const GOAL: &str = "goal"; pub const QUICK: &str = "quick"; pub const TAB_WIDTH: &str = "tab-width"; pub const FILES_OR_WIDTH: &str = "files"; } pub type FileOrStdReader = BufReader>; pub struct FmtOptions { crown: bool, tagged: bool, mail: bool, split_only: bool, prefix: Option, xprefix: bool, anti_prefix: Option, xanti_prefix: bool, uniform: bool, quick: bool, width: usize, goal: usize, tabwidth: usize, } impl FmtOptions { fn from_matches(matches: &ArgMatches) -> UResult { let mut tagged = matches.get_flag(options::TAGGED_PARAGRAPH); let mut crown = matches.get_flag(options::CROWN_MARGIN); let mail = matches.get_flag(options::PRESERVE_HEADERS); let uniform = matches.get_flag(options::UNIFORM_SPACING); let quick = matches.get_flag(options::QUICK); let split_only = matches.get_flag(options::SPLIT_ONLY); if crown { tagged = false; } if split_only { crown = false; tagged = false; } let xprefix = matches.contains_id(options::EXACT_PREFIX); let xanti_prefix = matches.contains_id(options::SKIP_PREFIX); let prefix = matches.get_one::(options::PREFIX).map(String::from); let anti_prefix = matches 
.get_one::(options::SKIP_PREFIX) .map(String::from); let width_opt = extract_width(matches)?; let goal_opt_str = matches.get_one::(options::GOAL); let goal_opt = if let Some(goal_str) = goal_opt_str { match goal_str.parse::() { Ok(goal) => Some(goal), Err(_) => { return Err(FmtError::InvalidGoal(goal_str.clone()).into()); } } } else { None }; let (width, goal) = match (width_opt, goal_opt) { (Some(w), Some(g)) => { if g > w { return Err(FmtError::GoalGreaterThanWidth.into()); } (w, g) } (Some(0), None) => { // Only allow a goal of zero if the width is set to be zero (0, 0) } (Some(w), None) => { let g = (w * DEFAULT_GOAL_TO_WIDTH_RATIO / 100).max(1); (w, g) } (None, Some(g)) => { if g > DEFAULT_WIDTH { return Err(FmtError::GoalGreaterThanWidth.into()); } let w = (g * 100 / DEFAULT_GOAL_TO_WIDTH_RATIO).max(g + 3); (w, g) } (None, None) => (DEFAULT_WIDTH, DEFAULT_GOAL), }; debug_assert!( width >= goal, "GOAL {goal} should not be greater than WIDTH {width} when given {width_opt:?} and {goal_opt:?}." ); if width > MAX_WIDTH { return Err(FmtError::WidthOutOfRange(width).into()); } let mut tabwidth = 8; if let Some(s) = matches.get_one::(options::TAB_WIDTH) { tabwidth = match s.parse::() { Ok(t) => t, Err(_) => { return Err(FmtError::InvalidTabWidth(s.clone()).into()); } }; } if tabwidth < 1 { tabwidth = 1; } Ok(Self { crown, tagged, mail, split_only, prefix, xprefix, anti_prefix, xanti_prefix, uniform, quick, width, goal, tabwidth, }) } } /// Process the content of a file and format it according to the provided options. /// /// # Arguments /// /// * `file_name` - The name of the file to process. A value of "-" represents the standard input. /// * `fmt_opts` - A reference to a `FmtOptions` struct containing the formatting options. /// * `ostream` - A mutable reference to a `BufWriter` wrapping the standard output. /// /// # Returns /// /// A `UResult<()>` indicating success or failure. 
fn process_file( file_name: &str, fmt_opts: &FmtOptions, ostream: &mut BufWriter, ) -> UResult<()> { let mut fp = BufReader::new(match file_name { "-" => Box::new(stdin()) as Box, _ => { let f = File::open(file_name).map_err_context( || translate!("fmt-error-cannot-open-for-reading", "file" => file_name.quote()), )?; if f.metadata() .map_err_context( || translate!("fmt-error-cannot-get-metadata", "file" => file_name.quote()), )? .is_dir() { return Err(FmtError::ReadError.into()); } Box::new(f) as Box } }); let p_stream = ParagraphStream::new(fmt_opts, &mut fp); for para_result in p_stream { match para_result { Err(s) => { ostream .write_all(s.as_bytes()) .map_err_context(|| translate!("fmt-error-failed-to-write-output"))?; ostream .write_all(b"\n") .map_err_context(|| translate!("fmt-error-failed-to-write-output"))?; } Ok(para) => break_lines(¶, fmt_opts, ostream) .map_err_context(|| translate!("fmt-error-failed-to-write-output"))?, } } // flush the output after each file ostream .flush() .map_err_context(|| translate!("fmt-error-failed-to-write-output"))?; Ok(()) } /// Extract the file names from the positional arguments, ignoring any negative width in the first /// position. 
/// /// # Returns /// A `UResult<()>` with the file names, or an error if one of the file names could not be parsed /// (e.g., it is given as a negative number not in the first argument and not after a -- fn extract_files(matches: &ArgMatches) -> UResult> { let in_first_pos = matches .index_of(options::FILES_OR_WIDTH) .is_some_and(|x| x == 1); let is_neg = |s: &str| s.parse::().is_ok_and(|w| w < 0); let files: UResult> = matches .get_many::(options::FILES_OR_WIDTH) .into_iter() .flatten() .enumerate() .filter_map(|(i, x)| { if is_neg(x) { if in_first_pos && i == 0 { None } else { let first_num = x .chars() .nth(1) .expect("a negative number should be at least two characters long"); Some(Err(FmtError::FirstOptionWidth(first_num).into())) } } else { Some(Ok(x.clone())) } }) .collect(); if files.as_ref().is_ok_and(|f| f.is_empty()) { Ok(vec!["-".into()]) } else { files } } fn extract_width(matches: &ArgMatches) -> UResult> { let width_opt = matches.get_one::(options::WIDTH); if let Some(width_str) = width_opt { return if let Ok(width) = width_str.parse::() { Ok(Some(width)) } else { Err(FmtError::InvalidWidth(width_str.clone()).into()) }; } if let Some(1) = matches.index_of(options::FILES_OR_WIDTH) { let width_arg = matches.get_one::(options::FILES_OR_WIDTH).unwrap(); if let Some(num) = width_arg.strip_prefix('-') { Ok(num.parse::().ok()) } else { // will be treated as a file name Ok(None) } } else { Ok(None) } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args: Vec<_> = args.collect(); // Warn the user if it looks like we're trying to pass a number in the first // argument with non-numeric characters if let Some(first_arg) = args.get(1) { let first_arg = first_arg.to_string_lossy(); let malformed_number = first_arg.starts_with('-') && first_arg.chars().nth(1).is_some_and(|c| c.is_ascii_digit()) && first_arg.chars().skip(2).any(|c| !c.is_ascii_digit()); if malformed_number { return Err(FmtError::InvalidWidthMalformed( 
first_arg.strip_prefix('-').unwrap().to_string(), ) .into()); } } let matches = uu_app().try_get_matches_from(&args)?; let files = extract_files(&matches)?; let fmt_opts = FmtOptions::from_matches(&matches)?; let mut ostream = BufWriter::new(stdout()); for file_name in &files { process_file(file_name, &fmt_opts, &mut ostream)?; } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("fmt-about")) .override_usage(format_usage(&translate!("fmt-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::CROWN_MARGIN) .short('c') .long(options::CROWN_MARGIN) .help(translate!("fmt-crown-margin-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::TAGGED_PARAGRAPH) .short('t') .long("tagged-paragraph") .help(translate!("fmt-tagged-paragraph-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRESERVE_HEADERS) .short('m') .long("preserve-headers") .help(translate!("fmt-preserve-headers-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SPLIT_ONLY) .short('s') .long("split-only") .help(translate!("fmt-split-only-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::UNIFORM_SPACING) .short('u') .long("uniform-spacing") .help(translate!("fmt-uniform-spacing-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PREFIX) .short('p') .long("prefix") .help(translate!("fmt-prefix-help")) .value_name("PREFIX"), ) .arg( Arg::new(options::SKIP_PREFIX) .short('P') .long("skip-prefix") .help(translate!("fmt-skip-prefix-help")) .value_name("PSKIP"), ) .arg( Arg::new(options::EXACT_PREFIX) .short('x') .long("exact-prefix") .help(translate!("fmt-exact-prefix-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::EXACT_SKIP_PREFIX) .short('X') .long("exact-skip-prefix") .help(translate!("fmt-exact-skip-prefix-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::WIDTH) .short('w') .long("width") .help(translate!("fmt-width-help")) 
// We must accept invalid values if they are overridden later. This is not supported by clap, so accept all strings instead. .value_name("WIDTH"), ) .arg( Arg::new(options::GOAL) .short('g') .long("goal") .help(translate!("fmt-goal-help")) // We must accept invalid values if they are overridden later. This is not supported by clap, so accept all strings instead. .value_name("GOAL"), ) .arg( Arg::new(options::QUICK) .short('q') .long("quick") .help(translate!("fmt-quick-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::TAB_WIDTH) .short('T') .long("tab-width") .help(translate!("fmt-tab-width-help")) .value_name("TABWIDTH"), ) .arg( Arg::new(options::FILES_OR_WIDTH) .action(ArgAction::Append) .value_name("FILES") .value_hint(clap::ValueHint::FilePath) .allow_negative_numbers(true), ) } #[cfg(test)] mod tests { use crate::uu_app; use crate::{extract_files, extract_width}; #[test] fn parse_negative_width() { let matches = uu_app() .try_get_matches_from(vec!["fmt", "-3", "some-file"]) .unwrap(); assert_eq!(extract_files(&matches).unwrap(), vec!["some-file"]); assert_eq!(extract_width(&matches).ok(), Some(Some(3))); } #[test] fn parse_width_as_arg() { let matches = uu_app() .try_get_matches_from(vec!["fmt", "-w3", "some-file"]) .unwrap(); assert_eq!(extract_files(&matches).unwrap(), vec!["some-file"]); assert_eq!(extract_width(&matches).ok(), Some(Some(3))); } #[test] fn parse_no_args() { let matches = uu_app().try_get_matches_from(vec!["fmt"]).unwrap(); assert_eq!(extract_files(&matches).unwrap(), vec!["-"]); assert_eq!(extract_width(&matches).ok(), Some(None)); } #[test] fn parse_just_file_name() { let matches = uu_app() .try_get_matches_from(vec!["fmt", "some-file"]) .unwrap(); assert_eq!(extract_files(&matches).unwrap(), vec!["some-file"]); assert_eq!(extract_width(&matches).ok(), Some(None)); } #[test] fn parse_with_both_widths_positional_first() { let matches = uu_app() .try_get_matches_from(vec!["fmt", "-10", "-w3", "some-file"]) .unwrap(); 
assert_eq!(extract_files(&matches).unwrap(), vec!["some-file"]); assert_eq!(extract_width(&matches).ok(), Some(Some(3))); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/src/linebreak.rs000066400000000000000000000430011504311601400260410ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) INFTY MULT accum breakwords linebreak linebreaking linebreaks linelen maxlength minlength nchars ostream overlen parasplit plass posn powf punct signum slen sstart tabwidth tlen underlen winfo wlen wordlen use std::io::{BufWriter, Stdout, Write}; use std::{cmp, mem}; use crate::FmtOptions; use crate::parasplit::{ParaWords, Paragraph, WordInfo}; struct BreakArgs<'a> { opts: &'a FmtOptions, init_len: usize, indent_str: &'a str, indent_len: usize, uniform: bool, ostream: &'a mut BufWriter, } impl BreakArgs<'_> { fn compute_width(&self, winfo: &WordInfo, posn: usize, fresh: bool) -> usize { if fresh { 0 } else { let post = winfo.after_tab; match winfo.before_tab { None => post, Some(pre) => { post + ((pre + posn) / self.opts.tabwidth + 1) * self.opts.tabwidth - posn } } } } } pub fn break_lines( para: &Paragraph, opts: &FmtOptions, ostream: &mut BufWriter, ) -> std::io::Result<()> { // indent let p_indent = ¶.indent_str; let p_indent_len = para.indent_len; // words let p_words = ParaWords::new(opts, para); let mut p_words_words = p_words.words(); // the first word will *always* appear on the first line // make sure of this here let Some(winfo) = p_words_words.next() else { return ostream.write_all(b"\n"); }; // print the init, if it exists, and get its length let p_init_len = winfo.word_nchars + if opts.crown || opts.tagged { // handle "init" portion ostream.write_all(para.init_str.as_bytes())?; para.init_len } else if !para.mail_header { // for non-(crown, tagged) that's the 
same as a normal indent ostream.write_all(p_indent.as_bytes())?; p_indent_len } else { // except that mail headers get no indent at all 0 }; // write first word after writing init ostream.write_all(winfo.word.as_bytes())?; // does this paragraph require uniform spacing? let uniform = para.mail_header || opts.uniform; let mut break_args = BreakArgs { opts, init_len: p_init_len, indent_str: p_indent, indent_len: p_indent_len, uniform, ostream, }; if opts.quick || para.mail_header { break_simple(p_words_words, &mut break_args) } else { break_knuth_plass(p_words_words, &mut break_args) } } /// `break_simple` implements a "greedy" breaking algorithm: print words until /// maxlength would be exceeded, then print a linebreak and indent and continue. fn break_simple<'a, T: Iterator>>( mut iter: T, args: &mut BreakArgs<'a>, ) -> std::io::Result<()> { iter.try_fold((args.init_len, false), |(l, prev_punct), winfo| { accum_words_simple(args, l, prev_punct, winfo) })?; args.ostream.write_all(b"\n") } fn accum_words_simple<'a>( args: &mut BreakArgs<'a>, l: usize, prev_punct: bool, winfo: &'a WordInfo<'a>, ) -> std::io::Result<(usize, bool)> { // compute the length of this word, considering how tabs will expand at this position on the line let wlen = winfo.word_nchars + args.compute_width(winfo, l, false); let slen = compute_slen( args.uniform, winfo.new_line, winfo.sentence_start, prev_punct, ); if l + wlen + slen > args.opts.width { write_newline(args.indent_str, args.ostream)?; write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?; Ok((args.indent_len + winfo.word_nchars, winfo.ends_punct)) } else { write_with_spaces(winfo.word, slen, args.ostream)?; Ok((l + wlen + slen, winfo.ends_punct)) } } /// `break_knuth_plass` implements an "optimal" breaking algorithm in the style of /// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software, /// Practice and Experience. Vol. 11, No. 11, November 1981. 
/// fn break_knuth_plass<'a, T: Clone + Iterator>>( mut iter: T, args: &mut BreakArgs<'a>, ) -> std::io::Result<()> { // run the algorithm to get the breakpoints let breakpoints = find_kp_breakpoints(iter.clone(), args); // iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold( (false, false), |(mut prev_punct, mut fresh), &(next_break, break_before)| { if fresh { write_newline(args.indent_str, args.ostream)?; } // at each breakpoint, keep emitting words until we find the word matching this breakpoint for winfo in &mut iter { let (slen, word) = slice_if_fresh( fresh, winfo.word, winfo.word_start, args.uniform, winfo.new_line, winfo.sentence_start, prev_punct, ); fresh = false; prev_punct = winfo.ends_punct; // We find identical breakpoints here by comparing addresses of the references. // This is OK because the backing vector is not mutating once we are linebreaking. if std::ptr::eq(winfo, next_break) { // OK, we found the matching word if break_before { write_newline(args.indent_str, args.ostream)?; write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?; } else { // breaking after this word, so that means "fresh" is true for the next iteration write_with_spaces(word, slen, args.ostream)?; fresh = true; } break; } write_with_spaces(word, slen, args.ostream)?; } Ok((prev_punct, fresh)) }, ); let (mut prev_punct, mut fresh) = result?; // after the last linebreak, write out the rest of the final line. 
for winfo in iter { if fresh { write_newline(args.indent_str, args.ostream)?; } let (slen, word) = slice_if_fresh( fresh, winfo.word, winfo.word_start, args.uniform, winfo.new_line, winfo.sentence_start, prev_punct, ); prev_punct = winfo.ends_punct; fresh = false; write_with_spaces(word, slen, args.ostream)?; } args.ostream.write_all(b"\n") } struct LineBreak<'a> { prev: usize, linebreak: Option<&'a WordInfo<'a>>, break_before: bool, demerits: i64, prev_rat: f32, length: usize, fresh: bool, } #[allow(clippy::cognitive_complexity)] fn find_kp_breakpoints<'a, T: Iterator>>( iter: T, args: &BreakArgs<'a>, ) -> Vec<(&'a WordInfo<'a>, bool)> { let mut iter = iter.peekable(); // set up the initial null linebreak let mut linebreaks = vec![LineBreak { prev: 0, linebreak: None, break_before: false, demerits: 0, prev_rat: 0.0, length: args.init_len, fresh: false, }]; // this vec holds the current active linebreaks; next_ holds the breaks that will be active for // the next word let mut active_breaks = vec![0]; let mut next_active_breaks = vec![]; let stretch = args.opts.width - args.opts.goal; let minlength = if args.opts.goal <= 10 { 1 } else { args.opts.goal.max(stretch + 1) - stretch }; let mut new_linebreaks = vec![]; let mut is_sentence_start = false; let mut least_demerits = 0; loop { let Some(w) = iter.next() else { break }; // if this is the last word, we don't add additional demerits for this break let (is_last_word, is_sentence_end) = match iter.peek() { None => (true, true), Some(&&WordInfo { sentence_start: st, new_line: nl, .. }) => (false, st || (nl && w.ends_punct)), }; // should we be adding extra space at the beginning of the next sentence? 
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false); let mut ld_new = i64::MAX; let mut ld_next = i64::MAX; let mut ld_idx = 0; new_linebreaks.clear(); next_active_breaks.clear(); // go through each active break, extending it and possibly adding a new active // break if we are above the minimum required length #[allow(clippy::explicit_iter_loop)] for &i in active_breaks.iter() { let active = &mut linebreaks[i]; // normalize demerits to avoid overflow, and record if this is the least active.demerits -= least_demerits; if active.demerits < ld_next { ld_next = active.demerits; ld_idx = i; } // get the new length let tlen = w.word_nchars + args.compute_width(w, active.length, active.fresh) + slen + active.length; // if tlen is longer than args.opts.width, we drop this break from the active list // otherwise, we extend the break, and possibly add a new break at this point if tlen <= args.opts.width { // this break will still be active next time next_active_breaks.push(i); // we can put this word on this line active.fresh = false; active.length = tlen; // if we're above the minlength, we can also consider breaking here if tlen >= minlength { let (new_demerits, new_ratio) = if is_last_word { // there is no penalty for the final line's length (0, 0.0) } else { compute_demerits( args.opts.goal as isize - tlen as isize, stretch, w.word_nchars, active.prev_rat, ) }; // do not even consider adding a line that has too many demerits // also, try to detect overflow by checking signum let total_demerits = new_demerits + active.demerits; if new_demerits < BAD_INFTY_SQ && total_demerits < ld_new && active.demerits.signum() <= new_demerits.signum() { ld_new = total_demerits; new_linebreaks.push(LineBreak { prev: i, linebreak: Some(w), break_before: false, demerits: total_demerits, prev_rat: new_ratio, length: args.indent_len, fresh: true, }); } } } } // if we generated any new linebreaks, add the last one to the list // the last one is always the best because 
we don't add to new_linebreaks unless // it's better than the best one so far match new_linebreaks.pop() { None => (), Some(lb) => { next_active_breaks.push(linebreaks.len()); linebreaks.push(lb); } } if next_active_breaks.is_empty() { // every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx let new_break = restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength); next_active_breaks.push(linebreaks.len()); linebreaks.push(new_break); least_demerits = 0; } else { // next time around, normalize out the demerits fields // on active linebreaks to make overflow less likely least_demerits = cmp::max(ld_next, 0); } // swap in new list of active breaks mem::swap(&mut active_breaks, &mut next_active_breaks); // If this was the last word in a sentence, the next one must be the first in the next. is_sentence_start = is_sentence_end; } // return the best path build_best_path(&linebreaks, &active_breaks) } fn build_best_path<'a>(paths: &[LineBreak<'a>], active: &[usize]) -> Vec<(&'a WordInfo<'a>, bool)> { // of the active paths, we select the one with the fewest demerits active .iter() .min_by_key(|&&a| paths[a].demerits) .map(|&(mut best_idx)| { let mut breakwords = vec![]; // now, chase the pointers back through the break list, recording // the words at which we should break loop { let next_best = &paths[best_idx]; match next_best.linebreak { None => return breakwords, Some(prev) => { breakwords.push((prev, next_best.break_before)); best_idx = next_best.prev; } } } }) .unwrap_or_default() } // "infinite" badness is more like (1+BAD_INFTY)^2 because of how demerits are computed const BAD_INFTY: i64 = 10_000_000; const BAD_INFTY_SQ: i64 = BAD_INFTY * BAD_INFTY; // badness = BAD_MULT * abs(r) ^ 3 const BAD_MULT: f32 = 200.0; // DR_MULT is multiplier for delta-R between lines const DR_MULT: f32 = 600.0; // DL_MULT is penalty multiplier for short words at end of line const DL_MULT: f32 = 10.0; fn compute_demerits(delta_len: 
isize, stretch: usize, wlen: usize, prev_rat: f32) -> (i64, f32) { // how much stretch are we using? let ratio = if delta_len == 0 { 0.0f32 } else { delta_len as f32 / stretch as f32 }; // compute badness given the stretch ratio let bad_linelen = if ratio.abs() > 1.0f32 { BAD_INFTY } else { (BAD_MULT * ratio.powi(3).abs()) as i64 }; // we penalize lines ending in really short words let bad_wordlen = if wlen >= stretch { 0 } else { (DL_MULT * ((stretch - wlen) as f32 / (stretch - 1) as f32) .powi(3) .abs()) as i64 }; // we penalize lines that have very different ratios from previous lines let bad_delta_r = (DR_MULT * ((ratio - prev_rat) / 2.0).powi(3).abs()) as i64; let demerits = i64::pow(1 + bad_linelen + bad_wordlen + bad_delta_r, 2); (demerits, ratio) } fn restart_active_breaks<'a>( args: &BreakArgs<'a>, active: &LineBreak<'a>, act_idx: usize, w: &'a WordInfo<'a>, slen: usize, min: usize, ) -> LineBreak<'a> { let (break_before, line_length) = if active.fresh { // never break before a word if that word would be the first on a line (false, args.indent_len) } else { // choose the lesser evil: breaking too early, or breaking too late let wlen = w.word_nchars + args.compute_width(w, active.length, active.fresh); let underlen = min as isize - active.length as isize; let overlen = (wlen + slen + active.length) as isize - args.opts.width as isize; if overlen > underlen { // break early, put this word on the next line (true, args.indent_len + w.word_nchars) } else { (false, args.indent_len) } }; // restart the linebreak. This will be our only active path. LineBreak { prev: act_idx, linebreak: Some(w), break_before, demerits: 0, // this is the only active break, so we can reset the demerit count prev_rat: if break_before { 1.0 } else { -1.0 }, length: line_length, fresh: !break_before, } } /// Number of spaces to add before a word, based on mode, newline, sentence start. 
fn compute_slen(uniform: bool, newline: bool, start: bool, punct: bool) -> usize { if uniform || newline { if start || (newline && punct) { 2 } else { 1 } } else { 0 } } /// If we're on a fresh line, `slen=0` and we slice off leading whitespace. /// Otherwise, compute `slen` and leave whitespace alone. fn slice_if_fresh( fresh: bool, word: &str, start: usize, uniform: bool, newline: bool, sstart: bool, punct: bool, ) -> (usize, &str) { if fresh { (0, &word[start..]) } else { (compute_slen(uniform, newline, sstart, punct), word) } } /// Write a newline and add the indent. fn write_newline(indent: &str, ostream: &mut BufWriter) -> std::io::Result<()> { ostream.write_all(b"\n")?; ostream.write_all(indent.as_bytes()) } /// Write the word, along with slen spaces. fn write_with_spaces( word: &str, slen: usize, ostream: &mut BufWriter, ) -> std::io::Result<()> { if slen == 2 { ostream.write_all(b" ")?; } else if slen == 1 { ostream.write_all(b" ")?; } ostream.write_all(word.as_bytes()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/src/main.rs000066400000000000000000000000261504311601400250310ustar00rootroot00000000000000uucore::bin!(uu_fmt); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fmt/src/parasplit.rs000066400000000000000000000535031504311601400261140ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) INFTY MULT PSKIP accum aftertab beforetab breakwords fmt's formatline linebreak linebreaking linebreaks linelen maxlength minlength nchars noformat noformatline ostream overlen parasplit plass pmatch poffset posn powf prefixindent punct signum slen sstart tabwidth tlen underlen winfo wlen wordlen wordsplits xanti xprefix use std::io::{BufRead, Lines}; use std::iter::Peekable; use std::slice::Iter; use unicode_width::UnicodeWidthChar; use crate::FileOrStdReader; use crate::FmtOptions; fn char_width(c: char) -> usize { if (c as usize) < 0xA0 { // if it is ASCII, call it exactly 1 wide (including control chars) // calling control chars' widths 1 is consistent with OpenBSD fmt 1 } else { // otherwise, get the unicode width // note that we shouldn't actually get None here because only c < 0xA0 // can return None, but for safety and future-proofing we do it this way UnicodeWidthChar::width(c).unwrap_or(1) } } /// GNU fmt has a more restrictive definition of whitespace than Unicode. /// It only considers ASCII whitespace characters (space, tab, newline, etc.) /// and excludes many Unicode whitespace characters like non-breaking spaces. fn is_fmt_whitespace(c: char) -> bool { // Only ASCII whitespace characters are considered whitespace in GNU fmt matches!(c, ' ' | '\t' | '\n' | '\r' | '\x0B' | '\x0C') } // lines with PSKIP, lacking PREFIX, or which are entirely blank are // NoFormatLines; otherwise, they are FormatLines #[derive(Debug)] pub enum Line { FormatLine(FileLine), NoFormatLine(String, bool), } impl Line { /// when we know that it's a [`Line::FormatLine`], as in the [`ParagraphStream`] iterator fn get_formatline(self) -> FileLine { match self { Self::FormatLine(fl) => fl, Self::NoFormatLine(..) 
=> panic!("Found NoFormatLine when expecting FormatLine"), } } /// when we know that it's a [`Line::NoFormatLine`], as in the [`ParagraphStream`] iterator fn get_noformatline(self) -> (String, bool) { match self { Self::NoFormatLine(s, b) => (s, b), Self::FormatLine(..) => panic!("Found FormatLine when expecting NoFormatLine"), } } } /// Each line's prefix has to be considered to know whether to merge it with /// the next line or not #[derive(Debug)] pub struct FileLine { line: String, /// The end of the indent, always the start of the text indent_end: usize, /// The end of the PREFIX's indent, that is, the spaces before the prefix prefix_indent_end: usize, /// Display length of indent taking into account tabs indent_len: usize, /// PREFIX indent length taking into account tabs prefix_len: usize, } /// Iterator that produces a stream of Lines from a file pub struct FileLines<'a> { opts: &'a FmtOptions, lines: Lines<&'a mut FileOrStdReader>, } impl FileLines<'_> { fn new<'b>(opts: &'b FmtOptions, lines: Lines<&'b mut FileOrStdReader>) -> FileLines<'b> { FileLines { opts, lines } } /// returns true if this line should be formatted fn match_prefix(&self, line: &str) -> (bool, usize) { let Some(prefix) = &self.opts.prefix else { return (true, 0); }; FileLines::match_prefix_generic(prefix, line, self.opts.xprefix) } /// returns true if this line should be formatted fn match_anti_prefix(&self, line: &str) -> bool { let Some(anti_prefix) = &self.opts.anti_prefix else { return true; }; match FileLines::match_prefix_generic(anti_prefix, line, self.opts.xanti_prefix) { (true, _) => false, (_, _) => true, } } fn match_prefix_generic(pfx: &str, line: &str, exact: bool) -> (bool, usize) { if line.starts_with(pfx) { return (true, 0); } if !exact { // we do it this way rather than byte indexing to support unicode whitespace chars for (i, char) in line.char_indices() { if line[i..].starts_with(pfx) { return (true, i); } else if !is_fmt_whitespace(char) { break; } } } (false, 0) } 
fn compute_indent(&self, string: &str, prefix_end: usize) -> (usize, usize, usize) { let mut prefix_len = 0; let mut indent_len = 0; let mut indent_end = 0; for (os, c) in string.char_indices() { if os == prefix_end { // we found the end of the prefix, so this is the printed length of the prefix here prefix_len = indent_len; } if (os >= prefix_end) && !is_fmt_whitespace(c) { // found first non-whitespace after prefix, this is indent_end indent_end = os; break; } else if c == '\t' { // compute tab length indent_len = (indent_len / self.opts.tabwidth + 1) * self.opts.tabwidth; } else { // non-tab character indent_len += char_width(c); } } (indent_end, prefix_len, indent_len) } } impl Iterator for FileLines<'_> { type Item = Line; fn next(&mut self) -> Option { let n = self.lines.next()?.ok()?; // if this line is entirely whitespace, // emit a blank line // Err(true) indicates that this was a linebreak, // which is important to know when detecting mail headers if n.chars().all(is_fmt_whitespace) { return Some(Line::NoFormatLine(String::new(), true)); } let (pmatch, poffset) = self.match_prefix(&n[..]); // if this line does not match the prefix, // emit the line unprocessed and iterate again if !pmatch { return Some(Line::NoFormatLine(n, false)); } // if the line matches the prefix, but is blank after, // don't allow lines to be combined through it (that is, // treat it like a blank line, except that since it's // not truly blank we will not allow mail headers on the // following line) if pmatch && n[poffset + self.opts.prefix.as_ref().map_or(0, |s| s.len())..] 
.chars() .all(is_fmt_whitespace) { return Some(Line::NoFormatLine(n, false)); } // skip if this line matches the anti_prefix // (NOTE definition of match_anti_prefix is TRUE if we should process) if !self.match_anti_prefix(&n[..]) { return Some(Line::NoFormatLine(n, false)); } // figure out the indent, prefix, and prefixindent ending points let prefix_end = poffset + self.opts.prefix.as_ref().map_or(0, |s| s.len()); let (indent_end, prefix_len, indent_len) = self.compute_indent(&n[..], prefix_end); Some(Line::FormatLine(FileLine { line: n, indent_end, prefix_indent_end: poffset, indent_len, prefix_len, })) } } /// A paragraph : a collection of [`FileLines`] that are to be formatted /// plus info about the paragraph's indentation /// /// We only retain the String from the [`FileLine`]; the other info /// is only there to help us in deciding how to merge lines into Paragraphs #[derive(Debug)] pub struct Paragraph { /// the lines of the file lines: Vec, /// string representing the init, that is, the first line's indent pub init_str: String, /// printable length of the init string considering TABWIDTH pub init_len: usize, /// byte location of end of init in first line String init_end: usize, /// string representing indent pub indent_str: String, /// length of above pub indent_len: usize, /// byte location of end of indent (in crown and tagged mode, only applies to 2nd line and onward) indent_end: usize, /// we need to know if this is a mail header because we do word splitting differently in that case pub mail_header: bool, } /// An iterator producing a stream of paragraphs from a stream of lines /// given a set of options. 
pub struct ParagraphStream<'a> { lines: Peekable>, next_mail: bool, opts: &'a FmtOptions, } impl ParagraphStream<'_> { pub fn new<'b>(opts: &'b FmtOptions, reader: &'b mut FileOrStdReader) -> ParagraphStream<'b> { let lines = FileLines::new(opts, reader.lines()).peekable(); // at the beginning of the file, we might find mail headers ParagraphStream { lines, next_mail: true, opts, } } /// Detect RFC822 mail header fn is_mail_header(line: &FileLine) -> bool { // a mail header begins with either "From " (envelope sender line) // or with a sequence of printable ASCII chars (33 to 126, inclusive, // except colon) followed by a colon. if line.indent_end > 0 { false } else { let l_slice = &line.line[..]; if l_slice.starts_with("From ") { true } else { let Some(colon_posn) = l_slice.find(':') else { return false; }; // header field must be nonzero length if colon_posn == 0 { return false; } l_slice[..colon_posn] .chars() .all(|x| !matches!(x as usize, y if !(33..=126).contains(&y))) } } } } impl Iterator for ParagraphStream<'_> { type Item = Result; #[allow(clippy::cognitive_complexity)] fn next(&mut self) -> Option> { // return a NoFormatLine in an Err; it should immediately be output let noformat = match self.lines.peek()? 
{ Line::FormatLine(_) => false, Line::NoFormatLine(_, _) => true, }; // found a NoFormatLine, immediately dump it out if noformat { let (s, nm) = self.lines.next().unwrap().get_noformatline(); self.next_mail = nm; return Some(Err(s)); } // found a FormatLine, now build a paragraph let mut init_str = String::new(); let mut init_end = 0; let mut init_len = 0; let mut indent_str = String::new(); let mut indent_end = 0; let mut indent_len = 0; let mut prefix_len = 0; let mut prefix_indent_end = 0; let mut p_lines = Vec::new(); let mut in_mail = false; let mut second_done = false; // for when we use crown or tagged mode loop { // peek ahead // need to explicitly force fl out of scope before we can call self.lines.next() let Some(Line::FormatLine(fl)) = self.lines.peek() else { break; }; if p_lines.is_empty() { // first time through the loop, get things set up // detect mail header if self.opts.mail && self.next_mail && ParagraphStream::is_mail_header(fl) { in_mail = true; // there can't be any indent or prefixindent because otherwise is_mail_header // would fail since there cannot be any whitespace before the colon in a // valid header field indent_str.push_str(" "); indent_len = 2; } else { if self.opts.crown || self.opts.tagged { init_str.push_str(&fl.line[..fl.indent_end]); init_len = fl.indent_len; init_end = fl.indent_end; } else { second_done = true; } // these will be overwritten in the 2nd line of crown or tagged mode, but // we are not guaranteed to get to the 2nd line, e.g., if the next line // is a NoFormatLine or None. Thus, we set sane defaults the 1st time around indent_str.push_str(&fl.line[..fl.indent_end]); indent_len = fl.indent_len; indent_end = fl.indent_end; // save these to check for matching lines prefix_len = fl.prefix_len; prefix_indent_end = fl.prefix_indent_end; // in tagged mode, add 4 spaces of additional indenting by default // (gnu fmt's behavior is different: it seems to find the closest column to // indent_end that is divisible by 3. 
But honestly that behavior seems // pretty arbitrary. // Perhaps a better default would be 1 TABWIDTH? But ugh that's so big. if self.opts.tagged { indent_str.push_str(" "); indent_len += 4; } } } else if in_mail { // lines following mail headers must begin with spaces if fl.indent_end == 0 || (self.opts.prefix.is_some() && fl.prefix_indent_end == 0) { break; // this line does not begin with spaces } } else if !second_done { // now we have enough info to handle crown margin and tagged mode // in both crown and tagged modes we require that prefix_len is the same if prefix_len != fl.prefix_len || prefix_indent_end != fl.prefix_indent_end { break; } // in tagged mode, indent has to be *different* on following lines if self.opts.tagged && indent_len - 4 == fl.indent_len && indent_end == fl.indent_end { break; } // this is part of the same paragraph, get the indent info from this line indent_str.clear(); indent_str.push_str(&fl.line[..fl.indent_end]); indent_len = fl.indent_len; indent_end = fl.indent_end; second_done = true; } else { // detect mismatch if indent_end != fl.indent_end || prefix_indent_end != fl.prefix_indent_end || indent_len != fl.indent_len || prefix_len != fl.prefix_len { break; } } p_lines.push(self.lines.next().unwrap().get_formatline().line); // when we're in split-only mode, we never join lines, so stop here if self.opts.split_only { break; } } // if this was a mail header, then the next line can be detected as one. Otherwise, it cannot. // NOTE next_mail is true at ParagraphStream instantiation, and is set to true after a blank // NoFormatLine. 
self.next_mail = in_mail; Some(Ok(Paragraph { lines: p_lines, init_str, init_len, init_end, indent_str, indent_len, indent_end, mail_header: in_mail, })) } } pub struct ParaWords<'a> { opts: &'a FmtOptions, para: &'a Paragraph, words: Vec>, } impl<'a> ParaWords<'a> { pub fn new(opts: &'a FmtOptions, para: &'a Paragraph) -> Self { let mut pw = ParaWords { opts, para, words: Vec::new(), }; pw.create_words(); pw } fn create_words(&mut self) { if self.para.mail_header { // no extra spacing for mail headers; always exactly 1 space // safe to trim_start on every line of a mail header, since the // first line is guaranteed not to have any spaces self.words.extend( self.para .lines .iter() .flat_map(|x| x.split_whitespace()) .map(|x| WordInfo { word: x, word_start: 0, word_nchars: x.len(), // OK for mail headers; only ASCII allowed (unicode is escaped) before_tab: None, after_tab: 0, sentence_start: false, ends_punct: false, new_line: false, }), ); } else { // first line self.words.extend(if self.opts.crown || self.opts.tagged { // crown and tagged mode has the "init" in the first line, so slice from there WordSplit::new(self.opts, &self.para.lines[0][self.para.init_end..]) } else { // otherwise we slice from the indent WordSplit::new(self.opts, &self.para.lines[0][self.para.indent_end..]) }); if self.para.lines.len() > 1 { let indent_end = self.para.indent_end; let opts = self.opts; self.words.extend( self.para .lines .iter() .skip(1) .flat_map(|x| WordSplit::new(opts, &x[indent_end..])), ); } } } pub fn words(&'a self) -> Iter<'a, WordInfo<'a>> { self.words.iter() } } struct WordSplit<'a> { opts: &'a FmtOptions, string: &'a str, length: usize, position: usize, prev_punct: bool, } impl WordSplit<'_> { fn analyze_tabs(&self, string: &str) -> (Option, usize, Option) { // given a string, determine (length before tab) and (printed length after first tab) // if there are no tabs, beforetab = -1 and aftertab is the printed length let mut beforetab = None; let mut aftertab = 0; 
let mut word_start = None; for (os, c) in string.char_indices() { if !is_fmt_whitespace(c) { word_start = Some(os); break; } else if c == '\t' { if beforetab.is_none() { beforetab = Some(aftertab); aftertab = 0; } else { aftertab = (aftertab / self.opts.tabwidth + 1) * self.opts.tabwidth; } } else { aftertab += 1; } } (beforetab, aftertab, word_start) } } impl WordSplit<'_> { fn new<'b>(opts: &'b FmtOptions, string: &'b str) -> WordSplit<'b> { // wordsplits *must* start at a non-whitespace character let trim_string = string.trim_start_matches(is_fmt_whitespace); WordSplit { opts, string: trim_string, length: string.len(), position: 0, prev_punct: false, } } fn is_punctuation(c: char) -> bool { matches!(c, '!' | '.' | '?') } } pub struct WordInfo<'a> { pub word: &'a str, pub word_start: usize, pub word_nchars: usize, pub before_tab: Option, pub after_tab: usize, pub sentence_start: bool, pub ends_punct: bool, pub new_line: bool, } // returns (&str, is_start_of_sentence) impl<'a> Iterator for WordSplit<'a> { type Item = WordInfo<'a>; fn next(&mut self) -> Option> { if self.position >= self.length { return None; } let old_position = self.position; let new_line = old_position == 0; // find the start of the next word, and record if we find a tab character let (before_tab, after_tab, word_start) = if let (b, a, Some(s)) = self.analyze_tabs(&self.string[old_position..]) { (b, a, s + old_position) } else { self.position = self.length; return None; }; // find the beginning of the next whitespace // note that this preserves the invariant that self.position // points to whitespace character OR end of string let mut word_nchars = 0; self.position = match self.string[word_start..].find(|x: char| { if is_fmt_whitespace(x) { true } else { word_nchars += char_width(x); false } }) { None => self.length, Some(s) => s + word_start, }; let word_start_relative = word_start - old_position; // if the previous sentence was punctuation and this sentence has >2 whitespace or one tab, is a 
new sentence. let is_start_of_sentence = self.prev_punct && (before_tab.is_some() || word_start_relative > 1); // now record whether this word ends in punctuation self.prev_punct = match self.string[..self.position].chars().next_back() { Some(ch) => WordSplit::is_punctuation(ch), _ => panic!("fatal: expected word not to be empty"), }; let (word, word_start_relative, before_tab, after_tab) = if self.opts.uniform { (&self.string[word_start..self.position], 0, None, 0) } else { ( &self.string[old_position..self.position], word_start_relative, before_tab, after_tab, ) }; Some(WordInfo { word, word_start: word_start_relative, word_nchars, before_tab, after_tab, sentence_start: is_start_of_sentence, ends_punct: self.prev_punct, new_line, }) } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/000077500000000000000000000000001504311601400231105ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/Cargo.toml000066400000000000000000000010621504311601400250370ustar00rootroot00000000000000[package] name = "uu_fold" description = "fold ~ (uutils) wrap each line of input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/fold" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/fold.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "fold" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/LICENSE000077700000000000000000000000001504311601400257562../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/locales/000077500000000000000000000000001504311601400245325ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/locales/en-US.ftl000066400000000000000000000010121504311601400261620ustar00rootroot00000000000000fold-about = Writes each file (or standard input if no files are given) to standard output whilst breaking long lines fold-usage = fold [OPTION]... [FILE]... fold-bytes-help = count using bytes rather than columns (meaning control characters such as newline are not treated specially) fold-spaces-help = break lines at word boundaries rather than a hard cut-off fold-width-help = set WIDTH as the maximum line width rather than 80 fold-error-illegal-width = illegal width value fold-error-readline = failed to read line coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/locales/fr-FR.ftl000066400000000000000000000011421504311601400261530ustar00rootroot00000000000000fold-about = Écrit chaque fichier (ou l'entrée standard si aucun fichier n'est donné) sur la sortie standard en coupant les lignes trop longues fold-usage = fold [OPTION]... [FICHIER]... 
fold-bytes-help = compter en octets plutôt qu'en colonnes (les caractères de contrôle comme retour chariot ne sont pas traités spécialement) fold-spaces-help = couper les lignes aux limites de mots plutôt qu'à une largeur fixe fold-width-help = définir WIDTH comme largeur de ligne maximale au lieu de 80 fold-error-illegal-width = valeur de largeur illégale fold-error-readline = échec de lecture de la ligne coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/src/000077500000000000000000000000001504311601400236775ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/src/fold.rs000066400000000000000000000214711504311601400251760ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDOs) ncount routput use clap::{Arg, ArgAction, Command}; use std::fs::File; use std::io::{BufRead, BufReader, Read, Write, stdin, stdout}; use std::path::Path; use uucore::display::Quotable; use uucore::error::{FromIo, UResult, USimpleError}; use uucore::format_usage; use uucore::translate; const TAB_WIDTH: usize = 8; const NL: u8 = b'\n'; const CR: u8 = b'\r'; const TAB: u8 = b'\t'; mod options { pub const BYTES: &str = "bytes"; pub const SPACES: &str = "spaces"; pub const WIDTH: &str = "width"; pub const FILE: &str = "file"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args.collect_lossy(); let (args, obs_width) = handle_obsolete(&args[..]); let matches = uu_app().try_get_matches_from(args)?; let bytes = matches.get_flag(options::BYTES); let spaces = matches.get_flag(options::SPACES); let poss_width = match matches.get_one::(options::WIDTH) { Some(v) => Some(v.clone()), None => obs_width, }; let width = match poss_width { Some(inp_width) => inp_width.parse::().map_err(|e| { USimpleError::new( 1, 
translate!("fold-error-illegal-width", "width" => inp_width.quote(), "error" => e), ) })?, None => 80, }; let files = match matches.get_many::(options::FILE) { Some(v) => v.cloned().collect(), None => vec!["-".to_owned()], }; fold(&files, bytes, spaces, width) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .override_usage(format_usage(&translate!("fold-usage"))) .about(translate!("fold-about")) .infer_long_args(true) .arg( Arg::new(options::BYTES) .long(options::BYTES) .short('b') .help(translate!("fold-bytes-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SPACES) .long(options::SPACES) .short('s') .help(translate!("fold-spaces-help")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::WIDTH) .long(options::WIDTH) .short('w') .help(translate!("fold-width-help")) .value_name("WIDTH") .allow_hyphen_values(true), ) .arg( Arg::new(options::FILE) .hide(true) .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath), ) } fn handle_obsolete(args: &[String]) -> (Vec, Option) { for (i, arg) in args.iter().enumerate() { let slice = &arg; if slice.starts_with('-') && slice.chars().nth(1).is_some_and(|c| c.is_ascii_digit()) { let mut v = args.to_vec(); v.remove(i); return (v, Some(slice[1..].to_owned())); } } (args.to_vec(), None) } fn fold(filenames: &[String], bytes: bool, spaces: bool, width: usize) -> UResult<()> { for filename in filenames { let filename: &str = filename; let mut stdin_buf; let mut file_buf; let buffer = BufReader::new(if filename == "-" { stdin_buf = stdin(); &mut stdin_buf as &mut dyn Read } else { file_buf = File::open(Path::new(filename)).map_err_context(|| filename.to_string())?; &mut file_buf as &mut dyn Read }); if bytes { fold_file_bytewise(buffer, spaces, width)?; } else { fold_file(buffer, spaces, width)?; } } Ok(()) } /// Fold `file` to fit `width` (number of columns), counting all characters as /// one column. 
/// /// This function handles folding for the `-b`/`--bytes` option, counting /// tab, backspace, and carriage return as occupying one column, identically /// to all other characters in the stream. /// /// If `spaces` is `true`, attempt to break lines at whitespace boundaries. fn fold_file_bytewise(mut file: BufReader, spaces: bool, width: usize) -> UResult<()> { let mut line = Vec::new(); loop { if file .read_until(NL, &mut line) .map_err_context(|| translate!("fold-error-readline"))? == 0 { break; } if line == [NL] { println!(); line.truncate(0); continue; } let len = line.len(); let mut i = 0; while i < len { let width = if len - i >= width { width } else { len - i }; let slice = { let slice = &line[i..i + width]; if spaces && i + width < len { match slice .iter() .enumerate() .rev() .find(|(_, c)| c.is_ascii_whitespace() && **c != CR) { Some((m, _)) => &slice[..=m], None => slice, } } else { slice } }; // Don't duplicate trailing newlines: if the slice is "\n", the // previous iteration folded just before the end of the line and // has already printed this newline. if slice == [NL] { break; } i += slice.len(); let at_eol = i >= len; if at_eol { stdout().write_all(slice)?; } else { stdout().write_all(slice)?; stdout().write_all(&[NL])?; } } line.truncate(0); } Ok(()) } /// Fold `file` to fit `width` (number of columns). /// /// By default `fold` treats tab, backspace, and carriage return specially: /// tab characters count as 8 columns, backspace decreases the /// column count, and carriage return resets the column count to 0. /// /// If `spaces` is `true`, attempt to break lines at whitespace boundaries. #[allow(unused_assignments)] #[allow(clippy::cognitive_complexity)] fn fold_file(mut file: BufReader, spaces: bool, width: usize) -> UResult<()> { let mut line = Vec::new(); let mut output = Vec::new(); let mut col_count = 0; let mut last_space = None; /// Print the output line, resetting the column and character counts. 
/// /// If `spaces` is `true`, print the output line up to the last /// encountered whitespace character (inclusive) and set the remaining /// characters as the start of the next line. macro_rules! emit_output { () => { let consume = match last_space { Some(i) => i + 1, None => output.len(), }; stdout().write_all(&output[..consume])?; stdout().write_all(&[NL])?; output.drain(..consume); // we know there are no tabs left in output, so each char counts // as 1 column col_count = output.len(); last_space = None; }; } loop { if file .read_until(NL, &mut line) .map_err_context(|| translate!("fold-error-readline"))? == 0 { break; } for ch in &line { if *ch == NL { // make sure to _not_ split output at whitespace, since we // know the entire output will fit last_space = None; emit_output!(); break; } if col_count >= width { emit_output!(); } match *ch { CR => col_count = 0, TAB => { let next_tab_stop = col_count + TAB_WIDTH - col_count % TAB_WIDTH; if next_tab_stop > width && !output.is_empty() { emit_output!(); } col_count = next_tab_stop; last_space = if spaces { Some(output.len()) } else { None }; } 0x08 => { col_count = col_count.saturating_sub(1); } _ if spaces && ch.is_ascii_whitespace() => { last_space = Some(output.len()); col_count += 1; } _ => col_count += 1, } output.push(*ch); } if !output.is_empty() { stdout().write_all(&output)?; output.truncate(0); } line.truncate(0); } Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/fold/src/main.rs000066400000000000000000000000271504311601400251700ustar00rootroot00000000000000uucore::bin!(uu_fold); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/000077500000000000000000000000001504311601400235035ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/Cargo.toml000066400000000000000000000012171504311601400254340ustar00rootroot00000000000000[package] name = "uu_groups" description = "groups ~ (uutils) display group memberships for USERNAME" repository 
= "https://github.com/uutils/coreutils/tree/main/src/uu/groups" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/groups.rs" [dependencies] clap = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = ["entries", "process"] } fluent = { workspace = true } [[bin]] name = "groups" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/LICENSE000077700000000000000000000000001504311601400263512../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/locales/000077500000000000000000000000001504311601400251255ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/locales/en-US.ftl000066400000000000000000000005411504311601400265630ustar00rootroot00000000000000groups-about = Print group memberships for each USERNAME or, if no USERNAME is specified, for the current process (which may differ if the groups dataâ€base has changed). groups-usage = groups [OPTION]... [USERNAME]... groups-error-fetch = failed to fetch groups groups-error-notfound = cannot find name for group ID groups-error-user = no such user coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/locales/fr-FR.ftl000066400000000000000000000006631504311601400265550ustar00rootroot00000000000000groups-about = Affiche les groupes d'appartenance pour chaque NOM_UTILISATEUR ou, s'il n'est pas précisé, pour le processus courant (ce qui peut différer si la base de données des groupes a changé). groups-usage = groups [OPTION]... [NOM_UTILISATEUR]... 
groups-error-fetch = échec de récupération des groupes groups-error-notfound = impossible de trouver le nom pour l'ID de groupe groups-error-user = utilisateur inexistant coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/src/000077500000000000000000000000001504311601400242725ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/src/groups.rs000066400000000000000000000052641504311601400261660ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) passwd use thiserror::Error; use uucore::{ display::Quotable, entries::{Locate, Passwd, get_groups_gnu, gid2grp}, error::{UError, UResult}, format_usage, show, }; use clap::{Arg, ArgAction, Command}; use uucore::translate; mod options { pub const USERS: &str = "USERNAME"; } #[derive(Debug, Error)] enum GroupsError { #[error("{message}", message = translate!("groups-error-fetch"))] GetGroupsFailed, #[error("{message} {gid}", message = translate!("groups-error-notfound"), gid = .0)] GroupNotFound(u32), #[error("{user}: {message}", user = .0.quote(), message = translate!("groups-error-user"))] UserNotFound(String), } impl UError for GroupsError {} fn infallible_gid2grp(gid: &u32) -> String { match gid2grp(*gid) { Ok(grp) => grp, Err(_) => { // The `show!()` macro sets the global exit code for the program. 
show!(GroupsError::GroupNotFound(*gid)); gid.to_string() } } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let users: Vec = matches .get_many::(options::USERS) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); if users.is_empty() { let Ok(gids) = get_groups_gnu(None) else { return Err(GroupsError::GetGroupsFailed.into()); }; let groups: Vec = gids.iter().map(infallible_gid2grp).collect(); println!("{}", groups.join(" ")); return Ok(()); } for user in users { match Passwd::locate(user.as_str()) { Ok(p) => { let groups: Vec = p.belongs_to().iter().map(infallible_gid2grp).collect(); println!("{user} : {}", groups.join(" ")); } Err(_) => { // The `show!()` macro sets the global exit code for the program. show!(GroupsError::UserNotFound(user)); } } } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("groups-about")) .override_usage(format_usage(&translate!("groups-usage"))) .infer_long_args(true) .arg( Arg::new(options::USERS) .action(ArgAction::Append) .value_name(options::USERS) .value_hint(clap::ValueHint::Username), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/groups/src/main.rs000066400000000000000000000000311504311601400255560ustar00rootroot00000000000000uucore::bin!(uu_groups); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/000077500000000000000000000000001504311601400236345ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/BENCHMARKING.md000066400000000000000000000003371504311601400260110ustar00rootroot00000000000000# Benchmarking hashsum ## To bench blake2 Taken from: With a large file: ```shell hyperfine "./target/release/coreutils hashsum --b2sum large-file" "b2sum large-file" ``` 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/Cargo.toml000066400000000000000000000011501504311601400255610ustar00rootroot00000000000000[package] name = "uu_hashsum" description = "hashsum ~ (uutils) display or check input digests" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/hashsum" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/hashsum.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["checksum", "sum"] } fluent = { workspace = true } [[bin]] name = "hashsum" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/LICENSE000077700000000000000000000000001504311601400265022../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/locales/000077500000000000000000000000001504311601400252565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/locales/en-US.ftl000066400000000000000000000036051504311601400267200ustar00rootroot00000000000000hashsum-about = Compute and check message digests. hashsum-usage = hashsum -- [OPTIONS]... [FILE]... 
# Help messages hashsum-help-binary-windows = read or check in binary mode (default) hashsum-help-binary-other = read in binary mode hashsum-help-text-windows = read or check in text mode hashsum-help-text-other = read in text mode (default) hashsum-help-check = read hashsums from the FILEs and check them hashsum-help-tag = create a BSD-style checksum hashsum-help-quiet = don't print OK for each successfully verified file hashsum-help-status = don't output anything, status code shows success hashsum-help-strict = exit non-zero for improperly formatted checksum lines hashsum-help-ignore-missing = don't fail or report status for missing files hashsum-help-warn = warn about improperly formatted checksum lines hashsum-help-zero = end each output line with NUL, not newline hashsum-help-length = digest length in bits; must not exceed the max for the blake2 algorithm and must be a multiple of 8 hashsum-help-no-names = Omits filenames in the output (option not present in GNU/Coreutils) hashsum-help-bits = set the size of the output (only for SHAKE) # Algorithm help messages hashsum-help-md5 = work with MD5 hashsum-help-sha1 = work with SHA1 hashsum-help-sha224 = work with SHA224 hashsum-help-sha256 = work with SHA256 hashsum-help-sha384 = work with SHA384 hashsum-help-sha512 = work with SHA512 hashsum-help-sha3 = work with SHA3 hashsum-help-sha3-224 = work with SHA3-224 hashsum-help-sha3-256 = work with SHA3-256 hashsum-help-sha3-384 = work with SHA3-384 hashsum-help-sha3-512 = work with SHA3-512 hashsum-help-shake128 = work with SHAKE128 using BITS for the output size hashsum-help-shake256 = work with SHAKE256 using BITS for the output size hashsum-help-b2sum = work with BLAKE2 hashsum-help-b3sum = work with BLAKE3 # Error messages hashsum-error-failed-to-read-input = failed to read input coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/locales/fr-FR.ftl000066400000000000000000000043541504311601400267070ustar00rootroot00000000000000hashsum-about = 
Calculer et vérifier les empreintes de messages. hashsum-usage = hashsum -- [OPTION]... [FICHIER]... # Messages d'aide hashsum-help-binary-windows = lire ou vérifier en mode binaire (par défaut) hashsum-help-binary-other = lire en mode binaire hashsum-help-text-windows = lire ou vérifier en mode texte hashsum-help-text-other = lire en mode texte (par défaut) hashsum-help-check = lire les empreintes depuis les FICHIERs et les vérifier hashsum-help-tag = créer une somme de contrôle de style BSD hashsum-help-quiet = ne pas afficher OK pour chaque fichier vérifié avec succès hashsum-help-status = ne rien afficher, le code de statut indique le succès hashsum-help-strict = sortir avec un code non-zéro pour les lignes de somme de contrôle mal formatées hashsum-help-ignore-missing = ne pas échouer ou rapporter le statut pour les fichiers manquants hashsum-help-warn = avertir des lignes de somme de contrôle mal formatées hashsum-help-zero = terminer chaque ligne de sortie avec NUL, pas de retour à la ligne hashsum-help-length = longueur de l'empreinte en bits ; ne doit pas dépasser le maximum pour l'algorithme blake2 et doit être un multiple de 8 hashsum-help-no-names = Omet les noms de fichiers dans la sortie (option non présente dans GNU/Coreutils) hashsum-help-bits = définir la taille de la sortie (uniquement pour SHAKE) # Messages d'aide des algorithmes hashsum-help-md5 = travailler avec MD5 hashsum-help-sha1 = travailler avec SHA1 hashsum-help-sha224 = travailler avec SHA224 hashsum-help-sha256 = travailler avec SHA256 hashsum-help-sha384 = travailler avec SHA384 hashsum-help-sha512 = travailler avec SHA512 hashsum-help-sha3 = travailler avec SHA3 hashsum-help-sha3-224 = travailler avec SHA3-224 hashsum-help-sha3-256 = travailler avec SHA3-256 hashsum-help-sha3-384 = travailler avec SHA3-384 hashsum-help-sha3-512 = travailler avec SHA3-512 hashsum-help-shake128 = travailler avec SHAKE128 en utilisant BITS pour la taille de sortie hashsum-help-shake256 = travailler avec 
SHAKE256 en utilisant BITS pour la taille de sortie hashsum-help-b2sum = travailler avec BLAKE2 hashsum-help-b3sum = travailler avec BLAKE3 # Messages d'erreur hashsum-error-failed-to-read-input = échec de la lecture de l'entrée coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/src/000077500000000000000000000000001504311601400244235ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/src/hashsum.rs000066400000000000000000000455301504311601400264500ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) algo, algoname, regexes, nread, nonames use clap::ArgAction; use clap::builder::ValueParser; use clap::value_parser; use clap::{Arg, ArgMatches, Command}; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::io::{BufReader, Read, stdin}; use std::iter; use std::num::ParseIntError; use std::path::Path; use uucore::checksum::ChecksumError; use uucore::checksum::ChecksumOptions; use uucore::checksum::ChecksumVerbose; use uucore::checksum::HashAlgorithm; use uucore::checksum::calculate_blake2b_length; use uucore::checksum::create_sha3; use uucore::checksum::detect_algo; use uucore::checksum::digest_reader; use uucore::checksum::escape_filename; use uucore::checksum::perform_checksum_validation; use uucore::error::{FromIo, UResult}; use uucore::format_usage; use uucore::sum::{Digest, Sha3_224, Sha3_256, Sha3_384, Sha3_512, Shake128, Shake256}; use uucore::translate; const NAME: &str = "hashsum"; struct Options<'a> { algoname: &'static str, digest: Box, binary: bool, binary_name: &'a str, //check: bool, tag: bool, nonames: bool, //status: bool, //quiet: bool, //strict: bool, //warn: bool, output_bits: usize, zero: bool, //ignore_missing: bool, } /// Creates a hasher instance based on the command-line flags. 
/// /// # Arguments /// /// * `matches` - A reference to the `ArgMatches` object containing the command-line arguments. /// /// # Returns /// /// Returns a [`UResult`] of a tuple containing the algorithm name, the hasher instance, and /// the output length in bits or an Err if multiple hash algorithms are specified or if a /// required flag is missing. #[allow(clippy::cognitive_complexity)] fn create_algorithm_from_flags(matches: &ArgMatches) -> UResult { let mut alg: Option = None; let mut set_or_err = |new_alg: HashAlgorithm| -> UResult<()> { if alg.is_some() { return Err(ChecksumError::CombineMultipleAlgorithms.into()); } alg = Some(new_alg); Ok(()) }; if matches.get_flag("md5") { set_or_err(detect_algo("md5sum", None)?)?; } if matches.get_flag("sha1") { set_or_err(detect_algo("sha1sum", None)?)?; } if matches.get_flag("sha224") { set_or_err(detect_algo("sha224sum", None)?)?; } if matches.get_flag("sha256") { set_or_err(detect_algo("sha256sum", None)?)?; } if matches.get_flag("sha384") { set_or_err(detect_algo("sha384sum", None)?)?; } if matches.get_flag("sha512") { set_or_err(detect_algo("sha512sum", None)?)?; } if matches.get_flag("b2sum") { set_or_err(detect_algo("b2sum", None)?)?; } if matches.get_flag("b3sum") { set_or_err(detect_algo("b3sum", None)?)?; } if matches.get_flag("sha3") { let bits = matches.get_one::("bits").copied(); set_or_err(create_sha3(bits)?)?; } if matches.get_flag("sha3-224") { set_or_err(HashAlgorithm { name: "SHA3-224", create_fn: Box::new(|| Box::new(Sha3_224::new())), bits: 224, })?; } if matches.get_flag("sha3-256") { set_or_err(HashAlgorithm { name: "SHA3-256", create_fn: Box::new(|| Box::new(Sha3_256::new())), bits: 256, })?; } if matches.get_flag("sha3-384") { set_or_err(HashAlgorithm { name: "SHA3-384", create_fn: Box::new(|| Box::new(Sha3_384::new())), bits: 384, })?; } if matches.get_flag("sha3-512") { set_or_err(HashAlgorithm { name: "SHA3-512", create_fn: Box::new(|| Box::new(Sha3_512::new())), bits: 512, })?; } if 
matches.get_flag("shake128") { match matches.get_one::("bits") { Some(bits) => set_or_err(HashAlgorithm { name: "SHAKE128", create_fn: Box::new(|| Box::new(Shake128::new())), bits: *bits, })?, None => return Err(ChecksumError::BitsRequiredForShake128.into()), } } if matches.get_flag("shake256") { match matches.get_one::("bits") { Some(bits) => set_or_err(HashAlgorithm { name: "SHAKE256", create_fn: Box::new(|| Box::new(Shake256::new())), bits: *bits, })?, None => return Err(ChecksumError::BitsRequiredForShake256.into()), } } if alg.is_none() { return Err(ChecksumError::NeedAlgorithmToHash.into()); } Ok(alg.unwrap()) } // TODO: return custom error type fn parse_bit_num(arg: &str) -> Result { arg.parse() } #[uucore::main] pub fn uumain(mut args: impl uucore::Args) -> UResult<()> { // if there is no program name for some reason, default to "hashsum" let program = args.next().unwrap_or_else(|| OsString::from(NAME)); let binary_name = Path::new(&program) .file_stem() .unwrap_or_else(|| OsStr::new(NAME)) .to_string_lossy(); let args = iter::once(program.clone()).chain(args); // Default binary in Windows, text mode otherwise let binary_flag_default = cfg!(windows); let (command, is_hashsum_bin) = uu_app(&binary_name); // FIXME: this should use try_get_matches_from() and crash!(), but at the moment that just // causes "error: " to be printed twice (once from crash!() and once from clap). With // the current setup, the name of the utility is not printed, but I think this is at // least somewhat better from a user's perspective. let matches = command.try_get_matches_from(args)?; let input_length: Option<&usize> = if binary_name == "b2sum" { matches.get_one::(options::LENGTH) } else { None }; let length = match input_length { Some(length) => calculate_blake2b_length(*length)?, None => None, }; let algo = if is_hashsum_bin { create_algorithm_from_flags(&matches)? } else { detect_algo(&binary_name, length)? 
}; let binary = if matches.get_flag("binary") { true } else if matches.get_flag("text") { false } else { binary_flag_default }; let check = matches.get_flag("check"); let status = matches.get_flag("status"); let quiet = matches.get_flag("quiet") || status; let strict = matches.get_flag("strict"); let warn = matches.get_flag("warn") && !status; let ignore_missing = matches.get_flag("ignore-missing"); if ignore_missing && !check { // --ignore-missing needs -c return Err(ChecksumError::IgnoreNotCheck.into()); } if check { // on Windows, allow --binary/--text to be used with --check // and keep the behavior of defaulting to binary #[cfg(not(windows))] let binary = { let text_flag = matches.get_flag("text"); let binary_flag = matches.get_flag("binary"); if binary_flag || text_flag { return Err(ChecksumError::BinaryTextConflict.into()); } false }; // Execute the checksum validation based on the presence of files or the use of stdin // Determine the source of input: a list of files or stdin. let input = matches.get_many::(options::FILE).map_or_else( || iter::once(OsStr::new("-")).collect::>(), |files| files.map(OsStr::new).collect::>(), ); let verbose = ChecksumVerbose::new(status, quiet, warn); let opts = ChecksumOptions { binary, ignore_missing, strict, verbose, }; // Execute the checksum validation return perform_checksum_validation( input.iter().copied(), Some(algo.name), Some(algo.bits), opts, ); } else if quiet { return Err(ChecksumError::QuietNotCheck.into()); } else if strict { return Err(ChecksumError::StrictNotCheck.into()); } let nonames = *matches .try_get_one("no-names") .unwrap_or(None) .unwrap_or(&false); let zero = matches.get_flag("zero"); let opts = Options { algoname: algo.name, digest: (algo.create_fn)(), output_bits: algo.bits, binary, binary_name: &binary_name, tag: matches.get_flag("tag"), nonames, //status, //quiet, //warn, zero, //ignore_missing, }; // Show the hashsum of the input match matches.get_many::(options::FILE) { Some(files) => 
hashsum(opts, files.map(|f| f.as_os_str())), None => hashsum(opts, iter::once(OsStr::new("-"))), } } mod options { //pub const ALGORITHM: &str = "algorithm"; pub const FILE: &str = "file"; //pub const UNTAGGED: &str = "untagged"; pub const TAG: &str = "tag"; pub const LENGTH: &str = "length"; //pub const RAW: &str = "raw"; //pub const BASE64: &str = "base64"; pub const CHECK: &str = "check"; pub const STRICT: &str = "strict"; pub const TEXT: &str = "text"; pub const BINARY: &str = "binary"; pub const STATUS: &str = "status"; pub const WARN: &str = "warn"; pub const QUIET: &str = "quiet"; } pub fn uu_app_common() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("hashsum-about")) .override_usage(format_usage(&translate!("hashsum-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::BINARY) .short('b') .long("binary") .help({ #[cfg(windows)] { translate!("hashsum-help-binary-windows") } #[cfg(not(windows))] { translate!("hashsum-help-binary-other") } }) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::CHECK) .short('c') .long("check") .help(translate!("hashsum-help-check")) .action(ArgAction::SetTrue) .conflicts_with("tag"), ) .arg( Arg::new(options::TAG) .long("tag") .help(translate!("hashsum-help-tag")) .action(ArgAction::SetTrue) .conflicts_with("text"), ) .arg( Arg::new(options::TEXT) .short('t') .long("text") .help({ #[cfg(windows)] { translate!("hashsum-help-text-windows") } #[cfg(not(windows))] { translate!("hashsum-help-text-other") } }) .conflicts_with("binary") .action(ArgAction::SetTrue), ) .arg( Arg::new(options::QUIET) .short('q') .long(options::QUIET) .help(translate!("hashsum-help-quiet")) .action(ArgAction::SetTrue) .overrides_with_all([options::STATUS, options::WARN]), ) .arg( Arg::new(options::STATUS) .short('s') .long("status") .help(translate!("hashsum-help-status")) .action(ArgAction::SetTrue) .overrides_with_all([options::QUIET, options::WARN]), ) .arg( 
Arg::new(options::STRICT) .long("strict") .help(translate!("hashsum-help-strict")) .action(ArgAction::SetTrue), ) .arg( Arg::new("ignore-missing") .long("ignore-missing") .help(translate!("hashsum-help-ignore-missing")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::WARN) .short('w') .long("warn") .help(translate!("hashsum-help-warn")) .action(ArgAction::SetTrue) .overrides_with_all([options::QUIET, options::STATUS]), ) .arg( Arg::new("zero") .short('z') .long("zero") .help(translate!("hashsum-help-zero")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILE) .index(1) .action(ArgAction::Append) .value_name(options::FILE) .value_hint(clap::ValueHint::FilePath) .value_parser(ValueParser::os_string()), ) } pub fn uu_app_length() -> Command { uu_app_opt_length(uu_app_common()) } fn uu_app_opt_length(command: Command) -> Command { command.arg( Arg::new(options::LENGTH) .long(options::LENGTH) .value_parser(value_parser!(usize)) .short('l') .help(translate!("hashsum-help-length")) .overrides_with(options::LENGTH) .action(ArgAction::Set), ) } pub fn uu_app_b3sum() -> Command { uu_app_b3sum_opts(uu_app_common()) } fn uu_app_b3sum_opts(command: Command) -> Command { command.arg( Arg::new("no-names") .long("no-names") .help(translate!("hashsum-help-no-names")) .action(ArgAction::SetTrue), ) } pub fn uu_app_bits() -> Command { uu_app_opt_bits(uu_app_common()) } fn uu_app_opt_bits(command: Command) -> Command { // Needed for variable-length output sums (e.g. SHAKE) command.arg( Arg::new("bits") .long("bits") .help(translate!("hashsum-help-bits")) .value_name("BITS") // XXX: should we actually use validators? 
they're not particularly efficient .value_parser(parse_bit_num), ) } pub fn uu_app_custom() -> Command { let mut command = uu_app_b3sum_opts(uu_app_opt_bits(uu_app_common())); let algorithms = &[ ("md5", translate!("hashsum-help-md5")), ("sha1", translate!("hashsum-help-sha1")), ("sha224", translate!("hashsum-help-sha224")), ("sha256", translate!("hashsum-help-sha256")), ("sha384", translate!("hashsum-help-sha384")), ("sha512", translate!("hashsum-help-sha512")), ("sha3", translate!("hashsum-help-sha3")), ("sha3-224", translate!("hashsum-help-sha3-224")), ("sha3-256", translate!("hashsum-help-sha3-256")), ("sha3-384", translate!("hashsum-help-sha3-384")), ("sha3-512", translate!("hashsum-help-sha3-512")), ("shake128", translate!("hashsum-help-shake128")), ("shake256", translate!("hashsum-help-shake256")), ("b2sum", translate!("hashsum-help-b2sum")), ("b3sum", translate!("hashsum-help-b3sum")), ]; for (name, desc) in algorithms { command = command.arg( Arg::new(*name) .long(name) .help(desc) .action(ArgAction::SetTrue), ); } command } /// hashsum is handled differently in build.rs /// therefore, this is different from other utilities. fn uu_app(binary_name: &str) -> (Command, bool) { match binary_name { // These all support the same options. "md5sum" | "sha1sum" | "sha224sum" | "sha256sum" | "sha384sum" | "sha512sum" => { (uu_app_common(), false) } // b2sum supports the md5sum options plus -l/--length. "b2sum" => (uu_app_length(), false), // These have never been part of GNU Coreutils, but can function with the same // options as md5sum. "sha3-224sum" | "sha3-256sum" | "sha3-384sum" | "sha3-512sum" => (uu_app_common(), false), // These have never been part of GNU Coreutils, and require an additional --bits // option to specify their output size. "sha3sum" | "shake128sum" | "shake256sum" => (uu_app_bits(), false), // b3sum has never been part of GNU Coreutils, and has a --no-names option in // addition to the b2sum options. 
"b3sum" => (uu_app_b3sum(), false), // We're probably just being called as `hashsum`, so give them everything. _ => (uu_app_custom(), true), } } #[allow(clippy::cognitive_complexity)] fn hashsum<'a, I>(mut options: Options, files: I) -> UResult<()> where I: Iterator, { let binary_marker = if options.binary { "*" } else { " " }; let mut err_found = None; for filename in files { let filename = Path::new(filename); let mut file = BufReader::new(if filename == OsStr::new("-") { Box::new(stdin()) as Box } else { let file_buf = match File::open(filename) { Ok(f) => f, Err(e) => { eprintln!( "{}: {}: {e}", options.binary_name, filename.to_string_lossy() ); err_found = Some(ChecksumError::Io(e)); continue; } }; Box::new(file_buf) as Box }); let (sum, _) = digest_reader( &mut options.digest, &mut file, options.binary, options.output_bits, ) .map_err_context(|| translate!("hashsum-error-failed-to-read-input"))?; let (escaped_filename, prefix) = escape_filename(filename); if options.tag { if options.algoname == "blake2b" { if options.digest.output_bits() == 512 { println!("BLAKE2b ({escaped_filename}) = {sum}"); } else { // special case for BLAKE2b with non-default output length println!( "BLAKE2b-{} ({escaped_filename}) = {sum}", options.digest.output_bits() ); } } else { println!( "{prefix}{} ({escaped_filename}) = {sum}", options.algoname.to_ascii_uppercase() ); } } else if options.nonames { println!("{sum}"); } else if options.zero { // with zero, we don't escape the filename print!("{sum} {binary_marker}{}\0", filename.display()); } else { println!("{prefix}{sum} {binary_marker}{escaped_filename}"); } } match err_found { None => Ok(()), Some(e) => Err(Box::new(e)), } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hashsum/src/main.rs000066400000000000000000000000321504311601400257100ustar00rootroot00000000000000uucore::bin!(uu_hashsum); 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/000077500000000000000000000000001504311601400230655ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/BENCHMARKING.md000066400000000000000000000030121504311601400252330ustar00rootroot00000000000000# Benchmarking to measure performance To compare the performance of the `uutils` version of `head` with the GNU version of `head`, you can use a benchmarking tool like [hyperfine][0]. On Ubuntu 18.04 or later, you can install `hyperfine` by running ```shell sudo apt-get install hyperfine ``` Next, build the `head` binary under the release profile: ```shell cargo build --release -p uu_head ``` Now, get a text file to test `head` on. I used the *Complete Works of William Shakespeare*, which is in the public domain in the United States and most other parts of the world. ```shell wget -O shakespeare.txt https://www.gutenberg.org/files/100/100-0.txt ``` This particular file has about 170,000 lines, each of which is no longer than 96 characters: ```shell $ wc -lL shakespeare.txt 170592 96 shakespeare.txt ``` You could use files of different shapes and sizes to test the performance of `head` in different situations. For a larger file, you could download a [database dump of Wikidata][1] or some related files that the Wikimedia project provides. For example, [this file][2] contains about 130 million lines. 
Finally, you can compare the performance of the two versions of `head` by running, for example, ```shell hyperfine \ "head -n 100000 shakespeare.txt" \ "target/release/head -n 100000 shakespeare.txt" ``` [0]: https://github.com/sharkdp/hyperfine [1]: https://www.wikidata.org/wiki/Wikidata:Database_download [2]: https://dumps.wikimedia.org/wikidatawiki/20211001/wikidatawiki-20211001-pages-logging.xml.gz coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/Cargo.toml000066400000000000000000000012711504311601400250160ustar00rootroot00000000000000[package] name = "uu_head" description = "head ~ (uutils) display the first lines of input" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/head" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/head.rs" [dependencies] clap = { workspace = true } memchr = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = [ "parser", "ringbuffer", "lines", "fs", ] } fluent = { workspace = true } [[bin]] name = "head" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/LICENSE000077700000000000000000000000001504311601400257332../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/locales/000077500000000000000000000000001504311601400245075ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/locales/en-US.ftl000066400000000000000000000026031504311601400261460ustar00rootroot00000000000000head-about = Print the first 10 lines of each FILE to standard output. With more than one FILE, precede each with a header giving the file name. With no FILE, or when FILE is -, read standard input. Mandatory arguments to long flags are mandatory for short flags too. 
head-usage = head [FLAG]... [FILE]... # Help messages head-help-bytes = print the first NUM bytes of each file; with the leading '-', print all but the last NUM bytes of each file head-help-lines = print the first NUM lines instead of the first 10; with the leading '-', print all but the last NUM lines of each file head-help-quiet = never print headers giving file names head-help-verbose = always print headers giving file names head-help-zero-terminated = line delimiter is NUL, not newline # Error messages head-error-reading-file = error reading {$name}: {$err} head-error-parse-error = parse error: {$err} head-error-bad-encoding = bad argument encoding head-error-num-too-large = number of -bytes or -lines is too large head-error-clap = clap error: {$err} head-error-invalid-bytes = invalid number of bytes: {$err} head-error-invalid-lines = invalid number of lines: {$err} head-error-bad-argument-format = bad argument format: {$arg} head-error-writing-stdout = error writing 'standard output': {$err} head-error-cannot-open = cannot open {$name} for reading # Output headers head-header-stdin = ==> standard input <== coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/locales/fr-FR.ftl000066400000000000000000000032231504311601400261320ustar00rootroot00000000000000head-about = Affiche les 10 premières lignes de chaque FICHIER sur la sortie standard. Avec plus d'un FICHIER, précède chacun d'un en-tête donnant le nom du fichier. Sans FICHIER, ou quand FICHIER est -, lit l'entrée standard. Les arguments obligatoires pour les drapeaux longs sont obligatoires pour les drapeaux courts aussi. head-usage = head [DRAPEAU]... [FICHIER]... 
# Messages d'aide head-help-bytes = affiche les premiers NUM octets de chaque fichier ; avec le préfixe '-', affiche tout sauf les derniers NUM octets de chaque fichier head-help-lines = affiche les premières NUM lignes au lieu des 10 premières ; avec le préfixe '-', affiche tout sauf les dernières NUM lignes de chaque fichier head-help-quiet = n'affiche jamais les en-têtes donnant les noms de fichiers head-help-verbose = affiche toujours les en-têtes donnant les noms de fichiers head-help-zero-terminated = le délimiteur de ligne est NUL, pas nouvelle ligne # Messages d'erreur head-error-reading-file = erreur lors de la lecture de {$name} : {$err} head-error-parse-error = erreur d'analyse : {$err} head-error-bad-encoding = mauvais encodage d'argument head-error-num-too-large = le nombre d'octets ou de lignes est trop grand head-error-clap = erreur clap : {$err} head-error-invalid-bytes = nombre d'octets invalide : {$err} head-error-invalid-lines = nombre de lignes invalide : {$err} head-error-bad-argument-format = format d'argument incorrect : {$arg} head-error-writing-stdout = erreur lors de l'écriture sur 'sortie standard' : {$err} head-error-cannot-open = impossible d'ouvrir {$name} en lecture # En-têtes de sortie head-header-stdin = ==> entrée standard <== coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/src/000077500000000000000000000000001504311601400236545ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/src/head.rs000066400000000000000000000642531504311601400251350ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (vars) seekable memrchr use clap::{Arg, ArgAction, ArgMatches, Command}; use memchr::memrchr_iter; use std::ffi::OsString; use std::fs::File; use std::io::{self, BufWriter, Read, Seek, SeekFrom, Write}; use std::num::TryFromIntError; #[cfg(unix)] use std::os::fd::{AsRawFd, FromRawFd}; use thiserror::Error; use uucore::display::Quotable; use uucore::error::{FromIo, UError, UResult}; use uucore::line_ending::LineEnding; use uucore::translate; use uucore::{format_usage, show}; const BUF_SIZE: usize = 65536; mod options { pub const BYTES_NAME: &str = "BYTES"; pub const LINES_NAME: &str = "LINES"; pub const QUIET_NAME: &str = "QUIET"; pub const VERBOSE_NAME: &str = "VERBOSE"; pub const ZERO_NAME: &str = "ZERO"; pub const FILES_NAME: &str = "FILE"; pub const PRESUME_INPUT_PIPE: &str = "-PRESUME-INPUT-PIPE"; } mod parse; mod take; use take::copy_all_but_n_bytes; use take::copy_all_but_n_lines; use take::take_lines; #[derive(Error, Debug)] enum HeadError { /// Wrapper around `io::Error` #[error("{}", translate!("head-error-reading-file", "name" => name.clone(), "err" => err))] Io { name: String, err: io::Error }, #[error("{}", translate!("head-error-parse-error", "err" => 0))] ParseError(String), #[error("{}", translate!("head-error-bad-encoding"))] BadEncoding, #[error("{}", translate!("head-error-num-too-large"))] NumTooLarge(#[from] TryFromIntError), #[error("{}", translate!("head-error-clap", "err" => 0))] Clap(#[from] clap::Error), #[error("{0}")] MatchOption(String), } impl UError for HeadError { fn code(&self) -> i32 { 1 } } type HeadResult = Result; pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("head-about")) .override_usage(format_usage(&translate!("head-usage"))) .infer_long_args(true) .arg( Arg::new(options::BYTES_NAME) .short('c') .long("bytes") .value_name("[-]NUM") .help(translate!("head-help-bytes")) .overrides_with_all([options::BYTES_NAME, options::LINES_NAME]) 
.allow_hyphen_values(true), ) .arg( Arg::new(options::LINES_NAME) .short('n') .long("lines") .value_name("[-]NUM") .help(translate!("head-help-lines")) .overrides_with_all([options::LINES_NAME, options::BYTES_NAME]) .allow_hyphen_values(true), ) .arg( Arg::new(options::QUIET_NAME) .short('q') .long("quiet") .visible_alias("silent") .help(translate!("head-help-quiet")) .overrides_with_all([options::VERBOSE_NAME, options::QUIET_NAME]) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::VERBOSE_NAME) .short('v') .long("verbose") .help(translate!("head-help-verbose")) .overrides_with_all([options::QUIET_NAME, options::VERBOSE_NAME]) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::PRESUME_INPUT_PIPE) .long("presume-input-pipe") .alias("-presume-input-pipe") .hide(true) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ZERO_NAME) .short('z') .long("zero-terminated") .help(translate!("head-help-zero-terminated")) .overrides_with(options::ZERO_NAME) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::FILES_NAME) .action(ArgAction::Append) .value_hint(clap::ValueHint::FilePath), ) } #[derive(Debug, PartialEq)] enum Mode { FirstLines(u64), AllButLastLines(u64), FirstBytes(u64), AllButLastBytes(u64), } impl Default for Mode { fn default() -> Self { Self::FirstLines(10) } } impl Mode { fn from(matches: &ArgMatches) -> Result { if let Some(v) = matches.get_one::(options::BYTES_NAME) { let (n, all_but_last) = parse::parse_num(v) .map_err(|err| translate!("head-error-invalid-bytes", "err" => err))?; if all_but_last { Ok(Self::AllButLastBytes(n)) } else { Ok(Self::FirstBytes(n)) } } else if let Some(v) = matches.get_one::(options::LINES_NAME) { let (n, all_but_last) = parse::parse_num(v) .map_err(|err| translate!("head-error-invalid-lines", "err" => err))?; if all_but_last { Ok(Self::AllButLastLines(n)) } else { Ok(Self::FirstLines(n)) } } else { Ok(Self::default()) } } } fn arg_iterate<'a>( mut args: impl uucore::Args + 'a, ) -> HeadResult + 'a>> { // argv[0] is 
always present let first = args.next().unwrap(); if let Some(second) = args.next() { if let Some(s) = second.to_str() { match parse::parse_obsolete(s) { Some(Ok(iter)) => Ok(Box::new(vec![first].into_iter().chain(iter).chain(args))), Some(Err(parse::ParseError)) => Err(HeadError::ParseError( translate!("head-error-bad-argument-format", "arg" => s.quote()), )), None => Ok(Box::new(vec![first, second].into_iter().chain(args))), } } else { Err(HeadError::BadEncoding) } } else { Ok(Box::new(vec![first].into_iter())) } } #[derive(Debug, PartialEq, Default)] struct HeadOptions { pub quiet: bool, pub verbose: bool, pub line_ending: LineEnding, pub presume_input_pipe: bool, pub mode: Mode, pub files: Vec, } impl HeadOptions { ///Construct options from matches pub fn get_from(matches: &ArgMatches) -> Result { let mut options = Self::default(); options.quiet = matches.get_flag(options::QUIET_NAME); options.verbose = matches.get_flag(options::VERBOSE_NAME); options.line_ending = LineEnding::from_zero_flag(matches.get_flag(options::ZERO_NAME)); options.presume_input_pipe = matches.get_flag(options::PRESUME_INPUT_PIPE); options.mode = Mode::from(matches)?; options.files = match matches.get_many::(options::FILES_NAME) { Some(v) => v.cloned().collect(), None => vec!["-".to_owned()], }; Ok(options) } } #[inline] fn wrap_in_stdout_error(err: io::Error) -> io::Error { io::Error::new( err.kind(), translate!("head-error-writing-stdout", "err" => err), ) } fn read_n_bytes(input: impl Read, n: u64) -> io::Result { // Read the first `n` bytes from the `input` reader. let mut reader = input.take(n); // Write those bytes to `stdout`. let stdout = io::stdout(); let mut stdout = stdout.lock(); let bytes_written = io::copy(&mut reader, &mut stdout).map_err(wrap_in_stdout_error)?; // Make sure we finish writing everything to the target before // exiting. Otherwise, when Rust is implicitly flushing, any // error will be silently ignored. 
stdout.flush().map_err(wrap_in_stdout_error)?; Ok(bytes_written) } fn read_n_lines(input: &mut impl io::BufRead, n: u64, separator: u8) -> io::Result { // Read the first `n` lines from the `input` reader. let mut reader = take_lines(input, n, separator); // Write those bytes to `stdout`. let stdout = io::stdout(); let stdout = stdout.lock(); let mut writer = BufWriter::with_capacity(BUF_SIZE, stdout); let bytes_written = io::copy(&mut reader, &mut writer).map_err(wrap_in_stdout_error)?; // Make sure we finish writing everything to the target before // exiting. Otherwise, when Rust is implicitly flushing, any // error will be silently ignored. writer.flush().map_err(wrap_in_stdout_error)?; Ok(bytes_written) } fn catch_too_large_numbers_in_backwards_bytes_or_lines(n: u64) -> Option { usize::try_from(n).ok() } fn read_but_last_n_bytes(mut input: impl Read, n: u64) -> io::Result { let mut bytes_written: u64 = 0; if let Some(n) = catch_too_large_numbers_in_backwards_bytes_or_lines(n) { let stdout = io::stdout(); let mut stdout = stdout.lock(); bytes_written = copy_all_but_n_bytes(&mut input, &mut stdout, n) .map_err(wrap_in_stdout_error)? .try_into() .unwrap(); // Make sure we finish writing everything to the target before // exiting. Otherwise, when Rust is implicitly flushing, any // error will be silently ignored. stdout.flush().map_err(wrap_in_stdout_error)?; } Ok(bytes_written) } fn read_but_last_n_lines(mut input: impl Read, n: u64, separator: u8) -> io::Result { let stdout = io::stdout(); let mut stdout = stdout.lock(); if n == 0 { return io::copy(&mut input, &mut stdout).map_err(wrap_in_stdout_error); } let mut bytes_written: u64 = 0; if let Some(n) = catch_too_large_numbers_in_backwards_bytes_or_lines(n) { bytes_written = copy_all_but_n_lines(input, &mut stdout, n, separator) .map_err(wrap_in_stdout_error)? .try_into() .unwrap(); // Make sure we finish writing everything to the target before // exiting. 
Otherwise, when Rust is implicitly flushing, any // error will be silently ignored. stdout.flush().map_err(wrap_in_stdout_error)?; } Ok(bytes_written) } /// Return the index in `input` just after the `n`th line from the end. /// /// If `n` exceeds the number of lines in this file, then return 0. /// This function rewinds the cursor to the /// beginning of the input just before returning unless there is an /// I/O error. /// /// # Errors /// /// This function returns an error if there is a problem seeking /// through or reading the input. /// /// # Examples /// /// The function returns the index of the byte immediately following /// the line ending character of the `n`th line from the end of the /// input: /// /// ```rust,ignore /// let mut input = Cursor::new("x\ny\nz\n"); /// assert_eq!(find_nth_line_from_end(&mut input, 0, false).unwrap(), 6); /// assert_eq!(find_nth_line_from_end(&mut input, 1, false).unwrap(), 4); /// assert_eq!(find_nth_line_from_end(&mut input, 2, false).unwrap(), 2); /// ``` /// /// If `n` exceeds the number of lines in the file, always return 0: /// /// ```rust,ignore /// let mut input = Cursor::new("x\ny\nz\n"); /// assert_eq!(find_nth_line_from_end(&mut input, 3, false).unwrap(), 0); /// assert_eq!(find_nth_line_from_end(&mut input, 4, false).unwrap(), 0); /// assert_eq!(find_nth_line_from_end(&mut input, 1000, false).unwrap(), 0); /// ``` fn find_nth_line_from_end(input: &mut R, n: u64, separator: u8) -> io::Result where R: Read + Seek, { let file_size = input.seek(SeekFrom::End(0))?; let mut buffer = [0u8; BUF_SIZE]; let mut lines = 0u64; let mut check_last_byte_first_loop = true; let mut bytes_remaining_to_search = file_size; loop { // the casts here are ok, `buffer.len()` should never be above a few k let bytes_to_read_this_loop = bytes_remaining_to_search.min(buffer.len().try_into().unwrap()); let read_start_offset = bytes_remaining_to_search - bytes_to_read_this_loop; let buffer = &mut 
buffer[..bytes_to_read_this_loop.try_into().unwrap()]; bytes_remaining_to_search -= bytes_to_read_this_loop; input.seek(SeekFrom::Start(read_start_offset))?; input.read_exact(buffer)?; // Unfortunately need special handling for the case that the input file doesn't have // a terminating `separator` character. // If the input file doesn't end with a `separator` character, add an extra line to our // `line` counter. In the case that `n` is 0 we need to return here since we've // obviously found our 0th-line-from-the-end offset. if check_last_byte_first_loop { check_last_byte_first_loop = false; if let Some(last_byte_of_file) = buffer.last() { if last_byte_of_file != &separator { if n == 0 { input.rewind()?; return Ok(file_size); } assert_eq!(lines, 0); lines = 1; } } } for separator_offset in memrchr_iter(separator, &buffer[..]) { lines += 1; if lines == n + 1 { input.rewind()?; return Ok(read_start_offset + TryInto::::try_into(separator_offset).unwrap() + 1); } } if read_start_offset == 0 { input.rewind()?; return Ok(0); } } } fn is_seekable(input: &mut File) -> bool { let current_pos = input.stream_position(); current_pos.is_ok() && input.seek(SeekFrom::End(0)).is_ok() && input.seek(SeekFrom::Start(current_pos.unwrap())).is_ok() } fn head_backwards_file(input: &mut File, options: &HeadOptions) -> io::Result { let st = input.metadata()?; let seekable = is_seekable(input); let blksize_limit = uucore::fs::sane_blksize::sane_blksize_from_metadata(&st); if !seekable || st.len() <= blksize_limit || options.presume_input_pipe { head_backwards_without_seek_file(input, options) } else { head_backwards_on_seekable_file(input, options) } } fn head_backwards_without_seek_file(input: &mut File, options: &HeadOptions) -> io::Result { match options.mode { Mode::AllButLastBytes(n) => read_but_last_n_bytes(input, n), Mode::AllButLastLines(n) => read_but_last_n_lines(input, n, options.line_ending.into()), _ => unreachable!(), } } fn head_backwards_on_seekable_file(input: &mut File, 
options: &HeadOptions) -> io::Result { match options.mode { Mode::AllButLastBytes(n) => { let size = input.metadata()?.len(); if n >= size { Ok(0) } else { read_n_bytes(input, size - n) } } Mode::AllButLastLines(n) => { let found = find_nth_line_from_end(input, n, options.line_ending.into())?; read_n_bytes(input, found) } _ => unreachable!(), } } fn head_file(input: &mut File, options: &HeadOptions) -> io::Result { match options.mode { Mode::FirstBytes(n) => read_n_bytes(input, n), Mode::FirstLines(n) => read_n_lines( &mut io::BufReader::with_capacity(BUF_SIZE, input), n, options.line_ending.into(), ), Mode::AllButLastBytes(_) | Mode::AllButLastLines(_) => head_backwards_file(input, options), } } #[allow(clippy::cognitive_complexity)] fn uu_head(options: &HeadOptions) -> UResult<()> { let mut first = true; for file in &options.files { let res = match file.as_str() { "-" => { if (options.files.len() > 1 && !options.quiet) || options.verbose { if !first { println!(); } println!("{}", translate!("head-header-stdin")); } let stdin = io::stdin(); #[cfg(unix)] { let stdin_raw_fd = stdin.as_raw_fd(); let mut stdin_file = unsafe { File::from_raw_fd(stdin_raw_fd) }; let current_pos = stdin_file.stream_position(); if let Ok(current_pos) = current_pos { // We have a seekable file. Ensure we set the input stream to the // last byte read so that any tools that parse the remainder of // the stdin stream read from the correct place. 
let bytes_read = head_file(&mut stdin_file, options)?; stdin_file.seek(SeekFrom::Start(current_pos + bytes_read))?; } else { let _bytes_read = head_file(&mut stdin_file, options)?; } } #[cfg(not(unix))] { let mut stdin = stdin.lock(); match options.mode { Mode::FirstBytes(n) => read_n_bytes(&mut stdin, n), Mode::AllButLastBytes(n) => read_but_last_n_bytes(&mut stdin, n), Mode::FirstLines(n) => { read_n_lines(&mut stdin, n, options.line_ending.into()) } Mode::AllButLastLines(n) => { read_but_last_n_lines(&mut stdin, n, options.line_ending.into()) } }?; } Ok(()) } name => { let mut file = match File::open(name) { Ok(f) => f, Err(err) => { show!(err.map_err_context( || translate!("head-error-cannot-open", "name" => name.quote()) )); continue; } }; if (options.files.len() > 1 && !options.quiet) || options.verbose { if !first { println!(); } println!("==> {name} <=="); } head_file(&mut file, options)?; Ok(()) } }; if let Err(e) = res { let name = if file.as_str() == "-" { "standard input" } else { file }; return Err(HeadError::Io { name: name.to_string(), err: e, } .into()); } first = false; } // Even though this is returning `Ok`, it is possible that a call // to `show!()` and thus a call to `set_exit_code()` has been // called above. If that happens, then this process will exit with // a non-zero exit code. 
Ok(()) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(arg_iterate(args)?)?; let args = match HeadOptions::get_from(&matches) { Ok(o) => o, Err(s) => { return Err(HeadError::MatchOption(s).into()); } }; uu_head(&args) } #[cfg(test)] mod tests { use io::Cursor; use std::ffi::OsString; use super::*; fn options(args: &str) -> Result { let combined = "head ".to_owned() + args; let args = combined.split_whitespace().map(OsString::from); let matches = uu_app() .get_matches_from(arg_iterate(args).map_err(|_| String::from("Arg iterate failed"))?); HeadOptions::get_from(&matches) } #[test] fn test_args_modes() { let args = options("-n -10M -vz").unwrap(); assert_eq!(args.line_ending, LineEnding::Nul); assert!(args.verbose); assert_eq!(args.mode, Mode::AllButLastLines(10 * 1024 * 1024)); } #[test] fn test_gnu_compatibility() { let args = options("-n 1 -c 1 -n 5 -c kiB -vqvqv").unwrap(); // spell-checker:disable-line assert_eq!(args.mode, Mode::FirstBytes(1024)); assert!(args.verbose); assert_eq!(options("-5").unwrap().mode, Mode::FirstLines(5)); assert_eq!(options("-2b").unwrap().mode, Mode::FirstBytes(1024)); assert_eq!(options("-5 -c 1").unwrap().mode, Mode::FirstBytes(1)); } #[test] #[allow(clippy::cognitive_complexity)] fn all_args_test() { assert!(options("--silent").unwrap().quiet); assert!(options("--quiet").unwrap().quiet); assert!(options("-q").unwrap().quiet); assert!(options("--verbose").unwrap().verbose); assert!(options("-v").unwrap().verbose); assert_eq!( options("--zero-terminated").unwrap().line_ending, LineEnding::Nul ); assert_eq!(options("-z").unwrap().line_ending, LineEnding::Nul); assert_eq!(options("--lines 15").unwrap().mode, Mode::FirstLines(15)); assert_eq!(options("-n 15").unwrap().mode, Mode::FirstLines(15)); assert_eq!(options("--bytes 15").unwrap().mode, Mode::FirstBytes(15)); assert_eq!(options("-c 15").unwrap().mode, Mode::FirstBytes(15)); } #[test] fn test_options_errors() { 
assert!(options("-n IsThisTheRealLife?").is_err()); assert!(options("-c IsThisJustFantasy").is_err()); } #[test] fn test_options_correct_defaults() { let opts = HeadOptions::default(); assert!(!opts.verbose); assert!(!opts.quiet); assert_eq!(opts.line_ending, LineEnding::Newline); assert_eq!(opts.mode, Mode::FirstLines(10)); assert!(opts.files.is_empty()); } fn arg_outputs(src: &str) -> Result { let split = src.split_whitespace().map(OsString::from); match arg_iterate(split) { Ok(args) => { let vec = args .map(|s| s.to_str().unwrap().to_owned()) .collect::>(); Ok(vec.join(" ")) } Err(_) => Err(()), } } #[test] fn test_arg_iterate() { // test that normal args remain unchanged assert_eq!( arg_outputs("head -n -5 -zv"), Ok("head -n -5 -zv".to_owned()) ); // tests that nonsensical args are unchanged assert_eq!( arg_outputs("head -to_be_or_not_to_be,..."), Ok("head -to_be_or_not_to_be,...".to_owned()) ); //test that the obsolete syntax is unrolled assert_eq!( arg_outputs("head -123qvqvqzc"), // spell-checker:disable-line Ok("head -q -z -c 123".to_owned()) ); //test that bad obsoletes are an error assert!(arg_outputs("head -123FooBar").is_err()); //test overflow assert!(arg_outputs("head -100000000000000000000000000000000000000000").is_ok()); //test that empty args remain unchanged assert_eq!(arg_outputs("head"), Ok("head".to_owned())); } #[test] #[cfg(target_os = "linux")] fn test_arg_iterate_bad_encoding() { use std::os::unix::ffi::OsStringExt; let invalid = OsString::from_vec(vec![b'\x80', b'\x81']); // this arises from a conversion from OsString to &str assert!(arg_iterate(vec![OsString::from("head"), invalid].into_iter()).is_err()); } #[test] fn read_early_exit() { let mut empty = io::BufReader::new(Cursor::new(Vec::new())); assert!(read_n_bytes(&mut empty, 0).is_ok()); assert!(read_n_lines(&mut empty, 0, b'\n').is_ok()); } #[test] fn test_find_nth_line_from_end() { // Make sure our input buffer is several multiples of BUF_SIZE in size // such that we can be 
reasonably confident we've exercised all logic paths. // Make the contents of the buffer look like... // aaaa\n // aaaa\n // aaaa\n // aaaa\n // aaaa\n // ... // This will make it easier to validate the results since each line will have // 5 bytes in it. let minimum_buffer_size = BUF_SIZE * 4; let mut input_buffer = vec![]; let mut loop_iteration: u64 = 0; while input_buffer.len() < minimum_buffer_size { for _n in 0..4 { input_buffer.push(b'a'); } loop_iteration += 1; input_buffer.push(b'\n'); } let lines_in_input_file = loop_iteration; let input_length = lines_in_input_file * 5; assert_eq!(input_length, input_buffer.len().try_into().unwrap()); let mut input = Cursor::new(input_buffer); // We now have loop_iteration lines in the buffer Now walk backwards through the buffer // to confirm everything parses correctly. // Use a large step size to prevent the test from taking too long, but don't use a power // of 2 in case we miss some corner case. let step_size = 511; for n in (0..lines_in_input_file).filter(|v| v % step_size == 0) { // The 5*n comes from 5-bytes per row. assert_eq!( find_nth_line_from_end(&mut input, n, b'\n').unwrap(), input_length - 5 * n ); } // Now confirm that if we query with a value >= lines_in_input_file we get an offset // of 0 assert_eq!( find_nth_line_from_end(&mut input, lines_in_input_file, b'\n').unwrap(), 0 ); assert_eq!( find_nth_line_from_end(&mut input, lines_in_input_file + 1, b'\n').unwrap(), 0 ); assert_eq!( find_nth_line_from_end(&mut input, lines_in_input_file + 1000, b'\n').unwrap(), 0 ); } #[test] fn test_find_nth_line_from_end_non_terminated() { // Validate the find_nth_line_from_end for files that are not terminated with a final // newline character. 
let input_file = "a\nb"; let mut input = Cursor::new(input_file); assert_eq!(find_nth_line_from_end(&mut input, 0, b'\n').unwrap(), 3); assert_eq!(find_nth_line_from_end(&mut input, 1, b'\n').unwrap(), 2); } #[test] fn test_find_nth_line_from_end_empty() { // Validate the find_nth_line_from_end for files that are empty. let input_file = ""; let mut input = Cursor::new(input_file); assert_eq!(find_nth_line_from_end(&mut input, 0, b'\n').unwrap(), 0); assert_eq!(find_nth_line_from_end(&mut input, 1, b'\n').unwrap(), 0); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/src/main.rs000066400000000000000000000000271504311601400251450ustar00rootroot00000000000000uucore::bin!(uu_head); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/src/parse.rs000066400000000000000000000152111504311601400253340ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use std::ffi::OsString; use uucore::parser::parse_size::{ParseSizeError, parse_size_u64_max}; #[derive(PartialEq, Eq, Debug)] pub struct ParseError; /// Parses obsolete syntax /// head -NUM\[kmzv\] // spell-checker:disable-line pub fn parse_obsolete(src: &str) -> Option, ParseError>> { let mut chars = src.char_indices(); if let Some((mut num_start, '-')) = chars.next() { num_start += 1; let mut num_end = src.len(); let mut has_num = false; let mut plus_possible = false; let mut last_char = 0 as char; for (n, c) in &mut chars { if c.is_ascii_digit() { has_num = true; plus_possible = false; } else if c == '+' && plus_possible { plus_possible = false; num_start += 1; } else { num_end = n; last_char = c; break; } } if has_num { process_num_block(&src[num_start..num_end], last_char, &mut chars) } else { None } } else { None } } /// Processes the numeric block of the input string to generate the appropriate options. 
fn process_num_block( src: &str, last_char: char, chars: &mut std::str::CharIndices, ) -> Option, ParseError>> { let num = match src.parse::() { Ok(n) => n, Err(e) if *e.kind() == std::num::IntErrorKind::PosOverflow => usize::MAX, _ => return Some(Err(ParseError)), }; let mut quiet = false; let mut verbose = false; let mut zero_terminated = false; let mut multiplier = None; let mut c = last_char; loop { // note that here, we only match lower case 'k', 'c', and 'm' match c { // we want to preserve order // this also saves us 1 heap allocation 'q' => { quiet = true; verbose = false; } 'v' => { verbose = true; quiet = false; } 'z' => zero_terminated = true, 'c' => multiplier = Some(1), 'b' => multiplier = Some(512), 'k' => multiplier = Some(1024), 'm' => multiplier = Some(1024 * 1024), '\0' => {} _ => return Some(Err(ParseError)), } if let Some((_, next)) = chars.next() { c = next; } else { break; } } let mut options = Vec::new(); if quiet { options.push(OsString::from("-q")); } if verbose { options.push(OsString::from("-v")); } if zero_terminated { options.push(OsString::from("-z")); } if let Some(n) = multiplier { options.push(OsString::from("-c")); let num = num.saturating_mul(n); options.push(OsString::from(format!("{num}"))); } else { options.push(OsString::from("-n")); options.push(OsString::from(format!("{num}"))); } Some(Ok(options)) } /// Parses an -c or -n argument, /// the bool specifies whether to read from the end pub fn parse_num(src: &str) -> Result<(u64, bool), ParseSizeError> { let mut size_string = src.trim(); let mut all_but_last = false; if let Some(c) = size_string.chars().next() { if c == '+' || c == '-' { // head: '+' is not documented (8.32 man pages) size_string = &size_string[1..]; if c == '-' { all_but_last = true; } } } else { return Err(ParseSizeError::ParseFailure(src.to_string())); } // remove leading zeros so that size is interpreted as decimal, not octal let trimmed_string = size_string.trim_start_matches('0'); if 
trimmed_string.is_empty() { Ok((0, all_but_last)) } else { parse_size_u64_max(trimmed_string).map(|n| (n, all_but_last)) } } #[cfg(test)] mod tests { use super::*; fn obsolete(src: &str) -> Option, ParseError>> { let r = parse_obsolete(src); match r { Some(s) => match s { Ok(v) => Some(Ok(v .into_iter() .map(|s| s.to_str().unwrap().to_owned()) .collect())), Err(e) => Some(Err(e)), }, None => None, } } fn obsolete_result(src: &[&str]) -> Option, ParseError>> { Some(Ok(src.iter().map(|&s| s.to_string()).collect())) } #[test] #[allow(clippy::cognitive_complexity)] fn test_parse_numbers_obsolete() { assert_eq!(obsolete("-5"), obsolete_result(&["-n", "5"])); assert_eq!(obsolete("-100"), obsolete_result(&["-n", "100"])); assert_eq!(obsolete("-5m"), obsolete_result(&["-c", "5242880"])); assert_eq!(obsolete("-1k"), obsolete_result(&["-c", "1024"])); assert_eq!(obsolete("-2b"), obsolete_result(&["-c", "1024"])); assert_eq!(obsolete("-1mmk"), obsolete_result(&["-c", "1024"])); assert_eq!(obsolete("-1vz"), obsolete_result(&["-v", "-z", "-n", "1"])); assert_eq!( obsolete("-1vzqvq"), // spell-checker:disable-line obsolete_result(&["-q", "-z", "-n", "1"]) ); assert_eq!(obsolete("-1vzc"), obsolete_result(&["-v", "-z", "-c", "1"])); assert_eq!( obsolete("-105kzm"), obsolete_result(&["-z", "-c", "110100480"]) ); } #[test] fn test_parse_errors_obsolete() { assert_eq!(obsolete("-5n"), Some(Err(ParseError))); assert_eq!(obsolete("-5c5"), Some(Err(ParseError))); } #[test] fn test_parse_obsolete_no_match() { assert_eq!(obsolete("-k"), None); assert_eq!(obsolete("asd"), None); } #[test] #[cfg(target_pointer_width = "64")] fn test_parse_obsolete_overflow_x64() { assert_eq!( obsolete("-1000000000000000m"), obsolete_result(&["-c", "18446744073709551615"]) ); assert_eq!( obsolete("-10000000000000000000000"), obsolete_result(&["-n", "18446744073709551615"]) ); } #[test] #[cfg(target_pointer_width = "32")] fn test_parse_obsolete_overflow_x32() { assert_eq!( obsolete("-42949672960"), 
obsolete_result(&["-n", "4294967295"]) ); assert_eq!( obsolete("-42949672k"), obsolete_result(&["-c", "4294967295"]) ); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/head/src/take.rs000066400000000000000000000655311504311601400251600ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. //! Take all but the last elements of an iterator. use memchr::memchr_iter; use std::collections::VecDeque; use std::io::{ErrorKind, Read, Write}; const BUF_SIZE: usize = 65536; struct TakeAllBuffer { buffer: Vec, start_index: usize, } impl TakeAllBuffer { fn new() -> Self { TakeAllBuffer { buffer: vec![], start_index: 0, } } fn fill_buffer(&mut self, reader: &mut impl Read) -> std::io::Result { self.buffer.resize(BUF_SIZE, 0); self.start_index = 0; loop { match reader.read(&mut self.buffer[..]) { Ok(n) => { self.buffer.truncate(n); return Ok(n); } Err(e) if e.kind() == ErrorKind::Interrupted => (), Err(e) => return Err(e), } } } fn write_bytes_exact(&mut self, writer: &mut impl Write, bytes: usize) -> std::io::Result<()> { let buffer_to_write = &self.remaining_buffer()[..bytes]; writer.write_all(buffer_to_write)?; self.start_index += bytes; assert!(self.start_index <= self.buffer.len()); Ok(()) } fn write_all(&mut self, writer: &mut impl Write) -> std::io::Result { let remaining_bytes = self.remaining_bytes(); self.write_bytes_exact(writer, remaining_bytes)?; Ok(remaining_bytes) } fn write_bytes_limit( &mut self, writer: &mut impl Write, max_bytes: usize, ) -> std::io::Result { let bytes_to_write = self.remaining_bytes().min(max_bytes); self.write_bytes_exact(writer, bytes_to_write)?; Ok(bytes_to_write) } fn remaining_buffer(&self) -> &[u8] { &self.buffer[self.start_index..] 
} fn remaining_bytes(&self) -> usize { self.remaining_buffer().len() } fn is_empty(&self) -> bool { assert!(self.start_index <= self.buffer.len()); self.start_index == self.buffer.len() } } /// Function to copy all but `n` bytes from the reader to the writer. /// /// If `n` exceeds the number of bytes in the input file then nothing is copied. /// If no errors are encountered then the function returns the number of bytes /// copied. /// /// Algorithm for this function is as follows... /// 1 - Chunks of the input file are read into a queue of [`TakeAllBuffer`] instances. /// Chunks are read until at least we have enough data to write out the entire contents of the /// first [`TakeAllBuffer`] in the queue whilst still retaining at least `n` bytes in the queue. /// If we hit `EoF` at any point, stop reading. /// 2 - Assess whether we managed to queue up greater-than `n` bytes. If not, we must be done, in /// which case break and return. /// 3 - Write either the full first buffer of data, or just enough bytes to get back down to having /// the required `n` bytes of data queued. /// 4 - Go back to (1). pub fn copy_all_but_n_bytes( reader: &mut impl Read, writer: &mut impl Write, n: usize, ) -> std::io::Result { let mut buffers: VecDeque = VecDeque::new(); let mut empty_buffer_pool: Vec = vec![]; let mut buffered_bytes: usize = 0; let mut total_bytes_copied = 0; loop { loop { // Try to buffer at least enough to write the entire first buffer. let front_buffer = buffers.front(); if let Some(front_buffer) = front_buffer { if buffered_bytes >= n + front_buffer.remaining_bytes() { break; } } let mut new_buffer = empty_buffer_pool.pop().unwrap_or_else(TakeAllBuffer::new); let filled_bytes = new_buffer.fill_buffer(reader)?; if filled_bytes == 0 { // filled_bytes==0 => Eof break; } buffers.push_back(new_buffer); buffered_bytes += filled_bytes; } // If we've got <=n bytes buffered here we have nothing left to do. 
if buffered_bytes <= n { break; } let excess_buffered_bytes = buffered_bytes - n; // Since we have some data buffered, can assume we have >=1 buffer - i.e. safe to unwrap. let front_buffer = buffers.front_mut().unwrap(); let bytes_written = front_buffer.write_bytes_limit(writer, excess_buffered_bytes)?; buffered_bytes -= bytes_written; total_bytes_copied += bytes_written; // If the front buffer is empty (which it probably is), push it into the empty-buffer-pool. if front_buffer.is_empty() { empty_buffer_pool.push(buffers.pop_front().unwrap()); } } Ok(total_bytes_copied) } struct TakeAllLinesBuffer { inner: TakeAllBuffer, terminated_lines: usize, partial_line: bool, } struct BytesAndLines { bytes: usize, terminated_lines: usize, } impl TakeAllLinesBuffer { fn new() -> Self { TakeAllLinesBuffer { inner: TakeAllBuffer::new(), terminated_lines: 0, partial_line: false, } } fn fill_buffer( &mut self, reader: &mut impl Read, separator: u8, ) -> std::io::Result { let bytes_read = self.inner.fill_buffer(reader)?; // Count the number of lines... self.terminated_lines = memchr_iter(separator, self.inner.remaining_buffer()).count(); if let Some(last_char) = self.inner.remaining_buffer().last() { if *last_char != separator { self.partial_line = true; } } Ok(BytesAndLines { bytes: bytes_read, terminated_lines: self.terminated_lines, }) } fn write_lines( &mut self, writer: &mut impl Write, max_lines: usize, separator: u8, ) -> std::io::Result { assert!(max_lines > 0, "Must request at least 1 line."); let ret; if max_lines > self.terminated_lines { ret = BytesAndLines { bytes: self.inner.write_all(writer)?, terminated_lines: self.terminated_lines, }; self.terminated_lines = 0; } else { let index = memchr_iter(separator, self.inner.remaining_buffer()).nth(max_lines - 1); assert!( index.is_some(), "Somehow we're being asked to write more lines than we have, that's a bug in copy_all_but_lines." 
); let index = index.unwrap(); // index is the offset of the separator character, zero indexed. Need to add 1 to get the number // of bytes to write. let bytes_to_write = index + 1; self.inner.write_bytes_exact(writer, bytes_to_write)?; ret = BytesAndLines { bytes: bytes_to_write, terminated_lines: max_lines, }; self.terminated_lines -= max_lines; } Ok(ret) } fn is_empty(&self) -> bool { self.inner.is_empty() } fn terminated_lines(&self) -> usize { self.terminated_lines } fn partial_line(&self) -> bool { self.partial_line } } /// Function to copy all but `n` lines from the reader to the writer. /// /// Lines are inferred from the `separator` value passed in by the client. /// If `n` exceeds the number of lines in the input file then nothing is copied. /// The last line in the file is not required to end with a `separator` character. /// If no errors are encountered then they function returns the number of bytes /// copied. /// /// Algorithm for this function is as follows... /// 1 - Chunks of the input file are read into a queue of [`TakeAllLinesBuffer`] instances. /// Chunks are read until at least we have enough lines that we can write out the entire /// contents of the first [`TakeAllLinesBuffer`] in the queue whilst still retaining at least /// `n` lines in the queue. /// If we hit `EoF` at any point, stop reading. /// 2 - Asses whether we managed to queue up greater-than `n` lines. If not, we must be done, in /// which case break and return. /// 3 - Write either the full first buffer of data, or just enough lines to get back down to /// having the required `n` lines of data queued. /// 4 - Go back to (1). /// /// Note that lines will regularly straddle multiple [`TakeAllLinesBuffer`] instances. The `partial_line` /// flag on [`TakeAllLinesBuffer`] tracks this, and we use that to ensure that we write out enough /// lines in the case that the input file doesn't end with a `separator` character. 
pub fn copy_all_but_n_lines( mut reader: R, writer: &mut W, n: usize, separator: u8, ) -> std::io::Result { // This function requires `n` > 0. Assert it! assert!(n > 0); let mut buffers: VecDeque = VecDeque::new(); let mut buffered_terminated_lines: usize = 0; let mut empty_buffers = vec![]; let mut total_bytes_copied = 0; loop { // Try to buffer enough such that we can write out the entire first buffer. loop { // First check if we have enough lines buffered that we can write out the entire // front buffer. If so, break. let front_buffer = buffers.front(); if let Some(front_buffer) = front_buffer { if buffered_terminated_lines > n + front_buffer.terminated_lines() { break; } } // Else we need to try to buffer more data... let mut new_buffer = empty_buffers.pop().unwrap_or_else(TakeAllLinesBuffer::new); let fill_result = new_buffer.fill_buffer(&mut reader, separator)?; if fill_result.bytes == 0 { // fill_result.bytes == 0 => EoF. break; } buffered_terminated_lines += fill_result.terminated_lines; buffers.push_back(new_buffer); } // If we've not buffered more lines than we need to hold back we must be done. if buffered_terminated_lines < n || (buffered_terminated_lines == n && !buffers.back().unwrap().partial_line()) { break; } let excess_buffered_terminated_lines = buffered_terminated_lines - n; // Since we have some data buffered can assume we have at least 1 buffer, so safe to unwrap. let lines_to_write = if buffers.back().unwrap().partial_line() { excess_buffered_terminated_lines + 1 } else { excess_buffered_terminated_lines }; let front_buffer = buffers.front_mut().unwrap(); let write_result = front_buffer.write_lines(writer, lines_to_write, separator)?; buffered_terminated_lines -= write_result.terminated_lines; total_bytes_copied += write_result.bytes; // If the front buffer is empty (which it probably is), push it into the empty-buffer-pool. 
if front_buffer.is_empty() { empty_buffers.push(buffers.pop_front().unwrap()); } } Ok(total_bytes_copied) } /// Like `std::io::Take`, but for lines instead of bytes. /// /// This struct is generally created by calling [`take_lines`] on a /// reader. Please see the documentation of [`take_lines`] for more /// details. pub struct TakeLines { inner: T, limit: u64, separator: u8, } impl Read for TakeLines { /// Read bytes from a buffer up to the requested number of lines. fn read(&mut self, buf: &mut [u8]) -> std::io::Result { if self.limit == 0 { return Ok(0); } match self.inner.read(buf) { Ok(0) => Ok(0), Ok(n) => { for i in memchr_iter(self.separator, &buf[..n]) { self.limit -= 1; if self.limit == 0 { return Ok(i + 1); } } Ok(n) } Err(e) => Err(e), } } } /// Create an adaptor that will read at most `limit` lines from a given reader. /// /// This function returns a new instance of `Read` that will read at /// most `limit` lines, after which it will always return EOF /// (`Ok(0)`). /// /// The `separator` defines the character to interpret as the line /// ending. For the usual notion of "line", set this to `b'\n'`. 
pub fn take_lines(reader: R, limit: u64, separator: u8) -> TakeLines { TakeLines { inner: reader, limit, separator, } } #[cfg(test)] mod tests { use std::io::{BufRead, BufReader}; use crate::take::{ TakeAllBuffer, TakeAllLinesBuffer, copy_all_but_n_bytes, copy_all_but_n_lines, take_lines, }; #[test] fn test_take_all_buffer_exact_bytes() { let input_buffer = "abc"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_buffer = TakeAllBuffer::new(); let bytes_read = take_all_buffer.fill_buffer(&mut input_reader).unwrap(); assert_eq!(bytes_read, input_buffer.len()); assert_eq!(take_all_buffer.remaining_bytes(), input_buffer.len()); assert_eq!(take_all_buffer.remaining_buffer(), input_buffer.as_bytes()); assert!(!take_all_buffer.is_empty()); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); for (index, c) in input_buffer.bytes().enumerate() { take_all_buffer .write_bytes_exact(&mut output_reader, 1) .unwrap(); let buf_ref = output_reader.get_ref(); assert_eq!(buf_ref.len(), index + 1); assert_eq!(buf_ref[index], c); assert_eq!( take_all_buffer.remaining_bytes(), input_buffer.len() - (index + 1) ); assert_eq!( take_all_buffer.remaining_buffer(), &input_buffer.as_bytes()[index + 1..] 
); } assert!(take_all_buffer.is_empty()); assert_eq!(take_all_buffer.remaining_bytes(), 0); assert_eq!(take_all_buffer.remaining_buffer(), "".as_bytes()); } #[test] fn test_take_all_buffer_all_bytes() { let input_buffer = "abc"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_buffer = TakeAllBuffer::new(); let bytes_read = take_all_buffer.fill_buffer(&mut input_reader).unwrap(); assert_eq!(bytes_read, input_buffer.len()); assert_eq!(take_all_buffer.remaining_bytes(), input_buffer.len()); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_written = take_all_buffer.write_all(&mut output_reader).unwrap(); assert_eq!(bytes_written, input_buffer.len()); assert_eq!(output_reader.get_ref().as_slice(), input_buffer.as_bytes()); assert!(take_all_buffer.is_empty()); assert_eq!(take_all_buffer.remaining_bytes(), 0); assert_eq!(take_all_buffer.remaining_buffer(), "".as_bytes()); // Now do a write_all on an empty TakeAllBuffer. Confirm correct behavior. let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_written = take_all_buffer.write_all(&mut output_reader).unwrap(); assert_eq!(bytes_written, 0); assert_eq!(output_reader.get_ref().as_slice().len(), 0); } #[test] fn test_take_all_buffer_limit_bytes() { let input_buffer = "abc"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_buffer = TakeAllBuffer::new(); let bytes_read = take_all_buffer.fill_buffer(&mut input_reader).unwrap(); assert_eq!(bytes_read, input_buffer.len()); assert_eq!(take_all_buffer.remaining_bytes(), input_buffer.len()); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); // Write all but 1 bytes. 
let bytes_to_write = input_buffer.len() - 1; let bytes_written = take_all_buffer .write_bytes_limit(&mut output_reader, bytes_to_write) .unwrap(); assert_eq!(bytes_written, bytes_to_write); assert_eq!( output_reader.get_ref().as_slice(), &input_buffer.as_bytes()[..bytes_to_write] ); assert!(!take_all_buffer.is_empty()); assert_eq!(take_all_buffer.remaining_bytes(), 1); assert_eq!( take_all_buffer.remaining_buffer(), &input_buffer.as_bytes()[bytes_to_write..] ); // Write 1 more byte - i.e. last byte in buffer. let bytes_to_write = 1; let bytes_written = take_all_buffer .write_bytes_limit(&mut output_reader, bytes_to_write) .unwrap(); assert_eq!(bytes_written, bytes_to_write); assert_eq!(output_reader.get_ref().as_slice(), input_buffer.as_bytes()); assert!(take_all_buffer.is_empty()); assert_eq!(take_all_buffer.remaining_bytes(), 0); assert_eq!(take_all_buffer.remaining_buffer(), "".as_bytes()); // Write 1 more byte - i.e. confirm behavior on already empty buffer. let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_to_write = 1; let bytes_written = take_all_buffer .write_bytes_limit(&mut output_reader, bytes_to_write) .unwrap(); assert_eq!(bytes_written, 0); assert_eq!(output_reader.get_ref().as_slice().len(), 0); assert!(take_all_buffer.is_empty()); assert_eq!(take_all_buffer.remaining_bytes(), 0); assert_eq!(take_all_buffer.remaining_buffer(), "".as_bytes()); } #[test] #[allow(clippy::cognitive_complexity)] fn test_take_all_lines_buffer() { // 3 lines with new-lines and one partial line. 
let input_buffer = "a\nb\nc\ndef"; let separator = b'\n'; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_lines_buffer = TakeAllLinesBuffer::new(); let fill_result = take_all_lines_buffer .fill_buffer(&mut input_reader, separator) .unwrap(); assert_eq!(fill_result.bytes, input_buffer.len()); assert_eq!(fill_result.terminated_lines, 3); assert_eq!(take_all_lines_buffer.terminated_lines(), 3); assert!(!take_all_lines_buffer.is_empty()); assert!(take_all_lines_buffer.partial_line()); // Write 1st line. let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let lines_to_write = 1; let write_result = take_all_lines_buffer .write_lines(&mut output_reader, lines_to_write, separator) .unwrap(); assert_eq!(write_result.bytes, 2); assert_eq!(write_result.terminated_lines, lines_to_write); assert_eq!(output_reader.get_ref().as_slice(), "a\n".as_bytes()); assert!(!take_all_lines_buffer.is_empty()); assert_eq!(take_all_lines_buffer.terminated_lines(), 2); // Write 2nd line. let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let lines_to_write = 1; let write_result = take_all_lines_buffer .write_lines(&mut output_reader, lines_to_write, separator) .unwrap(); assert_eq!(write_result.bytes, 2); assert_eq!(write_result.terminated_lines, lines_to_write); assert_eq!(output_reader.get_ref().as_slice(), "b\n".as_bytes()); assert!(!take_all_lines_buffer.is_empty()); assert_eq!(take_all_lines_buffer.terminated_lines(), 1); // Now try to write 3 lines even though we have only 1 line remaining. Should write everything left in the buffer. 
let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let lines_to_write = 3; let write_result = take_all_lines_buffer .write_lines(&mut output_reader, lines_to_write, separator) .unwrap(); assert_eq!(write_result.bytes, 5); assert_eq!(write_result.terminated_lines, 1); assert_eq!(output_reader.get_ref().as_slice(), "c\ndef".as_bytes()); assert!(take_all_lines_buffer.is_empty()); assert_eq!(take_all_lines_buffer.terminated_lines(), 0); // Test empty buffer. let input_buffer = ""; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_lines_buffer = TakeAllLinesBuffer::new(); let fill_result = take_all_lines_buffer .fill_buffer(&mut input_reader, separator) .unwrap(); assert_eq!(fill_result.bytes, 0); assert_eq!(fill_result.terminated_lines, 0); assert_eq!(take_all_lines_buffer.terminated_lines(), 0); assert!(take_all_lines_buffer.is_empty()); assert!(!take_all_lines_buffer.partial_line()); // Test buffer that ends with newline. let input_buffer = "\n"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut take_all_lines_buffer = TakeAllLinesBuffer::new(); let fill_result = take_all_lines_buffer .fill_buffer(&mut input_reader, separator) .unwrap(); assert_eq!(fill_result.bytes, 1); assert_eq!(fill_result.terminated_lines, 1); assert_eq!(take_all_lines_buffer.terminated_lines(), 1); assert!(!take_all_lines_buffer.is_empty()); assert!(!take_all_lines_buffer.partial_line()); } #[test] fn test_copy_all_but_n_bytes() { // Test the copy_all_but_bytes fn. Test several scenarios... // 1 - Hold back more bytes than the input will provide. Should have nothing written to output. 
let input_buffer = "a\nb\nc\ndef"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_bytes( &mut input_reader, &mut output_reader, input_buffer.len() + 1, ) .unwrap(); assert_eq!(bytes_copied, 0); // 2 - Hold back exactly the number of bytes the input will provide. Should have nothing written to output. let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_bytes(&mut input_reader, &mut output_reader, input_buffer.len()) .unwrap(); assert_eq!(bytes_copied, 0); // 3 - Hold back 1 fewer byte than input will provide. Should have one byte written to output. let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_bytes( &mut input_reader, &mut output_reader, input_buffer.len() - 1, ) .unwrap(); assert_eq!(bytes_copied, 1); assert_eq!(output_reader.get_ref()[..], input_buffer.as_bytes()[0..1]); } #[test] fn test_copy_all_but_n_lines() { // Test the copy_all_but_lines fn. Test several scenarios... // 1 - Hold back more lines than the input will provide. Should have nothing written to output. let input_buffer = "a\nb\nc\ndef"; let separator = b'\n'; let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 5, separator).unwrap(); assert_eq!(bytes_copied, 0); // 2 - Hold back exactly the number of lines the input will provide. Should have nothing written to output. 
let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 4, separator).unwrap(); assert_eq!(bytes_copied, 0); // 3 - Hold back 1 fewer lines than input will provide. Should have one line written to output. let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 3, separator).unwrap(); assert_eq!(bytes_copied, 2); assert_eq!(output_reader.get_ref()[..], input_buffer.as_bytes()[0..2]); // Now test again with an input that has a new-line ending... // 4 - Hold back more lines than the input will provide. Should have nothing written to output. let input_buffer = "a\nb\nc\ndef\n"; let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 5, separator).unwrap(); assert_eq!(bytes_copied, 0); // 5 - Hold back exactly the number of lines the input will provide. Should have nothing written to output. let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 4, separator).unwrap(); assert_eq!(bytes_copied, 0); // 6 - Hold back 1 fewer lines than input will provide. Should have one line written to output. 
let mut input_reader = std::io::Cursor::new(input_buffer); let mut output_reader = std::io::Cursor::new(vec![0x10; 0]); let bytes_copied = copy_all_but_n_lines(&mut input_reader, &mut output_reader, 3, separator).unwrap(); assert_eq!(bytes_copied, 2); assert_eq!(output_reader.get_ref()[..], input_buffer.as_bytes()[0..2]); } #[test] fn test_zero_lines() { let input_reader = std::io::Cursor::new("a\nb\nc\n"); let output_reader = BufReader::new(take_lines(input_reader, 0, b'\n')); let mut iter = output_reader.lines().map(|l| l.unwrap()); assert_eq!(None, iter.next()); } #[test] fn test_fewer_lines() { let input_reader = std::io::Cursor::new("a\nb\nc\n"); let output_reader = BufReader::new(take_lines(input_reader, 2, b'\n')); let mut iter = output_reader.lines().map(|l| l.unwrap()); assert_eq!(Some(String::from("a")), iter.next()); assert_eq!(Some(String::from("b")), iter.next()); assert_eq!(None, iter.next()); } #[test] fn test_more_lines() { let input_reader = std::io::Cursor::new("a\nb\nc\n"); let output_reader = BufReader::new(take_lines(input_reader, 4, b'\n')); let mut iter = output_reader.lines().map(|l| l.unwrap()); assert_eq!(Some(String::from("a")), iter.next()); assert_eq!(Some(String::from("b")), iter.next()); assert_eq!(Some(String::from("c")), iter.next()); assert_eq!(None, iter.next()); } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/000077500000000000000000000000001504311601400234565ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/Cargo.toml000066400000000000000000000011631504311601400254070ustar00rootroot00000000000000[package] name = "uu_hostid" description = "hostid ~ (uutils) display the numeric identifier of the current host" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/hostid" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true 
readme.workspace = true [lints] workspace = true [lib] path = "src/hostid.rs" [dependencies] clap = { workspace = true } libc = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "hostid" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/LICENSE000077700000000000000000000000001504311601400263242../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/locales/000077500000000000000000000000001504311601400251005ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/locales/en-US.ftl000066400000000000000000000001621504311601400265350ustar00rootroot00000000000000hostid-about = Print the numeric identifier (in hexadecimal) for the current host hostid-usage = hostid [options] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/src/000077500000000000000000000000001504311601400242455ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/src/hostid.rs000066400000000000000000000022231504311601400261040ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) gethostid use clap::Command; use libc::{c_long, gethostid}; use uucore::{error::UResult, format_usage}; use uucore::translate; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { uu_app().try_get_matches_from(args)?; hostid(); Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("hostid-about")) .override_usage(format_usage(&translate!("hostid-usage"))) .infer_long_args(true) } fn hostid() { /* * POSIX says gethostid returns a "32-bit identifier" but is silent * whether it's sign-extended. Turn off any sign-extension. 
This * is a no-op unless unsigned int is wider than 32 bits. */ let mut result: c_long; unsafe { result = gethostid(); } #[allow(overflowing_literals)] let mask = 0xffff_ffff; result &= mask; println!("{result:0>8x}"); } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostid/src/main.rs000066400000000000000000000000311504311601400255310ustar00rootroot00000000000000uucore::bin!(uu_hostid); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/000077500000000000000000000000001504311601400240025ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/Cargo.toml000066400000000000000000000017661504311601400257440ustar00rootroot00000000000000[package] name = "uu_hostname" description = "hostname ~ (uutils) display or set the host name of the current host" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/hostname" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/hostname.rs" [dependencies] clap = { workspace = true } hostname = { workspace = true, features = ["set"] } uucore = { workspace = true, features = ["wide"] } fluent = { workspace = true } [target.'cfg(any(target_os = "freebsd", target_os = "openbsd"))'.dependencies] dns-lookup = { workspace = true } [target.'cfg(target_os = "windows")'.dependencies] windows-sys = { workspace = true, features = [ "Win32_Networking_WinSock", "Win32_Foundation", ] } [[bin]] name = "hostname" path = "src/main.rs" [package.metadata.cargo-udeps.ignore] normal = ["uucore_procs"] 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/LICENSE000077700000000000000000000000001504311601400266502../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/locales/000077500000000000000000000000001504311601400254245ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/locales/en-US.ftl000066400000000000000000000015501504311601400270630ustar00rootroot00000000000000hostname-about = Display or set the system's host name. hostname-usage = hostname [OPTION]... [HOSTNAME] hostname-help-domain = Display the name of the DNS domain if possible hostname-help-ip-address = Display the network address(es) of the host hostname-help-fqdn = Display the FQDN (Fully Qualified Domain Name) (default) hostname-help-short = Display the short hostname (the portion before the first dot) if possible hostname-error-permission = hostname: you must be root to change the host name hostname-error-invalid-name = hostname: invalid hostname '{ $name }' hostname-error-resolve-failed = hostname: unable to resolve host name '{ $name }' hostname-error-winsock = failed to start Winsock hostname-error-set-hostname = failed to set hostname hostname-error-get-hostname = failed to get hostname hostname-error-resolve-socket = failed to resolve socket addresses coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/locales/fr-FR.ftl000066400000000000000000000017201504311601400270470ustar00rootroot00000000000000hostname-about = Afficher ou définir le nom d'hôte du système. hostname-usage = hostname [OPTION]... 
[NOM_HÔTE] hostname-help-domain = Afficher le nom du domaine DNS si possible hostname-help-ip-address = Afficher la ou les adresses réseau de l'hôte hostname-help-fqdn = Afficher le FQDN (nom de domaine pleinement qualifié) (par défaut) hostname-help-short = Afficher le nom d'hôte court (la partie avant le premier point) si possible hostname-error-permission = hostname : vous devez être root pour changer le nom d'hôte hostname-error-invalid-name = hostname : nom d'hôte invalide '{ $name }' hostname-error-resolve-failed = hostname : impossible de résoudre le nom d'hôte '{ $name }' hostname-error-winsock = échec du démarrage de Winsock hostname-error-set-hostname = échec de la définition du nom d'hôte hostname-error-get-hostname = échec de l'obtention du nom d'hôte hostname-error-resolve-socket = échec de la résolution des adresses de socket coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/src/000077500000000000000000000000001504311601400245715ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/src/hostname.rs000066400000000000000000000132661504311601400267650ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore hashset Addrs addrs #[cfg(not(any(target_os = "freebsd", target_os = "openbsd")))] use std::net::ToSocketAddrs; use std::str; use std::{collections::hash_set::HashSet, ffi::OsString}; use clap::builder::ValueParser; use clap::{Arg, ArgAction, ArgMatches, Command}; #[cfg(any(target_os = "freebsd", target_os = "openbsd"))] use dns_lookup::lookup_host; use uucore::translate; use uucore::{ error::{FromIo, UResult}, format_usage, }; static OPT_DOMAIN: &str = "domain"; static OPT_IP_ADDRESS: &str = "ip-address"; static OPT_FQDN: &str = "fqdn"; static OPT_SHORT: &str = "short"; static OPT_HOST: &str = "host"; #[cfg(windows)] mod wsa { use std::io; use windows_sys::Win32::Networking::WinSock::{WSACleanup, WSADATA, WSAStartup}; pub(super) struct WsaHandle(()); pub(super) fn start() -> io::Result { let err = unsafe { let mut data = std::mem::MaybeUninit::::uninit(); WSAStartup(0x0202, data.as_mut_ptr()) }; if err == 0 { Ok(WsaHandle(())) } else { Err(io::Error::from_raw_os_error(err)) } } impl Drop for WsaHandle { fn drop(&mut self) { unsafe { // This possibly returns an error but we can't handle it let _err = WSACleanup(); } } } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; #[cfg(windows)] let _handle = wsa::start().map_err_context(|| translate!("hostname-error-winsock"))?; match matches.get_one::(OPT_HOST) { None => display_hostname(&matches), Some(host) => { hostname::set(host).map_err_context(|| translate!("hostname-error-set-hostname")) } } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("hostname-about")) .override_usage(format_usage(&translate!("hostname-usage"))) .infer_long_args(true) .arg( Arg::new(OPT_DOMAIN) .short('d') .long("domain") .overrides_with_all([OPT_DOMAIN, OPT_IP_ADDRESS, OPT_FQDN, OPT_SHORT]) .help(translate!("hostname-help-domain")) .action(ArgAction::SetTrue), ) .arg( 
Arg::new(OPT_IP_ADDRESS) .short('i') .long("ip-address") .overrides_with_all([OPT_DOMAIN, OPT_IP_ADDRESS, OPT_FQDN, OPT_SHORT]) .help(translate!("hostname-help-ip-address")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_FQDN) .short('f') .long("fqdn") .overrides_with_all([OPT_DOMAIN, OPT_IP_ADDRESS, OPT_FQDN, OPT_SHORT]) .help(translate!("hostname-help-fqdn")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_SHORT) .short('s') .long("short") .overrides_with_all([OPT_DOMAIN, OPT_IP_ADDRESS, OPT_FQDN, OPT_SHORT]) .help(translate!("hostname-help-short")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_HOST) .value_parser(ValueParser::os_string()) .value_hint(clap::ValueHint::Hostname), ) } fn display_hostname(matches: &ArgMatches) -> UResult<()> { let hostname = hostname::get() .map_err_context(|| "failed to get hostname".to_owned())? .to_string_lossy() .into_owned(); if matches.get_flag(OPT_IP_ADDRESS) { let addresses; #[cfg(not(any(target_os = "freebsd", target_os = "openbsd")))] { let hostname = hostname + ":1"; let addrs = hostname .to_socket_addrs() .map_err_context(|| "failed to resolve socket addresses".to_owned())?; addresses = addrs; } // DNS reverse lookup via "hostname:1" does not work on FreeBSD and OpenBSD // use dns-lookup crate instead #[cfg(any(target_os = "freebsd", target_os = "openbsd"))] { let addrs: Vec = lookup_host(hostname.as_str()).unwrap(); addresses = addrs; } let mut hashset = HashSet::new(); let mut output = String::new(); for addr in addresses { // XXX: not sure why this is necessary... 
if !hashset.contains(&addr) { let mut ip = addr.to_string(); if ip.ends_with(":1") { let len = ip.len(); ip.truncate(len - 2); } output.push_str(&ip); output.push(' '); hashset.insert(addr); } } let len = output.len(); if len > 0 { println!("{}", &output[0..len - 1]); } Ok(()) } else { if matches.get_flag(OPT_SHORT) || matches.get_flag(OPT_DOMAIN) { let mut it = hostname.char_indices().filter(|&ci| ci.1 == '.'); if let Some(ci) = it.next() { if matches.get_flag(OPT_SHORT) { println!("{}", &hostname[0..ci.0]); } else { println!("{}", &hostname[ci.0 + 1..]); } return Ok(()); } } println!("{hostname}"); Ok(()) } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/hostname/src/main.rs000066400000000000000000000000331504311601400260570ustar00rootroot00000000000000uucore::bin!(uu_hostname); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/000077500000000000000000000000001504311601400225605ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/Cargo.toml000066400000000000000000000012661504311601400245150ustar00rootroot00000000000000[package] name = "uu_id" description = "id ~ (uutils) display user and group information for USER" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/id" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/id.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["entries", "process"] } selinux = { workspace = true, optional = true } fluent = { workspace = true } [[bin]] name = "id" path = "src/main.rs" [features] feat_selinux = ["selinux"] 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/LICENSE000077700000000000000000000000001504311601400254262../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/locales/000077500000000000000000000000001504311601400242025ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/locales/en-US.ftl000066400000000000000000000047611504311601400256500ustar00rootroot00000000000000id-about = Print user and group information for each specified USER, or (when USER omitted) for the current user. id-usage = id [OPTION]... [USER]... id-after-help = The id utility displays the user and group names and numeric IDs, of the calling process, to the standard output. If the real and effective IDs are different, both are displayed, otherwise only the real ID is displayed. If a user (login name or user ID) is specified, the user and group IDs of that user are displayed. In this case, the real and effective IDs are assumed to be the same. 
# Context help text id-context-help-disabled = print only the security context of the process (not enabled) id-context-help-enabled = print only the security context of the process # Error messages id-error-names-real-ids-require-flags = printing only names or real IDs requires -u, -g, or -G id-error-zero-not-permitted-default = option --zero not permitted in default format id-error-cannot-print-context-with-user = cannot print security context when user specified id-error-cannot-get-context = can't get process context id-error-context-selinux-only = --context (-Z) works only on an SELinux-enabled kernel id-error-no-such-user = { $user }: no such user id-error-cannot-find-group-name = cannot find name for group ID { $gid } id-error-cannot-find-user-name = cannot find name for user ID { $uid } id-error-audit-retrieve = couldn't retrieve information # Help text for command-line arguments id-help-audit = Display the process audit user ID and other process audit properties, which requires privilege (not available on Linux). id-help-user = Display only the effective user ID as a number. id-help-group = Display only the effective group ID as a number id-help-groups = Display only the different group IDs as white-space separated numbers, in no particular order. id-help-human-readable = Make the output human-readable. Each display is on a separate line. id-help-name = Display the name of the user or group ID for the -G, -g and -u options instead of the number. If any of the ID numbers cannot be mapped into names, the number will be displayed as usual. id-help-password = Display the id as a password file entry. id-help-real = Display the real ID for the -G, -g and -u options instead of the effective ID. 
id-help-zero = delimit entries with NUL characters, not whitespace; not permitted in default format # Output labels id-output-uid = uid id-output-groups = groups id-output-login = login id-output-euid = euid id-output-context = context coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/locales/fr-FR.ftl000066400000000000000000000057501504311601400256340ustar00rootroot00000000000000id-about = Affiche les informations d'utilisateur et de groupe pour chaque UTILISATEUR spécifié, ou (si UTILISATEUR est omis) pour l'utilisateur actuel. id-usage = id [OPTION]... [UTILISATEUR]... id-after-help = L'utilitaire id affiche les noms d'utilisateur et de groupe ainsi que leurs ID numériques du processus appelant, vers la sortie standard. Si les ID réels et effectifs sont différents, les deux sont affichés, sinon seul l'ID réel est affiché. Si un utilisateur (nom de connexion ou ID utilisateur) est spécifié, les ID utilisateur et groupe de cet utilisateur sont affichés. Dans ce cas, les ID réels et effectifs sont supposés être identiques. 
# Texte d'aide pour le contexte id-context-help-disabled = affiche uniquement le contexte de sécurité du processus (non activé) id-context-help-enabled = affiche uniquement le contexte de sécurité du processus # Messages d'erreur id-error-names-real-ids-require-flags = l'affichage des noms uniquement ou des ID réels nécessite -u, -g, ou -G id-error-zero-not-permitted-default = l'option --zero n'est pas autorisée dans le format par défaut id-error-cannot-print-context-with-user = impossible d'afficher le contexte de sécurité quand un utilisateur est spécifié id-error-cannot-get-context = impossible d'obtenir le contexte du processus id-error-context-selinux-only = --context (-Z) ne fonctionne que sur un noyau avec SELinux activé id-error-no-such-user = { $user } : utilisateur inexistant id-error-cannot-find-group-name = impossible de trouver le nom pour l'ID de groupe { $gid } id-error-cannot-find-user-name = impossible de trouver le nom pour l'ID utilisateur { $uid } id-error-audit-retrieve = impossible de récupérer les informations # Texte d'aide pour les arguments de ligne de commande id-help-audit = Affiche l'ID utilisateur d'audit du processus et autres propriétés d'audit, ce qui nécessite des privilèges (non disponible sous Linux). id-help-user = Affiche uniquement l'ID utilisateur effectif sous forme de nombre. id-help-group = Affiche uniquement l'ID de groupe effectif sous forme de nombre id-help-groups = Affiche uniquement les différents ID de groupe sous forme de nombres séparés par des espaces, dans un ordre quelconque. id-help-human-readable = Rend la sortie lisible par l'humain. Chaque affichage est sur une ligne séparée. id-help-name = Affiche le nom de l'ID utilisateur ou groupe pour les options -G, -g et -u au lieu du nombre. Si certains ID numériques ne peuvent pas être convertis en noms, le nombre sera affiché comme d'habitude. id-help-password = Affiche l'id comme une entrée de fichier de mots de passe. 
id-help-real = Affiche l'ID réel pour les options -G, -g et -u au lieu de l'ID effectif. id-help-zero = délimite les entrées avec des caractères NUL, pas des espaces ; non autorisé dans le format par défaut # Étiquettes de sortie id-output-uid = uid id-output-groups = groupes id-output-login = connexion id-output-euid = euid id-output-context = contexte coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/src/000077500000000000000000000000001504311601400233475ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/src/id.rs000066400000000000000000000544361504311601400243250ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) asid auditid auditinfo auid cstr egid emod euid getaudit getlogin gflag nflag pline rflag termid uflag gsflag zflag cflag // README: // This was originally based on BSD's `id` // (noticeable in functionality, usage text, options text, etc.) // and synced with: // http://ftp-archive.freebsd.org/mirror/FreeBSD-Archive/old-releases/i386/1.0-RELEASE/ports/shellutils/src/id.c // http://www.opensource.apple.com/source/shell_cmds/shell_cmds-118/id/id.c // // * This was partially rewritten in order for stdout/stderr/exit_code // to be conform with GNU coreutils (8.32) test suite for `id`. // // * This supports multiple users (a feature that was introduced in coreutils 8.31) // // * This passes GNU's coreutils Test suite (8.32) // for "tests/id/uid.sh" and "tests/id/zero/sh". // // * Option '--zero' does not exist for BSD's `id`, therefore '--zero' is only // allowed together with other options that are available on GNU's `id`. // // * Help text based on BSD's `id` manpage and GNU's `id` manpage. // // * This passes GNU's coreutils Test suite (8.32) for "tests/id/context.sh" if compiled with // `--features feat_selinux`. 
It should also pass "tests/id/no-context.sh", but that depends on // `uu_ls -Z` being implemented and therefore fails at the moment // #![allow(non_camel_case_types)] #![allow(dead_code)] use clap::{Arg, ArgAction, Command}; use std::ffi::CStr; use uucore::display::Quotable; use uucore::entries::{self, Group, Locate, Passwd}; use uucore::error::UResult; use uucore::error::{USimpleError, set_exit_code}; pub use uucore::libc; use uucore::libc::{getlogin, uid_t}; use uucore::line_ending::LineEnding; use uucore::translate; use uucore::process::{getegid, geteuid, getgid, getuid}; use uucore::{format_usage, show_error}; macro_rules! cstr2cow { ($v:expr) => { unsafe { let ptr = $v; // Must be not null to call cstr2cow if ptr.is_null() { None } else { Some({ CStr::from_ptr(ptr) }.to_string_lossy()) } } }; } fn get_context_help_text() -> String { #[cfg(not(feature = "selinux"))] return translate!("id-context-help-disabled"); #[cfg(feature = "selinux")] return translate!("id-context-help-enabled"); } mod options { pub const OPT_AUDIT: &str = "audit"; // GNU's id does not have this pub const OPT_CONTEXT: &str = "context"; pub const OPT_EFFECTIVE_USER: &str = "user"; pub const OPT_GROUP: &str = "group"; pub const OPT_GROUPS: &str = "groups"; pub const OPT_HUMAN_READABLE: &str = "human-readable"; // GNU's id does not have this pub const OPT_NAME: &str = "name"; pub const OPT_PASSWORD: &str = "password"; // GNU's id does not have this pub const OPT_REAL_ID: &str = "real"; pub const OPT_ZERO: &str = "zero"; // BSD's id does not have this pub const ARG_USERS: &str = "USER"; } struct Ids { uid: u32, // user id gid: u32, // group id euid: u32, // effective uid egid: u32, // effective gid } struct State { nflag: bool, // --name uflag: bool, // --user gflag: bool, // --group gsflag: bool, // --groups rflag: bool, // --real zflag: bool, // --zero cflag: bool, // --context selinux_supported: bool, ids: Option, // The behavior for calling GNU's `id` and calling GNU's `id $USER` is 
similar but different. // * The SELinux context is only displayed without a specified user. // * The `getgroups` system call is only used without a specified user, this causes // the order of the displayed groups to be different between `id` and `id $USER`. // // Example: // $ strace -e getgroups id -G $USER // 1000 10 975 968 // +++ exited with 0 +++ // $ strace -e getgroups id -G // getgroups(0, NULL) = 4 // getgroups(4, [10, 968, 975, 1000]) = 4 // 1000 10 968 975 // +++ exited with 0 +++ user_specified: bool, } #[uucore::main] #[allow(clippy::cognitive_complexity)] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app() .after_help(translate!("id-after-help")) .try_get_matches_from(args)?; let users: Vec = matches .get_many::(options::ARG_USERS) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let mut state = State { nflag: matches.get_flag(options::OPT_NAME), uflag: matches.get_flag(options::OPT_EFFECTIVE_USER), gflag: matches.get_flag(options::OPT_GROUP), gsflag: matches.get_flag(options::OPT_GROUPS), rflag: matches.get_flag(options::OPT_REAL_ID), zflag: matches.get_flag(options::OPT_ZERO), cflag: matches.get_flag(options::OPT_CONTEXT), selinux_supported: { #[cfg(feature = "selinux")] { uucore::selinux::is_selinux_enabled() } #[cfg(not(feature = "selinux"))] { false } }, user_specified: !users.is_empty(), ids: None, }; let default_format = { // "default format" is when none of '-ugG' was used !(state.uflag || state.gflag || state.gsflag) }; if (state.nflag || state.rflag) && default_format && !state.cflag { return Err(USimpleError::new( 1, translate!("id-error-names-real-ids-require-flags"), )); } if state.zflag && default_format && !state.cflag { // NOTE: GNU test suite "id/zero.sh" needs this stderr output: return Err(USimpleError::new( 1, translate!("id-error-zero-not-permitted-default"), )); } if state.user_specified && state.cflag { return Err(USimpleError::new( 1, 
translate!("id-error-cannot-print-context-with-user"), )); } let delimiter = if state.zflag { "\0" } else { " " }; let line_ending = LineEnding::from_zero_flag(state.zflag); if state.cflag { return if state.selinux_supported { // print SElinux context and exit #[cfg(all(any(target_os = "linux", target_os = "android"), feature = "selinux"))] if let Ok(context) = selinux::SecurityContext::current(false) { let bytes = context.as_bytes(); print!("{}{line_ending}", String::from_utf8_lossy(bytes)); } else { // print error because `cflag` was explicitly requested return Err(USimpleError::new( 1, translate!("id-error-cannot-get-context"), )); } Ok(()) } else { Err(USimpleError::new( 1, translate!("id-error-context-selinux-only"), )) }; } for i in 0..=users.len() { let possible_pw = if state.user_specified { match Passwd::locate(users[i].as_str()) { Ok(p) => Some(p), Err(_) => { show_error!( "{}", translate!("id-error-no-such-user", "user" => users[i].quote() ) ); set_exit_code(1); if i + 1 >= users.len() { break; } continue; } } } else { None }; // GNU's `id` does not support the flags: -p/-P/-A. 
if matches.get_flag(options::OPT_PASSWORD) { // BSD's `id` ignores all but the first specified user pline(possible_pw.as_ref().map(|v| v.uid)); return Ok(()); } if matches.get_flag(options::OPT_HUMAN_READABLE) { // BSD's `id` ignores all but the first specified user pretty(possible_pw); return Ok(()); } if matches.get_flag(options::OPT_AUDIT) { // BSD's `id` ignores specified users auditid(); return Ok(()); } let (uid, gid) = possible_pw.as_ref().map_or( { let use_effective = !state.rflag && (state.uflag || state.gflag || state.gsflag); if use_effective { (geteuid(), getegid()) } else { (getuid(), getgid()) } }, |p| (p.uid, p.gid), ); state.ids = Some(Ids { uid, gid, euid: geteuid(), egid: getegid(), }); if state.gflag { print!( "{}", if state.nflag { entries::gid2grp(gid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-group-name", "gid" => gid) ); set_exit_code(1); gid.to_string() }) } else { gid.to_string() } ); } if state.uflag { print!( "{}", if state.nflag { entries::uid2usr(uid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-user-name", "uid" => uid) ); set_exit_code(1); uid.to_string() }) } else { uid.to_string() } ); } let groups = entries::get_groups_gnu(Some(gid)).unwrap(); let groups = if state.user_specified { possible_pw.as_ref().map(|p| p.belongs_to()).unwrap() } else { groups.clone() }; if state.gsflag { print!( "{}{}", groups .iter() .map(|&id| { if state.nflag { entries::gid2grp(id).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-group-name", "gid" => id) ); set_exit_code(1); id.to_string() }) } else { id.to_string() } }) .collect::>() .join(delimiter), // NOTE: this is necessary to pass GNU's "tests/id/zero.sh": if state.zflag && state.user_specified && users.len() > 1 { "\0" } else { "" } ); } if default_format { id_print(&state, &groups); } print!("{line_ending}"); if i + 1 >= users.len() { break; } } Ok(()) } pub fn uu_app() -> Command { 
Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("id-about")) .override_usage(format_usage(&translate!("id-usage"))) .infer_long_args(true) .args_override_self(true) .arg( Arg::new(options::OPT_AUDIT) .short('A') .conflicts_with_all([ options::OPT_GROUP, options::OPT_EFFECTIVE_USER, options::OPT_HUMAN_READABLE, options::OPT_PASSWORD, options::OPT_GROUPS, options::OPT_ZERO, ]) .help(translate!("id-help-audit")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_EFFECTIVE_USER) .short('u') .long(options::OPT_EFFECTIVE_USER) .conflicts_with(options::OPT_GROUP) .help(translate!("id-help-user")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_GROUP) .short('g') .long(options::OPT_GROUP) .conflicts_with(options::OPT_EFFECTIVE_USER) .help(translate!("id-help-group")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_GROUPS) .short('G') .long(options::OPT_GROUPS) .conflicts_with_all([ options::OPT_GROUP, options::OPT_EFFECTIVE_USER, options::OPT_CONTEXT, options::OPT_HUMAN_READABLE, options::OPT_PASSWORD, options::OPT_AUDIT, ]) .help(translate!("id-help-groups")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_HUMAN_READABLE) .short('p') .help(translate!("id-help-human-readable")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_NAME) .short('n') .long(options::OPT_NAME) .help(translate!("id-help-name")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_PASSWORD) .short('P') .help(translate!("id-help-password")) .conflicts_with(options::OPT_HUMAN_READABLE) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_REAL_ID) .short('r') .long(options::OPT_REAL_ID) .help(translate!("id-help-real")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_ZERO) .short('z') .long(options::OPT_ZERO) .help(translate!("id-help-zero")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::OPT_CONTEXT) .short('Z') .long(options::OPT_CONTEXT) .conflicts_with_all([options::OPT_GROUP, 
options::OPT_EFFECTIVE_USER]) .help(get_context_help_text()) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::ARG_USERS) .action(ArgAction::Append) .value_name(options::ARG_USERS) .value_hint(clap::ValueHint::Username), ) } fn pretty(possible_pw: Option) { if let Some(p) = possible_pw { print!( "{}\t{}\n{}\t", translate!("id-output-uid"), p.name, translate!("id-output-groups") ); println!( "{}", p.belongs_to() .iter() .map(|&gr| entries::gid2grp(gr).unwrap()) .collect::>() .join(" ") ); } else { let login = cstr2cow!(getlogin().cast_const()); let rid = getuid(); if let Ok(p) = Passwd::locate(rid) { if let Some(user_name) = login { println!("{}\t{user_name}", translate!("id-output-login")); } println!("{}\t{}", translate!("id-output-uid"), p.name); } else { println!("{}\t{rid}", translate!("id-output-uid")); } let eid = getegid(); if eid == rid { if let Ok(p) = Passwd::locate(eid) { println!("{}\t{}", translate!("id-output-euid"), p.name); } else { println!("{}\t{eid}", translate!("id-output-euid")); } } let rid = getgid(); if rid != eid { if let Ok(g) = Group::locate(rid) { println!("{}\t{}", translate!("id-output-euid"), g.name); } else { println!("{}\t{rid}", translate!("id-output-euid")); } } println!( "{}\t{}", translate!("id-output-groups"), entries::get_groups_gnu(None) .unwrap() .iter() .map(|&gr| entries::gid2grp(gr).unwrap()) .collect::>() .join(" ") ); } } #[cfg(any(target_vendor = "apple", target_os = "freebsd"))] fn pline(possible_uid: Option) { let uid = possible_uid.unwrap_or_else(getuid); let pw = Passwd::locate(uid).unwrap(); println!( "{}:{}:{}:{}:{}:{}:{}:{}:{}:{}", pw.name, pw.user_passwd.unwrap_or_default(), pw.uid, pw.gid, pw.user_access_class.unwrap_or_default(), pw.passwd_change_time, pw.expiration, pw.user_info.unwrap_or_default(), pw.user_dir.unwrap_or_default(), pw.user_shell.unwrap_or_default() ); } #[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))] fn pline(possible_uid: Option) { let uid = 
possible_uid.unwrap_or_else(getuid); let pw = Passwd::locate(uid).unwrap(); println!( "{}:{}:{}:{}:{}:{}:{}", pw.name, pw.user_passwd.unwrap_or_default(), pw.uid, pw.gid, pw.user_info.unwrap_or_default(), pw.user_dir.unwrap_or_default(), pw.user_shell.unwrap_or_default() ); } #[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))] fn auditid() {} #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "openbsd")))] fn auditid() { use std::mem::MaybeUninit; let mut auditinfo: MaybeUninit = MaybeUninit::uninit(); let address = auditinfo.as_mut_ptr(); if unsafe { audit::getaudit(address) } < 0 { println!("{}", translate!("id-error-audit-retrieve")); return; } // SAFETY: getaudit wrote a valid struct to auditinfo let auditinfo = unsafe { auditinfo.assume_init() }; println!("auid={}", auditinfo.ai_auid); println!("mask.success=0x{:x}", auditinfo.ai_mask.am_success); println!("mask.failure=0x{:x}", auditinfo.ai_mask.am_failure); println!("termid.port=0x{:x}", auditinfo.ai_termid.port); println!("asid={}", auditinfo.ai_asid); } fn id_print(state: &State, groups: &[u32]) { let uid = state.ids.as_ref().unwrap().uid; let gid = state.ids.as_ref().unwrap().gid; let euid = state.ids.as_ref().unwrap().euid; let egid = state.ids.as_ref().unwrap().egid; print!( "uid={uid}({})", entries::uid2usr(uid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-user-name", "uid" => uid) ); set_exit_code(1); uid.to_string() }) ); print!( " gid={gid}({})", entries::gid2grp(gid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-group-name", "gid" => gid) ); set_exit_code(1); gid.to_string() }) ); if !state.user_specified && (euid != uid) { print!( " euid={euid}({})", entries::uid2usr(euid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-user-name", "uid" => euid) ); set_exit_code(1); euid.to_string() }) ); } if !state.user_specified && (egid != gid) { // BUG? printing egid={euid} ? 
print!( " egid={egid}({})", entries::gid2grp(egid).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-group-name", "gid" => egid) ); set_exit_code(1); egid.to_string() }) ); } print!( " groups={}", groups .iter() .map(|&gr| format!( "{gr}({})", entries::gid2grp(gr).unwrap_or_else(|_| { show_error!( "{}", translate!("id-error-cannot-find-group-name", "gid" => gr) ); set_exit_code(1); gr.to_string() }) )) .collect::>() .join(",") ); #[cfg(all(any(target_os = "linux", target_os = "android"), feature = "selinux"))] if state.selinux_supported && !state.user_specified && std::env::var_os("POSIXLY_CORRECT").is_none() { // print SElinux context (does not depend on "-Z") if let Ok(context) = selinux::SecurityContext::current(false) { let bytes = context.as_bytes(); print!(" context={}", String::from_utf8_lossy(bytes)); } } } #[cfg(not(any(target_os = "linux", target_os = "android", target_os = "openbsd")))] mod audit { use super::libc::{c_int, c_uint, dev_t, pid_t, uid_t}; pub type au_id_t = uid_t; pub type au_asid_t = pid_t; pub type au_event_t = c_uint; pub type au_emod_t = c_uint; pub type au_class_t = c_int; pub type au_flag_t = u64; #[repr(C)] pub struct au_mask { pub am_success: c_uint, pub am_failure: c_uint, } pub type au_mask_t = au_mask; #[repr(C)] pub struct au_tid_addr { pub port: dev_t, } pub type au_tid_addr_t = au_tid_addr; #[repr(C)] #[expect(clippy::struct_field_names)] pub struct c_auditinfo_addr { pub ai_auid: au_id_t, // Audit user ID pub ai_mask: au_mask_t, // Audit masks. pub ai_termid: au_tid_addr_t, // Terminal ID. pub ai_asid: au_asid_t, // Audit session ID. 
pub ai_flags: au_flag_t, // Audit session flags } pub type c_auditinfo_addr_t = c_auditinfo_addr; unsafe extern "C" { pub fn getaudit(auditinfo_addr: *mut c_auditinfo_addr_t) -> c_int; } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/id/src/main.rs000066400000000000000000000000251504311601400246360ustar00rootroot00000000000000uucore::bin!(uu_id); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/000077500000000000000000000000001504311601400236325ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/Cargo.toml000066400000000000000000000015371504311601400255700ustar00rootroot00000000000000[package] name = "uu_install" description = "install ~ (uutils) copy files from SOURCE to DESTINATION (with specified attributes)" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/install" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/install.rs" [dependencies] clap = { workspace = true } filetime = { workspace = true } file_diff = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = [ "backup-control", "buf-copy", "fs", "mode", "perms", "entries", "process", ] } fluent = { workspace = true } [features] selinux = ["uucore/selinux"] [[bin]] name = "install" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/LICENSE000077700000000000000000000000001504311601400265002../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/locales/000077500000000000000000000000001504311601400252545ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/locales/en-US.ftl000066400000000000000000000073241504311601400267200ustar00rootroot00000000000000install-about = Copy SOURCE to DEST or multiple SOURCE(s) to the existing DIRECTORY, while setting permission modes and owner/group install-usage = install [OPTION]... [FILE]... # Help messages install-help-ignored = ignored install-help-compare = compare each pair of source and destination files, and in some cases, do not modify the destination at all install-help-directory = treat all arguments as directory names. create all components of the specified directories install-help-create-leading = create all leading components of DEST except the last, then copy SOURCE to DEST install-help-group = set group ownership, instead of process's current group install-help-mode = set permission mode (as in chmod), instead of rwxr-xr-x install-help-owner = set ownership (super-user only) install-help-preserve-timestamps = apply access/modification times of SOURCE files to corresponding destination files install-help-strip = strip symbol tables (no action Windows) install-help-strip-program = program used to strip binaries (no action Windows) install-help-target-directory = move all SOURCE arguments into DIRECTORY install-help-no-target-directory = treat DEST as a normal file install-help-verbose = explain what is being done install-help-preserve-context = preserve security context install-help-context = set security context of files and directories # Error messages install-error-dir-needs-arg = { $util_name } with -d requires at least one argument. 
install-error-create-dir-failed = failed to create { $path } install-error-chmod-failed = failed to chmod { $path } install-error-chmod-failed-detailed = { $path }: chmod failed with error { $error } install-error-chown-failed = failed to chown { $path }: { $error } install-error-invalid-target = invalid target { $path }: No such file or directory install-error-target-not-dir = target { $path } is not a directory install-error-backup-failed = cannot backup { $from } to { $to } install-error-install-failed = cannot install { $from } to { $to } install-error-strip-failed = strip program failed: { $error } install-error-strip-abnormal = strip process terminated abnormally - exit code: { $code } install-error-metadata-failed = metadata error install-error-invalid-user = invalid user: { $user } install-error-invalid-group = invalid group: { $group } install-error-omitting-directory = omitting directory { $path } install-error-not-a-directory = failed to access { $path }: Not a directory install-error-override-directory-failed = cannot overwrite directory { $dir } with non-directory { $file } install-error-same-file = '{ $file1 }' and '{ $file2 }' are the same file install-error-extra-operand = extra operand { $operand } { $usage } install-error-invalid-mode = Invalid mode string: { $error } install-error-mutually-exclusive-target = Options --target-directory and --no-target-directory are mutually exclusive install-error-mutually-exclusive-compare-preserve = Options --compare and --preserve-timestamps are mutually exclusive install-error-mutually-exclusive-compare-strip = Options --compare and --strip are mutually exclusive install-error-missing-file-operand = missing file operand install-error-missing-destination-operand = missing destination file operand after '{ $path }' install-error-failed-to-remove = Failed to remove existing file { $path }. 
Error: { $error } # Warning messages install-warning-compare-ignored = the --compare (-C) option is ignored when you specify a mode with non-permission bits # Verbose output install-verbose-creating-directory = creating directory { $path } install-verbose-creating-directory-step = install: creating directory { $path } install-verbose-removed = removed { $path } install-verbose-copy = { $from } -> { $to } install-verbose-backup = (backup: { $backup }) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/locales/fr-FR.ftl000066400000000000000000000103451504311601400267020ustar00rootroot00000000000000install-about = Copier SOURCE vers DEST ou plusieurs SOURCE(s) vers le RÉPERTOIRE existant, tout en définissant les modes de permission et propriétaire/groupe install-usage = install [OPTION]... [FICHIER]... # Messages d'aide install-help-ignored = ignoré install-help-compare = comparer chaque paire de fichiers source et destination, et dans certains cas, ne pas modifier la destination du tout install-help-directory = traiter tous les arguments comme des noms de répertoires. 
créer tous les composants des répertoires spécifiés install-help-create-leading = créer tous les composants principaux de DEST sauf le dernier, puis copier SOURCE vers DEST install-help-group = définir la propriété du groupe, au lieu du groupe actuel du processus install-help-mode = définir le mode de permission (comme dans chmod), au lieu de rwxr-xr-x install-help-owner = définir la propriété (super-utilisateur uniquement) install-help-preserve-timestamps = appliquer les temps d'accès/modification des fichiers SOURCE aux fichiers de destination correspondants install-help-strip = supprimer les tables de symboles (aucune action Windows) install-help-strip-program = programme utilisé pour supprimer les binaires (aucune action Windows) install-help-target-directory = déplacer tous les arguments SOURCE dans RÉPERTOIRE install-help-no-target-directory = traiter DEST comme un fichier normal install-help-verbose = expliquer ce qui est fait install-help-preserve-context = préserver le contexte de sécurité install-help-context = définir le contexte de sécurité des fichiers et répertoires # Messages d'erreur install-error-dir-needs-arg = { $util_name } avec -d nécessite au moins un argument. 
install-error-create-dir-failed = échec de la création de { $path } install-error-chmod-failed = échec du chmod { $path } install-error-chmod-failed-detailed = { $path } : échec du chmod avec l'erreur { $error } install-error-chown-failed = échec du chown { $path } : { $error } install-error-invalid-target = cible invalide { $path } : Aucun fichier ou répertoire de ce type install-error-target-not-dir = la cible { $path } n'est pas un répertoire install-error-backup-failed = impossible de sauvegarder { $from } vers { $to } install-error-install-failed = impossible d'installer { $from } vers { $to } install-error-strip-failed = échec du programme strip : { $error } install-error-strip-abnormal = le processus strip s'est terminé anormalement - code de sortie : { $code } install-error-metadata-failed = erreur de métadonnées install-error-invalid-user = utilisateur invalide : { $user } install-error-invalid-group = groupe invalide : { $group } install-error-omitting-directory = omission du répertoire { $path } install-error-not-a-directory = échec de l'accès à { $path } : N'est pas un répertoire install-error-override-directory-failed = impossible d'écraser le répertoire { $dir } avec un non-répertoire { $file } install-error-same-file = '{ $file1 }' et '{ $file2 }' sont le même fichier install-error-extra-operand = opérande supplémentaire { $operand } { $usage } install-error-invalid-mode = Chaîne de mode invalide : { $error } install-error-mutually-exclusive-target = Les options --target-directory et --no-target-directory sont mutuellement exclusives install-error-mutually-exclusive-compare-preserve = Les options --compare et --preserve-timestamps sont mutuellement exclusives install-error-mutually-exclusive-compare-strip = Les options --compare et --strip sont mutuellement exclusives install-error-missing-file-operand = opérande de fichier manquant install-error-missing-destination-operand = opérande de fichier de destination manquant après '{ $path }' 
install-error-failed-to-remove = Échec de la suppression du fichier existant { $path }. Erreur : { $error } # Messages d'avertissement install-warning-compare-ignored = l'option --compare (-C) est ignorée quand un mode est indiqué avec des bits non liés à des droits # Sortie détaillée install-verbose-creating-directory = création du répertoire { $path } install-verbose-creating-directory-step = install : création du répertoire { $path } install-verbose-removed = supprimé { $path } install-verbose-copy = { $from } -> { $to } install-verbose-backup = (sauvegarde : { $backup }) coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/src/000077500000000000000000000000001504311601400244215ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/src/install.rs000066400000000000000000001060061504311601400264400ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. 
// spell-checker:ignore (ToDO) rwxr sourcepath targetpath Isnt uioerror mod mode; use clap::{Arg, ArgAction, ArgMatches, Command}; use file_diff::diff; use filetime::{FileTime, set_file_times}; use std::fmt::Debug; use std::fs::File; use std::fs::{self, metadata}; use std::path::{MAIN_SEPARATOR, Path, PathBuf}; use std::process; use thiserror::Error; use uucore::backup_control::{self, BackupMode}; use uucore::buf_copy::copy_stream; use uucore::display::Quotable; use uucore::entries::{grp2gid, usr2uid}; use uucore::error::{FromIo, UError, UResult, UUsageError}; use uucore::fs::dir_strip_dot_for_creation; use uucore::mode::get_umask; use uucore::perms::{Verbosity, VerbosityLevel, wrap_chown}; use uucore::process::{getegid, geteuid}; #[cfg(feature = "selinux")] use uucore::selinux::{contexts_differ, set_selinux_security_context}; use uucore::translate; use uucore::{format_usage, show, show_error, show_if_err}; #[cfg(unix)] use std::os::unix::fs::{FileTypeExt, MetadataExt}; #[cfg(unix)] use std::os::unix::prelude::OsStrExt; const DEFAULT_MODE: u32 = 0o755; const DEFAULT_STRIP_PROGRAM: &str = "strip"; #[allow(dead_code)] pub struct Behavior { main_function: MainFunction, specified_mode: Option, backup_mode: BackupMode, suffix: String, owner_id: Option, group_id: Option, verbose: bool, preserve_timestamps: bool, compare: bool, strip: bool, strip_program: String, create_leading: bool, target_dir: Option, no_target_dir: bool, preserve_context: bool, context: Option, } #[derive(Error, Debug)] enum InstallError { #[error("{}", translate!("install-error-dir-needs-arg", "util_name" => uucore::util_name()))] DirNeedsArg, #[error("{}", translate!("install-error-create-dir-failed", "path" => .0.quote()))] CreateDirFailed(PathBuf, #[source] std::io::Error), #[error("{}", translate!("install-error-chmod-failed", "path" => .0.quote()))] ChmodFailed(PathBuf), #[error("{}", translate!("install-error-chown-failed", "path" => .0.quote(), "error" => .1.clone()))] ChownFailed(PathBuf, 
String), #[error("{}", translate!("install-error-invalid-target", "path" => .0.quote()))] InvalidTarget(PathBuf), #[error("{}", translate!("install-error-target-not-dir", "path" => .0.quote()))] TargetDirIsntDir(PathBuf), #[error("{}", translate!("install-error-backup-failed", "from" => .0.to_string_lossy(), "to" => .1.to_string_lossy()))] BackupFailed(PathBuf, PathBuf, #[source] std::io::Error), #[error("{}", translate!("install-error-install-failed", "from" => .0.to_string_lossy(), "to" => .1.to_string_lossy()))] InstallFailed(PathBuf, PathBuf, #[source] std::io::Error), #[error("{}", translate!("install-error-strip-failed", "error" => .0.clone()))] StripProgramFailed(String), #[error("{}", translate!("install-error-metadata-failed"))] MetadataFailed(#[source] std::io::Error), #[error("{}", translate!("install-error-invalid-user", "user" => .0.quote()))] InvalidUser(String), #[error("{}", translate!("install-error-invalid-group", "group" => .0.quote()))] InvalidGroup(String), #[error("{}", translate!("install-error-omitting-directory", "path" => .0.quote()))] OmittingDirectory(PathBuf), #[error("{}", translate!("install-error-not-a-directory", "path" => .0.quote()))] NotADirectory(PathBuf), #[error("{}", translate!("install-error-override-directory-failed", "dir" => .0.quote(), "file" => .1.quote()))] OverrideDirectoryFailed(PathBuf, PathBuf), #[error("{}", translate!("install-error-same-file", "file1" => .0.to_string_lossy(), "file2" => .1.to_string_lossy()))] SameFile(PathBuf, PathBuf), #[error("{}", translate!("install-error-extra-operand", "operand" => .0.quote(), "usage" => .1.clone()))] ExtraOperand(String, String), #[cfg(feature = "selinux")] #[error("{}", .0)] SelinuxContextFailed(String), } impl UError for InstallError { fn code(&self) -> i32 { 1 } fn usage(&self) -> bool { false } } #[derive(Clone, Eq, PartialEq)] pub enum MainFunction { /// Create directories Directory, /// Install files to locations (primary functionality) Standard, } impl Behavior { 
/// Determine the mode for chmod after copy. pub fn mode(&self) -> u32 { self.specified_mode.unwrap_or(DEFAULT_MODE) } } static OPT_COMPARE: &str = "compare"; static OPT_DIRECTORY: &str = "directory"; static OPT_IGNORED: &str = "ignored"; static OPT_CREATE_LEADING: &str = "create-leading"; static OPT_GROUP: &str = "group"; static OPT_MODE: &str = "mode"; static OPT_OWNER: &str = "owner"; static OPT_PRESERVE_TIMESTAMPS: &str = "preserve-timestamps"; static OPT_STRIP: &str = "strip"; static OPT_STRIP_PROGRAM: &str = "strip-program"; static OPT_TARGET_DIRECTORY: &str = "target-directory"; static OPT_NO_TARGET_DIRECTORY: &str = "no-target-directory"; static OPT_VERBOSE: &str = "verbose"; static OPT_PRESERVE_CONTEXT: &str = "preserve-context"; static OPT_CONTEXT: &str = "context"; static ARG_FILES: &str = "files"; /// Main install utility function, called from main.rs. /// /// Returns a program return code. /// #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let paths: Vec = matches .get_many::(ARG_FILES) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); let behavior = behavior(&matches)?; match behavior.main_function { MainFunction::Directory => directory(&paths, &behavior), MainFunction::Standard => standard(paths, &behavior), } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("install-about")) .override_usage(format_usage(&translate!("install-usage"))) .infer_long_args(true) .args_override_self(true) .arg(backup_control::arguments::backup()) .arg(backup_control::arguments::backup_no_args()) .arg( Arg::new(OPT_IGNORED) .short('c') .help(translate!("install-help-ignored")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_COMPARE) .short('C') .long(OPT_COMPARE) .help(translate!("install-help-compare")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_DIRECTORY) .short('d') .long(OPT_DIRECTORY) 
.help(translate!("install-help-directory")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_CREATE_LEADING) .short('D') .help(translate!("install-help-create-leading")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_GROUP) .short('g') .long(OPT_GROUP) .help(translate!("install-help-group")) .value_name("GROUP"), ) .arg( Arg::new(OPT_MODE) .short('m') .long(OPT_MODE) .help(translate!("install-help-mode")) .value_name("MODE"), ) .arg( Arg::new(OPT_OWNER) .short('o') .long(OPT_OWNER) .help(translate!("install-help-owner")) .value_name("OWNER") .value_hint(clap::ValueHint::Username), ) .arg( Arg::new(OPT_PRESERVE_TIMESTAMPS) .short('p') .long(OPT_PRESERVE_TIMESTAMPS) .help(translate!("install-help-preserve-timestamps")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_STRIP) .short('s') .long(OPT_STRIP) .help(translate!("install-help-strip")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_STRIP_PROGRAM) .long(OPT_STRIP_PROGRAM) .help(translate!("install-help-strip-program")) .value_name("PROGRAM") .value_hint(clap::ValueHint::CommandName), ) .arg(backup_control::arguments::suffix()) .arg( Arg::new(OPT_TARGET_DIRECTORY) .short('t') .long(OPT_TARGET_DIRECTORY) .help(translate!("install-help-target-directory")) .value_name("DIRECTORY") .value_hint(clap::ValueHint::DirPath), ) .arg( Arg::new(OPT_NO_TARGET_DIRECTORY) .short('T') .long(OPT_NO_TARGET_DIRECTORY) .help(translate!("install-help-no-target-directory")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_VERBOSE) .short('v') .long(OPT_VERBOSE) .help(translate!("install-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_PRESERVE_CONTEXT) .short('P') .long(OPT_PRESERVE_CONTEXT) .help(translate!("install-help-preserve-context")) .action(ArgAction::SetTrue), ) .arg( Arg::new(OPT_CONTEXT) .short('Z') .long(OPT_CONTEXT) .help(translate!("install-help-context")) .value_name("CONTEXT") .value_parser(clap::value_parser!(String)) .num_args(0..=1), ) .arg( Arg::new(ARG_FILES) .action(ArgAction::Append) 
.num_args(1..) .value_hint(clap::ValueHint::AnyPath), ) } /// Determine behavior, given command line arguments. /// /// If successful, returns a filled-out Behavior struct. /// /// # Errors /// /// In event of failure, returns an integer intended as a program return code. /// fn behavior(matches: &ArgMatches) -> UResult { let main_function = if matches.get_flag(OPT_DIRECTORY) { MainFunction::Directory } else { MainFunction::Standard }; let considering_dir: bool = MainFunction::Directory == main_function; let specified_mode: Option = if matches.contains_id(OPT_MODE) { let x = matches.get_one::(OPT_MODE).ok_or(1)?; Some(mode::parse(x, considering_dir, get_umask()).map_err(|err| { show_error!( "{}", translate!("install-error-invalid-mode", "error" => err) ); 1 })?) } else { None }; let backup_mode = backup_control::determine_backup_mode(matches)?; let target_dir = matches.get_one::(OPT_TARGET_DIRECTORY).cloned(); let no_target_dir = matches.get_flag(OPT_NO_TARGET_DIRECTORY); if target_dir.is_some() && no_target_dir { show_error!("{}", translate!("install-error-mutually-exclusive-target")); return Err(1.into()); } let preserve_timestamps = matches.get_flag(OPT_PRESERVE_TIMESTAMPS); let compare = matches.get_flag(OPT_COMPARE); let strip = matches.get_flag(OPT_STRIP); if preserve_timestamps && compare { show_error!( "{}", translate!("install-error-mutually-exclusive-compare-preserve") ); return Err(1.into()); } if compare && strip { show_error!( "{}", translate!("install-error-mutually-exclusive-compare-strip") ); return Err(1.into()); } // Check if compare is used with non-permission mode bits if compare && specified_mode.is_some() { let mode = specified_mode.unwrap(); let non_permission_bits = 0o7000; // setuid, setgid, sticky bits if mode & non_permission_bits != 0 { show_error!("{}", translate!("install-warning-compare-ignored")); } } let owner = matches .get_one::(OPT_OWNER) .map_or("", |s| s.as_str()) .to_string(); let owner_id = if owner.is_empty() { None } else { 
match usr2uid(&owner) { Ok(u) => Some(u), Err(_) => return Err(InstallError::InvalidUser(owner.clone()).into()), } }; let group = matches .get_one::(OPT_GROUP) .map_or("", |s| s.as_str()) .to_string(); let group_id = if group.is_empty() { None } else { match grp2gid(&group) { Ok(g) => Some(g), Err(_) => return Err(InstallError::InvalidGroup(group.clone()).into()), } }; let context = matches.get_one::(OPT_CONTEXT).cloned(); Ok(Behavior { main_function, specified_mode, backup_mode, suffix: backup_control::determine_backup_suffix(matches), owner_id, group_id, verbose: matches.get_flag(OPT_VERBOSE), preserve_timestamps, compare, strip, strip_program: String::from( matches .get_one::(OPT_STRIP_PROGRAM) .map_or(DEFAULT_STRIP_PROGRAM, |s| s.as_str()), ), create_leading: matches.get_flag(OPT_CREATE_LEADING), target_dir, no_target_dir, preserve_context: matches.get_flag(OPT_PRESERVE_CONTEXT), context, }) } /// Creates directories. /// /// GNU man pages describe this functionality as creating 'all components of /// the specified directories'. /// /// Returns a Result type with the Err variant containing the error message. /// fn directory(paths: &[String], b: &Behavior) -> UResult<()> { if paths.is_empty() { Err(InstallError::DirNeedsArg.into()) } else { for path in paths.iter().map(Path::new) { // if the path already exist, don't try to create it again if !path.exists() { // Special case to match GNU's behavior: // install -d foo/. should work and just create foo/ // std::fs::create_dir("foo/."); fails in pure Rust // See also mkdir.rs for another occurrence of this let path_to_create = dir_strip_dot_for_creation(path); // Differently than the primary functionality // (MainFunction::Standard), the directory functionality should // create all ancestors (or components) of a directory // regardless of the presence of the "-D" flag. // // NOTE: the GNU "install" sets the expected mode only for the // target directory. 
All created ancestor directories will have // the default mode. Hence it is safe to use fs::create_dir_all // and then only modify the target's dir mode. if let Err(e) = fs::create_dir_all(path_to_create.as_path()) .map_err_context(|| path_to_create.as_path().maybe_quote().to_string()) { show!(e); continue; } if b.verbose { println!( "{}", translate!("install-verbose-creating-directory", "path" => path_to_create.quote()) ); } } if mode::chmod(path, b.mode()).is_err() { // Error messages are printed by the mode::chmod function! uucore::error::set_exit_code(1); continue; } show_if_err!(chown_optional_user_group(path, b)); // Set SELinux context for directory if needed #[cfg(feature = "selinux")] show_if_err!(set_selinux_context(path, b)); } // If the exit code was set, or show! has been called at least once // (which sets the exit code as well), function execution will end after // this return. Ok(()) } } /// Test if the path is a new file path that can be /// created immediately fn is_new_file_path(path: &Path) -> bool { !path.exists() && (path.parent().is_none_or(Path::is_dir) || path.parent().unwrap().as_os_str().is_empty()) // In case of a simple file } /// Test if the path is an existing directory or ends with a trailing separator. /// /// Returns true, if one of the conditions above is met; else false. /// #[cfg(unix)] fn is_potential_directory_path(path: &Path) -> bool { let separator = MAIN_SEPARATOR as u8; path.as_os_str().as_bytes().last() == Some(&separator) || path.is_dir() } #[cfg(not(unix))] fn is_potential_directory_path(path: &Path) -> bool { let path_str = path.to_string_lossy(); path_str.ends_with(MAIN_SEPARATOR) || path_str.ends_with('/') || path.is_dir() } /// Perform an install, given a list of paths and behavior. /// /// Returns a Result type with the Err variant containing the error message. 
/// #[allow(clippy::cognitive_complexity)] fn standard(mut paths: Vec, b: &Behavior) -> UResult<()> { // first check that paths contains at least one element if paths.is_empty() { return Err(UUsageError::new( 1, translate!("install-error-missing-file-operand"), )); } if b.no_target_dir && paths.len() > 2 { return Err(InstallError::ExtraOperand( paths[2].clone(), format_usage(&translate!("install-usage")), ) .into()); } // get the target from either "-t foo" param or from the last given paths argument let target: PathBuf = if let Some(path) = &b.target_dir { path.into() } else { let last_path: PathBuf = paths.pop().unwrap().into(); // paths has to contain more elements if paths.is_empty() { return Err(UUsageError::new( 1, translate!("install-error-missing-destination-operand", "path" => last_path.to_str().unwrap()), )); } last_path }; let sources = &paths.iter().map(PathBuf::from).collect::>(); if b.create_leading { // if -t is used in combination with -D, create whole target because it does not include filename let to_create: Option<&Path> = if b.target_dir.is_some() { Some(target.as_path()) // if source and target are filenames used in combination with -D, create target's parent } else if !(sources.len() > 1 || is_potential_directory_path(&target)) { target.parent() } else { None }; if let Some(to_create) = to_create { // if the path ends in /, remove it let to_create = if to_create.to_string_lossy().ends_with('/') { Path::new(to_create.to_str().unwrap().trim_end_matches('/')) } else { to_create }; if !to_create.exists() { if b.verbose { let mut result = PathBuf::new(); // When creating directories with -Dv, show directory creations step by step for part in to_create.components() { result.push(part.as_os_str()); if !result.is_dir() { // Don't display when the directory already exists println!( "{}", translate!("install-verbose-creating-directory-step", "path" => result.quote()) ); } } } if let Err(e) = fs::create_dir_all(to_create) { return 
Err(InstallError::CreateDirFailed(to_create.to_path_buf(), e).into()); } } } if b.target_dir.is_some() { let p = to_create.unwrap(); if !p.exists() || !p.is_dir() { return Err(InstallError::NotADirectory(p.to_path_buf()).into()); } } } if sources.len() > 1 { copy_files_into_dir(sources, &target, b) } else { let source = sources.first().unwrap(); if source.is_dir() { return Err(InstallError::OmittingDirectory(source.clone()).into()); } if b.no_target_dir && target.is_dir() { return Err( InstallError::OverrideDirectoryFailed(target.clone(), source.clone()).into(), ); } if is_potential_directory_path(&target) { return copy_files_into_dir(sources, &target, b); } if target.is_file() || is_new_file_path(&target) { copy(source, &target, b) } else { Err(InstallError::InvalidTarget(target).into()) } } } /// Copy some files into a directory. /// /// Prints verbose information and error messages. /// Returns a Result type with the Err variant containing the error message. /// /// # Parameters /// /// `files` must all exist as non-directories. /// `target_dir` must be a directory. /// fn copy_files_into_dir(files: &[PathBuf], target_dir: &Path, b: &Behavior) -> UResult<()> { if !target_dir.is_dir() { return Err(InstallError::TargetDirIsntDir(target_dir.to_path_buf()).into()); } for sourcepath in files { if let Err(err) = sourcepath .metadata() .map_err_context(|| format!("cannot stat {}", sourcepath.quote())) { show!(err); continue; } if sourcepath.is_dir() { let err = InstallError::OmittingDirectory(sourcepath.clone()); show!(err); continue; } let mut targetpath = target_dir.to_path_buf(); let filename = sourcepath.components().next_back().unwrap(); targetpath.push(filename); show_if_err!(copy(sourcepath, &targetpath, b)); } // If the exit code was set, or show! has been called at least once // (which sets the exit code as well), function execution will end after // this return. Ok(()) } /// Handle incomplete user/group parings for chown. 
/// /// Returns a Result type with the Err variant containing the error message. /// If the user is root, revert the uid & gid /// /// # Parameters /// /// _path_ must exist. /// /// # Errors /// /// If the owner or group are invalid or copy system call fails, we print a verbose error and /// return an empty error value. /// fn chown_optional_user_group(path: &Path, b: &Behavior) -> UResult<()> { // GNU coreutils doesn't print chown operations during install with verbose flag. let verbosity = Verbosity { groups_only: b.owner_id.is_none(), level: VerbosityLevel::Normal, }; // Determine the owner and group IDs to be used for chown. let (owner_id, group_id) = if b.owner_id.is_some() || b.group_id.is_some() { (b.owner_id, b.group_id) } else if geteuid() == 0 { // Special case for root user. (Some(0), Some(0)) } else { // No chown operation needed. return Ok(()); }; let meta = match metadata(path) { Ok(meta) => meta, Err(e) => return Err(InstallError::MetadataFailed(e).into()), }; match wrap_chown(path, &meta, owner_id, group_id, false, verbosity) { Ok(msg) if b.verbose && !msg.is_empty() => println!("chown: {msg}"), Ok(_) => {} Err(e) => return Err(InstallError::ChownFailed(path.to_path_buf(), e).into()), } Ok(()) } /// Perform backup before overwriting. /// /// # Parameters /// /// * `to` - The destination file path. /// * `b` - The behavior configuration. /// /// # Returns /// /// Returns an Option containing the backup path, or None if backup is not needed. /// fn perform_backup(to: &Path, b: &Behavior) -> UResult> { if to.exists() { if b.verbose { println!( "{}", translate!("install-verbose-removed", "path" => to.quote()) ); } let backup_path = backup_control::get_backup_path(b.backup_mode, to, &b.suffix); if let Some(ref backup_path) = backup_path { fs::rename(to, backup_path).map_err(|err| { InstallError::BackupFailed(to.to_path_buf(), backup_path.clone(), err) })?; } Ok(backup_path) } else { Ok(None) } } /// Copy a non-special file using [`fs::copy`]. 
/// /// # Parameters /// * `from` - The source file path. /// * `to` - The destination file path. /// /// # Returns /// /// Returns an empty Result or an error in case of failure. fn copy_normal_file(from: &Path, to: &Path) -> UResult<()> { if let Err(err) = fs::copy(from, to) { return Err(InstallError::InstallFailed(from.to_path_buf(), to.to_path_buf(), err).into()); } Ok(()) } /// Copy a file from one path to another. Handles the certain cases of special /// files (e.g character specials). /// /// # Parameters /// /// * `from` - The source file path. /// * `to` - The destination file path. /// /// # Returns /// /// Returns an empty Result or an error in case of failure. /// fn copy_file(from: &Path, to: &Path) -> UResult<()> { if let Ok(to_abs) = to.canonicalize() { if from.canonicalize()? == to_abs { return Err(InstallError::SameFile(from.to_path_buf(), to.to_path_buf()).into()); } } if to.is_dir() && !from.is_dir() { return Err(InstallError::OverrideDirectoryFailed( to.to_path_buf().clone(), from.to_path_buf().clone(), ) .into()); } // fs::copy fails if destination is a invalid symlink. // so lets just remove all existing files at destination before copy. if let Err(e) = fs::remove_file(to) { if e.kind() != std::io::ErrorKind::NotFound { show_error!( "{}", translate!("install-error-failed-to-remove", "path" => to.display(), "error" => format!("{e:?}")) ); } } let ft = match metadata(from) { Ok(ft) => ft.file_type(), Err(err) => { return Err( InstallError::InstallFailed(from.to_path_buf(), to.to_path_buf(), err).into(), ); } }; // Stream-based copying to get around the limitations of std::fs::copy #[cfg(unix)] if ft.is_char_device() || ft.is_block_device() || ft.is_fifo() { let mut handle = File::open(from)?; let mut dest = File::create(to)?; copy_stream(&mut handle, &mut dest)?; return Ok(()); } copy_normal_file(from, to)?; Ok(()) } /// Strip a file using an external program. /// /// # Parameters /// /// * `to` - The destination file path. 
/// * `b` - The behavior configuration. /// /// # Returns /// /// Returns an empty Result or an error in case of failure. /// fn strip_file(to: &Path, b: &Behavior) -> UResult<()> { // Check if the filename starts with a hyphen and adjust the path let to_str = to.as_os_str().to_str().unwrap_or_default(); let to = if to_str.starts_with('-') { let mut new_path = PathBuf::from("."); new_path.push(to); new_path } else { to.to_path_buf() }; match process::Command::new(&b.strip_program).arg(&to).status() { Ok(status) => { if !status.success() { // Follow GNU's behavior: if strip fails, removes the target let _ = fs::remove_file(to); return Err(InstallError::StripProgramFailed( translate!("install-error-strip-abnormal", "code" => status.code().unwrap()), ) .into()); } } Err(e) => { // Follow GNU's behavior: if strip fails, removes the target let _ = fs::remove_file(to); return Err(InstallError::StripProgramFailed(e.to_string()).into()); } } Ok(()) } /// Set ownership and permissions on the destination file. /// /// # Parameters /// /// * `to` - The destination file path. /// * `b` - The behavior configuration. /// /// # Returns /// /// Returns an empty Result or an error in case of failure. /// fn set_ownership_and_permissions(to: &Path, b: &Behavior) -> UResult<()> { // Silent the warning as we want to the error message #[allow(clippy::question_mark)] if mode::chmod(to, b.mode()).is_err() { return Err(InstallError::ChmodFailed(to.to_path_buf()).into()); } chown_optional_user_group(to, b)?; Ok(()) } /// Preserve timestamps on the destination file. /// /// # Parameters /// /// * `from` - The source file path. /// * `to` - The destination file path. /// /// # Returns /// /// Returns an empty Result or an error in case of failure. 
/// fn preserve_timestamps(from: &Path, to: &Path) -> UResult<()> { let meta = match metadata(from) { Ok(meta) => meta, Err(e) => return Err(InstallError::MetadataFailed(e).into()), }; let modified_time = FileTime::from_last_modification_time(&meta); let accessed_time = FileTime::from_last_access_time(&meta); if let Err(e) = set_file_times(to, accessed_time, modified_time) { show_error!("{e}"); // ignore error } Ok(()) } /// Copy one file to a new location, changing metadata. /// /// Returns a Result type with the Err variant containing the error message. /// /// # Parameters /// /// _from_ must exist as a non-directory. /// _to_ must be a non-existent file, whose parent directory exists. /// /// # Errors /// /// If the copy system call fails, we print a verbose error and return an empty error value. /// fn copy(from: &Path, to: &Path, b: &Behavior) -> UResult<()> { if b.compare && !need_copy(from, to, b) { return Ok(()); } // Declare the path here as we may need it for the verbose output below. let backup_path = perform_backup(to, b)?; copy_file(from, to)?; #[cfg(not(windows))] if b.strip { strip_file(to, b)?; } set_ownership_and_permissions(to, b)?; if b.preserve_timestamps { preserve_timestamps(from, to)?; } #[cfg(feature = "selinux")] if b.preserve_context { uucore::selinux::preserve_security_context(from, to) .map_err(|e| InstallError::SelinuxContextFailed(e.to_string()))?; } else if b.context.is_some() { set_selinux_context(to, b)?; } if b.verbose { print!( "{}", translate!("install-verbose-copy", "from" => from.quote(), "to" => to.quote()) ); match backup_path { Some(path) => println!( " {}", translate!("install-verbose-backup", "backup" => path.quote()) ), None => println!(), } } Ok(()) } /// Check if a file needs to be copied due to ownership differences when no explicit group is specified. /// Returns true if the destination file's ownership would differ from what it should be after installation. 
fn needs_copy_for_ownership(to: &Path, to_meta: &fs::Metadata) -> bool { use std::os::unix::fs::MetadataExt; // Check if the destination file's owner differs from the effective user ID if to_meta.uid() != geteuid() { return true; } // For group, we need to determine what the group would be after installation // If no group is specified, the behavior depends on the directory: // - If the directory has setgid bit, the file inherits the directory's group // - Otherwise, the file gets the user's effective group let expected_gid = to .parent() .and_then(|parent| metadata(parent).ok()) .filter(|parent_meta| parent_meta.mode() & 0o2000 != 0) .map_or(getegid(), |parent_meta| parent_meta.gid()); to_meta.gid() != expected_gid } /// Return true if a file is necessary to copy. This is the case when: /// /// - _from_ or _to_ is nonexistent; /// - either file has a sticky bit or set\[ug\]id bit, or the user specified one; /// - either file isn't a regular file; /// - the sizes of _from_ and _to_ differ; /// - _to_'s owner differs from intended; or /// - the contents of _from_ and _to_ differ. /// /// # Parameters /// /// _from_ and _to_, if existent, must be non-directories. /// /// # Errors /// /// Crashes the program if a nonexistent owner or group is specified in _b_. /// fn need_copy(from: &Path, to: &Path, b: &Behavior) -> bool { // Attempt to retrieve metadata for the source file. // If this fails, assume the file needs to be copied. let Ok(from_meta) = metadata(from) else { return true; }; // Attempt to retrieve metadata for the destination file. // If this fails, assume the file needs to be copied. let Ok(to_meta) = metadata(to) else { return true; }; // Check if the destination is a symlink (should always be replaced) if let Ok(to_symlink_meta) = fs::symlink_metadata(to) { if to_symlink_meta.file_type().is_symlink() { return true; } } // Define special file mode bits (setuid, setgid, sticky). 
let extra_mode: u32 = 0o7000; // Define all file mode bits (including permissions). // setuid || setgid || sticky || permissions let all_modes: u32 = 0o7777; // Check if any special mode bits are set in the specified mode, // source file mode, or destination file mode. if b.mode() & extra_mode != 0 || from_meta.mode() & extra_mode != 0 || to_meta.mode() & extra_mode != 0 { return true; } // Check if the mode of the destination file differs from the specified mode. if b.mode() != to_meta.mode() & all_modes { return true; } // Check if either the source or destination is not a file. if !from_meta.is_file() || !to_meta.is_file() { return true; } // Check if the file sizes differ. if from_meta.len() != to_meta.len() { return true; } #[cfg(feature = "selinux")] if b.preserve_context && contexts_differ(from, to) { return true; } // TODO: if -P (#1809) and from/to contexts mismatch, return true. // Check if the owner ID is specified and differs from the destination file's owner. if let Some(owner_id) = b.owner_id { if owner_id != to_meta.uid() { return true; } } // Check if the group ID is specified and differs from the destination file's group. if let Some(group_id) = b.group_id { if group_id != to_meta.gid() { return true; } } else if needs_copy_for_ownership(to, &to_meta) { return true; } // Check if the contents of the source and destination files differ. 
if !diff(from.to_str().unwrap(), to.to_str().unwrap()) { return true; } false } #[cfg(feature = "selinux")] fn set_selinux_context(path: &Path, behavior: &Behavior) -> UResult<()> { if !behavior.preserve_context && behavior.context.is_some() { // Use the provided context set by -Z/--context set_selinux_security_context(path, behavior.context.as_ref()) .map_err(|e| InstallError::SelinuxContextFailed(e.to_string()))?; } Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/src/main.rs000066400000000000000000000000321504311601400257060ustar00rootroot00000000000000uucore::bin!(uu_install); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/install/src/mode.rs000066400000000000000000000026521504311601400257200ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use std::fs; use std::path::Path; #[cfg(not(windows))] use uucore::mode; use uucore::translate; /// Takes a user-supplied string and tries to parse to u16 mode bitmask. pub fn parse(mode_string: &str, considering_dir: bool, umask: u32) -> Result { if mode_string.chars().any(|c| c.is_ascii_digit()) { mode::parse_numeric(0, mode_string, considering_dir) } else { mode::parse_symbolic(0, mode_string, umask, considering_dir) } } /// chmod a file or directory on UNIX. /// /// Adapted from mkdir.rs. Handles own error printing. /// #[cfg(any(unix, target_os = "redox"))] pub fn chmod(path: &Path, mode: u32) -> Result<(), ()> { use std::os::unix::fs::PermissionsExt; use uucore::{display::Quotable, show_error}; fs::set_permissions(path, fs::Permissions::from_mode(mode)).map_err(|err| { show_error!( "{}", translate!("install-error-chmod-failed-detailed", "path" => path.maybe_quote(), "error" => err) ); }) } /// chmod a file or directory on Windows. /// /// Adapted from mkdir.rs. 
/// #[cfg(windows)] pub fn chmod(path: &Path, mode: u32) -> Result<(), ()> { // chmod on Windows only sets the readonly flag, which isn't even honored on directories Ok(()) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/000077500000000000000000000000001504311601400231235ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/BENCHMARKING.md000066400000000000000000000056121504311601400253010ustar00rootroot00000000000000# Benchmarking join ## Performance profile The amount of time spent in which part of the code can vary depending on the files being joined and the flags used. A benchmark with `-j` and `-i` shows the following time: | Function/Method | Fraction of Samples | Why? | | ---------------- | ------------------- | ---- | | `Line::new` | 27% | Linear search for field separators, plus some vector operations. | | `read_until` | 22% | Mostly libc reading file contents, with a few vector operations to represent them. | | `Input::compare` | 20% | ~2/3 making the keys lowercase, ~1/3 comparing them. | | `print_fields` | 11% | Writing to and flushing the buffer. | | Other | 20% | | | libc | 25% | I/O and memory allocation. | More detailed profiles can be obtained via [flame graphs](https://github.com/flamegraph-rs/flamegraph): ```shell cargo flamegraph --bin join --package uu_join -- file1 file2 > /dev/null ``` You may need to add the following lines to the top-level `Cargo.toml` to get full stack traces: ```toml [profile.release] debug = true ``` ## How to benchmark Benchmarking typically requires files large enough to ensure that the benchmark is not overwhelmed by background system noise; say, on the order of tens of MB. While `join` operates on line-oriented data, and not properly formatted CSVs (e.g., `join` is not designed to accommodate escaped or quoted delimiters), in practice many CSV datasets will function well after being sorted. 
Like most of the utils, the recommended tool for benchmarking is [hyperfine](https://github.com/sharkdp/hyperfine). To benchmark your changes: - checkout the main branch (without your changes), do a `--release` build, and back up the executable produced at `target/release/join` - checkout your working branch (with your changes), do a `--release` build - run ```shell hyperfine -w 5 "/path/to/main/branch/build/join file1 file2" "/path/to/working/branch/build/join file1 file2" ``` - you'll likely need to add additional options to both commands, such as a field separator, or if you're benchmarking some particular behavior - you can also optionally benchmark against GNU's join ## What to benchmark The following options can have a non-trivial impact on performance: - `-a`/`-v` if one of the two files has significantly more lines than the other - `-j`/`-1`/`-2` cause work to be done to grab the appropriate field - `-i` uses our custom code for case-insensitive text comparisons - `--nocheck-order` causes some calls of `Input::compare` to be skipped The content of the files being joined has a very significant impact on the performance. Things like how long each line is, how many fields there are, how long the key fields are, how many lines there are, how many lines can be joined, and how many lines each line can be joined with all change the behavior of the hotpaths. 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/Cargo.toml000066400000000000000000000012131504311601400250500ustar00rootroot00000000000000[package] name = "uu_join" description = "join ~ (uutils) merge lines from inputs with matching join fields" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/join" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/join.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } memchr = { workspace = true } thiserror = { workspace = true } fluent = { workspace = true } [[bin]] name = "join" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/LICENSE000077700000000000000000000000001504311601400257712../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/locales/000077500000000000000000000000001504311601400245455ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/locales/en-US.ftl000066400000000000000000000037571504311601400262170ustar00rootroot00000000000000join-about = For each pair of input lines with identical join fields, write a line to standard output. The default join field is the first, delimited by blanks. When FILE1 or FILE2 (not both) is -, read standard input. join-usage = join [OPTION]... 
FILE1 FILE2 # Join help messages join-help-a = also print unpairable lines from file FILENUM, where FILENUM is 1 or 2, corresponding to FILE1 or FILE2 join-help-v = like -a FILENUM, but suppress joined output lines join-help-e = replace missing input fields with EMPTY join-help-i = ignore differences in case when comparing fields join-help-j = equivalent to '-1 FIELD -2 FIELD' join-help-o = obey FORMAT while constructing output line join-help-t = use CHAR as input and output field separator join-help-1 = join on this FIELD of file 1 join-help-2 = join on this FIELD of file 2 join-help-check-order = check that the input is correctly sorted, even if all input lines are pairable join-help-nocheck-order = do not check that the input is correctly sorted join-help-header = treat the first line in each file as field headers, print them without trying to pair them join-help-z = line delimiter is NUL, not newline # Join error messages join-error-io = io error: { $error } join-error-non-utf8-tab = non-UTF-8 multi-byte tab join-error-unprintable-separators = unprintable field separators are only supported on unix-like platforms join-error-multi-character-tab = multi-character tab { $value } join-error-both-files-stdin = both files cannot be standard input join-error-invalid-field-specifier = invalid field specifier: { $spec } join-error-invalid-file-number = invalid file number in field spec: { $spec } join-error-invalid-file-number-simple = invalid file number: { $value } join-error-invalid-field-number = invalid field number: { $value } join-error-incompatible-fields = incompatible join fields { $field1 }, { $field2 } join-error-not-sorted = { $file }:{ $line_num }: is not sorted: { $content } join-error-input-not-sorted = input is not in sorted order coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/locales/fr-FR.ftl000066400000000000000000000046541504311601400262010ustar00rootroot00000000000000join-about = Pour chaque paire de lignes d'entrée avec des champs 
de jointure identiques, écrire une ligne sur la sortie standard. Le champ de jointure par défaut est le premier, délimité par des espaces. Quand FILE1 ou FILE2 (mais pas les deux) est -, lire l'entrée standard. join-usage = join [OPTION]... FICHIER1 FICHIER2 # Messages d'aide de join join-help-a = afficher aussi les lignes non appariables du fichier NUMÉRO_FICHIER, où NUMÉRO_FICHIER est 1 ou 2, correspondant à FICHIER1 ou FICHIER2 join-help-v = comme -a NUMÉRO_FICHIER, mais supprimer les lignes de sortie jointes join-help-e = remplacer les champs d'entrée manquants par VIDE join-help-i = ignorer les différences de casse lors de la comparaison des champs join-help-j = équivalent à '-1 CHAMP -2 CHAMP' join-help-o = obéir au FORMAT lors de la construction de la ligne de sortie join-help-t = utiliser CHAR comme séparateur de champ d'entrée et de sortie join-help-1 = joindre sur ce CHAMP du fichier 1 join-help-2 = joindre sur ce CHAMP du fichier 2 join-help-check-order = vérifier que l'entrée est correctement triée, même si toutes les lignes d'entrée sont appariables join-help-nocheck-order = ne pas vérifier que l'entrée est correctement triée join-help-header = traiter la première ligne de chaque fichier comme des en-têtes de champs, les imprimer sans essayer de les apparier join-help-z = le délimiteur de ligne est NUL, pas de nouvelle ligne # Messages d'erreur de join join-error-io = erreur d'E/S : { $error } join-error-non-utf8-tab = tabulation multi-octets non-UTF-8 join-error-unprintable-separators = les séparateurs de champs non imprimables ne sont pris en charge que sur les plateformes de type unix join-error-multi-character-tab = tabulation multi-caractères { $value } join-error-both-files-stdin = les deux fichiers ne peuvent pas être l'entrée standard join-error-invalid-field-specifier = spécificateur de champ invalide : { $spec } join-error-invalid-file-number = numéro de fichier invalide dans la spécification de champ : { $spec } 
join-error-invalid-file-number-simple = numéro de fichier invalide : { $value } join-error-invalid-field-number = numéro de champ invalide : { $value } join-error-incompatible-fields = champs de jointure incompatibles { $field1 }, { $field2 } join-error-not-sorted = { $file }:{ $line_num } : n'est pas trié : { $content } join-error-input-not-sorted = l'entrée n'est pas dans l'ordre trié coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/src/000077500000000000000000000000001504311601400237125ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/src/join.rs000066400000000000000000001006041504311601400252200ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) autoformat FILENUM whitespaces pairable unpairable nocheck memmem use clap::builder::ValueParser; use clap::{Arg, ArgAction, Command}; use memchr::{Memchr3, memchr_iter, memmem::Finder}; use std::cmp::Ordering; use std::ffi::OsString; use std::fs::File; use std::io::{BufRead, BufReader, BufWriter, Split, Stdin, Write, stdin, stdout}; use std::num::IntErrorKind; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; use thiserror::Error; use uucore::display::Quotable; use uucore::error::{FromIo, UError, UResult, USimpleError, set_exit_code}; use uucore::format_usage; use uucore::line_ending::LineEnding; use uucore::translate; #[derive(Debug, Error)] enum JoinError { #[error("{}", translate!("join-error-io", "error" => .0))] IOError(#[from] std::io::Error), #[error("{0}")] UnorderedInput(String), } // If you still need the UError implementation for compatibility: impl UError for JoinError { fn code(&self) -> i32 { 1 } } #[derive(Copy, Clone, PartialEq)] enum FileNum { File1, File2, } #[derive(Clone)] enum SepSetting { /// Any single-byte separator. 
Byte(u8), /// A single character more than one byte long. Char(Vec), /// No separators, join on the entire line. Line, /// Whitespace separators. Whitespaces, } trait Separator: Clone { /// Using this separator, return the start and end index of all fields in the haystack. fn field_ranges(&self, haystack: &[u8], len_guess: usize) -> Vec<(usize, usize)>; /// The separator as it appears when in the output. fn output_separator(&self) -> &[u8]; } /// Simple separators one byte in length. #[derive(Copy, Clone)] struct OneByteSep { byte: [u8; 1], } impl Separator for OneByteSep { fn field_ranges(&self, haystack: &[u8], len_guess: usize) -> Vec<(usize, usize)> { let mut field_ranges = Vec::with_capacity(len_guess); let mut last_end = 0; for i in memchr_iter(self.byte[0], haystack) { field_ranges.push((last_end, i)); last_end = i + 1; } field_ranges.push((last_end, haystack.len())); field_ranges } fn output_separator(&self) -> &[u8] { &self.byte } } /// Multi-byte (but still single character) separators. #[derive(Clone)] struct MultiByteSep<'a> { finder: Finder<'a>, } impl Separator for MultiByteSep<'_> { fn field_ranges(&self, haystack: &[u8], len_guess: usize) -> Vec<(usize, usize)> { let mut field_ranges = Vec::with_capacity(len_guess); let mut last_end = 0; for i in self.finder.find_iter(haystack) { field_ranges.push((last_end, i)); last_end = i + self.finder.needle().len(); } field_ranges.push((last_end, haystack.len())); field_ranges } fn output_separator(&self) -> &[u8] { self.finder.needle() } } /// Whole-line separator. #[derive(Copy, Clone)] struct LineSep {} impl Separator for LineSep { fn field_ranges(&self, haystack: &[u8], _len_guess: usize) -> Vec<(usize, usize)> { vec![(0, haystack.len())] } fn output_separator(&self) -> &[u8] { &[] } } /// Default whitespace separator. 
#[derive(Copy, Clone)] struct WhitespaceSep {} impl Separator for WhitespaceSep { fn field_ranges(&self, haystack: &[u8], len_guess: usize) -> Vec<(usize, usize)> { let mut field_ranges = Vec::with_capacity(len_guess); let mut last_end = 0; // GNU join used Bourne shell field splitters by default // FIXME: but now uses locale-dependent whitespace for i in Memchr3::new(b' ', b'\t', b'\n', haystack) { // leading whitespace should be dropped, contiguous whitespace merged if i > last_end { field_ranges.push((last_end, i)); } last_end = i + 1; } field_ranges.push((last_end, haystack.len())); field_ranges } fn output_separator(&self) -> &[u8] { b" " } } #[derive(Copy, Clone, PartialEq)] enum CheckOrder { Default, Disabled, Enabled, } struct Settings { key1: usize, key2: usize, print_unpaired1: bool, print_unpaired2: bool, print_joined: bool, ignore_case: bool, line_ending: LineEnding, separator: SepSetting, autoformat: bool, format: Vec, empty: Vec, check_order: CheckOrder, headers: bool, } impl Default for Settings { fn default() -> Self { Self { key1: 0, key2: 0, print_unpaired1: false, print_unpaired2: false, print_joined: true, ignore_case: false, line_ending: LineEnding::Newline, separator: SepSetting::Whitespaces, autoformat: false, format: vec![], empty: vec![], check_order: CheckOrder::Default, headers: false, } } } /// Output representation. struct Repr<'a, Sep: Separator> { line_ending: LineEnding, separator: Sep, format: Vec, empty: &'a [u8], } impl<'a, Sep: Separator> Repr<'a, Sep> { fn new(line_ending: LineEnding, separator: Sep, format: Vec, empty: &'a [u8]) -> Self { Repr { line_ending, separator, format, empty, } } fn uses_format(&self) -> bool { !self.format.is_empty() } /// Print the field or empty filler if the field is not set. 
fn print_field( &self, writer: &mut impl Write, field: Option<&[u8]>, ) -> Result<(), std::io::Error> { let value = match field { Some(field) => field, None => self.empty, }; writer.write_all(value) } /// Print each field except the one at the index. fn print_fields( &self, writer: &mut impl Write, line: &Line, index: usize, ) -> Result<(), std::io::Error> { for i in 0..line.field_ranges.len() { if i != index { writer.write_all(self.separator.output_separator())?; writer.write_all(line.get_field(i).unwrap())?; } } Ok(()) } /// Print each field or the empty filler if the field is not set. fn print_format(&self, writer: &mut impl Write, f: F) -> Result<(), std::io::Error> where F: Fn(&Spec) -> Option<&'a [u8]>, { for i in 0..self.format.len() { if i > 0 { writer.write_all(self.separator.output_separator())?; } let field = match f(&self.format[i]) { Some(value) => value, None => self.empty, }; writer.write_all(field)?; } Ok(()) } fn print_line_ending(&self, writer: &mut impl Write) -> Result<(), std::io::Error> { writer.write_all(&[self.line_ending as u8]) } } /// Byte slice wrapper whose Ord implementation is case-insensitive on ASCII. #[derive(Eq)] struct CaseInsensitiveSlice<'a> { v: &'a [u8], } impl Ord for CaseInsensitiveSlice<'_> { fn cmp(&self, other: &Self) -> Ordering { if let Some((s, o)) = std::iter::zip(self.v.iter(), other.v.iter()).find(|(s, o)| !s.eq_ignore_ascii_case(o)) { // first characters that differ, return the case-insensitive comparison let s = s.to_ascii_lowercase(); let o = o.to_ascii_lowercase(); s.cmp(&o) } else { // one of the strings is a substring or equal of the other self.v.len().cmp(&other.v.len()) } } } impl PartialOrd for CaseInsensitiveSlice<'_> { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl PartialEq for CaseInsensitiveSlice<'_> { fn eq(&self, other: &Self) -> bool { self.v.eq_ignore_ascii_case(other.v) } } /// Input processing parameters. 
struct Input { separator: Sep, ignore_case: bool, check_order: CheckOrder, } impl Input { fn new(separator: Sep, ignore_case: bool, check_order: CheckOrder) -> Self { Self { separator, ignore_case, check_order, } } fn compare(&self, field1: Option<&[u8]>, field2: Option<&[u8]>) -> Ordering { if let (Some(field1), Some(field2)) = (field1, field2) { if self.ignore_case { let field1 = CaseInsensitiveSlice { v: field1 }; let field2 = CaseInsensitiveSlice { v: field2 }; field1.cmp(&field2) } else { field1.cmp(field2) } } else { match field1 { Some(_) => Ordering::Greater, None => match field2 { Some(_) => Ordering::Less, None => Ordering::Equal, }, } } } } enum Spec { Key, Field(FileNum, usize), } impl Spec { fn parse(format: &str) -> UResult { let mut chars = format.chars(); let file_num = match chars.next() { Some('0') => { // Must be all alone without a field specifier. if chars.next().is_none() { return Ok(Self::Key); } return Err(USimpleError::new( 1, translate!("join-error-invalid-field-specifier", "spec" => format.quote()), )); } Some('1') => FileNum::File1, Some('2') => FileNum::File2, _ => { return Err(USimpleError::new( 1, translate!("join-error-invalid-file-number", "spec" => format.quote()), )); } }; if let Some('.') = chars.next() { return Ok(Self::Field(file_num, parse_field_number(chars.as_str())?)); } Err(USimpleError::new( 1, translate!("join-error-invalid-field-specifier", "spec" => format.quote()), )) } } struct Line { field_ranges: Vec<(usize, usize)>, string: Vec, } impl Line { fn new(string: Vec, separator: &Sep, len_guess: usize) -> Self { let field_ranges = separator.field_ranges(&string, len_guess); Self { field_ranges, string, } } /// Get field at index. 
fn get_field(&self, index: usize) -> Option<&[u8]> { if index < self.field_ranges.len() { let (low, high) = self.field_ranges[index]; Some(&self.string[low..high]) } else { None } } } struct State<'a> { key: usize, file_name: &'a str, file_num: FileNum, print_unpaired: bool, lines: Split>, max_len: usize, seq: Vec, line_num: usize, has_failed: bool, has_unpaired: bool, } impl<'a> State<'a> { fn new( file_num: FileNum, name: &'a str, stdin: &'a Stdin, key: usize, line_ending: LineEnding, print_unpaired: bool, ) -> UResult { let file_buf = if name == "-" { Box::new(stdin.lock()) as Box } else { let file = File::open(name).map_err_context(|| format!("{}", name.maybe_quote()))?; Box::new(BufReader::new(file)) as Box }; Ok(State { key, file_name: name, file_num, print_unpaired, lines: file_buf.split(line_ending as u8), max_len: 1, seq: Vec::new(), line_num: 0, has_failed: false, has_unpaired: false, }) } /// Skip the current unpaired line. fn skip_line( &mut self, writer: &mut impl Write, input: &Input, repr: &Repr<'a, Sep>, ) -> UResult<()> { if self.print_unpaired { self.print_first_line(writer, repr)?; } self.reset_next_line(input)?; Ok(()) } /// Keep reading line sequence until the key does not change, return /// the first line whose key differs. fn extend(&mut self, input: &Input) -> UResult> { while let Some(line) = self.next_line(input)? { let diff = input.compare(self.get_current_key(), line.get_field(self.key)); if diff == Ordering::Equal { self.seq.push(line); } else { return Ok(Some(line)); } } Ok(None) } /// Print lines in the buffers as headers. fn print_headers( &self, writer: &mut impl Write, other: &State, repr: &Repr<'a, Sep>, ) -> Result<(), std::io::Error> { if self.has_line() { if other.has_line() { self.combine(writer, other, repr)?; } else { self.print_first_line(writer, repr)?; } } else if other.has_line() { other.print_first_line(writer, repr)?; } Ok(()) } /// Combine two line sequences. 
fn combine( &self, writer: &mut impl Write, other: &State, repr: &Repr<'a, Sep>, ) -> Result<(), std::io::Error> { let key = self.get_current_key(); for line1 in &self.seq { for line2 in &other.seq { if repr.uses_format() { repr.print_format(writer, |spec| match *spec { Spec::Key => key, Spec::Field(file_num, field_num) => { if file_num == self.file_num { return line1.get_field(field_num); } if file_num == other.file_num { return line2.get_field(field_num); } None } })?; } else { repr.print_field(writer, key)?; repr.print_fields(writer, line1, self.key)?; repr.print_fields(writer, line2, other.key)?; } repr.print_line_ending(writer)?; } } Ok(()) } /// Reset with the next line. fn reset(&mut self, next_line: Option) { self.seq.clear(); if let Some(line) = next_line { self.seq.push(line); } } fn reset_read_line( &mut self, input: &Input, ) -> Result<(), std::io::Error> { let line = self.read_line(&input.separator)?; self.reset(line); Ok(()) } fn reset_next_line(&mut self, input: &Input) -> Result<(), JoinError> { let line = self.next_line(input)?; self.reset(line); Ok(()) } fn has_line(&self) -> bool { !self.seq.is_empty() } fn initialize( &mut self, read_sep: &Sep, autoformat: bool, ) -> std::io::Result { if let Some(line) = self.read_line(read_sep)? { self.seq.push(line); if autoformat { return Ok(self.seq[0].field_ranges.len()); } } Ok(0) } fn finalize( &mut self, writer: &mut impl Write, input: &Input, repr: &Repr<'a, Sep>, ) -> UResult<()> { if self.has_line() { if self.print_unpaired { self.print_first_line(writer, repr)?; } let mut next_line = self.next_line(input)?; while let Some(line) = &next_line { if self.print_unpaired { self.print_line(writer, line, repr)?; } self.reset(next_line); next_line = self.next_line(input)?; } } Ok(()) } /// Get the next line without the order check. 
fn read_line(&mut self, sep: &Sep) -> Result, std::io::Error> { match self.lines.next() { Some(value) => { self.line_num += 1; let line = Line::new(value?, sep, self.max_len); if line.field_ranges.len() > self.max_len { self.max_len = line.field_ranges.len(); } Ok(Some(line)) } None => Ok(None), } } /// Get the next line with the order check. fn next_line(&mut self, input: &Input) -> Result, JoinError> { if let Some(line) = self.read_line(&input.separator)? { if input.check_order == CheckOrder::Disabled { return Ok(Some(line)); } let diff = input.compare(self.get_current_key(), line.get_field(self.key)); if diff == Ordering::Greater && (input.check_order == CheckOrder::Enabled || (self.has_unpaired && !self.has_failed)) { let err_msg = translate!("join-error-not-sorted", "file" => self.file_name.maybe_quote(), "line_num" => self.line_num, "content" => String::from_utf8_lossy(&line.string)); // This is fatal if the check is enabled. if input.check_order == CheckOrder::Enabled { return Err(JoinError::UnorderedInput(err_msg)); } eprintln!("{}: {err_msg}", uucore::execution_phrase()); self.has_failed = true; } Ok(Some(line)) } else { Ok(None) } } /// Gets the key value of the lines stored in seq. 
fn get_current_key(&self) -> Option<&[u8]> { self.seq[0].get_field(self.key) } fn print_line( &self, writer: &mut impl Write, line: &Line, repr: &Repr<'a, Sep>, ) -> Result<(), std::io::Error> { if repr.uses_format() { repr.print_format(writer, |spec| match *spec { Spec::Key => line.get_field(self.key), Spec::Field(file_num, field_num) => { if file_num == self.file_num { line.get_field(field_num) } else { None } } })?; } else { repr.print_field(writer, line.get_field(self.key))?; repr.print_fields(writer, line, self.key)?; } repr.print_line_ending(writer) } fn print_first_line( &self, writer: &mut impl Write, repr: &Repr<'a, Sep>, ) -> Result<(), std::io::Error> { self.print_line(writer, &self.seq[0], repr) } } fn parse_separator(value_os: &OsString) -> UResult { // Five possible separator values: // No argument supplied, separate on whitespace; handled implicitly as the default elsewhere // An empty string arg, whole line separation // On unix-likes only, a single arbitrary byte // The two-character "\0" string, interpreted as a single 0 byte // A single scalar valid in the locale encoding (currently only UTF-8) if value_os.is_empty() { return Ok(SepSetting::Line); } #[cfg(unix)] { let value = value_os.as_bytes(); if value.len() == 1 { return Ok(SepSetting::Byte(value[0])); } } let Some(value) = value_os.to_str() else { #[cfg(unix)] return Err(USimpleError::new(1, translate!("join-error-non-utf8-tab"))); #[cfg(not(unix))] return Err(USimpleError::new( 1, translate!("join-error-unprintable-separators"), )); }; let mut chars = value.chars(); let c = chars.next().expect("valid string with at least one byte"); match chars.next() { None => Ok(SepSetting::Char(value.into())), Some('0') if c == '\\' => Ok(SepSetting::Byte(0)), _ => Err(USimpleError::new( 1, translate!("join-error-multi-character-tab", "value" => value), )), } } fn parse_print_settings(matches: &clap::ArgMatches) -> UResult<(bool, bool, bool)> { let mut print_joined = true; let mut print_unpaired1 = 
false; let mut print_unpaired2 = false; let v_values = matches.get_many::("v"); if v_values.is_some() { print_joined = false; } let unpaired = v_values .unwrap_or_default() .chain(matches.get_many("a").unwrap_or_default()); for file_num in unpaired { match parse_file_number(file_num)? { FileNum::File1 => print_unpaired1 = true, FileNum::File2 => print_unpaired2 = true, } } Ok((print_joined, print_unpaired1, print_unpaired2)) } fn get_and_parse_field_number(matches: &clap::ArgMatches, key: &str) -> UResult> { let value = matches.get_one::(key).map(|s| s.as_str()); parse_field_number_option(value) } /// Parses the command-line arguments and constructs a `Settings` struct. /// /// This function takes the matches from the command-line arguments, processes them, /// and returns a `Settings` struct that encapsulates the configuration for the program. #[allow(clippy::field_reassign_with_default)] fn parse_settings(matches: &clap::ArgMatches) -> UResult { let keys = get_and_parse_field_number(matches, "j")?; let key1 = get_and_parse_field_number(matches, "1")?; let key2 = get_and_parse_field_number(matches, "2")?; let (print_joined, print_unpaired1, print_unpaired2) = parse_print_settings(matches)?; let mut settings = Settings::default(); settings.print_joined = print_joined; settings.print_unpaired1 = print_unpaired1; settings.print_unpaired2 = print_unpaired2; settings.ignore_case = matches.get_flag("i"); settings.key1 = get_field_number(keys, key1)?; settings.key2 = get_field_number(keys, key2)?; if let Some(value_os) = matches.get_one::("t") { settings.separator = parse_separator(value_os)?; } if let Some(format) = matches.get_one::("o") { if format == "auto" { settings.autoformat = true; } else { let mut specs = vec![]; for part in format.split([' ', ',', '\t']) { specs.push(Spec::parse(part)?); } settings.format = specs; } } if let Some(empty) = matches.get_one::("e") { settings.empty = empty.as_bytes().to_vec(); } if matches.get_flag("nocheck-order") { 
settings.check_order = CheckOrder::Disabled; } if matches.get_flag("check-order") { settings.check_order = CheckOrder::Enabled; } if matches.get_flag("header") { settings.headers = true; } settings.line_ending = LineEnding::from_zero_flag(matches.get_flag("z")); Ok(settings) } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let settings = parse_settings(&matches)?; let file1 = matches.get_one::("file1").unwrap(); let file2 = matches.get_one::("file2").unwrap(); if file1 == "-" && file2 == "-" { return Err(USimpleError::new( 1, translate!("join-error-both-files-stdin"), )); } let sep = settings.separator.clone(); match sep { SepSetting::Byte(byte) => exec(file1, file2, settings, OneByteSep { byte: [byte] }), SepSetting::Char(c) => exec( file1, file2, settings, MultiByteSep { finder: Finder::new(&c), }, ), SepSetting::Whitespaces => exec(file1, file2, settings, WhitespaceSep {}), SepSetting::Line => exec(file1, file2, settings, LineSep {}), } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("join-about")) .override_usage(format_usage(&translate!("join-usage"))) .infer_long_args(true) .arg( Arg::new("a") .short('a') .action(ArgAction::Append) .num_args(1) .value_parser(["1", "2"]) .value_name("FILENUM") .help(translate!("join-help-a")), ) .arg( Arg::new("v") .short('v') .action(ArgAction::Append) .num_args(1) .value_parser(["1", "2"]) .value_name("FILENUM") .help(translate!("join-help-v")), ) .arg( Arg::new("e") .short('e') .value_name("EMPTY") .help(translate!("join-help-e")), ) .arg( Arg::new("i") .short('i') .long("ignore-case") .help(translate!("join-help-i")) .action(ArgAction::SetTrue), ) .arg( Arg::new("j") .short('j') .value_name("FIELD") .help(translate!("join-help-j")), ) .arg( Arg::new("o") .short('o') .value_name("FORMAT") .help(translate!("join-help-o")), ) .arg( Arg::new("t") .short('t') .value_name("CHAR") 
.value_parser(ValueParser::os_string()) .help(translate!("join-help-t")), ) .arg( Arg::new("1") .short('1') .value_name("FIELD") .help(translate!("join-help-1")), ) .arg( Arg::new("2") .short('2') .value_name("FIELD") .help(translate!("join-help-2")), ) .arg( Arg::new("check-order") .long("check-order") .help(translate!("join-help-check-order")) .action(ArgAction::SetTrue), ) .arg( Arg::new("nocheck-order") .long("nocheck-order") .help(translate!("join-help-nocheck-order")) .action(ArgAction::SetTrue), ) .arg( Arg::new("header") .long("header") .help(translate!("join-help-header")) .action(ArgAction::SetTrue), ) .arg( Arg::new("z") .short('z') .long("zero-terminated") .help(translate!("join-help-z")) .action(ArgAction::SetTrue), ) .arg( Arg::new("file1") .required(true) .value_name("FILE1") .value_hint(clap::ValueHint::FilePath) .hide(true), ) .arg( Arg::new("file2") .required(true) .value_name("FILE2") .value_hint(clap::ValueHint::FilePath) .hide(true), ) } fn exec(file1: &str, file2: &str, settings: Settings, sep: Sep) -> UResult<()> { let stdin = stdin(); let mut state1 = State::new( FileNum::File1, file1, &stdin, settings.key1, settings.line_ending, settings.print_unpaired1, )?; let mut state2 = State::new( FileNum::File2, file2, &stdin, settings.key2, settings.line_ending, settings.print_unpaired2, )?; let input = Input::new(sep.clone(), settings.ignore_case, settings.check_order); let format = if settings.autoformat { let mut format = vec![Spec::Key]; let mut initialize = |state: &mut State| -> UResult<()> { let max_fields = state.initialize(&sep, settings.autoformat)?; for i in 0..max_fields { if i != state.key { format.push(Spec::Field(state.file_num, i)); } } Ok(()) }; initialize(&mut state1)?; initialize(&mut state2)?; format } else { state1.initialize(&sep, settings.autoformat)?; state2.initialize(&sep, settings.autoformat)?; settings.format }; let repr = Repr::new(settings.line_ending, sep, format, &settings.empty); let stdout = stdout(); let mut writer 
= BufWriter::new(stdout.lock()); if settings.headers { state1.print_headers(&mut writer, &state2, &repr)?; state1.reset_read_line(&input)?; state2.reset_read_line(&input)?; } while state1.has_line() && state2.has_line() { let diff = input.compare(state1.get_current_key(), state2.get_current_key()); match diff { Ordering::Less => { if let Err(e) = state1.skip_line(&mut writer, &input, &repr) { writer.flush()?; return Err(e); } state1.has_unpaired = true; state2.has_unpaired = true; } Ordering::Greater => { if let Err(e) = state2.skip_line(&mut writer, &input, &repr) { writer.flush()?; return Err(e); } state1.has_unpaired = true; state2.has_unpaired = true; } Ordering::Equal => { let next_line1 = match state1.extend(&input) { Ok(line) => line, Err(e) => { writer.flush()?; return Err(e); } }; let next_line2 = match state2.extend(&input) { Ok(line) => line, Err(e) => { writer.flush()?; return Err(e); } }; if settings.print_joined { state1.combine(&mut writer, &state2, &repr)?; } state1.reset(next_line1); state2.reset(next_line2); } } } if let Err(e) = state1.finalize(&mut writer, &input, &repr) { writer.flush()?; return Err(e); } if let Err(e) = state2.finalize(&mut writer, &input, &repr) { writer.flush()?; return Err(e); } writer.flush()?; if state1.has_failed || state2.has_failed { eprintln!( "{}: {}", uucore::execution_phrase(), translate!("join-error-input-not-sorted") ); set_exit_code(1); } Ok(()) } /// Check that keys for both files and for a particular file are not /// contradictory and return the key index. fn get_field_number(keys: Option, key: Option) -> UResult { if let Some(keys) = keys { if let Some(key) = key { if keys != key { // Show zero-based field numbers as one-based. return Err(USimpleError::new( 1, translate!("join-error-incompatible-fields", "field1" => (keys + 1), "field2" => (key + 1)), )); } } return Ok(keys); } Ok(key.unwrap_or(0)) } /// Parse the specified field string as a natural number and return /// the zero-based field number. 
fn parse_field_number(value: &str) -> UResult { match value.parse::() { Ok(result) if result > 0 => Ok(result - 1), Err(e) if e.kind() == &IntErrorKind::PosOverflow => Ok(usize::MAX), _ => Err(USimpleError::new( 1, translate!("join-error-invalid-field-number", "value" => value.quote()), )), } } fn parse_file_number(value: &str) -> UResult { match value { "1" => Ok(FileNum::File1), "2" => Ok(FileNum::File2), value => Err(USimpleError::new( 1, translate!("join-error-invalid-file-number-simple", "value" => value.quote()), )), } } fn parse_field_number_option(value: Option<&str>) -> UResult> { match value { None => Ok(None), Some(val) => Ok(Some(parse_field_number(val)?)), } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/join/src/main.rs000066400000000000000000000000271504311601400252030ustar00rootroot00000000000000uucore::bin!(uu_join); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/000077500000000000000000000000001504311601400231175ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/Cargo.toml000066400000000000000000000011771504311601400250550ustar00rootroot00000000000000[package] name = "uu_kill" description = "kill ~ (uutils) send a signal to a process" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/kill" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/kill.rs" [dependencies] clap = { workspace = true } nix = { workspace = true, features = ["signal"] } uucore = { workspace = true, features = ["signals"] } fluent = { workspace = true } [[bin]] name = "kill" path = "src/main.rs" 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/LICENSE000077700000000000000000000000001504311601400257652../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/locales/000077500000000000000000000000001504311601400245415ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/locales/en-US.ftl000066400000000000000000000010561504311601400262010ustar00rootroot00000000000000kill-about = Send signal to processes or list information about signals. kill-usage = kill [OPTIONS]... PID... # Help messages kill-help-list = Lists signals kill-help-table = Lists table of signals kill-help-signal = Sends given signal instead of SIGTERM # Error messages kill-error-no-process-id = no process ID specified Try --help for more information. kill-error-invalid-signal = { $signal }: invalid signal kill-error-parse-argument = failed to parse argument { $argument }: { $error } kill-error-sending-signal = sending signal to { $pid } failed coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/locales/fr-FR.ftl000066400000000000000000000011771504311601400261720ustar00rootroot00000000000000kill-about = Envoyer un signal aux processus ou lister les informations sur les signaux. kill-usage = kill [OPTIONS]... PID... # Messages d'aide kill-help-list = Liste les signaux kill-help-table = Liste le tableau des signaux kill-help-signal = Envoie le signal donné au lieu de SIGTERM # Messages d'erreur kill-error-no-process-id = aucun ID de processus spécifié Essayez --help pour plus d'informations. 
kill-error-invalid-signal = { $signal } : signal invalide kill-error-parse-argument = échec de l'analyse de l'argument { $argument } : { $error } kill-error-sending-signal = échec de l'envoi du signal au processus { $pid } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/src/000077500000000000000000000000001504311601400237065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/src/kill.rs000066400000000000000000000172271504311601400252200ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) signalname pids killpg use clap::{Arg, ArgAction, Command}; use nix::sys::signal::{self, Signal}; use nix::unistd::Pid; use std::io::Error; use uucore::display::Quotable; use uucore::error::{FromIo, UResult, USimpleError}; use uucore::translate; use uucore::signals::{ALL_SIGNALS, signal_by_name_or_value, signal_name_by_value}; use uucore::{format_usage, show}; // When the -l option is selected, the program displays the type of signal related to a certain // value or string. 
In case of a value, the program should control the lower 8 bits, but there is // a particular case in which if the value is in range [128, 159], it is translated to a signal const OFFSET: usize = 128; pub mod options { pub static PIDS_OR_SIGNALS: &str = "pids_or_signals"; pub static LIST: &str = "list"; pub static TABLE: &str = "table"; pub static SIGNAL: &str = "signal"; } #[derive(Clone, Copy)] pub enum Mode { Kill, Table, List, } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let mut args = args.collect_ignore(); let obs_signal = handle_obsolete(&mut args); let matches = uu_app().try_get_matches_from(args)?; let mode = if matches.get_flag(options::TABLE) { Mode::Table } else if matches.get_flag(options::LIST) { Mode::List } else { Mode::Kill }; let pids_or_signals: Vec = matches .get_many::(options::PIDS_OR_SIGNALS) .map(|v| v.map(ToString::to_string).collect()) .unwrap_or_default(); match mode { Mode::Kill => { let sig = if let Some(signal) = obs_signal { signal } else if let Some(signal) = matches.get_one::(options::SIGNAL) { parse_signal_value(signal)? 
} else { 15_usize //SIGTERM }; let sig_name = signal_name_by_value(sig); // Signal does not support converting from EXIT // Instead, nix::signal::kill expects Option::None to properly handle EXIT let sig: Option = if sig_name.is_some_and(|name| name == "EXIT") { None } else { let sig = (sig as i32) .try_into() .map_err(|e| Error::from_raw_os_error(e as i32))?; Some(sig) }; let pids = parse_pids(&pids_or_signals)?; if pids.is_empty() { Err(USimpleError::new(1, translate!("kill-error-no-process-id"))) } else { kill(sig, &pids); Ok(()) } } Mode::Table => { table(); Ok(()) } Mode::List => { list(&pids_or_signals); Ok(()) } } } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("kill-about")) .override_usage(format_usage(&translate!("kill-usage"))) .infer_long_args(true) .allow_negative_numbers(true) .arg( Arg::new(options::LIST) .short('l') .long(options::LIST) .help(translate!("kill-help-list")) .conflicts_with(options::TABLE) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::TABLE) .short('t') .short_alias('L') .long(options::TABLE) .help(translate!("kill-help-table")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SIGNAL) .short('s') .short_alias('n') // For bash compatibility, like in GNU coreutils .long(options::SIGNAL) .value_name("signal") .help(translate!("kill-help-signal")) .conflicts_with_all([options::LIST, options::TABLE]), ) .arg( Arg::new(options::PIDS_OR_SIGNALS) .hide(true) .action(ArgAction::Append), ) } fn handle_obsolete(args: &mut Vec) -> Option { // Sanity check if args.len() > 2 { // Old signal can only be in the first argument position let slice = args[1].as_str(); if let Some(signal) = slice.strip_prefix('-') { // With '-', a signal name must start with an uppercase char if signal.chars().next().is_some_and(|c| c.is_lowercase()) { return None; } // Check if it is a valid signal let opt_signal = signal_by_name_or_value(signal); if opt_signal.is_some() { // remove the 
signal before return args.remove(1); return opt_signal; } } } None } fn table() { for (idx, signal) in ALL_SIGNALS.iter().enumerate() { println!("{idx: >#2} {signal}"); } } fn print_signal(signal_name_or_value: &str) -> UResult<()> { // Closure used to track the last 8 bits of the signal value // when the -l option is passed only the lower 8 bits are important // or the value is in range [128, 159] // Example: kill -l 143 => TERM because 143 = 15 + 128 // Example: kill -l 2304 => EXIT let lower_8_bits = |x: usize| x & 0xff; let option_num_parse = signal_name_or_value.parse::().ok(); for (value, &signal) in ALL_SIGNALS.iter().enumerate() { if signal.eq_ignore_ascii_case(signal_name_or_value) || format!("SIG{signal}").eq_ignore_ascii_case(signal_name_or_value) { println!("{value}"); return Ok(()); } else if signal_name_or_value == value.to_string() || option_num_parse.is_some_and(|signal_value| lower_8_bits(signal_value) == value) || option_num_parse.is_some_and(|signal_value| signal_value == value + OFFSET) { println!("{signal}"); return Ok(()); } } Err(USimpleError::new( 1, translate!("kill-error-invalid-signal", "signal" => signal_name_or_value.quote()), )) } fn print_signals() { for signal in ALL_SIGNALS { println!("{signal}"); } } fn list(signals: &Vec) { if signals.is_empty() { print_signals(); } else { for signal in signals { if let Err(e) = print_signal(signal) { uucore::show!(e); } } } } fn parse_signal_value(signal_name: &str) -> UResult { let optional_signal_value = signal_by_name_or_value(signal_name); match optional_signal_value { Some(x) => Ok(x), None => Err(USimpleError::new( 1, translate!("kill-error-invalid-signal", "signal" => signal_name.quote()), )), } } fn parse_pids(pids: &[String]) -> UResult> { pids.iter() .map(|x| { x.parse::().map_err(|e| { USimpleError::new( 1, translate!("kill-error-parse-argument", "argument" => x.quote(), "error" => e), ) }) }) .collect() } fn kill(sig: Option, pids: &[i32]) { for &pid in pids { if let Err(e) = 
signal::kill(Pid::from_raw(pid), sig) { show!( Error::from_raw_os_error(e as i32) .map_err_context(|| { translate!("kill-error-sending-signal", "pid" => pid) }) ); } } } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/kill/src/main.rs000066400000000000000000000000271504311601400251770ustar00rootroot00000000000000uucore::bin!(uu_kill); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/000077500000000000000000000000001504311601400231215ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/Cargo.toml000066400000000000000000000011031504311601400250440ustar00rootroot00000000000000[package] name = "uu_link" description = "link ~ (uutils) create a hard (file system) link to FILE" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/link" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/link.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "link" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/LICENSE000077700000000000000000000000001504311601400257672../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/locales/000077500000000000000000000000001504311601400245435ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/locales/en-US.ftl000066400000000000000000000002761504311601400262060ustar00rootroot00000000000000link-about = Call the link function to create a link named FILE2 to an existing FILE1. 
link-usage = link FILE1 FILE2 link-error-cannot-create-link = cannot create link { $new } to { $old } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/locales/fr-FR.ftl000066400000000000000000000003461504311601400261710ustar00rootroot00000000000000link-about = Appelle la fonction link pour créer un lien nommé FILE2 vers un FILE1 existant. link-usage = link FILE1 FILE2 # Messages d'erreur link-error-cannot-create-link = impossible de créer le lien { $new } vers { $old } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/src/000077500000000000000000000000001504311601400237105ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/src/link.rs000066400000000000000000000027051504311601400252170ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use clap::builder::ValueParser; use clap::{Arg, Command}; use std::ffi::OsString; use std::fs::hard_link; use std::path::Path; use uucore::display::Quotable; use uucore::error::{FromIo, UResult}; use uucore::format_usage; use uucore::translate; pub mod options { pub static FILES: &str = "FILES"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let matches = uu_app().try_get_matches_from(args)?; let files: Vec<_> = matches .get_many::(options::FILES) .unwrap_or_default() .collect(); let old = Path::new(files[0]); let new = Path::new(files[1]); hard_link(old, new).map_err_context( || translate!("link-error-cannot-create-link", "new" => new.quote(), "old" => old.quote()), ) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("link-about")) .override_usage(format_usage(&translate!("link-usage"))) .infer_long_args(true) .arg( Arg::new(options::FILES) .hide(true) .required(true) .num_args(2) 
.value_hint(clap::ValueHint::AnyPath) .value_parser(ValueParser::os_string()), ) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/link/src/main.rs000066400000000000000000000000271504311601400252010ustar00rootroot00000000000000uucore::bin!(uu_link); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/000077500000000000000000000000001504311601400225755ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/Cargo.toml000066400000000000000000000011741504311601400245300ustar00rootroot00000000000000[package] name = "uu_ln" description = "ln ~ (uutils) create a (file system) link to TARGET" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/ln" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/ln.rs" [dependencies] clap = { workspace = true } uucore = { workspace = true, features = ["backup-control", "fs"] } thiserror = { workspace = true } fluent = { workspace = true } [[bin]] name = "ln" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/LICENSE000077700000000000000000000000001504311601400254432../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/locales/000077500000000000000000000000001504311601400242175ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/locales/en-US.ftl000066400000000000000000000042231504311601400256560ustar00rootroot00000000000000ln-about = Make links between files. ln-usage = ln [OPTION]... [-T] TARGET LINK_NAME ln [OPTION]... TARGET ln [OPTION]... TARGET... DIRECTORY ln [OPTION]... -t DIRECTORY TARGET... ln-after-help = In the 1st form, create a link to TARGET with the name LINK_NAME. In the 2nd form, create a link to TARGET in the current directory. 
In the 3rd and 4th forms, create links to each TARGET in DIRECTORY. Create hard links by default, symbolic links with --symbolic. By default, each destination (name of new link) should not already exist. When creating hard links, each TARGET must exist. Symbolic links can hold arbitrary text; if later resolved, a relative link is interpreted in relation to its parent directory. ln-help-force = remove existing destination files ln-help-interactive = prompt whether to remove existing destination files ln-help-no-dereference = treat LINK_NAME as a normal file if it is a symbolic link to a directory ln-help-logical = follow TARGETs that are symbolic links ln-help-physical = make hard links directly to symbolic links ln-help-symbolic = make symbolic links instead of hard links ln-help-target-directory = specify the DIRECTORY in which to create the links ln-help-no-target-directory = treat LINK_NAME as a normal file always ln-help-relative = create symbolic links relative to link location ln-help-verbose = print name of each linked file ln-error-target-is-not-directory = target {$target} is not a directory ln-error-same-file = {$file1} and {$file2} are the same file ln-error-missing-destination = missing destination file operand after {$operand} ln-error-extra-operand = extra operand {$operand} Try '{$program} --help' for more information. ln-error-could-not-update = Could not update {$target}: {$error} ln-error-cannot-stat = cannot stat {$path}: No such file or directory ln-error-will-not-overwrite = will not overwrite just-created '{$target}' with '{$source}' ln-prompt-replace = replace {$file}? 
ln-cannot-backup = cannot backup {$file} ln-failed-to-access = failed to access {$file} ln-failed-to-create-hard-link = failed to create hard link {$source} => {$dest} ln-backup = backup: {$backup} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/locales/fr-FR.ftl000066400000000000000000000050371504311601400256470ustar00rootroot00000000000000ln-about = Créer des liens entre fichiers ln-usage = ln [OPTION]... [-T] CIBLE NOM_LIEN ln [OPTION]... CIBLE ln [OPTION]... CIBLE... RÉPERTOIRE ln [OPTION]... -t RÉPERTOIRE CIBLE... ln-after-help = Dans la 1ère forme, créer un lien vers CIBLE avec le nom NOM_LIEN. Dans la 2ème forme, créer un lien vers CIBLE dans le répertoire courant. Dans les 3ème et 4ème formes, créer des liens vers chaque CIBLE dans RÉPERTOIRE. Créer des liens physiques par défaut, des liens symboliques avec --symbolic. Par défaut, chaque destination (nom du nouveau lien) ne doit pas déjà exister. Lors de la création de liens physiques, chaque CIBLE doit exister. Les liens symboliques peuvent contenir du texte arbitraire ; s'ils sont résolus plus tard, un lien relatif est interprété en relation avec son répertoire parent. 
ln-help-force = supprimer les fichiers de destination existants ln-help-interactive = demander avant de supprimer les fichiers de destination existants ln-help-no-dereference = traiter NOM_LIEN comme un fichier normal s'il s'agit d'un lien symbolique vers un répertoire ln-help-logical = suivre les CIBLEs qui sont des liens symboliques ln-help-physical = créer des liens physiques directement vers les liens symboliques ln-help-symbolic = créer des liens symboliques au lieu de liens physiques ln-help-target-directory = spécifier le RÉPERTOIRE dans lequel créer les liens ln-help-no-target-directory = toujours traiter NOM_LIEN comme un fichier normal ln-help-relative = créer des liens symboliques relatifs à l'emplacement du lien ln-help-verbose = afficher le nom de chaque fichier lié ln-error-target-is-not-directory = la cible {$target} n'est pas un répertoire ln-error-same-file = {$file1} et {$file2} sont le même fichier ln-error-missing-destination = opérande de fichier de destination manquant après {$operand} ln-error-extra-operand = opérande supplémentaire {$operand} Essayez « {$program} --help » pour plus d'informations. ln-error-could-not-update = Impossible de mettre à jour {$target} : {$error} ln-error-cannot-stat = impossible d'analyser {$path} : Aucun fichier ou répertoire de ce nom ln-error-will-not-overwrite = ne remplacera pas le fichier « {$target} » qui vient d'être créé par « {$source} » ln-prompt-replace = remplacer {$file} ? 
ln-cannot-backup = impossible de sauvegarder {$file} ln-failed-to-access = échec d'accès à {$file} ln-failed-to-create-hard-link = échec de création du lien physique {$source} => {$dest} ln-backup = sauvegarde : {$backup} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/src/000077500000000000000000000000001504311601400233645ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/src/ln.rs000066400000000000000000000401111504311601400243400ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) srcpath targetpath EEXIST use clap::{Arg, ArgAction, Command}; use uucore::display::Quotable; use uucore::error::{FromIo, UError, UResult}; use uucore::fs::{make_path_relative_to, paths_refer_to_same_file}; use uucore::translate; use uucore::{format_usage, prompt_yes, show_error}; use std::borrow::Cow; use std::collections::HashSet; use std::ffi::OsString; use std::fs; use thiserror::Error; #[cfg(any(unix, target_os = "redox"))] use std::os::unix::fs::symlink; #[cfg(windows)] use std::os::windows::fs::{symlink_dir, symlink_file}; use std::path::{Path, PathBuf}; use uucore::backup_control::{self, BackupMode}; use uucore::fs::{MissingHandling, ResolveMode, canonicalize}; pub struct Settings { overwrite: OverwriteMode, backup: BackupMode, suffix: String, symbolic: bool, relative: bool, logical: bool, target_dir: Option, no_target_dir: bool, no_dereference: bool, verbose: bool, } #[derive(Clone, Debug, Eq, PartialEq)] pub enum OverwriteMode { NoClobber, Interactive, Force, } #[derive(Error, Debug)] enum LnError { #[error("{}", translate!("ln-error-target-is-not-directory", "target" => _0.quote()))] TargetIsNotADirectory(PathBuf), #[error("")] SomeLinksFailed, #[error("{}", translate!("ln-error-same-file", "file1" => _0.quote(), "file2" => 
_1.quote()))] SameFile(PathBuf, PathBuf), #[error("{}", translate!("ln-error-missing-destination", "operand" => _0.quote()))] MissingDestination(PathBuf), #[error("{}", translate!("ln-error-extra-operand", "operand" => format!("{_0:?}").trim_matches('"'), "program" => _1.clone()))] ExtraOperand(OsString, String), } impl UError for LnError { fn code(&self) -> i32 { 1 } } mod options { pub const FORCE: &str = "force"; //pub const DIRECTORY: &str = "directory"; pub const INTERACTIVE: &str = "interactive"; pub const NO_DEREFERENCE: &str = "no-dereference"; pub const SYMBOLIC: &str = "symbolic"; pub const LOGICAL: &str = "logical"; pub const PHYSICAL: &str = "physical"; pub const TARGET_DIRECTORY: &str = "target-directory"; pub const NO_TARGET_DIRECTORY: &str = "no-target-directory"; pub const RELATIVE: &str = "relative"; pub const VERBOSE: &str = "verbose"; } static ARG_FILES: &str = "files"; #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let after_help = format!( "{}\n\n{}", translate!("ln-after-help"), backup_control::BACKUP_CONTROL_LONG_HELP ); let matches = uu_app().after_help(after_help).try_get_matches_from(args)?; /* the list of files */ let paths: Vec = matches .get_many::(ARG_FILES) .unwrap() .map(PathBuf::from) .collect(); let symbolic = matches.get_flag(options::SYMBOLIC); let overwrite_mode = if matches.get_flag(options::FORCE) { OverwriteMode::Force } else if matches.get_flag(options::INTERACTIVE) { OverwriteMode::Interactive } else { OverwriteMode::NoClobber }; let backup_mode = backup_control::determine_backup_mode(&matches)?; let backup_suffix = backup_control::determine_backup_suffix(&matches); // When we have "-L" or "-L -P", false otherwise let logical = matches.get_flag(options::LOGICAL); let settings = Settings { overwrite: overwrite_mode, backup: backup_mode, suffix: backup_suffix, symbolic, logical, relative: matches.get_flag(options::RELATIVE), target_dir: matches .get_one::(options::TARGET_DIRECTORY) .map(String::from), 
no_target_dir: matches.get_flag(options::NO_TARGET_DIRECTORY), no_dereference: matches.get_flag(options::NO_DEREFERENCE), verbose: matches.get_flag(options::VERBOSE), }; exec(&paths[..], &settings) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .about(translate!("ln-about")) .override_usage(format_usage(&translate!("ln-usage"))) .infer_long_args(true) .arg(backup_control::arguments::backup()) .arg(backup_control::arguments::backup_no_args()) /*.arg( Arg::new(options::DIRECTORY) .short('d') .long(options::DIRECTORY) .help("allow users with appropriate privileges to attempt to make hard links to directories") )*/ .arg( Arg::new(options::FORCE) .short('f') .long(options::FORCE) .help(translate!("ln-help-force")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::INTERACTIVE) .short('i') .long(options::INTERACTIVE) .help(translate!("ln-help-interactive")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::NO_DEREFERENCE) .short('n') .long(options::NO_DEREFERENCE) .help(translate!("ln-help-no-dereference")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::LOGICAL) .short('L') .long(options::LOGICAL) .help(translate!("ln-help-logical")) .overrides_with(options::PHYSICAL) .action(ArgAction::SetTrue), ) .arg( // Not implemented yet Arg::new(options::PHYSICAL) .short('P') .long(options::PHYSICAL) .help(translate!("ln-help-physical")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::SYMBOLIC) .short('s') .long(options::SYMBOLIC) .help(translate!("ln-help-symbolic")) // override added for https://github.com/uutils/coreutils/issues/2359 .overrides_with(options::SYMBOLIC) .action(ArgAction::SetTrue), ) .arg(backup_control::arguments::suffix()) .arg( Arg::new(options::TARGET_DIRECTORY) .short('t') .long(options::TARGET_DIRECTORY) .help(translate!("ln-help-target-directory")) .value_name("DIRECTORY") .value_hint(clap::ValueHint::DirPath) .conflicts_with(options::NO_TARGET_DIRECTORY), ) .arg( 
Arg::new(options::NO_TARGET_DIRECTORY) .short('T') .long(options::NO_TARGET_DIRECTORY) .help(translate!("ln-help-no-target-directory")) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::RELATIVE) .short('r') .long(options::RELATIVE) .help(translate!("ln-help-relative")) .requires(options::SYMBOLIC) .action(ArgAction::SetTrue), ) .arg( Arg::new(options::VERBOSE) .short('v') .long(options::VERBOSE) .help(translate!("ln-help-verbose")) .action(ArgAction::SetTrue), ) .arg( Arg::new(ARG_FILES) .action(ArgAction::Append) .value_hint(clap::ValueHint::AnyPath) .required(true) .num_args(1..), ) } fn exec(files: &[PathBuf], settings: &Settings) -> UResult<()> { // Handle cases where we create links in a directory first. if let Some(ref name) = settings.target_dir { // 4th form: a directory is specified by -t. return link_files_in_dir(files, &PathBuf::from(name), settings); } if !settings.no_target_dir { if files.len() == 1 { // 2nd form: the target directory is the current directory. return link_files_in_dir(files, &PathBuf::from("."), settings); } let last_file = &PathBuf::from(files.last().unwrap()); if files.len() > 2 || last_file.is_dir() { // 3rd form: create links in the last argument. return link_files_in_dir(&files[0..files.len() - 1], last_file, settings); } } // 1st form. Now there should be only two operands, but if -T is // specified we may have a wrong number of operands. 
if files.len() == 1 { return Err(LnError::MissingDestination(files[0].clone()).into()); } if files.len() > 2 { return Err(LnError::ExtraOperand( files[2].clone().into(), uucore::execution_phrase().to_string(), ) .into()); } assert!(!files.is_empty()); link(&files[0], &files[1], settings) } #[allow(clippy::cognitive_complexity)] fn link_files_in_dir(files: &[PathBuf], target_dir: &Path, settings: &Settings) -> UResult<()> { if !target_dir.is_dir() { return Err(LnError::TargetIsNotADirectory(target_dir.to_owned()).into()); } // remember the linked destinations for further usage let mut linked_destinations: HashSet = HashSet::with_capacity(files.len()); let mut all_successful = true; for srcpath in files { let targetpath = if settings.no_dereference && matches!(settings.overwrite, OverwriteMode::Force) && target_dir.is_symlink() { // In that case, we don't want to do link resolution // We need to clean the target if target_dir.is_file() { if let Err(e) = fs::remove_file(target_dir) { show_error!( "{}", translate!("ln-error-could-not-update", "target" => target_dir.quote(), "error" => e) ); } } #[cfg(windows)] if target_dir.is_dir() { // Not sure why but on Windows, the symlink can be // considered as a dir // See test_ln::test_symlink_no_deref_dir if let Err(e) = fs::remove_dir(target_dir) { show_error!( "{}", translate!("ln-error-could-not-update", "target" => target_dir.quote(), "error" => e) ); } } target_dir.to_path_buf() } else { match srcpath.as_os_str().to_str() { Some(name) => { match Path::new(name).file_name() { Some(basename) => target_dir.join(basename), // This can be None only for "." or "..". Trying // to create a link with such name will fail with // EEXIST, which agrees with the behavior of GNU // coreutils. 
None => target_dir.join(name), } } None => { show_error!( "{}", translate!("ln-error-cannot-stat", "path" => srcpath.quote()) ); all_successful = false; continue; } } }; if linked_destinations.contains(&targetpath) { // If the target file was already created in this ln call, do not overwrite show_error!( "{}", translate!("ln-error-will-not-overwrite", "target" => targetpath.display(), "source" => srcpath.display()) ); all_successful = false; } else if let Err(e) = link(srcpath, &targetpath, settings) { show_error!("{e}"); all_successful = false; } linked_destinations.insert(targetpath.clone()); } if all_successful { Ok(()) } else { Err(LnError::SomeLinksFailed.into()) } } fn relative_path<'a>(src: &'a Path, dst: &Path) -> Cow<'a, Path> { if let Ok(src_abs) = canonicalize(src, MissingHandling::Missing, ResolveMode::Physical) { if let Ok(dst_abs) = canonicalize( dst.parent().unwrap(), MissingHandling::Missing, ResolveMode::Physical, ) { return make_path_relative_to(src_abs, dst_abs).into(); } } src.into() } #[allow(clippy::cognitive_complexity)] fn link(src: &Path, dst: &Path, settings: &Settings) -> UResult<()> { let mut backup_path = None; let source: Cow<'_, Path> = if settings.relative { relative_path(src, dst) } else { src.into() }; if dst.is_symlink() || dst.exists() { backup_path = match settings.backup { BackupMode::None => None, BackupMode::Simple => Some(simple_backup_path(dst, &settings.suffix)), BackupMode::Numbered => Some(numbered_backup_path(dst)), BackupMode::Existing => Some(existing_backup_path(dst, &settings.suffix)), }; if settings.backup == BackupMode::Existing && !settings.symbolic { // when ln --backup f f, it should detect that it is the same file if paths_refer_to_same_file(src, dst, true) { return Err(LnError::SameFile(src.to_owned(), dst.to_owned()).into()); } } if let Some(ref p) = backup_path { fs::rename(dst, p) .map_err_context(|| translate!("ln-cannot-backup", "file" => dst.quote()))?; } match settings.overwrite { 
OverwriteMode::NoClobber => {} OverwriteMode::Interactive => { if !prompt_yes!("{}", translate!("ln-prompt-replace", "file" => dst.quote())) { return Err(LnError::SomeLinksFailed.into()); } if fs::remove_file(dst).is_ok() {} // In case of error, don't do anything } OverwriteMode::Force => { if !dst.is_symlink() && paths_refer_to_same_file(src, dst, true) { return Err(LnError::SameFile(src.to_owned(), dst.to_owned()).into()); } if fs::remove_file(dst).is_ok() {} // In case of error, don't do anything } } } if settings.symbolic { symlink(&source, dst)?; } else { let p = if settings.logical && source.is_symlink() { // if we want to have an hard link, // source is a symlink and -L is passed // we want to resolve the symlink to create the hardlink fs::canonicalize(&source) .map_err_context(|| translate!("ln-failed-to-access", "file" => source.quote()))? } else { source.to_path_buf() }; fs::hard_link(p, dst).map_err_context(|| { translate!("ln-failed-to-create-hard-link", "source" => source.quote(), "dest" => dst.quote()) })?; } if settings.verbose { print!("{} -> {}", dst.quote(), source.quote()); match backup_path { Some(path) => println!(" ({})", translate!("ln-backup", "backup" => path.quote())), None => println!(), } } Ok(()) } fn simple_backup_path(path: &Path, suffix: &str) -> PathBuf { let mut p = path.as_os_str().to_str().unwrap().to_owned(); p.push_str(suffix); PathBuf::from(p) } fn numbered_backup_path(path: &Path) -> PathBuf { let mut i: u64 = 1; loop { let new_path = simple_backup_path(path, &format!(".~{i}~")); if !new_path.exists() { return new_path; } i += 1; } } fn existing_backup_path(path: &Path, suffix: &str) -> PathBuf { let test_path = simple_backup_path(path, ".~1~"); if test_path.exists() { return numbered_backup_path(path); } simple_backup_path(path, suffix) } #[cfg(windows)] pub fn symlink, P2: AsRef>(src: P1, dst: P2) -> std::io::Result<()> { if src.as_ref().is_dir() { symlink_dir(src, dst) } else { symlink_file(src, dst) } } 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ln/src/main.rs000066400000000000000000000000251504311601400246530ustar00rootroot00000000000000uucore::bin!(uu_ln); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/000077500000000000000000000000001504311601400236065ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/Cargo.toml000066400000000000000000000011601504311601400255340ustar00rootroot00000000000000[package] name = "uu_logname" description = "logname ~ (uutils) display the login name of the current user" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/logname" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/logname.rs" [dependencies] libc = { workspace = true } clap = { workspace = true } uucore = { workspace = true } fluent = { workspace = true } [[bin]] name = "logname" path = "src/main.rs" coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/LICENSE000077700000000000000000000000001504311601400264542../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/locales/000077500000000000000000000000001504311601400252305ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/locales/en-US.ftl000066400000000000000000000001241504311601400266630ustar00rootroot00000000000000logname-about = Print user's login name logname-error-no-login-name = no login name coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/locales/fr-FR.ftl000066400000000000000000000001631504311601400266530ustar00rootroot00000000000000logname-about = Afficher le nom de connexion de l'utilisateur logname-error-no-login-name = aucun nom de connexion 
coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/src/000077500000000000000000000000001504311601400243755ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/src/logname.rs000066400000000000000000000022041504311601400263630ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) getlogin userlogin use clap::Command; use std::ffi::CStr; use uucore::translate; use uucore::{error::UResult, show_error}; fn get_userlogin() -> Option { unsafe { let login: *const libc::c_char = libc::getlogin(); if login.is_null() { None } else { Some(String::from_utf8_lossy(CStr::from_ptr(login).to_bytes()).to_string()) } } } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let _ = uu_app().try_get_matches_from(args)?; match get_userlogin() { Some(userlogin) => println!("{userlogin}"), None => show_error!("{}", translate!("logname-error-no-login-name")), } Ok(()) } pub fn uu_app() -> Command { Command::new(uucore::util_name()) .version(uucore::crate_version!()) .override_usage(uucore::util_name()) .about(translate!("logname-about")) .infer_long_args(true) } coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/logname/src/main.rs000066400000000000000000000000321504311601400256620ustar00rootroot00000000000000uucore::bin!(uu_logname); coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/000077500000000000000000000000001504311601400226025ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/BENCHMARKING.md000066400000000000000000000066371504311601400247700ustar00rootroot00000000000000# Benchmarking ls ls majorly involves fetching a lot of details (depending upon what details are requested, eg. time/date, inode details, etc) for each path using system calls. 
Ideally, any system call should be done only once for each of the paths - not adhering to this principle leads to a lot of system call overhead multiplying and bubbling up, especially for recursive ls, therefore it is important to always benchmark multiple scenarios. ls _also_ prints a lot of information, so optimizing formatting operations is also critical: - Try to avoid using `format` unless required. - Try to avoid repeated string copies unless necessary. - If a temporary buffer is required, try to allocate a reasonable capacity to start with to avoid repeated reallocations. - Some values might be expensive to compute (e.g. current line width), but are only required in some cases. Consider delaying computations, e.g. by wrapping the evaluation in a LazyCell. This is an overview over what was benchmarked, and if you make changes to `ls`, you are encouraged to check how performance was affected for the workloads listed below. Feel free to add other workloads to the list that we should improve / make sure not to regress. Run `cargo build --release` before benchmarking after you make a change! ## Simple recursive ls - Get a large tree, for example linux kernel source tree. - Benchmark simple recursive ls with hyperfine: `hyperfine --warmup 2 "target/release/coreutils ls -R tree > /dev/null"`. ## Recursive ls with all and long options - Same tree as above - Benchmark recursive ls with -al -R options with hyperfine: `hyperfine --warmup 2 "target/release/coreutils ls -al -R tree > /dev/null"`. ## Comparing with GNU ls Hyperfine accepts multiple commands to run and will compare them. To compare performance with GNU ls duplicate the string you passed to hyperfine but remove the `target/release/coreutils` bit from it. 
Example: `hyperfine --warmup 2 "target/release/coreutils ls -al -R tree > /dev/null"` becomes `hyperfine --warmup 2 "target/release/coreutils ls -al -R tree > /dev/null" "ls -al -R tree > /dev/null"` (This assumes GNU ls is installed as `ls`) This can also be used to compare with version of ls built before your changes to ensure your change does not regress this. Here is a `bash` script for doing this comparison: ```shell #!/bin/bash cargo build --no-default-features --features ls --release args="$@" hyperfine "ls $args" "target/release/coreutils ls $args" ``` **Note**: No localization is currently implemented. This means that the comparison above is not really fair. We can fix this by setting `LC_ALL=C`, so GNU `ls` can ignore localization. ## Checking system call count - Another thing to look at would be system calls count using strace (on linux) or equivalent on other operating systems. - Example: `strace -c target/release/coreutils ls -al -R tree` ## Cargo Flamegraph With Cargo Flamegraph you can easily make a flamegraph of `ls`: ```shell cargo flamegraph --cmd coreutils -- ls [additional parameters] ``` However, if the `-R` option is given, the output becomes pretty much useless due to recursion. We can fix this by merging all the direct recursive calls with `uniq`, below is a `bash` script that does this. 
```shell #!/bin/bash cargo build --release --no-default-features --features ls perf record target/release/coreutils ls "$@" perf script | uniq | inferno-collapse-perf | inferno-flamegraph > flamegraph.svg ``` coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/Cargo.toml000066400000000000000000000021111504311601400245250ustar00rootroot00000000000000# spell-checker:ignore tzdb zoneinfo [package] name = "uu_ls" description = "ls ~ (uutils) display directory contents" repository = "https://github.com/uutils/coreutils/tree/main/src/uu/ls" version.workspace = true authors.workspace = true license.workspace = true homepage.workspace = true keywords.workspace = true categories.workspace = true edition.workspace = true readme.workspace = true [lints] workspace = true [lib] path = "src/ls.rs" [dependencies] ansi-width = { workspace = true } clap = { workspace = true, features = ["env"] } glob = { workspace = true } hostname = { workspace = true } lscolors = { workspace = true } selinux = { workspace = true, optional = true } terminal_size = { workspace = true } thiserror = { workspace = true } uucore = { workspace = true, features = [ "colors", "entries", "format", "fs", "fsext", "fsxattr", "parser", "quoting-style", "time", "version-cmp", ] } uutils_term_grid = { workspace = true } fluent = { workspace = true } [[bin]] name = "ls" path = "src/main.rs" [features] feat_selinux = ["selinux", "uucore/selinux"] coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/LICENSE000077700000000000000000000000001504311601400254502../../../LICENSEustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/locales/000077500000000000000000000000001504311601400242245ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/locales/en-US.ftl000066400000000000000000000177731504311601400257010ustar00rootroot00000000000000ls-about = List directory contents. Ignore files and directories starting with a '.' 
by default ls-usage = ls [OPTION]... [FILE]... ls-after-help = The TIME_STYLE argument can be full-iso, long-iso, iso, locale or +FORMAT. FORMAT is interpreted like in date. Also the TIME_STYLE environment variable sets the default style to use. # Error messages ls-error-invalid-line-width = invalid line width: {$width} ls-error-general-io = general io error: {$error} ls-error-cannot-access-no-such-file = cannot access '{$path}': No such file or directory ls-error-cannot-access-operation-not-permitted = cannot access '{$path}': Operation not permitted ls-error-cannot-open-directory-permission-denied = cannot open directory '{$path}': Permission denied ls-error-cannot-open-file-permission-denied = cannot open file '{$path}': Permission denied ls-error-cannot-open-directory-bad-descriptor = cannot open directory '{$path}': Bad file descriptor ls-error-unknown-io-error = unknown io error: '{$path}', '{$error}' ls-error-invalid-block-size = invalid --block-size argument {$size} ls-error-dired-and-zero-incompatible = --dired and --zero are incompatible ls-error-not-listing-already-listed = {$path}: not listing already-listed directory ls-error-invalid-time-style = invalid --time-style argument {$style} Possible values are: - [posix-]full-iso - [posix-]long-iso - [posix-]iso - [posix-]locale - +FORMAT (e.g., +%H:%M) for a 'date'-style format For more information try --help # Help messages ls-help-print-help = Print help information. ls-help-set-display-format = Set the display format. ls-help-display-files-columns = Display the files in columns. ls-help-display-detailed-info = Display detailed information. ls-help-list-entries-rows = List entries in rows instead of in columns. ls-help-assume-tab-stops = Assume tab stops at each COLS instead of 8 ls-help-list-entries-commas = List entries separated by commas. ls-help-list-entries-nul = List entries separated by ASCII NUL characters. 
ls-help-generate-dired-output = generate output designed for Emacs' dired (Directory Editor) mode ls-help-hyperlink-filenames = hyperlink file names WHEN ls-help-list-one-file-per-line = List one file per line. ls-help-long-format-no-group = Long format without group information. Identical to --format=long with --no-group. ls-help-long-no-owner = Long format without owner information. ls-help-long-numeric-uid-gid = -l with numeric UIDs and GIDs. ls-help-set-quoting-style = Set quoting style. ls-help-literal-quoting-style = Use literal quoting style. Equivalent to `--quoting-style=literal` ls-help-escape-quoting-style = Use escape quoting style. Equivalent to `--quoting-style=escape` ls-help-c-quoting-style = Use C quoting style. Equivalent to `--quoting-style=c` ls-help-replace-control-chars = Replace control characters with '?' if they are not escaped. ls-help-show-control-chars = Show control characters 'as is' if they are not escaped. ls-help-show-time-field = Show time in : access time (-u): atime, access, use; change time (-t): ctime, status. modification time: mtime, modification. birth time: birth, creation; ls-help-time-change = If the long listing format (e.g., -l, -o) is being used, print the status change time (the 'ctime' in the inode) instead of the modification time. When explicitly sorting by time (--sort=time or -t) or when not using a long listing format, sort according to the status change time. ls-help-time-access = If the long listing format (e.g., -l, -o) is being used, print the status access time instead of the modification time. When explicitly sorting by time (--sort=time or -t) or when not using a long listing format, sort according to the access time. ls-help-hide-pattern = do not list implied entries matching shell PATTERN (overridden by -a or -A) ls-help-ignore-pattern = do not list implied entries matching shell PATTERN ls-help-ignore-backups = Ignore entries which end with ~. 
ls-help-sort-by-field = Sort by : name, none (-U), time (-t), size (-S), extension (-X) or width ls-help-sort-by-size = Sort by file size, largest first. ls-help-sort-by-time = Sort by modification time (the 'mtime' in the inode), newest first. ls-help-sort-by-version = Natural sort of (version) numbers in the filenames. ls-help-sort-by-extension = Sort alphabetically by entry extension. ls-help-sort-none = Do not sort; list the files in whatever order they are stored in the directory. This is especially useful when listing very large directories, since not doing any sorting can be noticeably faster. ls-help-dereference-all = When showing file information for a symbolic link, show information for the file the link references rather than the link itself. ls-help-dereference-dir-args = Do not follow symlinks except when they link to directories and are given as command line arguments. ls-help-dereference-args = Do not follow symlinks except when given as command line arguments. ls-help-no-group = Do not show group in long format. ls-help-author = Show author in long format. On the supported platforms, the author always matches the file owner. ls-help-all-files = Do not ignore hidden files (files with names that start with '.'). ls-help-almost-all = In a directory, do not ignore all file names that start with '.', only ignore '.' and '..'. ls-help-directory = Only list the names of directories, rather than listing directory contents. This will not follow symbolic links unless one of `--dereference-command-line (-H)`, `--dereference (-L)`, or `--dereference-command-line-symlink-to-dir` is specified. ls-help-human-readable = Print human readable file sizes (e.g. 1K 234M 56G). ls-help-kibibytes = default to 1024-byte blocks for file system usage; used only with -s and per directory totals ls-help-si = Print human readable file sizes using powers of 1000 instead of 1024. 
ls-help-block-size = scale sizes by BLOCK_SIZE when printing them ls-help-print-inode = print the index number of each file ls-help-reverse-sort = Reverse whatever the sorting method is e.g., list files in reverse alphabetical order, youngest first, smallest first, or whatever. ls-help-recursive = List the contents of all directories recursively. ls-help-terminal-width = Assume that the terminal is COLS columns wide. ls-help-allocation-size = print the allocated size of each file, in blocks ls-help-color-output = Color output based on file type. ls-help-indicator-style = Append indicator with style WORD to entry names: none (default), slash (-p), file-type (--file-type), classify (-F) ls-help-classify = Append a character to each file name indicating the file type. Also, for regular files that are executable, append '*'. The file type indicators are '/' for directories, '@' for symbolic links, '|' for FIFOs, '=' for sockets, '>' for doors, and nothing for regular files. when may be omitted, or one of: none - Do not classify. This is the default. auto - Only classify if standard output is a terminal. always - Always classify. Specifying --classify and no when is equivalent to --classify=always. This will not follow symbolic links listed on the command line unless the --dereference-command-line (-H), --dereference (-L), or --dereference-command-line-symlink-to-dir options are specified. ls-help-file-type = Same as --classify, but do not append '*' ls-help-slash-directories = Append / indicator to directories. 
ls-help-time-style = time/date format with -l; see TIME_STYLE below ls-help-full-time = like -l --time-style=full-iso ls-help-context = print any security context of each file ls-help-group-directories-first = group directories before files; can be augmented with a --sort option, but any use of --sort=none (-U) disables grouping ls-invalid-quoting-style = {$program}: Ignoring invalid value of environment variable QUOTING_STYLE: '{$style}' ls-invalid-columns-width = ignoring invalid width in environment variable COLUMNS: {$width} ls-invalid-ignore-pattern = Invalid pattern for ignore: {$pattern} ls-invalid-hide-pattern = Invalid pattern for hide: {$pattern} ls-total = total {$size} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/locales/fr-FR.ftl000066400000000000000000000224671504311601400256620ustar00rootroot00000000000000ls-about = Lister le contenu des répertoires. Ignorer les fichiers et répertoires commençant par un '.' par défaut ls-usage = ls [OPTION]... [FICHIER]... ls-after-help = L'argument TIME_STYLE peut être full-iso, long-iso, iso, locale ou +FORMAT. FORMAT est interprété comme dans date. De plus, la variable d'environnement TIME_STYLE définit le style par défaut à utiliser. 
# Messages d'erreur ls-error-invalid-line-width = largeur de ligne invalide : {$width} ls-error-general-io = erreur d'E/S générale : {$error} ls-error-cannot-access-no-such-file = impossible d'accéder à '{$path}' : Aucun fichier ou répertoire de ce type ls-error-cannot-access-operation-not-permitted = impossible d'accéder à '{$path}' : Opération non autorisée ls-error-cannot-open-directory-permission-denied = impossible d'ouvrir le répertoire '{$path}' : Permission refusée ls-error-cannot-open-file-permission-denied = impossible d'ouvrir le fichier '{$path}' : Permission refusée ls-error-cannot-open-directory-bad-descriptor = impossible d'ouvrir le répertoire '{$path}' : Mauvais descripteur de fichier ls-error-unknown-io-error = erreur d'E/S inconnue : '{$path}', '{$error}' ls-error-invalid-block-size = argument --block-size invalide {$size} ls-error-dired-and-zero-incompatible = --dired et --zero sont incompatibles ls-error-not-listing-already-listed = {$path} : ne liste pas un répertoire déjà listé ls-error-invalid-time-style = argument --time-style invalide {$style} Les valeurs possibles sont : - [posix-]full-iso - [posix-]long-iso - [posix-]iso - [posix-]locale - +FORMAT (e.g., +%H:%M) pour un format de type 'date' Pour plus d'informations, essayez --help # Messages d'aide ls-help-print-help = Afficher les informations d'aide. ls-help-set-display-format = Définir le format d'affichage. ls-help-display-files-columns = Afficher les fichiers en colonnes. ls-help-display-detailed-info = Afficher des informations détaillées. ls-help-list-entries-rows = Lister les entrées en lignes au lieu de colonnes. ls-help-assume-tab-stops = Supposer des arrêts de tabulation à chaque COLS au lieu de 8 ls-help-list-entries-commas = Lister les entrées séparées par des virgules. ls-help-list-entries-nul = Lister les entrées séparées par des caractères NUL ASCII. 
ls-help-generate-dired-output = générer une sortie conçue pour le mode dired (Directory Editor) d'Emacs ls-help-hyperlink-filenames = créer des hyperliens pour les noms de fichiers QUAND ls-help-list-one-file-per-line = Lister un fichier par ligne. ls-help-long-format-no-group = Format long sans informations de groupe. Identique à --format=long avec --no-group. ls-help-long-no-owner = Format long sans informations de propriétaire. ls-help-long-numeric-uid-gid = -l avec des UID et GID numériques. ls-help-set-quoting-style = Définir le style de citation. ls-help-literal-quoting-style = Utiliser le style de citation littéral. Équivalent à `--quoting-style=literal` ls-help-escape-quoting-style = Utiliser le style de citation d'échappement. Équivalent à `--quoting-style=escape` ls-help-c-quoting-style = Utiliser le style de citation C. Équivalent à `--quoting-style=c` ls-help-replace-control-chars = Remplacer les caractères de contrôle par '?' s'ils ne sont pas échappés. ls-help-show-control-chars = Afficher les caractères de contrôle 'tels quels' s'ils ne sont pas échappés. ls-help-show-time-field = Afficher l'heure dans : heure d'accès (-u) : atime, access, use ; heure de changement (-t) : ctime, status. heure de modification : mtime, modification. heure de création : birth, creation ; ls-help-time-change = Si le format de liste long (par ex., -l, -o) est utilisé, afficher l'heure de changement de statut (le 'ctime' dans l'inode) au lieu de l'heure de modification. Lors du tri explicite par heure (--sort=time ou -t) ou lors de l'absence de format de liste long, trier selon l'heure de changement de statut. ls-help-time-access = Si le format de liste long (par ex., -l, -o) est utilisé, afficher l'heure d'accès au statut au lieu de l'heure de modification. Lors du tri explicite par heure (--sort=time ou -t) ou lors de l'absence de format de liste long, trier selon l'heure d'accès. 
ls-help-hide-pattern = ne pas lister les entrées implicites correspondant au MOTIF shell (surchargé par -a ou -A) ls-help-ignore-pattern = ne pas lister les entrées implicites correspondant au MOTIF shell ls-help-ignore-backups = Ignorer les entrées qui se terminent par ~. ls-help-sort-by-field = Trier par : name, none (-U), time (-t), size (-S), extension (-X) ou width ls-help-sort-by-size = Trier par taille de fichier, le plus grand en premier. ls-help-sort-by-time = Trier par heure de modification (le 'mtime' dans l'inode), le plus récent en premier. ls-help-sort-by-version = Tri naturel des numéros (de version) dans les noms de fichiers. ls-help-sort-by-extension = Trier alphabétiquement par extension d'entrée. ls-help-sort-none = Ne pas trier ; lister les fichiers dans l'ordre où ils sont stockés dans le répertoire. Ceci est particulièrement utile lors de l'affichage de très grands répertoires, car ne pas trier peut être sensiblement plus rapide. ls-help-dereference-all = Lors de l'affichage d'informations de fichier pour un lien symbolique, afficher les informations pour le fichier référencé par le lien plutôt que le lien lui-même. ls-help-dereference-dir-args = Ne pas suivre les liens symboliques sauf quand ils pointent vers des répertoires et sont donnés comme arguments de ligne de commande. ls-help-dereference-args = Ne pas suivre les liens symboliques sauf quand ils sont donnés comme arguments de ligne de commande. ls-help-no-group = Ne pas afficher le groupe en format long. ls-help-author = Afficher l'auteur en format long. Sur les plateformes supportées, l'auteur correspond toujours au propriétaire du fichier. ls-help-all-files = Ne pas ignorer les fichiers cachés (fichiers dont les noms commencent par '.'). ls-help-almost-all = Dans un répertoire, ne pas ignorer tous les noms de fichiers qui commencent par '.', ignorer seulement '.' et '..'. ls-help-directory = Lister seulement les noms des répertoires, plutôt que le contenu des répertoires. 
Ceci ne suivra pas les liens symboliques à moins qu'une des options `--dereference-command-line (-H)`, `--dereference (-L)`, ou `--dereference-command-line-symlink-to-dir` soit spécifiée. ls-help-human-readable = Afficher les tailles de fichiers lisibles par l'homme (par ex. 1K 234M 56G). ls-help-kibibytes = par défaut aux blocs de 1024 octets pour l'utilisation du système de fichiers ; utilisé seulement avec -s et par totaux de répertoire ls-help-si = Afficher les tailles de fichiers lisibles par l'homme utilisant des puissances de 1000 au lieu de 1024. ls-help-block-size = dimensionner les tailles par BLOCK_SIZE lors de l'affichage ls-help-print-inode = afficher le numéro d'index de chaque fichier ls-help-reverse-sort = Inverser quelle que soit la méthode de tri, par ex., lister les fichiers en ordre alphabétique inverse, le plus jeune en premier, le plus petit en premier, ou autre. ls-help-recursive = Lister le contenu de tous les répertoires récursivement. ls-help-terminal-width = Supposer que le terminal a COLS colonnes de largeur. ls-help-allocation-size = afficher la taille allouée de chaque fichier, en blocs ls-help-color-output = Colorier la sortie basée sur le type de fichier. ls-help-indicator-style = Ajouter un indicateur avec le style WORD aux noms d'entrée : none (par défaut), slash (-p), file-type (--file-type), classify (-F) ls-help-classify = Ajouter un caractère à chaque nom de fichier indiquant le type de fichier. Aussi, pour les fichiers réguliers qui sont exécutables, ajouter '*'. Les indicateurs de type de fichier sont '/' pour les répertoires, '@' pour les liens symboliques, '|' pour les FIFOs, '=' pour les sockets, '>' pour les portes, et rien pour les fichiers réguliers. when peut être omis, ou un de : none - Ne pas classifier. C'est la valeur par défaut. auto - Classifier seulement si la sortie standard est un terminal. always - Toujours classifier. Spécifier --classify et aucun when est équivalent à --classify=always. 
Ceci ne suivra pas les liens symboliques listés sur la ligne de commande à moins que les options --dereference-command-line (-H), --dereference (-L), ou --dereference-command-line-symlink-to-dir soient spécifiées. ls-help-file-type = Identique à --classify, mais ne pas ajouter '*' ls-help-slash-directories = Ajouter l'indicateur / aux répertoires. ls-help-time-style = format de date/heure avec -l ; voir TIME_STYLE ci-dessous ls-help-full-time = comme -l --time-style=full-iso ls-help-context = afficher tout contexte de sécurité de chaque fichier ls-help-group-directories-first = grouper les répertoires avant les fichiers ; peut être augmenté avec une option --sort, mais toute utilisation de --sort=none (-U) désactive le groupement ls-invalid-quoting-style = {$program} : Ignorer la valeur invalide de la variable d'environnement QUOTING_STYLE : '{$style}' ls-invalid-columns-width = ignorer la largeur invalide dans la variable d'environnement COLUMNS : {$width} ls-invalid-ignore-pattern = Motif invalide pour ignore : {$pattern} ls-invalid-hide-pattern = Motif invalide pour hide : {$pattern} ls-total = total {$size} coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/src/000077500000000000000000000000001504311601400233715ustar00rootroot00000000000000coreutils-cf796758bea4bac29a4b1f71e805b9dc606eb50c/src/uu/ls/src/colors.rs000066400000000000000000000167261504311601400252540ustar00rootroot00000000000000// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. use super::PathData; use super::get_metadata_with_deref_opt; use lscolors::{Indicator, LsColors, Style}; use std::ffi::OsString; use std::fs::{DirEntry, Metadata}; use std::io::{BufWriter, Stdout}; /// We need this struct to be able to store the previous style. 
/// This because we need to check the previous value in case we don't need /// the reset pub(crate) struct StyleManager<'a> { /// last style that is applied, if `None` that means reset is applied. pub(crate) current_style: Option