openssh-sftp-client-0.15.0/.cargo_vcs_info.json0000644000000001360000000000100150240ustar { "git": { "sha1": "6995b4a48ed0913cda1c9cb76df271db21f6e0bd" }, "path_in_vcs": "" }openssh-sftp-client-0.15.0/.github/dependabot.yml000064400000000000000000000004361046102023000200070ustar 00000000000000version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "daily" - package-ecosystem: "cargo" directory: "/" schedule: interval: "daily" openssh-sftp-client-0.15.0/.github/workflows/cache-cleanup.yml000064400000000000000000000015271046102023000224310ustar 00000000000000name: Cleanup caches for closed PRs on: schedule: - cron: "0 17 * * *" workflow_dispatch: jobs: cleanup: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Cleanup run: | gh extension install actions/gh-actions-cache export REPO="${{ github.repository }}" gh pr list --state closed -L 20 --json number --jq '.[]|.number' | ( while IFS='$\n' read -r closed_pr; do BRANCH="refs/pull/${closed_pr}/merge" ./cleanup-cache.sh done ) gh pr list --state closed -L 20 --json headRefName --jq '.[]|.headRefName' | ( while IFS='$\n' read -r branch; do BRANCH="$branch" ./cleanup-cache.sh done ) env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} openssh-sftp-client-0.15.0/.github/workflows/publish.yml000064400000000000000000000011221046102023000213760ustar 00000000000000name: Release-plz permissions: pull-requests: write contents: write on: push: branches: - main jobs: release-plz: name: Release-plz runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable - name: Run release-plz uses: MarcoIeni/release-plz-action@v0.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} openssh-sftp-client-0.15.0/.github/workflows/rust.yml000064400000000000000000000102561046102023000207350ustar 00000000000000name: Rust concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true env: RUSTFLAGS: -Dwarnings RUSTDOCFLAGS: -Dwarnings RUST_BACKTRACE: 1 CARGO_TERM_COLOR: always CARGO_INCREMENTAL: 0 CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse on: merge_group: push: branches: - main paths-ignore: - "README.md" - "LICENSE" - ".gitignore" pull_request: paths-ignore: - "README.md" - "LICENSE" - ".gitignore" jobs: os-check: runs-on: ubuntu-latest name: os check on ${{ matrix.target }} strategy: fail-fast: false matrix: include: - { target: x86_64-pc-windows-msvc, args: "--exclude-features openssh", } - { target: x86_64-apple-darwin } - { target: x86_64-unknown-linux-gnu } steps: - uses: actions/checkout@v4 - name: Install toolchain run: | rustup toolchain install stable --no-self-update --profile minimal --target ${{ matrix.target }} - uses: taiki-e/install-action@v2 with: tool: cargo-hack - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - run: | cargo hack check --feature-powerset --target ${{ matrix.target }} ${{ matrix.args }} check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: submodules: "recursive" - name: Install toolchain run: | rustup toolchain install stable --component rustfmt,clippy --no-self-update --profile minimal rustup toolchain install nightly --no-self-update --profile minimal - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - run: 
./check.sh build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: submodules: "recursive" - name: Install toolchain run: rustup toolchain install stable --no-self-update --profile minimal - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - name: Compile tests run: cargo test --all-features --workspace --no-run - name: Test ssh connectivity run: | # Wait for startup of openssh-server timeout 15 ./wait_for_sshd_start_up.sh chmod 600 .test-key mkdir /tmp/openssh-rs ssh -i .test-key -v -p 2222 -l test-user 127.0.0.1 -o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/tmp/openssh-rs/known_hosts whoami - name: Set up ssh-agent run: | eval $(ssh-agent) echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV cat .test-key | ssh-add - - name: Run tests run: ./run_tests.sh env: XDG_RUNTIME_DIR: /tmp - name: ssh container log run: docker logs $(docker ps | grep openssh-server | awk '{print $1}') if: ${{ failure() }} - run: docker exec $(docker ps | grep openssh-server | awk '{print $1}') ls -R /config/logs/ if: ${{ failure() }} - run: docker exec $(docker ps | grep openssh-server | awk '{print $1}') cat /config/logs/openssh/current name: ssh server log if: ${{ failure() }} services: openssh: image: linuxserver/openssh-server:amd64-latest ports: - 2222:2222 env: USER_NAME: test-user PUBLIC_KEY: |- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 DOCKER_MODS: linuxserver/mods:openssh-server-ssh-tunnel # Dummy job to have a stable name for the "all tests pass" requirement tests-pass: name: Tests pass needs: - os-check - check - build if: always() # always run even if dependencies fail runs-on: ubuntu-latest steps: # fail if ANY dependency has failed or cancelled - if: "contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')" run: exit 1 - run: exit 0 openssh-sftp-client-0.15.0/.gitignore000064400000000000000000000001011046102023000155740ustar 00000000000000# IDE and editor .vscode .idea **/target **/.DS_Store Cargo.lockopenssh-sftp-client-0.15.0/.gitmodules000064400000000000000000000001741046102023000157730ustar 00000000000000[submodule "openssh-portable"] path = sftp-test-common/openssh-portable url = https://github.com/openssh/openssh-portable openssh-sftp-client-0.15.0/.test-key000064400000000000000000000006331046102023000153640ustar 00000000000000-----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW QyNTUxOQAAACBsx7ytqSrUpWVz/bT2DjgW/8Z9CIgvYizEt+eQGFDwuwAAAJjouprb6Lqa 2wAAAAtzc2gtZWQyNTUxOQAAACBsx7ytqSrUpWVz/bT2DjgW/8Z9CIgvYizEt+eQGFDwuw AAAEDTnuB9lLA0WslBBEjIBwvrwvX/gI5L/cMS9tv1Rl53x2zHvK2pKtSlZXP9tPYOOBb/ xn0IiC9iLMS355AYUPC7AAAAEmpvbkBkZWZlbmVzdHJhdGlvbgECAw== -----END OPENSSH PRIVATE KEY----- openssh-sftp-client-0.15.0/.test-key.pub000064400000000000000000000001321046102023000161430ustar 00000000000000ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 test-key openssh-sftp-client-0.15.0/CHANGELOG.md000064400000000000000000000032651046102023000154330ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
## [Unreleased] ## [0.7.0](https://github.com/openssh-rust/openssh-sftp-client/compare/openssh-sftp-client-lowlevel-v0.6.0...openssh-sftp-client-lowlevel-v0.7.0) - 2024-08-10 ### Other - updated the following local packages: openssh-sftp-error ## [0.14.6](https://github.com/openssh-rust/openssh-sftp-client/compare/openssh-sftp-client-v0.14.5...openssh-sftp-client-v0.14.6) - 2024-07-25 ### Other - Fix panic when flush_interval is set to 0 ([#136](https://github.com/openssh-rust/openssh-sftp-client/pull/136)) ## [0.14.5](https://github.com/openssh-rust/openssh-sftp-client/compare/openssh-sftp-client-v0.14.4...openssh-sftp-client-v0.14.5) - 2024-07-11 ### Other - Implement `Sftp::from_clonable_session*` ([#131](https://github.com/openssh-rust/openssh-sftp-client/pull/131)) ## [0.14.4](https://github.com/openssh-rust/openssh-sftp-client/compare/openssh-sftp-client-v0.14.3...openssh-sftp-client-v0.14.4) - 2024-06-27 ### Other - Run rust.yml on merge_queue ([#128](https://github.com/openssh-rust/openssh-sftp-client/pull/128)) - Impl `Default` for `Permissions` ([#126](https://github.com/openssh-rust/openssh-sftp-client/pull/126)) - Use release-plz in publish.yml ([#125](https://github.com/openssh-rust/openssh-sftp-client/pull/125)) - Support setting time in MetaDataBuilder ([#124](https://github.com/openssh-rust/openssh-sftp-client/pull/124)) The changelog for this crate is kept in the project's Rust documentation in the changelog module. openssh-sftp-client-0.15.0/Cargo.lock0000644000000552060000000000100130070ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "arc-swap" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "autocfg" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "awaitable" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70af449c9a763cb655c6a1e5338b42d99c67190824ff90658c1e30be844c0775" dependencies = [ "awaitable-error", "cfg-if", ] [[package]] name = "awaitable-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5b3469636cdf8543cceab175efca534471f36eee12fb8374aba00eb5e7e7f8a" [[package]] name = "backtrace" version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] 
name = "cc" version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "504bdec147f2cc13c8b57ed9401fd8a147cc66b67ad5cb241394244f2c947549" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "concurrent_arena" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c529c2d4ecc249ae15d317c9a8b9e7d86f87e80d4417de6cfa8f4d6030f37daf" dependencies = [ "arc-swap", "parking_lot", "triomphe", ] [[package]] name = "derive_destructure2" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64b697ac90ff296f0fc031ee5a61c7ac31fb9fff50e3fb32873b09223613fc0c" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "diff" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "errno" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "fastrand" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "futures-core" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-macro" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "futures-sink" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-core", "futures-macro", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "gimli" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "libc" version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "linux-raw-sys" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miniz_oxide" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", ] [[package]] name = "non-zero-byte-slice" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89daa1daa11c9df05d1181bcd0936d8066f8543144d77b09808eb78d65e38024" dependencies = [ "serde", ] [[package]] name = "num-derive" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "object" version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssh" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f27389e5da64700a3efb7f925e442f824f6e3d4b1c27f75e115a92ad3aecbb1" dependencies = [ "libc", "once_cell", "openssh-mux-client", "shell-escape", "tempfile", "thiserror", "tokio", ] [[package]] name = "openssh-mux-client" version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f56c1f51de60268d69b883d7daef8d3c7865e8a3861b470c833d58bb2bb6dce" dependencies = [ "cfg-if", "non-zero-byte-slice", "once_cell", "openssh-mux-client-error", "sendfd", "serde", "ssh_format", "tokio", "tokio-io-utility", "typed-builder", ] [[package]] name = "openssh-mux-client-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "015d49e592f4d2a456033e6ec48036588e8e58c8908424b1bc40994de58ae648" dependencies = [ "ssh_format_error", "thiserror", ] [[package]] name = "openssh-sftp-client" version = "0.15.0" dependencies = [ "bytes", "derive_destructure2", "futures-core", "futures-util", "once_cell", "openssh", "openssh-sftp-client-lowlevel", "openssh-sftp-error", "pin-project", "pretty_assertions", "scopeguard", "tempfile", "tokio", "tokio-io-utility", "tokio-util", "tracing", ] [[package]] name = "openssh-sftp-client-lowlevel" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12a5728cca10461b3842f715fb3d8acd7ef11e2635a0aff0eb5eceab2e1f1e4a" dependencies = [ "awaitable", "bytes", "concurrent_arena", "derive_destructure2", "openssh-sftp-error", "openssh-sftp-protocol", "pin-project", "tokio", "tokio-io-utility", ] 
[[package]] name = "openssh-sftp-error" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87ee6d9ea97e1fd08435222dd1eab26aa6a86674f0ff1a9583b48c98d1ef4801" dependencies = [ "awaitable-error", "openssh", "openssh-sftp-protocol-error", "ssh_format_error", "thiserror", "tokio", ] [[package]] name = "openssh-sftp-protocol" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf38532d784978966f95d241226223823f351d5bb2a4bebcf6b20b9cb1e393e0" dependencies = [ "bitflags", "num-derive", "num-traits", "openssh-sftp-protocol-error", "serde", "ssh_format", "vec-strings", ] [[package]] name = "openssh-sftp-protocol-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0719269eb3f037866ae07ec89cb44ed2c1d63b72b2390cef8e1aa3016a956ff8" dependencies = [ "serde", "thiserror", "vec-strings", ] [[package]] name = "parking_lot" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets", ] [[package]] name = "pin-project" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pretty_assertions" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ "diff", "yansi", ] [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags", ] [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" version = "0.38.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sendfd" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604b71b8fc267e13bb3023a2c901126c8f349393666a6d98ac1ae5729b701798" dependencies = [ "libc", "tokio", ] [[package]] name = "serde" version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "shell-escape" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f" [[package]] name = "signal-hook-registry" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "ssh_format" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24ab31081d1c9097c327ec23550858cb5ffb4af6b866c1ef4d728455f01f3304" dependencies = [ "bytes", "serde", "ssh_format_error", ] [[package]] name = "ssh_format_error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be3c6519de7ca611f71ef7e8a56eb57aa1c818fecb5242d0a0f39c83776c210c" dependencies = [ "serde", ] [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "thin-vec" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "tokio" version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-io-utility" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d672654d175710e52c7c41f6aec77c62b3c0954e2a7ebce9049d1e94ed7c263" dependencies = [ "bytes", "tokio", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "tokio-util" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] [[package]] name = "triomphe" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" dependencies = [ "arc-swap", "serde", "stable_deref_trait", ] [[package]] name = "typed-builder" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06fbd5b8de54c5f7c91f6fe4cebb949be2125d7758e630bb58b1d831dbce600" dependencies = [ "typed-builder-macro", ] [[package]] name = "typed-builder-macro" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9534daa9fd3ed0bd911d462a37f172228077e7abf18c18a5f67199d959205f8" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "unicode-ident" version = "1.0.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "vec-strings" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8509489e2a7ee219522238ad45fd370bec6808811ac15ac6b07453804e77659" dependencies = [ "serde", "thin-vec", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "yansi" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" openssh-sftp-client-0.15.0/Cargo.toml0000644000000047160000000000100130320ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also 
rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.64" name = "openssh-sftp-client" version = "0.15.0" authors = ["Jiahao XU "] build = false autobins = false autoexamples = false autotests = false autobenches = false description = "Highlevel API used to communicate with openssh sftp server." readme = "README.md" keywords = [ "ssh", "multiplex", "async", "network", "sftp", ] categories = [ "asynchronous", "network-programming", "api-bindings", ] license = "MIT" repository = "https://github.com/openssh-rust/openssh-sftp-client" [package.metadata.docs.rs] features = [ "openssh", "tracing", ] rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "openssh_sftp_client" path = "src/lib.rs" [[example]] name = "openssh" path = "examples/openssh.rs" [[test]] name = "highlevel" path = "tests/highlevel.rs" [dependencies.bytes] version = "1.2.1" [dependencies.derive_destructure2] version = "0.1.0" [dependencies.futures-core] version = "0.3.28" [dependencies.once_cell] version = "1.9.0" [dependencies.openssh] version = "0.11.0" optional = true default-features = false [dependencies.openssh-sftp-client-lowlevel] version = "0.7.0" [dependencies.openssh-sftp-error] version = "0.5.0" [dependencies.pin-project] version = "1.0.10" [dependencies.scopeguard] version = "1.1.0" [dependencies.tokio] version = "1.11.0" features = [ "sync", "time", "rt", "macros", ] [dependencies.tokio-io-utility] version = "0.7.4" [dependencies.tokio-util] version = "0.7.8" [dependencies.tracing] version = "0.1.37" optional = true [dev-dependencies.futures-util] version = "0.3.28" [dev-dependencies.openssh] version = "0.11.0" features = ["native-mux"] [dev-dependencies.pretty_assertions] version = "1.1.0" [dev-dependencies.tempfile] version = "3.1.0" [dev-dependencies.tokio] version = "1.11.0" features = [ "rt", "macros", ] [features] __ci-tests = [] openssh = [ "dep:openssh", "openssh-sftp-error/openssh", ] tracing = ["dep:tracing"] openssh-sftp-client-0.15.0/Cargo.toml.orig000064400000000000000000000031411046102023000165020ustar 00000000000000[package] name = "openssh-sftp-client" version = "0.15.0" edition = "2021" rust-version = "1.64" authors = ["Jiahao XU "] license = "MIT" description = "Highlevel API used to communicate with openssh sftp server." repository = "https://github.com/openssh-rust/openssh-sftp-client" keywords = ["ssh", "multiplex", "async", "network", "sftp"] categories = ["asynchronous", "network-programming", "api-bindings"] [workspace] members = [ "sftp-test-common", "openssh-sftp-error", "openssh-sftp-client-lowlevel", ] [features] openssh = ["dep:openssh", "openssh-sftp-error/openssh"] tracing = ["dep:tracing"] # This feature is for internal testing only!!! 
__ci-tests = []

[package.metadata.docs.rs]
features = ["openssh", "tracing"]
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
openssh-sftp-error = { version = "0.5.0", path = "openssh-sftp-error" }
openssh-sftp-client-lowlevel = { version = "0.7.0", path = "openssh-sftp-client-lowlevel" }

once_cell = "1.9.0"
tokio = { version = "1.11.0", features = ["sync", "time", "rt", "macros"] }
tracing = { version = "0.1.37", optional = true }

derive_destructure2 = "0.1.0"
bytes = "1.2.1"
tokio-io-utility = "0.7.4"
tokio-util = "0.7.8"
pin-project = "1.0.10"
futures-core = "0.3.28"
scopeguard = "1.1.0"

openssh = { version = "0.11.0", default-features = false, optional = true }

[dev-dependencies]
tokio = { version = "1.11.0", features = ["rt", "macros"] }
tempfile = "3.1.0"
pretty_assertions = "1.1.0"
sftp-test-common = { path = "sftp-test-common" }
futures-util = "0.3.28"
openssh = { version = "0.11.0", features = ["native-mux"] }
openssh-sftp-client-0.15.0/LICENSE000064400000000000000000000020521046102023000146200ustar 00000000000000
MIT License

Copyright (c) 2021 Jiahao XU

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
openssh-sftp-client-0.15.0/README.md000064400000000000000000000022741046102023000151000ustar 00000000000000
# openssh-sftp-client

[![Rust](https://github.com/openssh-rust/openssh-sftp-client/actions/workflows/rust.yml/badge.svg)](https://github.com/openssh-rust/openssh-sftp-client/actions/workflows/rust.yml)
[![crate.io downloads](https://img.shields.io/crates/d/openssh-sftp-client)](https://crates.io/crates/openssh-sftp-client)
[![crate.io version](https://img.shields.io/crates/v/openssh-sftp-client)](https://crates.io/crates/openssh-sftp-client)
[![docs](https://docs.rs/openssh-sftp-client/badge.svg)](https://docs.rs/openssh-sftp-client)

openssh-sftp-client implements [sftp v3] according to [`openssh-portable/sftp-client.c`] in Rust using `tokio` and `serde`.

It exposes high-level `async` APIs that model closely after `std::fs` and are easy to use.

[sftp v3]: https://www.openssh.com/txt/draft-ietf-secsh-filexfer-02.txt
[`openssh-portable/sftp-client.c`]: https://github.com/openssh/openssh-portable/blob/19b3d846f06697c85957ab79a63454f57f8e22d6/sftp-client.c

## Extensions

This crate supports the following extensions:
- limits
- expand path
- fsync
- hardlink
- posix rename
- copy-data

## How to run tests

For macOS, please install the latest rsync from Homebrew.
``` ./run_tests.sh ``` openssh-sftp-client-0.15.0/TODO.md000064400000000000000000000001261046102023000147020ustar 00000000000000 - Use [`buf-list`](https://docs.rs/buf-list) to archive zero-copy using `Sink` trait openssh-sftp-client-0.15.0/check.sh000075500000000000000000000006151046102023000152320ustar 00000000000000#!/bin/bash set -euxo pipefail cd "$(dirname "$(realpath "$0")")" cargo fmt --all -- --check cargo clippy --all-features --all --no-deps cargo test --doc --all-features export RUSTDOCFLAGS="--cfg docsrs" exec cargo +nightly doc \ --no-deps \ --features openssh,tracing \ --package openssh-sftp-client \ --package openssh-sftp-error \ --package openssh-sftp-client-lowlevel openssh-sftp-client-0.15.0/cleanup-cache.sh000075500000000000000000000011271046102023000166440ustar 00000000000000#!/bin/bash set -uxo pipefail REPO="${REPO?}" BRANCH="${BRANCH?}" while true; do echo "Fetching list of cache key for $BRANCH" cacheKeysForPR="$(gh actions-cache list -R "$REPO" -B "$BRANCH" -L 100 | cut -f 1 )" if [ -z "$cacheKeysForPR" ]; then break fi ## Setting this to not fail the workflow while deleting cache keys. set +e echo "Deleting caches..." for cacheKey in $cacheKeysForPR do echo Removing "$cacheKey" gh actions-cache delete "$cacheKey" -R "$REPO" -B "$BRANCH" --confirm done done echo "Done cleaning up $BRANCH" openssh-sftp-client-0.15.0/examples/openssh.rs000064400000000000000000000012771046102023000174660ustar 00000000000000//! Expected `Cargo.toml` dependencies configuration: //! //! ``` //! [dependencies] //! openssh-sftp-client = { version = "0.13.6", features = ["openssh"] } //! tokio = { version = "1.11.0", features = ["rt", "macros"] } //! openssh = { version = "0.9.9", features = ["native-mux"] } //! ``` use std::error::Error; use openssh::{KnownHosts, Session as SshSession}; use openssh_sftp_client::Sftp; #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let ssh_session = SshSession::connect_mux("ssh://user@hostname:port", KnownHosts::Add).await?; let sftp = Sftp::from_session(ssh_session, Default::default()).await?; sftp.close().await?; Ok(()) } openssh-sftp-client-0.15.0/run_tests.sh000075500000000000000000000002421046102023000161770ustar 00000000000000#!/bin/bash set -euxo pipefail cd "$(dirname "$(realpath "$0")")" export RUNTIME_DIR=${XDG_RUNTIME_DIR:-/tmp} exec cargo test --all-features --workspace "$@" openssh-sftp-client-0.15.0/src/auxiliary.rs000064400000000000000000000124051046102023000167620ustar 00000000000000use crate::{lowlevel::Extensions, SftpAuxiliaryData}; use std::sync::atomic::{AtomicU64, AtomicU8, AtomicUsize, Ordering}; use once_cell::sync::OnceCell; use tokio::{runtime::Handle, sync::Notify}; use tokio_util::sync::CancellationToken; #[derive(Debug, Copy, Clone)] pub(super) struct Limits { pub(super) read_len: u32, pub(super) write_len: u32, } #[derive(Debug)] pub(super) struct ConnInfo { pub(super) limits: Limits, pub(super) extensions: Extensions, } #[derive(Debug)] pub(super) struct Auxiliary { pub(super) conn_info: OnceCell, /// cancel_token is used to cancel `Awaitable*Future` /// when the read_task/flush_task has failed. pub(super) cancel_token: CancellationToken, /// flush_end_notify is used to avoid unnecessary wakeup /// in flush_task. pub(super) flush_end_notify: Notify, /// `Notify::notify_one` is called if /// pending_requests == max_pending_requests. pub(super) flush_immediately: Notify, /// There can be at most `u32::MAX` pending requests, since each request /// requires a request id that is 32 bits. 
    pub(super) pending_requests: AtomicUsize,
    pub(super) max_pending_requests: u16,

    pub(super) read_end_notify: Notify,
    pub(super) requests_to_read: AtomicUsize,

    /// 0 means no shutdown is requested
    /// 1 means the read task should shut down
    /// 2 means the flush task should shut down
    pub(super) shutdown_stage: AtomicU8,

    /// Number of handles that can issue new requests.
    ///
    /// Use AtomicU64 in case the user keeps creating new [`sftp::SftpHandle`]
    /// and then [`std::mem::forget`] them.
    pub(super) active_user_count: AtomicU64,

    pub(super) auxiliary_data: SftpAuxiliaryData,

    pub(super) tokio_compat_file_write_limit: usize,

    pub(super) tokio_handle: Handle,
}

impl Auxiliary {
    pub(super) fn new(
        max_pending_requests: u16,
        auxiliary_data: SftpAuxiliaryData,
        tokio_compat_file_write_limit: usize,
        tokio_handle: Handle,
    ) -> Self {
        Self {
            conn_info: OnceCell::new(),

            cancel_token: CancellationToken::new(),

            flush_end_notify: Notify::new(),
            flush_immediately: Notify::new(),

            pending_requests: AtomicUsize::new(0),
            max_pending_requests,

            read_end_notify: Notify::new(),
            requests_to_read: AtomicUsize::new(0),

            shutdown_stage: AtomicU8::new(0),

            active_user_count: AtomicU64::new(1),

            auxiliary_data,

            tokio_compat_file_write_limit,

            tokio_handle,
        }
    }

    pub(super) fn wakeup_flush_task(&self) {
        // Must increment requests_to_read first, since
        // flush_task might wake up read_end once it is done flushing.
        self.requests_to_read.fetch_add(1, Ordering::Relaxed);

        let pending_requests = self.pending_requests.fetch_add(1, Ordering::Relaxed);

        self.flush_end_notify.notify_one();

        // Use `==` here to avoid unnecessary wakeup of flush_task.
        if pending_requests == self.max_pending_requests() {
            self.flush_immediately.notify_one();
        }
    }

    fn conn_info(&self) -> &ConnInfo {
        self.conn_info
            .get()
            .expect("auxiliary.conn_info shall be initialized by sftp::Sftp::new")
    }

    pub(super) fn extensions(&self) -> Extensions {
        // Since writing to conn_info is only done in `Sftp::new`,
        // reading these fields should never block.
        self.conn_info().extensions
    }

    pub(super) fn limits(&self) -> Limits {
        // Since writing to conn_info is only done in `Sftp::new`,
        // reading these fields should never block.
        self.conn_info().limits
    }

    pub(super) fn max_pending_requests(&self) -> usize {
        self.max_pending_requests as usize
    }

    pub(super) fn order_shutdown(&self) {
        // Order the shutdown of read_task.
        //
        // Once it shuts down, it will automatically order
        // shutdown of flush_task.
        self.shutdown_stage.store(1, Ordering::Relaxed);

        self.flush_immediately.notify_one();
        self.flush_end_notify.notify_one();
    }

    /// Triggers the flushing of the internal buffer in `flush_task`.
    ///
    /// If there are pending requests, then flushing would happen immediately.
    ///
    /// If not, then the next time a request is queued in the write buffer, it
    /// will be immediately flushed.
    pub(super) fn trigger_flushing(&self) {
        self.flush_immediately.notify_one();
    }

    /// Returns the number of pending requests in the write buffer.
    pub(super) fn get_pending_requests(&self) -> usize {
        self.pending_requests.load(Ordering::Relaxed)
    }

    pub(super) fn inc_active_user_count(&self) {
        self.active_user_count.fetch_add(1, Ordering::Relaxed);
    }

    pub(super) fn dec_active_user_count(&self) {
        if self.active_user_count.fetch_sub(1, Ordering::Relaxed) == 1 {
            // self.active_user_count is now equal to 0, ready for shutdown.
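            // (`fetch_sub` returns the value *before* the subtraction, so
            // seeing 1 here means this call released the last active user
            // handle.)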
self.order_shutdown() } } pub(super) fn tokio_compat_file_write_limit(&self) -> usize { self.tokio_compat_file_write_limit } pub(super) fn tokio_handle(&self) -> &Handle { &self.tokio_handle } } openssh-sftp-client-0.15.0/src/cache.rs000064400000000000000000000064221046102023000160200ustar 00000000000000use crate::{cancel_error, Auxiliary, Error, Id, WriteEnd}; use std::{ future::Future, ops::{Deref, DerefMut}, pin::Pin, }; #[derive(Debug)] pub(super) struct WriteEndWithCachedId { pub(super) inner: WriteEnd, id: Option, } impl Clone for WriteEndWithCachedId { fn clone(&self) -> Self { self.inner.get_auxiliary().inc_active_user_count(); Self { inner: self.inner.clone(), id: None, } } } impl Drop for WriteEndWithCachedId { fn drop(&mut self) { self.inner.get_auxiliary().dec_active_user_count(); } } impl Deref for WriteEndWithCachedId { type Target = WriteEnd; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for WriteEndWithCachedId { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl WriteEndWithCachedId { pub(super) fn new(write_end: WriteEnd) -> Self { Self { inner: write_end, id: None, } } pub(super) fn get_id_mut(&mut self) -> Id { self.id .take() .unwrap_or_else(|| self.inner.create_response_id()) } pub(super) fn cache_id_mut(&mut self, id: Id) { if self.id.is_none() { self.id = Some(id); } } /// * `f` - the future must be cancel safe. pub(super) async fn cancel_if_task_failed(&mut self, future: F) -> Result where F: Future> + Send, E: Into + Send, { let future = async move { future.await.map_err(Into::into) }; tokio::pin!(future); self.cancel_if_task_failed_inner(future).await } pub(super) async fn cancel_if_task_failed_inner( &mut self, future: Pin<&mut (dyn Future> + Send)>, ) -> Result { let cancel_err = || Err(cancel_error()); let auxiliary = self.inner.get_auxiliary(); let cancel_token = &auxiliary.cancel_token; if cancel_token.is_cancelled() { return cancel_err(); } tokio::select! { biased; _ = cancel_token.cancelled() => cancel_err(), res = future => res, } } pub(super) fn get_auxiliary(&self) -> &Auxiliary { self.inner.get_auxiliary() } } impl WriteEndWithCachedId { pub(super) async fn send_request(&mut self, f: Func) -> Result where Func: FnOnce(&mut WriteEnd, Id) -> Result + Send, F: Future> + Send + 'static, { let id = self.get_id_mut(); let write_end = &mut self.inner; let future = f(write_end, id)?; tokio::pin!(future); async fn inner( this: &mut WriteEndWithCachedId, future: Pin<&mut (dyn Future> + Send)>, ) -> Result { // Requests is already added to write buffer, so wakeup // the `flush_task` if necessary. this.get_auxiliary().wakeup_flush_task(); let (id, ret) = this.cancel_if_task_failed(future).await?; this.cache_id_mut(id); Ok(ret) } inner(self, future).await } } openssh-sftp-client-0.15.0/src/changelog.rs000064400000000000000000000257471046102023000167170ustar 00000000000000#[allow(unused_imports)] use crate::*; #[doc(hidden)] pub mod unreleased {} /// # Added /// - `Permissions::from` allows converting a u16 octet to a Permissions struct /// - Allows chain call for Permissions::set_* methods pub mod v_0_14_3 {} /// # Added /// - [`Sftp::from_session_with_check_connection`] for checking connection pub mod v_0_14_2 {} /// # Changed /// - Bump dependency [`openssh-sftp-error`] to v0.4.0. pub mod v_0_14_1 {} /// # Changed /// - Bump optional dependency [`openssh`] to v0.10.0. pub mod v_0_14_0 {} /// # Changed /// - Set [`SftpOptions::tokio_compat_file_write_limit`] to 640KB by default. 
pub mod v_0_13_10 {}

/// # Fixed
/// Invalid response id after closing file caused by the change in v0.13.8
pub mod v_0_13_9 {}

/// # Fixed
/// `Drop` implementation for `OwnedHandle` to make sure it never panics
/// if the tokio runtime is not available.
pub mod v_0_13_8 {}

/// # Fixed
///
/// `Drop` implementations to make sure they never panic
/// if the tokio runtime is not available.
///
/// - [`file::TokioCompatFile`]
/// - [`fs::ReadDir`]
pub mod v_0_13_7 {}

/// ## Added
/// - Add new option [`SftpOptions::tokio_compat_file_write_limit()`] to set the write buffer limit
/// for [`file::TokioCompatFile`].
pub mod v_0_13_6 {}

/// ## Fixed
/// - Fixed #80 [`file::TokioCompatFile`]: Incorrect behavior regarding `AsyncSeek`
/// - Fixed [`file::TokioCompatFile`]: leave the error of exceeding the buffer len in `consume` to be handled by `BytesMut`
/// - Fixed [`file::TokioCompatFile`]: Implement `PinnedDrop` to poll read and write futures to the end,
/// otherwise it would drop the internal request ids too early, causing the read task to fail
/// when it should not fail.
/// - Fixed [`fs::ReadDir`]: Implement `PinnedDrop` to poll the future stored internally,
/// otherwise it would drop the internal request ids too early, causing the read task to fail
/// when it should not fail.
/// ## Added
/// - Add new fn [`Sftp::support_expand_path`] to check if the server supports the expand-path extension
/// - Add new fn [`Sftp::support_fsync`] to check if the server supports the fsync extension
/// - Add new fn [`Sftp::support_hardlink`] to check if the server supports the hardlink extension
/// - Add new fn [`Sftp::support_posix_rename`] to check if the server supports the posix-rename extension
/// - Add new fn [`Sftp::support_copy`] to check if the server supports the copy extension
pub mod v_0_13_5 {}

/// ## Improved
/// - Fix: change the drop of `OwnedHandle` to wait for the close request in order to
/// avoid an invalid response id after closing the file
/// - Add log for dropping `OwnedHandle`
///
/// ## Other changes
/// - Add MSRV 1.64 in `Cargo.toml`
/// - Bump `edition` to 2021 in `Cargo.toml`
pub mod v_0_13_4 {}

/// ## Improved
/// - If `Sftp` is created using `Sftp::from_session`, then dropping it would
/// also drop the `openssh::RemoteChild` and `openssh::Session` immediately
/// after sftp graceful shutdown is done to prevent any leak.
pub mod v_0_13_3 {}

/// ## Added
/// - `OpensshSession`, which is enabled by feature `openssh`
/// - `SftpAuxiliaryData::ArcedOpensshSession`, which is enabled by feature `openssh`
/// - `Sftp::from_session`, which is enabled by feature `openssh`
/// - Logging support, enabled by feature `tracing`
///
/// ## Improved
/// - Keep waiting on other tasks on failure in [`Sftp::close`]
/// to collect as much information about the failure as possible.
/// - Add [`error::RecursiveError3`] for returning 3 errors in [`Sftp::close`]
pub mod v_0_13_2 {}

/// ## Added
/// - [`SftpAuxiliaryData::PinnedFuture`]
pub mod v_0_13_1 {}

/// ## Fixed
/// - Fixed #62 [`fs::ReadDir`]: Return all entries instead of just a subset.
///
/// ## Added
/// - [`file::File::as_mut_file`]
/// - [`SftpAuxiliaryData`]
/// - [`Sftp::new_with_auxiliary`]
///
/// ## Changed
/// - Remove lifetime from [`file::OpenOptions`].
/// - Remove lifetime from [`file::File`].
/// - Remove lifetime from [`file::TokioCompatFile`].
/// - Remove lifetime from [`fs::Fs`].
/// - Remove lifetime from [`fs::Dir`].
/// - Remove lifetime from [`fs::ReadDir`].
/// - Remove lifetime `'s` from [`fs::DirBuilder`].
/// - [`fs::ReadDir`] now implements `futures_core::{Stream, FusedStream}` /// instead of the {iterator, slice}-based interface. /// - Remove `file::File::sftp`. /// - Remove `file::TokioCompatFile::close`. /// - [`file::TokioCompatFile::fill_buf`] now takes `self: Pin<&mut Self>` /// instead of `&mut self`. /// - [`file::TokioCompatFile::read_into_buffer`] now takes /// `self: Pin<&mut Self>` instead of `&mut self`. /// /// ## Other changes /// - Clarify [`file::File::read`]. /// - Clarify [`file::File::write`]. /// - Clarify [`file::File::write_vectorized`]. /// - Clarify [`file::File::write_zero_copy`]. pub mod v_0_13_0 {} /// ## Fixed /// - Fix `read_task`: Order shutdown of flush_task on err/panic pub mod v_0_12_2 {} /// ## Fixed /// - `Sftp::new` now returns future that implemens `Send` pub mod v_0_12_1 {} /// ## Changed /// - Ensure stable api: Create newtype wrapper of UnixTimeStamp (#53) /// /// ## Other /// - Bump [`openssh-sftp-error`] to v0.3.0 pub mod v_0_12_0 {} /// ## Other change /// /// Bump dep /// - `ssh_format` to v0.13.0 /// - `openssh_sftp_protocol` to v0.22.0 /// - `openssh_sftp_error` to v0.2.0 /// - `openssh_sftp_client_lowlevel` to v0.3.0 pub mod v_0_11_3 {} /// ## Other change /// - Bump `openssh_sftp_client_lowlevel` version and optimize /// write buffer implementation. /// - Optimize: Reduce monomorphization /// - Optimize latency: `create_flush_task` first in `Sftp::new` /// and write the hello msg ASAP. pub mod v_0_11_2 {} /// Nothing has changed from [`v_0_11_0_rc_3`]. /// /// ## Other changes /// - Dependency [`bytes`] bump to v1.2.0 for its optimizations. pub mod v_0_11_1 {} /// Nothing has changed from [`v_0_11_0_rc_3`]. pub mod v_0_11_0 {} /// ## Changed /// - Rename `SftpOptions::write_end_buffer_size` to /// [`SftpOptions::requests_buffer_size`] and improve its /// documentation. /// - Rename `SftpOptions::read_end_buffer_size` to /// [`SftpOptions::responses_buffer_size`] and improve its /// documentation. /// /// ## Removed /// - `SftpOptions::max_read_len` /// - `SftpOptions::max_write_len` pub mod v_0_11_0_rc_3 {} /// ## Fixed /// - Changelog of v0.11.0-rc.1 /// /// ## Added /// - [`file::File::copy_all_to`] to copy until EOF. /// This function is extracted from the old `copy_to` /// function. /// - [`file::TokioCompatFile::capacity`] /// - [`file::TokioCompatFile::reserve`] /// - [`file::TokioCompatFile::shrink_to`] /// /// ## Changed /// - [`file::File::copy_to`] now takes [`std::num::NonZeroU64`] /// instead of `u64`. /// - [`file::TokioCompatFile::with_capacity`] does not take /// `max_buffer_len` anymore. /// /// ## Removed /// - `file::DEFAULT_MAX_BUFLEN` pub mod v_0_11_0_rc_2 {} /// ## Added /// - `SftpOptions::write_end_buffer_size` /// - `SftpOptions::read_end_buffer_size` /// /// ## Changed /// - All types now does not have generic parameter `W` /// except for `Sftp::new` /// /// ## Removed /// - Unused re-export `CancellationToken`. /// - Backward compatibility alias `file::TokioCompactFile`. /// - `Sftp::try_flush` /// - `Sftp::flush` /// - `file::File::max_write_len` /// - `file::File::max_read_len` /// - `file::File::max_buffered_write` /// /// ## Moved /// - `lowlevel` is now moved to be another crate [openssh_sftp_client_lowlevel]. /// - All items in `highlevel` is now moved into root. 
pub mod v_0_11_0_rc_1 {} /// ## Fixed /// - Changelog of v0.10.2 pub mod v_0_10_3 {} /// ## Added /// - Async fn `lowlevel::WriteEnd::send_copy_data_request` /// - Async fn `highlevel::file::File::copy_to` pub mod v_0_10_2 {} /// ## Fixed /// - Changelog of v0.10.0 /// - Changelog of v0.9.0 pub mod v0_10_1 {} /// ## Added /// - Export mod `highlevel::file` /// - Export mod `highlevel::fs` /// - Export mod `highlevel::metadata` /// /// ## Changed /// - `lowlevel::WriteEnd` now requires `W: AsyncWrite + Unpin` /// - `lowlevel::SharedData` now requires `W: AsyncWrite + Unpin` /// - `lowlevel::ReadEnd` now requires `W: AsyncWrite + Unpin` /// - `lowlevel::connect` now requires `W: AsyncWrite + Unpin` /// - `lowlevel::connect_with_auxiliary` now requires `W: AsyncWrite + Unpin` /// - All types in `highlevel` now requires `W: AsyncWrite + Unpin` /// except for /// - the re-exported type `highlevel::CancellationToken` /// - `highlevel::SftpOptions` /// - `highlevel::fs::DirEntry` /// - `highlevel::fs::ReadDir` /// /// ## Removed /// - Trait `Writer`. /// - `lowlevel::WriteEnd::send_write_request_direct_atomic` /// - `lowlevel::WriteEnd::send_write_request_direct_atomic_vectored` /// - `lowlevel::WriteEnd::send_write_request_direct_atomic_vectored2` /// - Export of `highlevel::file::TokioCompactFile` /// - Export of `highlevel::file::TokioCompatFile` /// - Export of `highlevel::file::DEFAULT_BUFLEN` /// - Export of `highlevel::file::DEFAULT_MAX_BUFLEN` /// - Export of `highlevel::file::File` /// - Export of `highlevel::file::OpenOptions` /// - Export of `highlevel::fs::DirEntry` /// - Export of `highlevel::fs::ReadDir` /// - Export of `highlevel::fs::Dir` /// - Export of `highlevel::fs::DirBuilder` /// - Export of `highlevel::fs::Fs` /// - Export of `highlevel::metadata::FileType` /// - Export of `highlevel::metadata::MetaData` /// - Export of `highlevel::metadata::MetaDataBuilder` /// - Export of `highlevel::metadata::Permissions` pub mod v0_10_0 {} /// ## Removed /// - `highlevel::Sftp::get_cancellation_token` /// - `highlevel::Sftp::max_write_len` /// - `highlevel::Sftp::max_read_len` /// - `highlevel::Sftp::max_buffered_write` pub mod v_0_9_0 {} /// ## Added /// - Type `highlevel::TokioCompatFile` to Replace /// `highlevel::TokioCompactFile`. pub mod v0_8_3 {} /// ## Fixed /// - Fix possible panic in `highlevel::max_atomic_write_len` pub mod v0_8_2 {} /// ## Added /// - Reexport `highlevel::CancellationToken`. pub mod v0_8_1 {} /// ## Added /// - Associated function `highlevel::FileType::is_fifo`. /// - Associated function `highlevel::FileType::is_socket`. /// - Associated function `highlevel::FileType::is_block_device`. /// - Associated function `highlevel::FileType::is_char_device`. /// - Trait `Writer`. /// /// ## Changed /// - Replace all use of `tokio_pipe::PipeRead` with generic bound /// `tokio::io::AsyncRead` + `Unpin`. /// - Replace all use of `tokio_pipe::PipeWrite` with generic bound /// `Writer`. /// - Replace constant `highlevel::MAX_ATOMIC_WRITE_LEN` with /// non-`const` function `highlevel::max_atomic_write_len`. /// - Associated function `highlevel::Sftp::fs` now only takes `&self` /// as parameter. /// /// ## Removed /// - Trait `std::os::unix::fs::FileTypeExt` implementation for /// `highlevel::FileType`. /// - Trait `std::os::unix::fs::PermissionsExt` implementation for /// `highlevel::Permissions`. /// - Associated function `lowlevel::WriteEnd::send_write_request_direct`. /// - Associated function /// `lowlevel::WriteEnd::send_write_request_direct_vectored`. 
pub mod v0_8_0 {} openssh-sftp-client-0.15.0/src/file/mod.rs000064400000000000000000000571371046102023000164640ustar 00000000000000use crate::{ lowlevel::{self, CreateFlags, Data, Extensions, FileAttrs, Handle}, metadata::{MetaData, MetaDataBuilder, Permissions}, Auxiliary, Error, Id, OwnedHandle, SftpHandle, WriteEnd, WriteEndWithCachedId, }; use std::{ borrow::Cow, cmp::min, convert::TryInto, future::Future, io::{self, IoSlice}, num::NonZeroU64, path::Path, pin::Pin, task::{Context, Poll}, }; use bytes::{Buf, Bytes, BytesMut}; use tokio::io::AsyncSeek; use tokio_io_utility::IoSliceExt; mod tokio_compat_file; pub use tokio_compat_file::{TokioCompatFile, DEFAULT_BUFLEN}; mod utility; use utility::{take_bytes, take_io_slices}; /// Options and flags which can be used to configure how a file is opened. #[derive(Debug, Clone)] pub struct OpenOptions { sftp: SftpHandle, options: lowlevel::OpenOptions, truncate: bool, create: bool, create_new: bool, } impl OpenOptions { pub(super) fn new(sftp: SftpHandle) -> Self { Self { sftp, options: lowlevel::OpenOptions::new(), truncate: false, create: false, create_new: false, } } /// Sets the option for read access. /// /// This option, when true, will indicate that the file /// should be read-able if opened. pub fn read(&mut self, read: bool) -> &mut Self { self.options = self.options.read(read); self } /// Sets the option for write access. /// /// This option, when true, will indicate that the file /// should be write-able if opened. /// /// If the file already exists, any write calls on it /// will overwrite its contents, without truncating it. pub fn write(&mut self, write: bool) -> &mut Self { self.options = self.options.write(write); self } /// Sets the option for the append mode. /// /// This option, when `true`, means that writes will append /// to a file instead of overwriting previous contents. /// /// Note that setting `.write(true).append(true)` has /// the same effect as setting only `.append(true)`. /// /// For most filesystems, the operating system guarantees that /// all writes are atomic: no writes get mangled because /// another process writes at the same time. /// /// Note that this function doesn’t create the file if it doesn’t exist. /// Use the [`OpenOptions::create`] method to do so. pub fn append(&mut self, append: bool) -> &mut Self { self.options = self.options.append(append); self } /// Sets the option for truncating a previous file. /// /// If a file is successfully opened with this option /// set it will truncate the file to `0` length if it already exists. /// /// Only take effect if [`OpenOptions::create`] is set to `true`. pub fn truncate(&mut self, truncate: bool) -> &mut Self { self.truncate = truncate; self } /// Sets the option to create a new file, or open it if it already exists. pub fn create(&mut self, create: bool) -> &mut Self { self.create = create; self } /// Sets the option to create a new file, failing if it already exists. /// /// No file is allowed to exist at the target location, /// also no (dangling) symlink. /// /// In this way, if the call succeeds, the file returned /// is guaranteed to be new. /// /// This option is useful because it is atomic. /// /// Otherwise between checking whether a file exists and /// creating a new one, the file may have been /// created by another process (a TOCTOU race condition / attack). /// /// If `.create_new(true)` is set, `.create()` and `.truncate()` are ignored. 
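    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an already-established `sftp: Sftp`
    /// session and that `Sftp::options` is the usual way to obtain an
    /// [`OpenOptions`] (error handling elided):
    ///
    /// ```no_run
    /// # async fn example(sftp: openssh_sftp_client::Sftp) -> Result<(), openssh_sftp_client::Error> {
    /// let mut file = sftp
    ///     .options()
    ///     .write(true)
    ///     .create_new(true)
    ///     .open("upload.part")
    ///     .await?;
    /// file.write_all(b"created exclusively").await?;
    /// file.close().await?;
    /// # Ok(())
    /// # }
    /// ```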
    pub fn create_new(&mut self, create_new: bool) -> &mut Self {
        self.create_new = create_new;
        self
    }

    /// Open a remote file at `path` using the options specified by `self`.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn open(&self, path: impl AsRef<Path>) -> Result<File, Error> {
        OpenOptions::open_inner(
            self.options,
            self.truncate,
            self.create,
            self.create_new,
            path.as_ref(),
            self.sftp.clone().write_end(),
        )
        .await
    }

    pub(super) async fn open_inner(
        options: lowlevel::OpenOptions,
        truncate: bool,
        create: bool,
        create_new: bool,
        filename: &Path,
        mut write_end: WriteEndWithCachedId,
    ) -> Result<File, Error> {
        let filename = Cow::Borrowed(filename);

        let params = if create || create_new {
            let flags = if create_new {
                CreateFlags::Excl
            } else if truncate {
                CreateFlags::Trunc
            } else {
                CreateFlags::None
            };

            options.create(filename, flags, FileAttrs::new())
        } else {
            options.open(filename)
        };

        let handle = write_end
            .send_request(|write_end, id| {
                Ok(write_end.send_open_file_request(id, params)?.wait())
            })
            .await?;

        Ok(File {
            inner: OwnedHandle::new(write_end, handle),
            is_readable: options.get_read(),
            is_writable: options.get_write(),
            need_flush: false,
            offset: 0,
        })
    }
}

/// A reference to the remote file.
///
/// Cloning a [`File`] instance returns a new one that shares the same
/// underlying file handle as the existing instance, while reads, writes
/// and seeks can be performed independently.
///
/// If you want a file that implements [`tokio::io::AsyncRead`] and
/// [`tokio::io::AsyncWrite`], check out [`TokioCompatFile`].
#[derive(Debug)]
pub struct File {
    inner: OwnedHandle,

    is_readable: bool,
    is_writable: bool,
    need_flush: bool,
    offset: u64,
}

impl Clone for File {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
            is_writable: self.is_writable,
            is_readable: self.is_readable,
            need_flush: false,
            offset: self.offset,
        }
    }
}

impl File {
    fn auxiliary(&self) -> &Auxiliary {
        self.inner.get_auxiliary()
    }

    fn max_write_len_impl(&self) -> u32 {
        self.get_auxiliary().limits().write_len
    }

    /// The maximum amount of bytes that can be read in one request.
    /// Reads larger than that are split into multiple requests.
    pub(super) fn max_read_len_impl(&self) -> u32 {
        self.get_auxiliary().limits().read_len
    }
}

#[cfg(feature = "__ci-tests")]
impl File {
    /// The maximum amount of bytes that can be written in one request.
    /// Writes larger than that are split into multiple requests.
    pub fn max_write_len(&self) -> u32 {
        self.max_write_len_impl()
    }

    /// The maximum amount of bytes that can be read in one request.
    /// Reads larger than that are split into multiple requests.
    pub fn max_read_len(&self) -> u32 {
        self.max_read_len_impl()
    }
}

impl File {
    fn get_auxiliary(&self) -> &Auxiliary {
        self.inner.get_auxiliary()
    }

    fn get_inner(&mut self) -> (&mut WriteEnd, Cow<'_, Handle>) {
        (&mut self.inner.write_end, Cow::Borrowed(&self.inner.handle))
    }

    fn check_for_writable_io_err(&self) -> Result<(), io::Error> {
        if !self.is_writable {
            Err(io::Error::new(
                io::ErrorKind::Other,
                "This file is not opened for writing",
            ))
        } else {
            Ok(())
        }
    }

    fn check_for_writable(&self) -> Result<(), Error> {
        self.check_for_writable_io_err()?;
        Ok(())
    }

    async fn send_writable_request<Func, F, R>(&mut self, f: Func) -> Result<R, Error>
    where
        Func: FnOnce(&mut WriteEnd, Cow<'_, Handle>, Id) -> Result<F, Error> + Send,
        F: Future<Output = Result<(R, Id), Error>> + Send + 'static,
        R: Send,
    {
        self.check_for_writable()?;

        self.inner.send_request(f).await
    }

    fn check_for_readable_io_err(&self) -> Result<(), io::Error> {
        if !self.is_readable {
            Err(io::Error::new(
                io::ErrorKind::Other,
                "This file is not opened for reading",
            ))
        } else {
            Ok(())
        }
    }

    fn check_for_readable(&self) -> Result<(), Error> {
        self.check_for_readable_io_err()?;
        Ok(())
    }

    async fn send_readable_request<Func, F, R>(&mut self, f: Func) -> Result<R, Error>
    where
        Func: FnOnce(&mut WriteEnd, Cow<'_, Handle>, Id) -> Result<F, Error> + Send,
        F: Future<Output = Result<(R, Id), Error>> + Send + 'static,
        R: Send,
    {
        self.check_for_readable()?;

        self.inner.send_request(f).await
    }

    /// Close the [`File`], send the close request
    /// if this is the last reference.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn close(self) -> Result<(), Error> {
        self.inner.close().await
    }

    /// Change the metadata of a file or a directory.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn set_metadata(&mut self, metadata: MetaData) -> Result<(), Error> {
        let attrs = metadata.into_inner();

        self.send_writable_request(|write_end, handle, id| {
            Ok(write_end.send_fsetstat_request(id, handle, attrs)?.wait())
        })
        .await
    }

    /// Truncates or extends the underlying file, updating the size
    /// of this file to become `size`.
    ///
    /// If the size is less than the current file’s size, then the file
    /// will be shrunk.
    ///
    /// If it is greater than the current file’s size, then the file
    /// will be extended to `size` and have all of the intermediate data
    /// filled in with 0s.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn set_len(&mut self, size: u64) -> Result<(), Error> {
        let mut attrs = FileAttrs::new();
        attrs.set_size(size);

        self.set_metadata(MetaData::new(attrs)).await
    }

    /// Attempts to sync all OS-internal metadata to disk.
    ///
    /// This function will attempt to ensure that all in-core data
    /// reaches the filesystem before returning.
    ///
    /// # Precondition
    ///
    /// Requires extension `fsync`.
    ///
    /// You can check it with [`Sftp::support_fsync`](crate::sftp::Sftp::support_fsync).
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn sync_all(&mut self) -> Result<(), Error> {
        if !self
            .get_auxiliary()
            .extensions()
            .contains(Extensions::FSYNC)
        {
            return Err(Error::UnsupportedExtension(&"fsync"));
        }

        self.send_writable_request(|write_end, handle, id| {
            Ok(write_end.send_fsync_request(id, handle)?.wait())
        })
        .await
    }

    /// Changes the permissions on the underlying file.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
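    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `file` is opened for both reading and
    /// writing. Mutating helpers on `Permissions` are elided here; the
    /// permissions are simply fetched via [`File::metadata`] and written back:
    ///
    /// ```no_run
    /// # async fn example(file: &mut openssh_sftp_client::file::File) -> Result<(), openssh_sftp_client::Error> {
    /// if let Some(perm) = file.metadata().await?.permissions() {
    ///     file.set_permissions(perm).await?;
    /// }
    /// # Ok(())
    /// # }
    /// ```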
    pub async fn set_permissions(&mut self, perm: Permissions) -> Result<(), Error> {
        let metadata = MetaDataBuilder::new().permissions(perm).create();

        self.set_metadata(metadata).await
    }

    /// Queries metadata about the underlying file.
    pub async fn metadata(&mut self) -> Result<MetaData, Error> {
        self.send_readable_request(|write_end, handle, id| {
            Ok(write_end.send_fstat_request(id, handle)?.wait())
        })
        .await
        .map(MetaData::new)
    }

    /// * `n` - number of bytes to read in
    ///
    /// If the [`File`] has reached EOF or `n == 0`, then `None` is returned.
    ///
    /// NOTE that the returned buffer might be smaller than `n`.
    pub async fn read(&mut self, n: u32, buffer: BytesMut) -> Result<Option<BytesMut>, Error> {
        if n == 0 {
            return Ok(None);
        }

        let offset = self.offset;
        let n: u32 = min(n, self.max_read_len_impl());

        let data = self
            .send_readable_request(|write_end, handle, id| {
                Ok(write_end
                    .send_read_request(id, handle, offset, n, Some(buffer))?
                    .wait())
            })
            .await?;

        let buffer = match data {
            Data::Buffer(buffer) => buffer,
            Data::Eof => return Ok(None),
            _ => std::unreachable!("Expect Data::Buffer"),
        };

        // Adjust offset
        Pin::new(self).start_seek(io::SeekFrom::Current(n as i64))?;

        Ok(Some(buffer))
    }

    /// Write data into the file.
    ///
    /// NOTE that this API might only write part of the `buf`.
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
        if buf.is_empty() {
            return Ok(0);
        }

        let offset = self.offset;

        // sftp v3 cannot send more than self.max_write_len() data at once.
        let max_write_len = self.max_write_len_impl();

        let n: u32 = buf
            .len()
            .try_into()
            .map(|n| min(n, max_write_len))
            .unwrap_or(max_write_len);

        // sftp v3 cannot send more than self.max_write_len() data at once.
        let buf = &buf[..(n as usize)];

        self.send_writable_request(|write_end, handle, id| {
            Ok(write_end
                .send_write_request_buffered(id, handle, offset, Cow::Borrowed(buf))?
                .wait())
        })
        .await?;

        // Adjust offset
        Pin::new(self).start_seek(io::SeekFrom::Current(n as i64))?;

        Ok(n as usize)
    }

    /// Write from multiple buffers at once.
    ///
    /// NOTE that this API might only write part of the `bufs`.
    pub async fn write_vectorized(&mut self, bufs: &[IoSlice<'_>]) -> Result<usize, Error> {
        if bufs.is_empty() {
            return Ok(0);
        }

        // sftp v3 cannot send more than self.max_write_len() data at once.
        let max_write_len = self.max_write_len_impl();

        let (n, bufs, buf) = if let Some(res) = take_io_slices(bufs, max_write_len as usize) {
            res
        } else {
            return Ok(0);
        };

        let n: u32 = n.try_into().unwrap();

        let buffers = [bufs, &buf];

        let offset = self.offset;

        self.send_writable_request(|write_end, handle, id| {
            Ok(write_end
                .send_write_request_buffered_vectored2(id, handle, offset, &buffers)?
                .wait())
        })
        .await?;

        // Adjust offset
        Pin::new(self).start_seek(io::SeekFrom::Current(n as i64))?;

        Ok(n as usize)
    }

    /// Zero copy write.
    ///
    /// NOTE that this API might only write part of the `bytes_slice`.
    pub async fn write_zero_copy(&mut self, bytes_slice: &[Bytes]) -> Result<usize, Error> {
        if bytes_slice.is_empty() {
            return Ok(0);
        }

        // sftp v3 cannot send more than self.max_write_len() data at once.
        let max_write_len = self.max_write_len_impl();

        let (n, bufs, buf) = if let Some(res) = take_bytes(bytes_slice, max_write_len as usize) {
            res
        } else {
            return Ok(0);
        };

        let buffers = [bufs, &buf];

        let offset = self.offset;

        self.send_writable_request(|write_end, handle, id| {
            Ok(write_end
                .send_write_request_zero_copy2(id, handle, offset, &buffers)?
                .wait())
        })
        .await?;

        // Adjust offset
        Pin::new(self).start_seek(io::SeekFrom::Current(n.try_into().unwrap()))?;

        Ok(n)
    }

    /// * `n` - number of bytes to read in.
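    /// * `buffer` - buffer to append the read data to; any bytes it
    ///   already contains are preserved.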
    ///
    /// If `n == 0` or EOF is reached, then `buffer` is returned unchanged.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn read_all(
        &mut self,
        mut n: usize,
        mut buffer: BytesMut,
    ) -> Result<BytesMut, Error> {
        if n == 0 {
            return Ok(buffer);
        }

        buffer.reserve(n);

        while n > 0 {
            let len = buffer.len();

            if let Some(bytes) = self
                .read(n.try_into().unwrap_or(u32::MAX), buffer.split_off(len))
                .await?
            {
                n -= bytes.len();
                buffer.unsplit(bytes);
            } else {
                return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "").into());
            }
        }

        Ok(buffer)
    }

    /// Write entire `buf`.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn write_all(&mut self, mut buf: &[u8]) -> Result<(), Error> {
        while !buf.is_empty() {
            let n = self.write(buf).await?;
            buf = &buf[n..];
        }

        Ok(())
    }

    /// Write entire `bufs`.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn write_all_vectorized(
        &mut self,
        mut bufs: &mut [IoSlice<'_>],
    ) -> Result<(), Error> {
        if bufs.is_empty() {
            return Ok(());
        }

        loop {
            let mut n = self.write_vectorized(bufs).await?;

            // This loop would also skip all `IoSlice`s that are empty
            // until the first non-empty `IoSlice` is met.
            while bufs[0].len() <= n {
                n -= bufs[0].len();
                bufs = &mut bufs[1..];

                if bufs.is_empty() {
                    debug_assert_eq!(n, 0);
                    return Ok(());
                }
            }

            bufs[0] = IoSlice::new(&bufs[0].into_inner()[n..]);
        }
    }

    /// Write entire `bufs`.
    ///
    /// # Cancel Safety
    ///
    /// This function is cancel safe.
    pub async fn write_all_zero_copy(&mut self, mut bufs: &mut [Bytes]) -> Result<(), Error> {
        if bufs.is_empty() {
            return Ok(());
        }

        loop {
            let mut n = self.write_zero_copy(bufs).await?;

            // This loop would also skip all `Bytes` that are empty
            // until the first non-empty `Bytes` is met.
            while bufs[0].len() <= n {
                n -= bufs[0].len();
                bufs = &mut bufs[1..];

                if bufs.is_empty() {
                    debug_assert_eq!(n, 0);
                    return Ok(());
                }
            }

            bufs[0].advance(n);
        }
    }

    /// Return the offset of the file.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    async fn copy_to_impl(&mut self, dst: &mut Self, n: u64) -> Result<(), Error> {
        if !self
            .inner
            .get_auxiliary()
            .extensions()
            .contains(Extensions::COPY_DATA)
        {
            return Err(Error::UnsupportedExtension(&"copy_data"));
        }

        dst.check_for_writable()?;

        let offset = self.offset;

        self.send_readable_request(|write_end, handle, id| {
            Ok(write_end
                .send_copy_data_request(
                    id,
                    handle,
                    offset,
                    n,
                    Cow::Borrowed(&dst.inner.handle),
                    dst.offset,
                )?
                .wait())
        })
        .await?;

        // Adjust offset
        Pin::new(self).start_seek(io::SeekFrom::Current(n.try_into().unwrap()))?;
        Pin::new(dst).start_seek(io::SeekFrom::Current(n.try_into().unwrap()))?;

        Ok(())
    }

    /// Copy `n` bytes of data from `self` to `dst`.
    ///
    /// The server MUST copy the data exactly as if the data is copied
    /// using a series of read and write.
    ///
    /// There are no protocol restrictions on this operation; however, the
    /// server MUST ensure that the user does not exceed quota, etc.  The
    /// server is, as always, free to complete this operation out of order if
    /// it is too large to complete immediately, or to refuse a request that
    /// is too large.
    ///
    /// After a successful function call, the offsets of `self` and `dst`
    /// are increased by `n`.
    ///
    /// # Precondition
    ///
    /// Requires extension `copy-data`.
    /// For [openssh-portable], this is available from V_9_0_P1.
    ///
    /// You can check it with [`Sftp::support_copy`](crate::sftp::Sftp::support_copy).
    ///
    /// If the extension is not supported by the server, this function
    /// would fail with [`Error::UnsupportedExtension`].
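    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `src` and `dst` are [`File`]s opened on a
    /// server that advertises `copy-data` (`src` readable, `dst` writable):
    ///
    /// ```no_run
    /// # use std::num::NonZeroU64;
    /// # use openssh_sftp_client::file::File;
    /// # async fn example(src: &mut File, dst: &mut File) -> Result<(), openssh_sftp_client::Error> {
    /// src.copy_to(dst, NonZeroU64::new(4096).unwrap()).await?;
    /// // Both offsets have advanced by 4096 bytes.
    /// # Ok(())
    /// # }
    /// ```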
    ///
    /// [openssh-portable]: https://github.com/openssh/openssh-portable
    pub async fn copy_to(&mut self, dst: &mut Self, n: NonZeroU64) -> Result<(), Error> {
        self.copy_to_impl(dst, n.get()).await
    }

    /// Copy data from `self` to `dst` until EOF is encountered.
    ///
    /// The server MUST copy the data exactly as if the data is copied
    /// using a series of read and write.
    ///
    /// There are no protocol restrictions on this operation; however, the
    /// server MUST ensure that the user does not exceed quota, etc.  The
    /// server is, as always, free to complete this operation out of order if
    /// it is too large to complete immediately, or to refuse a request that
    /// is too large.
    ///
    /// After a successful function call, the offsets of `self` and `dst`
    /// are unchanged.
    ///
    /// # Precondition
    ///
    /// Requires extension `copy-data`.
    /// For [openssh-portable], this is available from V_9_0_P1.
    ///
    /// You can check it with [`Sftp::support_copy`](crate::sftp::Sftp::support_copy).
    ///
    /// If the extension is not supported by the server, this function
    /// would fail with [`Error::UnsupportedExtension`].
    ///
    /// [openssh-portable]: https://github.com/openssh/openssh-portable
    pub async fn copy_all_to(&mut self, dst: &mut Self) -> Result<(), Error> {
        self.copy_to_impl(dst, 0).await
    }

    /// No-op to be compatible with [`TokioCompatFile::as_mut_file`].
    pub fn as_mut_file(&mut self) -> &mut File {
        self
    }
}

impl AsyncSeek for File {
    /// start_seek only adjusts the local offset, since the sftp protocol
    /// does not provide a seek function.
    ///
    /// Instead, the offset is provided when sending read/write requests;
    /// thus errors are reported at read/write.
    fn start_seek(mut self: Pin<&mut Self>, position: io::SeekFrom) -> io::Result<()> {
        use io::SeekFrom::*;

        match position {
            Start(pos) => self.offset = pos,
            End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "Seeking from the end is unsupported",
                ))
            }
            Current(n) => {
                if n >= 0 {
                    self.offset =
                        self.offset
                            .checked_add(n.try_into().unwrap())
                            .ok_or_else(|| {
                                io::Error::new(
                                    io::ErrorKind::InvalidData,
                                    "Overflow occurred during seeking",
                                )
                            })?;
                } else {
                    self.offset = self
                        .offset
                        .checked_sub((-n).try_into().unwrap())
                        .ok_or_else(|| {
                            io::Error::new(
                                io::ErrorKind::InvalidData,
                                "Underflow occurred during seeking",
                            )
                        })?;
                }
            }
        }

        Ok(())
    }

    /// This function is a no-op; it simply returns the offset.
    fn poll_complete(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        Poll::Ready(Ok(self.offset))
    }
}
openssh-sftp-client-0.15.0/src/file/tokio_compat_file.rs000064400000000000000000000575311046102023000213700ustar 00000000000000use crate::{
    cancel_error,
    file::{utility::take_io_slices, File},
    lowlevel::{AwaitableDataFuture, AwaitableStatusFuture, Handle},
    Buffer, Data, Error, Id, WriteEnd,
};

use std::{
    borrow::Cow,
    cmp::{max, min},
    collections::VecDeque,
    convert::TryInto,
    future::Future,
    io::{self, IoSlice},
    mem,
    num::{NonZeroU32, NonZeroUsize},
    ops::{Deref, DerefMut},
    pin::Pin,
    task::{Context, Poll},
};

use bytes::{Buf, Bytes, BytesMut};
use derive_destructure2::destructure;
use pin_project::{pin_project, pinned_drop};
use tokio::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use tokio_io_utility::ready;
use tokio_util::sync::WaitForCancellationFutureOwned;

/// The default length of the buffer used in [`TokioCompatFile`].
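///
/// Currently 4096 bytes; pass a different [`NonZeroUsize`] to
/// [`TokioCompatFile::with_capacity`] if another buffer length is desired.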
pub const DEFAULT_BUFLEN: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(4096) }; fn sftp_to_io_error(sftp_err: Error) -> io::Error { match sftp_err { Error::IOError(io_error) => io_error, sftp_err => io::Error::new(io::ErrorKind::Other, sftp_err), } } fn send_request(file: &mut File, f: Func) -> Result where Func: FnOnce(&mut WriteEnd, Id, Cow<'_, Handle>, u64) -> Result, { // Get id and offset to avoid reference to file. let id = file.inner.get_id_mut(); let offset = file.offset; let (write_end, handle) = file.get_inner(); // Add request to write buffer let awaitable = f(write_end, id, handle, offset)?; // Requests is already added to write buffer, so wakeup // the `flush_task`. write_end.get_auxiliary().wakeup_flush_task(); Ok(awaitable) } /// File that implements [`AsyncRead`], [`AsyncBufRead`], [`AsyncSeek`] and /// [`AsyncWrite`], which is compatible with /// [`tokio::fs::File`](https://docs.rs/tokio/latest/tokio/fs/struct.File.html). #[derive(Debug, destructure)] #[pin_project(PinnedDrop)] pub struct TokioCompatFile { inner: File, buffer_len: NonZeroUsize, buffer: BytesMut, write_len: usize, read_future: Option>, write_futures: VecDeque, /// cancellation_fut is not only cancel-safe, but also can be polled after /// it is ready. /// /// Once it is ready, all polls after that immediately return Poll::Ready(()) #[pin] cancellation_future: WaitForCancellationFutureOwned, } #[derive(Debug)] struct WriteFutureElement { future: AwaitableStatusFuture, write_len: usize, } impl TokioCompatFile { /// Create a [`TokioCompatFile`] using [`DEFAULT_BUFLEN`]. pub fn new(inner: File) -> Self { Self::with_capacity(inner, DEFAULT_BUFLEN) } /// Create a [`TokioCompatFile`]. /// /// * `buffer_len` - buffer len to be used in [`AsyncBufRead`] /// and the minimum length to read in [`AsyncRead`]. pub fn with_capacity(inner: File, buffer_len: NonZeroUsize) -> Self { Self { cancellation_future: inner.get_auxiliary().cancel_token.clone().cancelled_owned(), inner, buffer: BytesMut::new(), buffer_len, write_len: 0, read_future: None, write_futures: VecDeque::new(), } } /// Return the inner [`File`]. pub fn into_inner(self) -> File { self.destructure().0 } /// Return capacity of the internal buffer /// /// Note that if there are pending requests, then the actual /// capacity might be more than the returned value. pub fn capacity(&self) -> usize { self.buffer.capacity() } /// Reserve the capacity of the internal buffer for at least `cap` /// bytes. pub fn reserve(&mut self, new_cap: usize) { let curr_cap = self.capacity(); if curr_cap < new_cap { self.buffer.reserve(new_cap - curr_cap); } } /// Shrink the capacity of the internal buffer to at most `cap` /// bytes. pub fn shrink_to(&mut self, new_cap: usize) { let curr_cap = self.capacity(); if curr_cap > new_cap { self.buffer = BytesMut::with_capacity(new_cap); } } /// This function is a low-level call. /// /// It needs to be paired with the `consume` method or /// [`TokioCompatFile::consume_and_return_buffer`] to function properly. /// /// When calling this method, none of the contents will be "read" in the /// sense that later calling read may return the same contents. /// /// As such, you must consume the corresponding bytes using the methods /// listed above. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// This function does not change the offset into the file. 
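    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `file` is a readable [`TokioCompatFile`].
    /// The method takes `self: Pin<&mut Self>`, hence the pinning:
    ///
    /// ```no_run
    /// # use tokio::io::AsyncBufRead;
    /// # async fn example(file: openssh_sftp_client::file::TokioCompatFile) -> Result<(), openssh_sftp_client::Error> {
    /// tokio::pin!(file);
    /// file.as_mut().fill_buf().await?;
    /// // ...inspect the buffered data, then mark it as consumed, e.g.:
    /// file.as_mut().consume(1); // assumes at least one byte was buffered
    /// # Ok(())
    /// # }
    /// ```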
pub async fn fill_buf(mut self: Pin<&mut Self>) -> Result<(), Error> { let this = self.as_mut().project(); if this.buffer.is_empty() { let buffer_len = this.buffer_len.get().try_into().unwrap_or(u32::MAX); let buffer_len = NonZeroU32::new(buffer_len).unwrap(); self.read_into_buffer(buffer_len).await?; } Ok(()) } /// This can be used together with [`AsyncBufRead`] implementation for /// [`TokioCompatFile`] or [`TokioCompatFile::fill_buf`] or /// [`TokioCompatFile::read_into_buffer`] to avoid copying data. /// /// Return empty [`Bytes`] on EOF. /// /// This function does change the offset into the file. pub fn consume_and_return_buffer(&mut self, amt: usize) -> Bytes { let buffer = &mut self.buffer; let amt = min(amt, buffer.len()); let bytes = self.buffer.split_to(amt).freeze(); self.offset += amt as u64; bytes } /// * `amt` - Amount of data to read into the buffer. /// /// This function is a low-level call. /// /// It needs to be paired with the `consume` method or /// [`TokioCompatFile::consume_and_return_buffer`] to function properly. /// /// When calling this method, none of the contents will be "read" in the /// sense that later calling read may return the same contents. /// /// As such, you must consume the corresponding bytes using the methods /// listed above. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// This function does not change the offset into the file. pub fn poll_read_into_buffer( self: Pin<&mut Self>, cx: &mut Context<'_>, amt: NonZeroU32, ) -> Poll> { // Dereference it here once so that there will be only // one mutable borrow to self. let this = self.project(); this.inner.check_for_readable()?; let max_read_len = this.inner.max_read_len_impl(); let amt = min(amt.get(), max_read_len); let future = if let Some(future) = this.read_future { // Get the active future. // // The future might read more/less than remaining, // but the offset must be equal to this.offset, // since AsyncSeek::start_seek would reset this.future // if this.offset is changed. future } else { this.buffer.reserve(amt as usize); let cap = this.buffer.capacity(); let buffer = this.buffer.split_off(cap - (amt as usize)); let future = send_request(this.inner, |write_end, id, handle, offset| { write_end.send_read_request(id, handle, offset, amt, Some(buffer)) })? .wait(); // Store it in this.read_future *this.read_future = Some(future); this.read_future .as_mut() .expect("FileFuture::Data is just assigned to self.future!") }; if this.cancellation_future.poll(cx).is_ready() { return Poll::Ready(Err(cancel_error())); } // Wait for the future let res = ready!(Pin::new(future).poll(cx)); *this.read_future = None; let (id, data) = res?; this.inner.inner.cache_id_mut(id); match data { Data::Buffer(buffer) => { // Since amt != 0, all AwaitableDataFuture created // must at least read in one byte. debug_assert!(!buffer.is_empty()); // sftp v3 can at most read in max_read_len bytes. debug_assert!(buffer.len() <= max_read_len as usize); this.buffer.unsplit(buffer); } Data::Eof => return Poll::Ready(Ok(())), _ => std::unreachable!("Expect Data::Buffer"), }; Poll::Ready(Ok(())) } /// * `amt` - Amount of data to read into the buffer. /// /// This function is a low-level call. /// /// It needs to be paired with the `consume` method or /// [`TokioCompatFile::consume_and_return_buffer`] to function properly. /// /// When calling this method, none of the contents will be "read" in the /// sense that later calling read may return the same contents. 
/// /// As such, you must consume the corresponding bytes using the methods /// listed above. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// This function does not change the offset into the file. pub async fn read_into_buffer(self: Pin<&mut Self>, amt: NonZeroU32) -> Result<(), Error> { #[must_use] struct ReadIntoBuffer<'a>(Pin<&'a mut TokioCompatFile>, NonZeroU32); impl Future for ReadIntoBuffer<'_> { type Output = Result<(), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let amt = self.1; self.0.as_mut().poll_read_into_buffer(cx, amt) } } ReadIntoBuffer(self, amt).await } /// Return the inner file pub fn as_mut_file(self: Pin<&mut Self>) -> &mut File { self.project().inner } fn flush_pending_requests( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<(), std::io::Error> { let this = self.project(); // Flush only if there is pending awaitable writes if this.inner.need_flush { // Only flush if there are pending requests if this.inner.auxiliary().get_pending_requests() != 0 { this.inner.auxiliary().trigger_flushing(); } this.inner.need_flush = false; } if this.cancellation_future.poll(cx).is_ready() { return Err(sftp_to_io_error(cancel_error())); } Ok(()) } fn flush_one( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { self.as_mut().flush_pending_requests(cx)?; let this = self.project(); let res = if let Some(element) = this.write_futures.front_mut() { let res = ready!(Pin::new(&mut element.future).poll(cx)); *this.write_len -= element.write_len; res } else { // All futures consumed without error debug_assert_eq!(*this.write_len, 0); return Poll::Ready(Ok(())); }; this.write_futures .pop_front() .expect("futures should have at least one elements in it"); // propagate error and recycle id this.inner .inner .cache_id_mut(res.map_err(sftp_to_io_error)?.0); Poll::Ready(Ok(())) } } impl From for TokioCompatFile { fn from(inner: File) -> Self { Self::new(inner) } } impl From for File { fn from(file: TokioCompatFile) -> Self { file.into_inner() } } /// Creates a new [`TokioCompatFile`] instance that shares the /// same underlying file handle as the existing File instance. /// /// Reads, writes, and seeks can be performed independently. impl Clone for TokioCompatFile { fn clone(&self) -> Self { Self::with_capacity(self.inner.clone(), self.buffer_len) } } impl Deref for TokioCompatFile { type Target = File; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for TokioCompatFile { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl AsyncSeek for TokioCompatFile { fn start_seek(mut self: Pin<&mut Self>, position: io::SeekFrom) -> io::Result<()> { let this = self.as_mut().project(); let prev_offset = this.inner.offset(); Pin::new(&mut *this.inner).start_seek(position)?; let new_offset = this.inner.offset(); if new_offset != prev_offset { // Reset future since they are invalidated by change of offset. *this.read_future = None; // Reset buffer or consume buffer if necessary. 
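            // Three cases follow:
            //  - seeking backwards: the buffered data is ahead of the new
            //    offset, so it must be dropped;
            //  - seeking forwards within the buffer: just advance past the
            //    skipped bytes;
            //  - seeking forwards past the buffer (or a delta that does not
            //    fit in usize): drop the whole buffer.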
if new_offset < prev_offset { this.buffer.clear(); } else if let Ok(offset) = (new_offset - prev_offset).try_into() { if offset > this.buffer.len() { this.buffer.clear(); } else { this.buffer.advance(offset); } } else { this.buffer.clear(); } } Ok(()) } fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(self.project().inner).poll_complete(cx) } } impl AsyncBufRead for TokioCompatFile { fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.as_mut().project(); if this.buffer.is_empty() { let buffer_len = this.buffer_len.get().try_into().unwrap_or(u32::MAX); let buffer_len = NonZeroU32::new(buffer_len).unwrap(); ready!(self.as_mut().poll_read_into_buffer(cx, buffer_len)) .map_err(sftp_to_io_error)?; } Poll::Ready(Ok(self.project().buffer)) } fn consume(self: Pin<&mut Self>, amt: usize) { let this = self.project(); let buffer = this.buffer; buffer.advance(amt); this.inner.offset += amt as u64; } } impl AsyncRead for TokioCompatFile { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, read_buf: &mut ReadBuf<'_>, ) -> Poll> { self.check_for_readable_io_err()?; let remaining = read_buf.remaining(); if remaining == 0 { return Poll::Ready(Ok(())); } if self.buffer.is_empty() { let n = max(remaining, DEFAULT_BUFLEN.get()); let n = n.try_into().unwrap_or(u32::MAX); let n = NonZeroU32::new(n).unwrap(); ready!(self.as_mut().poll_read_into_buffer(cx, n)).map_err(sftp_to_io_error)?; } let n = min(remaining, self.buffer.len()); read_buf.put_slice(&self.buffer[..n]); self.consume(n); Poll::Ready(Ok(())) } } /// [`TokioCompatFile::poll_write`] only writes data to the buffer. /// /// [`TokioCompatFile::poll_write`] and /// [`TokioCompatFile::poll_write_vectored`] would send at most one /// sftp request. /// /// It is perfectly safe to buffer requests and send them in one go, /// since sftp v3 guarantees that requests on the same file handler /// is processed sequentially. /// /// NOTE that these writes cannot be cancelled. /// /// One maybe obvious note when using append-mode: /// /// make sure that all data that belongs together is written /// to the file in one operation. /// /// This can be done by concatenating strings before passing them to /// [`AsyncWrite::poll_write`] or [`AsyncWrite::poll_write_vectored`] and /// calling [`AsyncWrite::poll_flush`] on [`TokioCompatFile`] when the message /// is complete. /// /// Calling [`AsyncWrite::poll_flush`] on [`TokioCompatFile`] would wait on /// writes in the order they are sent. impl AsyncWrite for TokioCompatFile { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { self.check_for_writable_io_err()?; if buf.is_empty() { return Poll::Ready(Ok(0)); } // sftp v3 cannot send more than self.max_write_len() data at once. let max_write_len = self.max_write_len_impl(); let mut n: u32 = buf .len() .try_into() .map(|n| min(n, max_write_len)) .unwrap_or(max_write_len); let write_limit = self.get_auxiliary().tokio_compat_file_write_limit(); let mut write_len = self.write_len; if write_len == write_limit { ready!(self.as_mut().flush_one(cx))?; write_len = self.write_len; } let new_write_len = match write_len.checked_add(n as usize) { Some(new_write_len) if new_write_len > write_limit => { n = (write_limit - write_len).try_into().unwrap(); write_limit } None => { // case overflow // This has to be a separate cases since // write_limit could be set to usize::MAX, in which case // saturating_add would never return anything larger than it. 
n = (write_limit - write_len).try_into().unwrap(); write_limit } Some(new_write_len) => new_write_len, }; // sftp v3 cannot send more than self.max_write_len() data at once. let buf = &buf[..(n as usize)]; let this = self.as_mut().project(); let file = this.inner; let future = send_request(file, |write_end, id, handle, offset| { write_end.send_write_request_buffered(id, handle, offset, Cow::Borrowed(buf)) }) .map_err(sftp_to_io_error)? .wait(); // Since a new request is buffered, flushing is required. file.need_flush = true; this.write_futures.push_back(WriteFutureElement { future, write_len: n as usize, }); *self.as_mut().project().write_len = new_write_len; // Adjust offset and reset self.future Poll::Ready( self.start_seek(io::SeekFrom::Current(n as i64)) .map(|_| n as usize), ) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.check_for_writable_io_err()?; if self.as_mut().project().write_futures.is_empty() { return Poll::Ready(Ok(())); } self.as_mut().flush_pending_requests(cx)?; let this = self.project(); loop { let res = if let Some(element) = this.write_futures.front_mut() { let res = ready!(Pin::new(&mut element.future).poll(cx)); *this.write_len -= element.write_len; res } else { // All futures consumed without error debug_assert_eq!(*this.write_len, 0); break Poll::Ready(Ok(())); }; this.write_futures .pop_front() .expect("futures should have at least one elements in it"); // propagate error and recycle id this.inner .inner .cache_id_mut(res.map_err(sftp_to_io_error)?.0); } } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.poll_flush(cx) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { self.check_for_writable_io_err()?; if bufs.is_empty() { return Poll::Ready(Ok(0)); } let max_write_len = self.max_write_len_impl(); let n = if let Some(res) = take_io_slices(bufs, max_write_len as usize) { res.0 } else { return Poll::Ready(Ok(0)); }; let mut n: u32 = n.try_into().unwrap(); let write_limit = self.get_auxiliary().tokio_compat_file_write_limit(); let mut write_len = self.write_len; if write_len == write_limit { ready!(self.as_mut().flush_one(cx))?; write_len = self.write_len; } let new_write_len = match write_len.checked_add(n as usize) { Some(new_write_len) if new_write_len > write_limit => { n = (write_limit - write_len).try_into().unwrap(); write_limit } None => { // case overflow // This has to be a separate cases since // write_limit could be set to usize::MAX, in which case // saturating_add would never return anything larger than it. n = (write_limit - write_len).try_into().unwrap(); write_limit } Some(new_write_len) => new_write_len, }; let (_, bufs, buf) = take_io_slices(bufs, n as usize).unwrap(); let buffers = [bufs, &buf]; // Dereference it here once so that there will be only // one mutable borrow to self. let this = self.as_mut().project(); let file = this.inner; let future = send_request(file, |write_end, id, handle, offset| { write_end.send_write_request_buffered_vectored2(id, handle, offset, &buffers) }) .map_err(sftp_to_io_error)? .wait(); // Since a new request is buffered, flushing is required. 
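        // The flag is later consumed by `flush_pending_requests` (see
        // `poll_flush`), which wakes the flush task if requests are pending.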
file.need_flush = true; this.write_futures.push_back(WriteFutureElement { future, write_len: n as usize, }); *self.as_mut().project().write_len = new_write_len; // Adjust offset and reset self.future Poll::Ready( self.start_seek(io::SeekFrom::Current(n as i64)) .map(|_| n as usize), ) } fn is_write_vectored(&self) -> bool { true } } impl TokioCompatFile { async fn do_drop( mut file: File, read_future: Option>, write_futures: VecDeque, ) { if let Some(read_future) = read_future { // read_future error is ignored since users are no longer interested // in this. if let Ok((id, _)) = read_future.await { file.inner.cache_id_mut(id); } } for write_element in write_futures { // There are some pending writes that aren't flushed. // // While users have dropped TokioCompatFile, presumably because // they assume the data has already been written and flushed, it // fails and we need to notify our users of the error. match write_element.future.await { Ok((id, _)) => file.inner.cache_id_mut(id), Err(_err) => { #[cfg(feature = "tracing")] tracing::error!(?_err, "failed to write to File") } } } if let Err(_err) = file.close().await { #[cfg(feature = "tracing")] tracing::error!(?_err, "failed to close handle"); } } } /// We need to keep polling the read and write futures, otherwise it would drop /// the internal request ids too early, causing read task to fail /// when they should not fail. #[pinned_drop] impl PinnedDrop for TokioCompatFile { fn drop(mut self: Pin<&mut Self>) { let this = self.as_mut().project(); let file = this.inner.clone(); let read_future = this.read_future.take(); let write_futures = mem::take(this.write_futures); let cancellation_fut = self.auxiliary().cancel_token.clone().cancelled_owned(); let do_drop_fut = Self::do_drop(file, read_future, write_futures); self.auxiliary().tokio_handle().spawn(async move { tokio::select! { biased; _ = cancellation_fut => (), _ = do_drop_fut => (), } }); } } openssh-sftp-client-0.15.0/src/file/utility.rs000064400000000000000000000070621046102023000174000ustar 00000000000000use std::{io::IoSlice, ops::Deref}; use bytes::Bytes; use tokio_io_utility::IoSliceExt; /// Return `Some((n, subslices, reminder))` where /// - `n` is number of bytes in `subslices` and `reminder`. /// - `subslices` is a subslice of `bufs` /// - `reminder` might be a slice of `bufs[subslices.len()]` /// if `subslices.len() < bufs.len()` and the total number /// of bytes in `subslices` is less than `limit`. /// /// Return `None` if the total number of bytes in `bufs` is empty. fn take_slices>( bufs: &'_ [T], limit: usize, create_slice: impl FnOnce(&T, usize) -> T, ) -> Option<(usize, &'_ [T], [T; 1])> { if bufs.is_empty() { return None; } let mut end = 0; let mut n = 0; // loop 'buf // // This loop would skip empty `IoSlice`s. for buf in bufs { let cnt = n + buf.len(); // branch '1 if cnt > limit { break; } n = cnt; end += 1; } let buf = if end < bufs.len() { // In this branch, the loop 'buf terminate due to branch '1, // thus // // n + buf.len() > limit, // buf.len() > limit - n. // // And (limit - n) also cannot be 0, otherwise // branch '1 will not be executed. let res = [create_slice(&bufs[end], limit - n)]; n = limit; res } else { if n == 0 { return None; } [create_slice(&bufs[0], 0)] }; Some((n, &bufs[..end], buf)) } /// Return `Some((n, io_subslices, [reminder]))` where /// - `n` is number of bytes in `io_subslices` and `reminder`. 
/// - `io_subslices` is a subslice of `io_slices`
/// - `reminder` might be a slice of `io_slices[io_subslices.len()]`
///   if `io_subslices.len() < io_slices.len()` and the total number
///   of bytes in `io_subslices` is less than `limit`.
///
/// Return `None` if the total number of bytes in `io_slices` is empty.
pub(super) fn take_io_slices<'a>(
    io_slices: &'a [IoSlice<'a>],
    limit: usize,
) -> Option<(usize, &'a [IoSlice<'a>], [IoSlice<'a>; 1])> {
    take_slices(io_slices, limit, |io_slice, end| {
        IoSlice::new(&io_slice.into_inner()[..end])
    })
}

/// Return `Some((n, bytes_subslice, [reminder]))` where
/// - `n` is number of bytes in `bytes_subslice` and `reminder`.
/// - `bytes_subslice` is a subslice of `bytes_slice`
/// - `reminder` might be a slice of `bytes_slice[bytes_subslice.len()]`
///   if `bytes_subslice.len() < bytes_slice.len()` and the total number
///   of bytes in `bytes_subslice` is less than `limit`.
///
/// Return `None` if the total number of bytes in `bytes_slice` is empty.
pub(super) fn take_bytes(
    bytes_slice: &[Bytes],
    limit: usize,
) -> Option<(usize, &[Bytes], [Bytes; 1])> {
    take_slices(bytes_slice, limit, |bytes, end| bytes.slice(0..end))
}

#[cfg(test)]
mod tests {
    use super::{take_io_slices, IoSlice};

    use pretty_assertions::assert_eq;

    #[test]
    fn test_take_io_slices() {
        let limit = 200;
        let content = b"HELLO, WORLD!\n".repeat(limit / 8);
        let len = content.len();

        assert!(len / 2 < limit);

        let io_slices = [
            IoSlice::new(&content[..len / 2]),
            IoSlice::new(&content[len / 2..]),
        ];

        let (n, io_subslices, reminder) = take_io_slices(&io_slices, limit).unwrap();

        assert_eq!(n, limit);
        assert_eq!(io_subslices.len(), 1);
        assert_eq!(&*io_subslices[0], &*io_slices[0]);
        assert_eq!(&*reminder[0], &io_slices[1][..(limit - len / 2)]);
    }
}
openssh-sftp-client-0.15.0/src/fs/dir.rs000064400000000000000000000130351046102023000161410ustar 00000000000000use crate::{
    cancel_error,
    lowlevel::NameEntry,
    metadata::{FileType, MetaData},
    Error,
};

use super::Dir;

use std::{
    borrow::Cow,
    future::Future,
    path::Path,
    pin::Pin,
    task::{ready, Context, Poll},
    vec::IntoIter,
};

use futures_core::stream::{FusedStream, Stream};
use pin_project::{pin_project, pinned_drop};
use tokio_util::sync::WaitForCancellationFutureOwned;

type ResponseFuture = crate::lowlevel::AwaitableNameEntriesFuture<crate::Buffer>;

/// Entries returned by the [`ReadDir`].
///
/// This is a specialized version of [`std::fs::DirEntry`].
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct DirEntry(NameEntry);

impl DirEntry {
    /// Return filename of the dir entry.
    pub fn filename(&self) -> &Path {
        &self.0.filename
    }

    /// Return filename of the dir entry as a mutable reference.
    pub fn filename_mut(&mut self) -> &mut Box<Path> {
        &mut self.0.filename
    }

    /// Return metadata for the dir entry.
    pub fn metadata(&self) -> MetaData {
        MetaData::new(self.0.attrs)
    }

    /// Return the file type for the dir entry.
    pub fn file_type(&self) -> Option<FileType> {
        self.metadata().file_type()
    }
}

/// Reads the entries in a directory.
#[derive(Debug)]
#[pin_project(PinnedDrop)]
pub struct ReadDir {
    dir: Dir,

    // future and entries contain the state
    //
    // Invariant:
    //  - entries.is_none() => future.is_none()
    //  - If entries.is_some(), then future.is_none() ^ entries.unwrap().as_slice().is_empty()
    future: Option<ResponseFuture>,
    entries: Option<IntoIter<NameEntry>>,

    /// cancellation_fut is not only cancel-safe, but also can be polled after
    /// it is ready.
/// /// Once it is ready, all polls after that immediately return Poll::Ready(()) #[pin] cancellation_fut: WaitForCancellationFutureOwned, } impl ReadDir { pub(super) fn new(dir: Dir) -> Self { Self { cancellation_fut: dir.0.get_auxiliary().cancel_token.clone().cancelled_owned(), dir, future: None, entries: Some(Vec::new().into_iter()), } } fn new_request(dir: &mut Dir) -> Result { let owned_handle = &mut dir.0; let id = owned_handle.get_id_mut(); let handle = &owned_handle.handle; let write_end = &mut owned_handle.write_end.inner; let future = write_end .send_readdir_request(id, Cow::Borrowed(handle))? .wait(); // Requests is already added to write buffer, so wakeup // the `flush_task` if necessary. owned_handle.get_auxiliary().wakeup_flush_task(); Ok(future) } } impl Stream for ReadDir { type Item = Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); let future = this.future; let entries = match &mut *this.entries { Some(entries) => entries, None => return Poll::Ready(None), }; if entries.as_slice().is_empty() { let dir = &mut *this.dir; let cancellation_fut = this.cancellation_fut; let fut = match future { Some(future) => future, None => { *future = Some(Self::new_request(dir)?); future.as_mut().unwrap() } }; let res = { let fut = async move { tokio::select! { biased; _ = cancellation_fut => Err(cancel_error()), res = fut => res, } }; tokio::pin!(fut); ready!(fut.poll(cx)) }; *future = None; // future is ready, reset it to None let (id, ret) = res?; this.dir.0.cache_id_mut(id); if ret.is_empty() { *this.entries = None; return Poll::Ready(None); } else { *entries = Vec::from(ret).into_iter(); } } debug_assert!(future.is_none()); debug_assert!(!entries.as_slice().is_empty()); Poll::Ready(entries.next().map(DirEntry).map(Ok)) } } impl FusedStream for ReadDir { fn is_terminated(&self) -> bool { self.entries.is_none() } } impl ReadDir { async fn do_drop(mut dir: Dir, future: Option) { if let Some(future) = future { if let Ok((id, _)) = future.await { dir.0.cache_id_mut(id); } } if let Err(_err) = dir.close().await { #[cfg(feature = "tracing")] tracing::error!(?_err, "failed to close handle"); } } } /// We need to keep polling the future stored internally, otherwise it would /// drop the internal request ids too early, causing read task to fail /// when they should not fail. #[pinned_drop] impl PinnedDrop for ReadDir { fn drop(self: Pin<&mut Self>) { let this = self.project(); let dir = this.dir.clone(); let future = this.future.take(); let cancellation_fut = dir.0.get_auxiliary().cancel_token.clone().cancelled_owned(); let do_drop_fut = Self::do_drop(dir, future); this.dir.0.get_auxiliary().tokio_handle().spawn(async move { tokio::select! 
{ biased; _ = cancellation_fut => (), _ = do_drop_fut => (), } }); } } openssh-sftp-client-0.15.0/src/fs/mod.rs000064400000000000000000000347601046102023000161520ustar 00000000000000use crate::{ file::OpenOptions, lowlevel::{self, Extensions}, metadata::{MetaData, MetaDataBuilder, Permissions}, Auxiliary, Buffer, Error, Id, OwnedHandle, WriteEnd, WriteEndWithCachedId, }; use std::{ borrow::Cow, cmp::min, convert::TryInto, path::{Path, PathBuf}, }; use bytes::BytesMut; mod dir; pub use dir::{DirEntry, ReadDir}; type AwaitableStatus = lowlevel::AwaitableStatus; type AwaitableAttrs = lowlevel::AwaitableAttrs; type SendLinkingRequest = fn(&mut WriteEnd, Id, Cow<'_, Path>, Cow<'_, Path>) -> Result; type SendRmRequest = fn(&mut WriteEnd, Id, Cow<'_, Path>) -> Result; type SendMetadataRequest = fn(&mut WriteEnd, Id, Cow<'_, Path>) -> Result; /// A struct used to perform operations on remote filesystem. #[derive(Debug, Clone)] pub struct Fs { write_end: WriteEndWithCachedId, cwd: Box, } impl Fs { pub(super) fn new(write_end: WriteEndWithCachedId, cwd: PathBuf) -> Self { Self { write_end, cwd: cwd.into_boxed_path(), } } fn get_auxiliary(&self) -> &Auxiliary { self.write_end.get_auxiliary() } /// Return current working dir. pub fn cwd(&self) -> &Path { &self.cwd } /// Set current working dir. /// /// * `cwd` - Can include `~`. /// If it is empty, then it is set to use the default /// directory set by the remote `sftp-server`. pub fn set_cwd(&mut self, cwd: impl Into) { self.cwd = cwd.into().into_boxed_path(); } fn concat_path_if_needed<'path>(&self, path: &'path Path) -> Cow<'path, Path> { if path.is_absolute() || self.cwd.as_os_str().is_empty() { Cow::Borrowed(path) } else { Cow::Owned(self.cwd.join(path)) } } } impl Fs { /// Open a remote dir pub async fn open_dir(&mut self, path: impl AsRef) -> Result { async fn inner(this: &mut Fs, path: &Path) -> Result { let path = this.concat_path_if_needed(path); this.write_end .send_request(|write_end, id| Ok(write_end.send_opendir_request(id, path)?.wait())) .await .map(|handle| Dir(OwnedHandle::new(this.write_end.clone(), handle))) } inner(self, path.as_ref()).await } /// Create a directory builder. pub fn dir_builder(&mut self) -> DirBuilder<'_> { DirBuilder { fs: self, metadata_builder: MetaDataBuilder::new(), } } /// Creates a new, empty directory at the provided path. pub async fn create_dir(&mut self, path: impl AsRef) -> Result<(), Error> { async fn inner(this: &mut Fs, path: &Path) -> Result<(), Error> { this.dir_builder().create(path).await } inner(self, path.as_ref()).await } async fn remove_impl(&mut self, path: &Path, f: SendRmRequest) -> Result<(), Error> { let path = self.concat_path_if_needed(path); self.write_end .send_request(|write_end, id| Ok(f(write_end, id, path)?.wait())) .await } /// Removes an existing, empty directory. pub async fn remove_dir(&mut self, path: impl AsRef) -> Result<(), Error> { self.remove_impl(path.as_ref(), WriteEnd::send_rmdir_request) .await } /// Removes a file from remote filesystem. pub async fn remove_file(&mut self, path: impl AsRef) -> Result<(), Error> { self.remove_impl(path.as_ref(), WriteEnd::send_remove_request) .await } /// Returns the canonical, absolute form of a path with all intermediate /// components normalized and symbolic links resolved. /// /// If the remote server supports the `expand-path` extension, then this /// method will also expand tilde characters (“~”) in the path. You can /// check it with [`Sftp::support_expand_path`](crate::sftp::Sftp::support_expand_path). 
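    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `fs` is an [`Fs`] obtained from an
    /// established session (e.g. via `Sftp::fs`):
    ///
    /// ```no_run
    /// # async fn example(fs: &mut openssh_sftp_client::fs::Fs) -> Result<(), openssh_sftp_client::Error> {
    /// let resolved = fs.canonicalize("uploads/../data").await?;
    /// println!("resolved to {}", resolved.display());
    /// # Ok(())
    /// # }
    /// ```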
pub async fn canonicalize(&mut self, path: impl AsRef) -> Result { async fn inner(this: &mut Fs, path: &Path) -> Result { let path = this.concat_path_if_needed(path); let f = if this .get_auxiliary() .extensions() .contains(Extensions::EXPAND_PATH) { // This supports canonicalisation of relative paths and those that // need tilde-expansion, i.e. “~”, “~/…” and “~user/…”. // // These paths are expanded using shell-like rules and the resultant // path is canonicalised similarly to WriteEnd::send_realpath_request. WriteEnd::send_expand_path_request } else { WriteEnd::send_realpath_request }; this.write_end .send_request(|write_end, id| Ok(f(write_end, id, path)?.wait())) .await .map(Into::into) } inner(self, path.as_ref()).await } async fn linking_impl( &mut self, src: &Path, dst: &Path, f: SendLinkingRequest, ) -> Result<(), Error> { let src = self.concat_path_if_needed(src); let dst = self.concat_path_if_needed(dst); self.write_end .send_request(|write_end, id| Ok(f(write_end, id, src, dst)?.wait())) .await } /// Creates a new hard link on the remote filesystem. /// /// # Precondition /// /// Require extension `hardlink` /// /// You can check it with [`Sftp::support_hardlink`](crate::sftp::Sftp::support_hardlink). pub async fn hard_link( &mut self, src: impl AsRef, dst: impl AsRef, ) -> Result<(), Error> { async fn inner(this: &mut Fs, src: &Path, dst: &Path) -> Result<(), Error> { if !this .get_auxiliary() .extensions() .contains(Extensions::HARDLINK) { return Err(Error::UnsupportedExtension(&"hardlink")); } this.linking_impl(src, dst, WriteEnd::send_hardlink_request) .await } inner(self, src.as_ref(), dst.as_ref()).await } /// Creates a new symlink on the remote filesystem. pub async fn symlink( &mut self, src: impl AsRef, dst: impl AsRef, ) -> Result<(), Error> { self.linking_impl(src.as_ref(), dst.as_ref(), WriteEnd::send_symlink_request) .await } /// Renames a file or directory to a new name, replacing the original file if to already exists. /// /// If the server supports the `posix-rename` extension, it will be used. /// You can check it with [`Sftp::support_posix_rename`](crate::sftp::Sftp::support_posix_rename). /// /// This will not work if the new name is on a different mount point. pub async fn rename( &mut self, from: impl AsRef, to: impl AsRef, ) -> Result<(), Error> { async fn inner(this: &mut Fs, from: &Path, to: &Path) -> Result<(), Error> { let f = if this .get_auxiliary() .extensions() .contains(Extensions::POSIX_RENAME) { // posix rename is guaranteed to be atomic WriteEnd::send_posix_rename_request } else { WriteEnd::send_rename_request }; this.linking_impl(from, to, f).await } inner(self, from.as_ref(), to.as_ref()).await } /// Reads a symbolic link, returning the file that the link points to. pub async fn read_link(&mut self, path: impl AsRef) -> Result { async fn inner(this: &mut Fs, path: &Path) -> Result { let path = this.concat_path_if_needed(path); this.write_end .send_request(|write_end, id| Ok(write_end.send_readlink_request(id, path)?.wait())) .await .map(Into::into) } inner(self, path.as_ref()).await } async fn set_metadata_impl(&mut self, path: &Path, metadata: MetaData) -> Result<(), Error> { let path = self.concat_path_if_needed(path); self.write_end .send_request(|write_end, id| { Ok(write_end .send_setstat_request(id, path, metadata.into_inner())? .wait()) }) .await } /// Change the metadata of a file or a directory. 
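    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `fs` is an [`Fs`] for an established
    /// session; this requests a size of `0` for `file.txt`, i.e. truncates
    /// it via its metadata:
    ///
    /// ```no_run
    /// # use openssh_sftp_client::metadata::MetaDataBuilder;
    /// # async fn example(fs: &mut openssh_sftp_client::fs::Fs) -> Result<(), openssh_sftp_client::Error> {
    /// let metadata = MetaDataBuilder::new().len(0).create();
    /// fs.set_metadata("file.txt", metadata).await?;
    /// # Ok(())
    /// # }
    /// ```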
pub async fn set_metadata( &mut self, path: impl AsRef, metadata: MetaData, ) -> Result<(), Error> { self.set_metadata_impl(path.as_ref(), metadata).await } /// Changes the permissions found on a file or a directory. pub async fn set_permissions( &mut self, path: impl AsRef, perm: Permissions, ) -> Result<(), Error> { async fn inner(this: &mut Fs, path: &Path, perm: Permissions) -> Result<(), Error> { this.set_metadata_impl(path, MetaDataBuilder::new().permissions(perm).create()) .await } inner(self, path.as_ref(), perm).await } async fn metadata_impl( &mut self, path: &Path, f: SendMetadataRequest, ) -> Result { let path = self.concat_path_if_needed(path); self.write_end .send_request(|write_end, id| Ok(f(write_end, id, path)?.wait())) .await .map(MetaData::new) } /// Given a path, queries the file system to get information about a file, /// directory, etc. pub async fn metadata(&mut self, path: impl AsRef) -> Result { self.metadata_impl(path.as_ref(), WriteEnd::send_stat_request) .await } /// Queries the file system metadata for a path. pub async fn symlink_metadata(&mut self, path: impl AsRef) -> Result { self.metadata_impl(path.as_ref(), WriteEnd::send_lstat_request) .await } /// Reads the entire contents of a file into a bytes. pub async fn read(&mut self, path: impl AsRef) -> Result { async fn inner(this: &mut Fs, path: &Path) -> Result { let path = this.concat_path_if_needed(path); let mut file = OpenOptions::open_inner( lowlevel::OpenOptions::new().read(true), false, false, false, path.as_ref(), this.write_end.clone(), ) .await?; let max_read_len = file.max_read_len_impl(); let cap_to_reserve: usize = if let Some(len) = file.metadata().await?.len() { // To detect EOF, we need to a little bit more then the length // of the file. len.saturating_add(300) .try_into() .unwrap_or(max_read_len as usize) } else { max_read_len as usize }; let mut buffer = BytesMut::with_capacity(cap_to_reserve); loop { let cnt = buffer.len(); let n: u32 = if cnt <= cap_to_reserve { // To detect EOF, we need to a little bit more then the // length of the file. (cap_to_reserve - cnt) .saturating_add(300) .try_into() .map(|n| min(n, max_read_len)) .unwrap_or(max_read_len) } else { max_read_len }; buffer.reserve(n.try_into().unwrap_or(usize::MAX)); if let Some(bytes) = file.read(n, buffer.split_off(cnt)).await? { buffer.unsplit(bytes); } else { // Eof break Ok(buffer); } } } inner(self, path.as_ref()).await } /// Open/Create a file for writing and write the entire `contents` into it. pub async fn write( &mut self, path: impl AsRef, content: impl AsRef<[u8]>, ) -> Result<(), Error> { async fn inner(this: &mut Fs, path: &Path, content: &[u8]) -> Result<(), Error> { let path = this.concat_path_if_needed(path); OpenOptions::open_inner( lowlevel::OpenOptions::new().write(true), true, true, false, path.as_ref(), this.write_end.clone(), ) .await? .write_all(content) .await } inner(self, path.as_ref(), content.as_ref()).await } } /// Remote Directory #[repr(transparent)] #[derive(Debug, Clone)] pub struct Dir(OwnedHandle); impl Dir { /// Read dir. pub fn read_dir(self) -> ReadDir { ReadDir::new(self) } /// Close dir. pub async fn close(self) -> Result<(), Error> { self.0.close().await } } /// Builder for new directory to create. #[derive(Debug)] pub struct DirBuilder<'a> { fs: &'a mut Fs, metadata_builder: MetaDataBuilder, } impl DirBuilder<'_> { /// Reset builder back to default. pub fn reset(&mut self) -> &mut Self { self.metadata_builder = MetaDataBuilder::new(); self } /// Set id of the dir to be built. 
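    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `fs` is an [`Fs`] for an established
    /// session; note that an sftp server may ignore or reject ownership
    /// changes (the ids used here are illustrative only):
    ///
    /// ```no_run
    /// # async fn example(fs: &mut openssh_sftp_client::fs::Fs) -> Result<(), openssh_sftp_client::Error> {
    /// fs.dir_builder().id((1000, 1000)).create("new_dir").await?;
    /// # Ok(())
    /// # }
    /// ```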
pub fn id(&mut self, (uid, gid): (u32, u32)) -> &mut Self { self.metadata_builder.id((uid, gid)); self } /// Set permissions of the dir to be built. pub fn permissions(&mut self, perm: Permissions) -> &mut Self { self.metadata_builder.permissions(perm); self } } impl DirBuilder<'_> { /// Creates the specified directory with the configured options. pub async fn create(&mut self, path: impl AsRef) -> Result<(), Error> { async fn inner(this: &mut DirBuilder<'_>, path: &Path) -> Result<(), Error> { let fs = &mut this.fs; let path = fs.concat_path_if_needed(path); let attrs = this.metadata_builder.create().into_inner(); fs.write_end .send_request(|write_end, id| { Ok(write_end.send_mkdir_request(id, path, attrs)?.wait()) }) .await } inner(self, path.as_ref()).await } } openssh-sftp-client-0.15.0/src/handle.rs000064400000000000000000000075621046102023000162160ustar 00000000000000use super::{ lowlevel::{Handle, HandleOwned}, {Error, Id, WriteEnd, WriteEndWithCachedId}, }; use std::{ borrow::Cow, future::Future, ops::{Deref, DerefMut}, sync::Arc, }; use derive_destructure2::destructure; /// Remote Directory #[derive(Debug, Clone, destructure)] pub(super) struct OwnedHandle { pub(super) write_end: WriteEndWithCachedId, pub(super) handle: Arc, } impl Drop for OwnedHandle { fn drop(&mut self) { let write_end = &mut self.write_end; let handle = &self.handle; if Arc::strong_count(handle) == 1 { // This is the last reference to the arc let id = write_end.get_id_mut(); match write_end.send_close_request(id, Cow::Borrowed(handle)) { Ok(response) => { // Requests is already added to write buffer, so wakeup // the `flush_task`. self.get_auxiliary().wakeup_flush_task(); // Reasons for moving future out of the async block: // 1. `response.wait()` is basically a no-op, which simply takes out the inner value of // AwaitableStatus and wrap it with a corresponding AwaitableStatusFuture // 2. `rustc` isn't very good at optimizing moves in the future, it often results in the // size of the Future blows out, becomes double of its size. // 3. the more states the Futures have, the harder it is to optimize and take advantage of the niche. let future = response.wait(); self.get_auxiliary().tokio_handle().spawn(async move { let _res = future.await; #[cfg(feature = "tracing")] match _res { Ok(_) => tracing::debug!("close handle success"), Err(err) => tracing::error!(?err, "failed to close handle"), } }); } Err(_err) => { #[cfg(feature = "tracing")] tracing::error!(?_err, "failed to send close request"); } } } } } impl OwnedHandle { pub(super) fn new(write_end: WriteEndWithCachedId, handle: HandleOwned) -> Self { Self { write_end, handle: Arc::new(handle), } } pub(super) async fn send_request(&mut self, f: Func) -> Result where Func: FnOnce(&mut WriteEnd, Cow<'_, Handle>, Id) -> Result + Send, F: Future> + Send + 'static, R: Send, { let handle = &self.handle; self.write_end .send_request(|write_end, id| f(write_end, Cow::Borrowed(handle), id)) .await } /// Close the [`OwnedHandle`], send the close request /// if this is the last reference. /// /// # Cancel Safety /// /// This function is cancel safe. pub(super) async fn close(self) -> Result<(), Error> { if Arc::strong_count(&self.handle) == 1 { // This is the last reference to the arc // Release resources without running `Drop::drop` let (mut write_end, handle) = self.destructure(); write_end .send_request(|write_end, id| { Ok(write_end .send_close_request(id, Cow::Borrowed(&handle))? 
.wait()) }) .await } else { Ok(()) } } } impl Deref for OwnedHandle { type Target = WriteEndWithCachedId; fn deref(&self) -> &Self::Target { &self.write_end } } impl DerefMut for OwnedHandle { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.write_end } } openssh-sftp-client-0.15.0/src/lib.rs000064400000000000000000000055241046102023000155250ustar 00000000000000//! openssh sftp client, implements [sftp v3] according to //! [`openssh-portable/sftp-client.c`] and provides //! an easy-to-use high-level API. //! //! All `async` functions in this module are cancel safe. //! //! Internally, this is achieved by first writing requests into a write buffer //! containing [`bytes::Bytes`] and then flushing all buffers at once //! periodically, to achieve cancel safety and improve efficiency. //! //! However, cancelling the future does not actually have any effect, //! since the requests are sent regardless of the cancellation. //! //! Thus, if you cancel a future that changes the remote filesystem in any way, //! then the change would still happen regardless. //! //! ## Usage //! //! It is recommended that you use this crate with [openssh]. //! //! You can also use this crate directly by using whatever ssh library //! to launch the `sftp` subsystem, then pass the stdin/stdout to //! [`Sftp::new`]. //! //! ## Extensions //! //! This crate supports the following extensions: //! - limits //! - expand path //! - fsync //! - hardlink //! - posix rename //! - copy data //! //! [openssh]: https://crates.io/crates/openssh //! [sftp v3]: https://www.openssh.com/txt/draft-ietf-secsh-filexfer-02.txt //! [`openssh-portable/sftp-client.c`]: https://github.com/openssh/openssh-portable/blob/19b3d846f06697c85957ab79a63454f57f8e22d6/sftp-client.c #![warn( missing_docs, missing_debug_implementations, rustdoc::broken_intra_doc_links, rust_2018_idioms, unreachable_pub )] #![cfg_attr(docsrs, feature(doc_auto_cfg))] #[cfg(doc)] /// Changelog for this crate. pub mod changelog; mod utils; pub use error::{Error, UnixTimeStampError}; use openssh_sftp_client_lowlevel as lowlevel; pub use openssh_sftp_error as error; use bytes::BytesMut; mod unix_timestamp; pub use unix_timestamp::UnixTimeStamp; mod sftp; use sftp::SftpHandle; #[cfg(feature = "openssh")] pub use sftp::{CheckOpensshConnection, OpensshSession}; pub use sftp::{Sftp, SftpAuxiliaryData}; #[cfg(feature = "openssh")] pub use openssh; mod options; pub use options::SftpOptions; mod queue; use queue::MpscQueue; mod tasks; mod auxiliary; use auxiliary::Auxiliary; mod cache; use cache::WriteEndWithCachedId; mod handle; use handle::OwnedHandle; /// Module containing types for manipulating files. pub mod file; /// Module containing types for manipulating directories. pub mod fs; /// Module containing types for manipulating metadata of files or directories. pub mod metadata; type Buffer = BytesMut; type WriteEnd = lowlevel::WriteEnd; type ReadEnd = lowlevel::ReadEnd; type SharedData = lowlevel::SharedData; type Id = lowlevel::Id; type Data = lowlevel::Data; fn cancel_error() -> Error { Error::BackgroundTaskFailure(&"read/flush task failed") } openssh-sftp-client-0.15.0/src/metadata.rs000064400000000000000000000226441046102023000165410ustar 00000000000000use super::{ lowlevel::{FileAttrs, FileType as SftpFileType, Permissions as SftpPermissions}, UnixTimeStamp, }; /// Builder of [`MetaData`]. #[derive(Debug, Default, Copy, Clone)] pub struct MetaDataBuilder(FileAttrs); impl MetaDataBuilder { /// Create a builder.
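/// /// A small illustrative sketch (the length value is arbitrary): /// /// ```rust,ignore /// use openssh_sftp_client::metadata::MetaDataBuilder; /// /// let metadata = MetaDataBuilder::new().len(4096).create(); /// ```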
pub const fn new() -> Self { Self(FileAttrs::new()) } /// Reset builder back to default. pub fn reset(&mut self) -> &mut Self { self.0 = FileAttrs::new(); self } /// Set id of the metadata to be built. pub fn id(&mut self, (uid, gid): (u32, u32)) -> &mut Self { self.0.set_id(uid, gid); self } /// Set permissions of the metadata to be built. pub fn permissions(&mut self, perm: Permissions) -> &mut Self { self.0.set_permissions(perm.0); self } /// Set size of the metadata to be built. pub fn len(&mut self, len: u64) -> &mut Self { self.0.set_size(len); self } /// Set accessed and modified time of the metadata to be built. pub fn time(&mut self, accessed: UnixTimeStamp, modified: UnixTimeStamp) -> &mut Self { self.0.set_time(accessed.0, modified.0); self } /// Create a [`MetaData`]. pub fn create(&self) -> MetaData { MetaData::new(self.0) } } /// Metadata information about a file. #[repr(transparent)] #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct MetaData(FileAttrs); #[allow(clippy::len_without_is_empty)] impl MetaData { pub(super) fn new(attrs: FileAttrs) -> Self { Self(attrs) } pub(super) fn into_inner(self) -> FileAttrs { self.0 } /// Returns the size of the file in bytes. /// /// Returns `None` if the server did not return /// the size. pub fn len(&self) -> Option<u64> { self.0.get_size() } /// Returns the user ID of the owner. /// /// Returns `None` if the server did not return /// the uid. pub fn uid(&self) -> Option<u32> { self.0.get_id().map(|(uid, _gid)| uid) } /// Returns the group ID of the owner. /// /// Returns `None` if the server did not return /// the gid. pub fn gid(&self) -> Option<u32> { self.0.get_id().map(|(_uid, gid)| gid) } /// Returns the permissions. /// /// Returns `None` if the server did not return /// the permissions. pub fn permissions(&self) -> Option<Permissions> { self.0.get_permissions().map(Permissions) } /// Returns the file type. /// /// Returns `None` if the server did not return /// the file type. pub fn file_type(&self) -> Option<FileType> { self.0.get_filetype().map(FileType) } /// Returns the last access time. /// /// Returns `None` if the server did not return /// the last access time. pub fn accessed(&self) -> Option<UnixTimeStamp> { self.0 .get_time() .map(|(atime, _mtime)| atime) .map(UnixTimeStamp) } /// Returns the last modification time. /// /// Returns `None` if the server did not return /// the last modification time. pub fn modified(&self) -> Option<UnixTimeStamp> { self.0 .get_time() .map(|(_atime, mtime)| mtime) .map(UnixTimeStamp) } } /// A structure representing a type of file with accessors for each file type. /// It is returned by the [`MetaData::file_type`] method. #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] pub struct FileType(SftpFileType); impl FileType { /// Tests whether this file type represents a directory. pub fn is_dir(&self) -> bool { self.0 == SftpFileType::Directory } /// Tests whether this file type represents a regular file. pub fn is_file(&self) -> bool { self.0 == SftpFileType::RegularFile } /// Tests whether this file type represents a symbolic link. pub fn is_symlink(&self) -> bool { self.0 == SftpFileType::Symlink } /// Tests whether this file type represents a fifo. pub fn is_fifo(&self) -> bool { self.0 == SftpFileType::FIFO } /// Tests whether this file type represents a socket. pub fn is_socket(&self) -> bool { self.0 == SftpFileType::Socket } /// Tests whether this file type represents a block device. pub fn is_block_device(&self) -> bool { self.0 == SftpFileType::BlockDevice } /// Tests whether this file type represents a character device.
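/// /// For instance, one might inspect a [`MetaData`] value returned by the server /// (hypothetical sketch; `metadata` is assumed to be a [`MetaData`]): /// /// ```rust,ignore /// if let Some(file_type) = metadata.file_type() { /// println!("char device? {}", file_type.is_char_device()); /// } /// ```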
pub fn is_char_device(&self) -> bool { self.0 == SftpFileType::CharacterDevice } } /// Representation of the various permissions on a file. #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] pub struct Permissions(SftpPermissions); macro_rules! impl_getter_setter { ($getter_name:ident, $setter_name:ident, $variant:ident, $variant_name:expr) => { #[doc = "Tests whether "] #[doc = $variant_name] #[doc = " bit is set."] pub fn $getter_name(&self) -> bool { self.0.intersects(SftpPermissions::$variant) } #[doc = "Modify the "] #[doc = $variant_name] #[doc = " bit."] pub fn $setter_name(&mut self, value: bool) -> &mut Self { self.0.set(SftpPermissions::$variant, value); self } }; } impl Permissions { /// Create a new permissions object with zero permissions /// set. pub const fn new() -> Self { Self(SftpPermissions::empty()) } impl_getter_setter!(suid, set_suid, SET_UID, "set-user-id"); impl_getter_setter!(sgid, set_sgid, SET_GID, "set-group-id"); impl_getter_setter!(svtx, set_vtx, SET_VTX, "set-sticky-bit"); impl_getter_setter!( read_by_owner, set_read_by_owner, READ_BY_OWNER, "read by owner" ); impl_getter_setter!( write_by_owner, set_write_by_owner, WRITE_BY_OWNER, "write by owner" ); impl_getter_setter!( execute_by_owner, set_execute_by_owner, EXECUTE_BY_OWNER, "execute by owner" ); impl_getter_setter!( read_by_group, set_read_by_group, READ_BY_GROUP, "read by group" ); impl_getter_setter!( write_by_group, set_write_by_group, WRITE_BY_GROUP, "write by group" ); impl_getter_setter!( execute_by_group, set_execute_by_group, EXECUTE_BY_GROUP, "execute by group" ); impl_getter_setter!( read_by_other, set_read_by_other, READ_BY_OTHER, "read by other" ); impl_getter_setter!( write_by_other, set_write_by_other, WRITE_BY_OTHER, "write by other" ); impl_getter_setter!( execute_by_other, set_execute_by_other, EXECUTE_BY_OTHER, "execute by other" ); /// Returns `true` if these permissions describe an unwritable file /// that no one can write to. pub fn readonly(&self) -> bool { !self.write_by_owner() && !self.write_by_group() && !self.write_by_other() } /// Modifies the readonly flag for this set of permissions. /// /// If the readonly argument is true, it will remove write permissions /// from all parties. /// /// Conversely, if it’s false, it will permit writing from all parties. /// /// This operation does not modify the filesystem. /// /// To modify the filesystem use the [`super::fs::Fs::set_permissions`] or /// the [`super::file::File::set_permissions`] function. pub fn set_readonly(&mut self, readonly: bool) { let writable = !readonly; self.set_write_by_owner(writable); self.set_write_by_group(writable); self.set_write_by_other(writable); } } impl From for Permissions { /// Converts numeric file mode bits permission into a [`Permissions`] object. 
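/// /// For example (an illustrative sketch; the mode `0o754` is arbitrary): /// /// ```rust,ignore /// use openssh_sftp_client::metadata::Permissions; /// /// let perm = Permissions::from(0o754u16); /// assert!(perm.read_by_owner() && perm.write_by_owner() && perm.execute_by_owner()); /// assert!(perm.read_by_group() && !perm.write_by_group() && perm.execute_by_group()); /// assert!(perm.read_by_other() && !perm.write_by_other() && !perm.execute_by_other()); /// ```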
/// /// The [numerical file mode bits](https://www.gnu.org/software/coreutils/manual/html_node/Numeric-Modes.html) are defined as follows: /// /// Special mode bits: /// 4000 Set user ID /// 2000 Set group ID /// 1000 Restricted deletion flag or sticky bit /// /// The file's owner: /// 400 Read /// 200 Write /// 100 Execute/search /// /// Other users in the file's group: /// 40 Read /// 20 Write /// 10 Execute/search /// /// Other users not in the file's group: /// 4 Read /// 2 Write /// 1 Execute/search /// fn from(octet: u16) -> Self { let mut result = Permissions::new(); // Lowest three bits, other result.set_execute_by_other(octet & 0o1 != 0); result.set_write_by_other(octet & 0o2 != 0); result.set_read_by_other(octet & 0o4 != 0); // Middle three bits, group result.set_execute_by_group(octet & 0o10 != 0); result.set_write_by_group(octet & 0o20 != 0); result.set_read_by_group(octet & 0o40 != 0); // Highest three bits, owner result.set_execute_by_owner(octet & 0o100 != 0); result.set_write_by_owner(octet & 0o200 != 0); result.set_read_by_owner(octet & 0o400 != 0); // Extra bits: sticky bit, setgid and setuid result.set_vtx(octet & 0o1000 != 0); result.set_sgid(octet & 0o2000 != 0); result.set_suid(octet & 0o4000 != 0); result } } impl Default for Permissions { fn default() -> Self { Self::new() } } openssh-sftp-client-0.15.0/src/options.rs000064400000000000000000000135501046102023000164500ustar 00000000000000use std::{ num::{NonZeroU16, NonZeroUsize}, time::Duration, }; #[cfg(feature = "__ci-tests")] use std::num::NonZeroU32; /// Options when creating [`super::Sftp`]. #[derive(Debug, Copy, Clone, Default)] pub struct SftpOptions { read_end_buffer_size: Option<NonZeroUsize>, write_end_buffer_size: Option<NonZeroUsize>, flush_interval: Option<Duration>, max_pending_requests: Option<NonZeroU16>, tokio_compat_file_write_limit: Option<NonZeroUsize>, #[cfg(feature = "__ci-tests")] max_read_len: Option<NonZeroU32>, #[cfg(feature = "__ci-tests")] max_write_len: Option<NonZeroU32>, } impl SftpOptions { /// Create a new [`SftpOptions`]. pub const fn new() -> Self { Self { read_end_buffer_size: None, write_end_buffer_size: None, flush_interval: None, max_pending_requests: None, tokio_compat_file_write_limit: None, #[cfg(feature = "__ci-tests")] max_read_len: None, #[cfg(feature = "__ci-tests")] max_write_len: None, } } /// Set `flush_interval`, default value is 0.5 ms. /// /// `flush_interval` decides the maximum time your requests would stay /// in the write buffer before they are actually sent to the remote. /// /// If another thread is already flushing, then the internal `flush_task` /// started by [`super::Sftp`] would wait for another `flush_interval`. /// /// Setting it to be larger might improve overall performance by grouping /// writes and reducing the per-packet overhead on the network, but it /// might also increase latency, so be careful when setting the /// `flush_interval`. /// /// If `flush_interval` is set to 0, then every packet /// is flushed immediately. /// /// NOTE that it is perfectly OK to set `flush_interval` to 0 and /// it would not slow down the program, as flushing is only performed /// by the background flush task. #[must_use] pub const fn flush_interval(mut self, flush_interval: Duration) -> Self { self.flush_interval = Some(flush_interval); self } pub(super) fn get_flush_interval(&self) -> Duration { self.flush_interval .unwrap_or_else(|| Duration::from_micros(500)) } /// Set `max_pending_requests`. /// /// If the number of pending requests exceeds `max_pending_requests`, then the /// flush task will flush the write buffer without waiting for `flush_interval`.
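/// /// A hypothetical configuration sketch (the values are illustrative only): /// /// ```rust,ignore /// use openssh_sftp_client::SftpOptions; /// /// let options = SftpOptions::new() /// .flush_interval(std::time::Duration::from_millis(1)) /// .max_pending_requests(std::num::NonZeroU16::new(64).unwrap()); /// ```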
/// /// It is set to 100 by default. #[must_use] pub const fn max_pending_requests(mut self, max_pending_requests: NonZeroU16) -> Self { self.max_pending_requests = Some(max_pending_requests); self } pub(super) fn get_max_pending_requests(&self) -> u16 { self.max_pending_requests .map(NonZeroU16::get) .unwrap_or(100) } /// Set the init buffer size for requests. /// It is used to store [`bytes::Bytes`] and it will be resized /// to fit the pending requests. /// /// NOTE that sftp uses double buffer for efficient flushing /// without blocking the writers. /// /// It is set to 100 by default. #[must_use] pub const fn requests_buffer_size(mut self, buffer_size: NonZeroUsize) -> Self { self.write_end_buffer_size = Some(buffer_size); self } pub(super) fn get_write_end_buffer_size(&self) -> NonZeroUsize { self.write_end_buffer_size .unwrap_or_else(|| NonZeroUsize::new(100).unwrap()) } /// Set the init buffer size for responses. /// If the header of the response is larger than the buffer, then the buffer /// will be resized to fit the size of the header. /// /// It is set to 1024 by default. #[must_use] pub const fn responses_buffer_size(mut self, buffer_size: NonZeroUsize) -> Self { self.read_end_buffer_size = Some(buffer_size); self } pub(super) fn get_read_end_buffer_size(&self) -> NonZeroUsize { self.read_end_buffer_size .unwrap_or_else(|| NonZeroUsize::new(1024).unwrap()) } /// Set the write buffer limit for tokio compat file. /// If [`crate::file::TokioCompatFile`] has hit the write buffer limit /// set here, then it will flush one write buffer and continue /// sending (part of) the buffer to the server, which could be buffered. /// /// It is set to 640KB (640 * 1024 bytes) by default. #[must_use] pub const fn tokio_compat_file_write_limit(mut self, limit: NonZeroUsize) -> Self { self.tokio_compat_file_write_limit = Some(limit); self } pub(super) fn get_tokio_compat_file_write_limit(&self) -> usize { self.tokio_compat_file_write_limit .map(NonZeroUsize::get) .unwrap_or(640 * 1024) } } #[cfg(feature = "__ci-tests")] impl SftpOptions { /// Set `max_read_len`. /// /// It can be used to reduce `max_read_len`, but cannot be used /// to increase `max_read_len`. #[must_use] pub const fn max_read_len(mut self, max_read_len: NonZeroU32) -> Self { self.max_read_len = Some(max_read_len); self } pub(super) fn get_max_read_len(&self) -> Option { self.max_read_len.map(NonZeroU32::get) } /// Set `max_write_len`. /// /// It can be used to reduce `max_write_len`, but cannot be used /// to increase `max_write_len`. 
#[must_use] pub const fn max_write_len(mut self, max_write_len: NonZeroU32) -> Self { self.max_write_len = Some(max_write_len); self } pub(super) fn get_max_write_len(&self) -> Option { self.max_write_len.map(NonZeroU32::get) } } #[cfg(not(feature = "__ci-tests"))] impl SftpOptions { pub(super) const fn get_max_read_len(&self) -> Option { None } pub(super) const fn get_max_write_len(&self) -> Option { None } } openssh-sftp-client-0.15.0/src/queue.rs000064400000000000000000000015441046102023000161010ustar 00000000000000use std::{mem, sync::Mutex}; use bytes::Bytes; use openssh_sftp_client_lowlevel::Queue; #[derive(Debug)] pub(super) struct MpscQueue(Mutex>); impl MpscQueue { pub(crate) fn with_capacity(cap: usize) -> Self { Self(Mutex::new(Vec::with_capacity(cap))) } pub(crate) fn swap(&self, backup_vec: &mut Vec) { mem::swap(&mut *self.0.lock().unwrap(), backup_vec) } } impl Queue for MpscQueue { fn push(&self, bytes: Bytes) { if !bytes.is_empty() { self.0.lock().unwrap().push(bytes); } } fn extend(&self, header: Bytes, body: &[&[Bytes]]) { let mut v = self.0.lock().unwrap(); if !header.is_empty() { v.push(header); } for data in body { v.extend(data.iter().filter(|bytes| !bytes.is_empty()).cloned()); } } } openssh-sftp-client-0.15.0/src/sftp/openssh_session.rs000064400000000000000000000254021046102023000211520ustar 00000000000000use std::{fmt::Debug, future::Future, ops::Deref, pin::Pin, sync::Arc}; use openssh::{ChildStdin, ChildStdout, Error as OpensshError, Session, Stdio}; use tokio::{sync::oneshot, task::JoinHandle}; use crate::{utils::ErrorExt, Error, Sftp, SftpAuxiliaryData, SftpOptions}; /// The openssh session #[derive(Debug)] pub struct OpensshSession(JoinHandle>); /// Check for openssh connection to be alive pub trait CheckOpensshConnection { /// This function should only return on `Err()`. /// Once the sftp session is closed, the future will be cancelled (dropped). 
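/// /// See the doc example on [`Sftp::from_session_with_check_connection`] for one way /// to implement this trait using a plain `fn`.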
fn check_connection<'session>( self: Box, session: &'session Session, ) -> Pin> + Send + Sync + 'session>>; } impl CheckOpensshConnection for F where F: for<'session> FnOnce( &'session Session, ) -> Pin< Box> + Send + Sync + 'session>, >, { fn check_connection<'session>( self: Box, session: &'session Session, ) -> Pin> + Send + Sync + 'session>> { (self)(session) } } impl Drop for OpensshSession { fn drop(&mut self) { self.0.abort(); } } #[cfg_attr( feature = "tracing", tracing::instrument(name = "session_task", skip(tx, check_openssh_connection)) )] async fn create_session_task( session: impl Deref + Clone + Debug + Send + Sync, tx: oneshot::Sender>, check_openssh_connection: Option>, ) -> Option { #[cfg(feature = "tracing")] tracing::info!("Connecting to sftp subsystem, session = {session:?}"); let res = Session::to_subsystem(session.clone(), "sftp") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() .await; let mut child = match res { Ok(child) => child, Err(err) => { #[cfg(feature = "tracing")] tracing::error!( "Failed to connect to remote sftp subsystem: {err}, session = {session:?}" ); tx.send(Err(err)).unwrap(); // Err return None; } }; #[cfg(feature = "tracing")] tracing::info!("Connection to sftp subsystem established, session = {session:?}"); let stdin = child.stdin().take().unwrap(); let stdout = child.stdout().take().unwrap(); tx.send(Ok((stdin, stdout))).unwrap(); // Ok let original_error = { let check_conn_future = async { if let Some(checker) = check_openssh_connection { checker .check_connection(&session) .await .err() .map(Error::from) } else { None } }; let wait_on_child_future = async { match child.wait().await { Ok(exit_status) => { if !exit_status.success() { Some(Error::SftpServerFailure(exit_status)) } else { None } } Err(err) => Some(err.into()), } }; tokio::pin!(wait_on_child_future); tokio::select! { biased; original_error = check_conn_future => { let occuring_error = wait_on_child_future.await; match (original_error, occuring_error) { (Some(original_error), Some(occuring_error)) => { Some(original_error.error_on_cleanup(occuring_error)) } (Some(err), None) | (None, Some(err)) => Some(err), (None, None) => None, } } original_error = &mut wait_on_child_future => original_error, } }; #[cfg(feature = "tracing")] if let Some(err) = &original_error { tracing::error!( "Waiting on remote sftp subsystem to exit failed: {err}, session = {session:?}" ); } original_error } impl Sftp { /// Create [`Sftp`] from [`openssh::Session`]. /// /// Calling [`Sftp::close`] on sftp instances created using this function /// would also await on [`openssh::RemoteChild::wait`] and /// [`openssh::Session::close`] and propagate their error in /// [`Sftp::close`]. pub async fn from_session(session: Session, options: SftpOptions) -> Result { Self::from_session_with_check_connection_inner(session, options, None).await } /// Similar to [`Sftp::from_session`], but takes an additional parameter /// for checking if the connection is still alive. 
/// /// # Example /// /// ```rust,no_run /// fn check_connection<'session>( /// session: &'session openssh::Session, /// ) -> std::pin::Pin> + Send + Sync + 'session>> { /// Box::pin(async move { /// loop { /// tokio::time::sleep(std::time::Duration::from_secs(10)).await; /// session.check().await?; /// } /// Ok(()) /// }) /// } /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> Result<(), openssh_sftp_client::Error> { /// openssh_sftp_client::Sftp::from_session_with_check_connection( /// openssh::Session::connect_mux("me@ssh.example.com", openssh::KnownHosts::Strict).await?, /// openssh_sftp_client::SftpOptions::default(), /// check_connection, /// ).await?; /// # Ok(()) /// # } /// ``` pub async fn from_session_with_check_connection( session: Session, options: SftpOptions, check_openssh_connection: impl CheckOpensshConnection + Send + Sync + 'static, ) -> Result { Self::from_session_with_check_connection_inner( session, options, Some(Box::new(check_openssh_connection)), ) .await } async fn from_session_with_check_connection_inner( session: Session, options: SftpOptions, check_openssh_connection: Option>, ) -> Result { let (tx, rx) = oneshot::channel(); Self::from_session_task( options, rx, tokio::spawn(async move { let original_error = create_session_task(&session, tx, check_openssh_connection).await; let _session_str = format!("{session:?}"); let occuring_error = session.close().await.err().map(Error::from); #[cfg(feature = "tracing")] if let Some(err) = &occuring_error { tracing::error!("Closing session failed: {err}, session = {_session_str}"); } match (original_error, occuring_error) { (Some(original_error), Some(occuring_error)) => { Some(original_error.error_on_cleanup(occuring_error)) } (Some(err), None) | (None, Some(err)) => Some(err), (None, None) => None, } }), ) .await } /// Create [`Sftp`] from any type that can be dereferenced to [`openssh::Session`] /// and is clonable. pub async fn from_clonable_session( session: impl Deref + Clone + Debug + Send + Sync + 'static, options: SftpOptions, ) -> Result { Self::from_clonable_session_with_check_connection_inner(session, options, None).await } /// Similar to [`Sftp::from_session_with_check_connection`], but takes an additional parameter /// for checking if the connection is still alive. 
/// /// # Example /// /// ```rust,no_run /// fn check_connection<'session>( /// session: &'session openssh::Session, /// ) -> std::pin::Pin> + Send + Sync + 'session>> { /// Box::pin(async move { /// loop { /// tokio::time::sleep(std::time::Duration::from_secs(10)).await; /// session.check().await?; /// } /// Ok(()) /// }) /// } /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> Result<(), openssh_sftp_client::Error> { /// openssh_sftp_client::Sftp::from_clonable_session_with_check_connection( /// std::sync::Arc::new(openssh::Session::connect_mux("me@ssh.example.com", openssh::KnownHosts::Strict).await?), /// openssh_sftp_client::SftpOptions::default(), /// check_connection, /// ).await?; /// # Ok(()) /// # } /// ``` pub async fn from_clonable_session_with_check_connection( session: impl Deref + Clone + Debug + Send + Sync + 'static, options: SftpOptions, check_openssh_connection: impl CheckOpensshConnection + Send + Sync + 'static, ) -> Result { Self::from_clonable_session_with_check_connection_inner( session, options, Some(Box::new(check_openssh_connection)), ) .await } async fn from_clonable_session_with_check_connection_inner( session: impl Deref + Clone + Debug + Send + Sync + 'static, options: SftpOptions, check_openssh_connection: Option>, ) -> Result { let (tx, rx) = oneshot::channel(); Self::from_session_task( options, rx, tokio::spawn(create_session_task(session, tx, check_openssh_connection)), ) .await } async fn from_session_task( options: SftpOptions, rx: oneshot::Receiver>, handle: JoinHandle>, ) -> Result { let msg = "Task failed without sending anything, so it must have panicked"; let (stdin, stdout) = match rx.await { Ok(res) => res?, Err(_) => return Err(handle.await.expect_err(msg).into()), }; Self::new_with_auxiliary( stdin, stdout, options, SftpAuxiliaryData::ArcedOpensshSession(Arc::new(OpensshSession(handle))), ) .await } } impl OpensshSession { pub(super) async fn recover_session_err(mut self) -> Result<(), Error> { if let Some(err) = (&mut self.0).await? { Err(err) } else { Ok(()) } } } openssh-sftp-client-0.15.0/src/sftp.rs000064400000000000000000000353421046102023000157340ustar 00000000000000use crate::{ auxiliary, file::{File, OpenOptions}, fs::Fs, lowlevel, tasks, utils::{ErrorExt, ResultExt}, Error, MpscQueue, SftpOptions, SharedData, WriteEnd, WriteEndWithCachedId, }; use auxiliary::Auxiliary; use lowlevel::{connect, Extensions}; use tasks::{create_flush_task, create_read_task}; use std::{ any::Any, convert::TryInto, fmt, future::Future, ops::Deref, path::Path, pin::Pin, sync::Arc, }; use derive_destructure2::destructure; use tokio::{ io::{AsyncRead, AsyncWrite}, runtime::Handle, sync::oneshot::Receiver, task::JoinHandle, }; use tokio_io_utility::assert_send; #[cfg(feature = "openssh")] mod openssh_session; #[cfg(feature = "openssh")] pub use openssh_session::{CheckOpensshConnection, OpensshSession}; #[derive(Debug, destructure)] pub(super) struct SftpHandle(SharedData); impl Deref for SftpHandle { type Target = SharedData; fn deref(&self) -> &Self::Target { &self.0 } } impl SftpHandle { fn new(shared_data: &SharedData) -> Self { // Inc active_user_count for the same reason as Self::clone shared_data.get_auxiliary().inc_active_user_count(); Self(shared_data.clone()) } /// Takes `self` by value to ensure active_user_count get inc/dec properly. pub(super) fn write_end(self) -> WriteEndWithCachedId { // WriteEndWithCachedId also inc/dec active_user_count, so it's ok // to destructure self here. 
WriteEndWithCachedId::new(WriteEnd::new(self.destructure().0)) } } impl Clone for SftpHandle { fn clone(&self) -> Self { self.0.get_auxiliary().inc_active_user_count(); Self(self.0.clone()) } } impl Drop for SftpHandle { fn drop(&mut self) { self.0.get_auxiliary().dec_active_user_count(); } } /// A file-oriented channel to a remote host. #[derive(Debug)] pub struct Sftp { handle: SftpHandle, flush_task: JoinHandle<Result<(), Error>>, read_task: JoinHandle<Result<(), Error>>, } /// Auxiliary data for [`Sftp`]. #[non_exhaustive] pub enum SftpAuxiliaryData { /// No auxiliary data. None, /// Store any `Box`ed value. Boxed(Box<dyn Any + Send + Sync + 'static>), /// Store any `Pin`ed `Future`. PinnedFuture(Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>), /// Store any `Arc`ed value. Arced(Arc<dyn Any + Send + Sync + 'static>), /// Store [`OpensshSession`] within an `Arc`. #[cfg(feature = "openssh")] ArcedOpensshSession(Arc<OpensshSession>), } impl fmt::Debug for SftpAuxiliaryData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use SftpAuxiliaryData::*; match self { None => f.write_str("None"), Boxed(_) => f.write_str("Boxed(boxed_any)"), PinnedFuture(_) => f.write_str("PinnedFuture"), Arced(_) => f.write_str("Arced(arced_any)"), #[cfg(feature = "openssh")] ArcedOpensshSession(session) => write!(f, "ArcedOpensshSession({session:?})"), } } } impl Sftp { /// Create [`Sftp`]. pub async fn new<W: AsyncWrite + Send + 'static, R: AsyncRead + Send + 'static>( stdin: W, stdout: R, options: SftpOptions, ) -> Result<Self, Error> { Self::new_with_auxiliary(stdin, stdout, options, SftpAuxiliaryData::None).await } /// Create [`Sftp`] with some auxiliary data. /// /// The auxiliary data will be dropped after all sftp requests have been /// sent (flush_task), all responses processed (read_task) and [`Sftp`] has /// been dropped. /// /// If you want to get back the data, you can simply use /// [`SftpAuxiliaryData::Arced`] and then store an [`Arc`] elsewhere. /// /// Once the sftp tasks are completed and [`Sftp`] is dropped, you can call /// [`Arc::try_unwrap`] to get back the exclusive ownership of it. pub async fn new_with_auxiliary< W: AsyncWrite + Send + 'static, R: AsyncRead + Send + 'static, >( stdin: W, stdout: R, options: SftpOptions, auxiliary: SftpAuxiliaryData, ) -> Result<Self, Error> { assert_send(async move { let write_end_buffer_size = options.get_write_end_buffer_size(); let write_end = assert_send(Self::connect( write_end_buffer_size.get(), options.get_max_pending_requests(), auxiliary, options.get_tokio_compat_file_write_limit(), ))?; let flush_task = create_flush_task( stdin, SharedData::clone(&write_end), write_end_buffer_size, options.get_flush_interval(), ); let (rx, read_task) = create_read_task( stdout, options.get_read_end_buffer_size(), SharedData::clone(&write_end), ); Self::init(flush_task, read_task, write_end, rx, &options).await }) .await } fn connect( write_end_buffer_size: usize, max_pending_requests: u16, auxiliary: SftpAuxiliaryData, tokio_compat_file_write_limit: usize, ) -> Result<WriteEnd, Error> { connect( MpscQueue::with_capacity(write_end_buffer_size), Auxiliary::new( max_pending_requests, auxiliary, tokio_compat_file_write_limit, Handle::current(), ), ) } async fn init( flush_task: JoinHandle<Result<(), Error>>, read_task: JoinHandle<Result<(), Error>>, write_end: WriteEnd, rx: Receiver<Extensions>, options: &SftpOptions, ) -> Result<Self, Error> { // Create sftp here. // // It would also gracefully shut down `flush_task` and `read_task` if // the future is cancelled or an error is encountered.
let sftp = Self { handle: SftpHandle::new(&write_end), flush_task, read_task, }; let write_end = WriteEndWithCachedId::new(write_end); let extensions = if let Ok(extensions) = rx.await { extensions } else { drop(write_end); // Wait on flush_task and read_task to get a more detailed error message. sftp.close().await?; std::unreachable!("Error must have occurred in either read_task or flush_task") }; match Self::set_limits(write_end, options, extensions).await { Err(Error::BackgroundTaskFailure(_)) => { // Wait on flush_task and read_task to get a more detailed error message. sftp.close().await?; std::unreachable!("Error must have occurred in either read_task or flush_task") } res => res?, } Ok(sftp) } async fn set_limits( mut write_end: WriteEndWithCachedId, options: &SftpOptions, extensions: Extensions, ) -> Result<(), Error> { let default_download_buflen = lowlevel::OPENSSH_PORTABLE_DEFAULT_DOWNLOAD_BUFLEN as u64; let default_upload_buflen = lowlevel::OPENSSH_PORTABLE_DEFAULT_UPLOAD_BUFLEN as u64; // sftp can accept a packet as large as u32::MAX; the header itself // is at least 9 bytes long. let default_max_packet_len = u32::MAX - 9; let (read_len, write_len, packet_len) = if extensions.contains(Extensions::LIMITS) { let mut limits = write_end .send_request(|write_end, id| Ok(write_end.send_limits_request(id)?.wait())) .await?; if limits.read_len == 0 { limits.read_len = default_download_buflen; } if limits.write_len == 0 { limits.write_len = default_upload_buflen; } ( limits.read_len, limits.write_len, limits .packet_len .try_into() .unwrap_or(default_max_packet_len), ) } else { ( default_download_buflen, default_upload_buflen, default_max_packet_len, ) }; // Each read/write request also has a header and contains a handle, // which is 4 bytes long for openssh but can be at most 256 bytes long // for other implementations. let read_len = read_len.try_into().unwrap_or(packet_len - 300); let read_len = options .get_max_read_len() .map(|v| v.min(read_len)) .unwrap_or(read_len); let write_len = write_len.try_into().unwrap_or(packet_len - 300); let write_len = options .get_max_write_len() .map(|v| v.min(write_len)) .unwrap_or(write_len); let limits = auxiliary::Limits { read_len, write_len, }; write_end .get_auxiliary() .conn_info .set(auxiliary::ConnInfo { limits, extensions }) .expect("auxiliary.conn_info shall be uninitialized"); Ok(()) } /// Close sftp connection. /// /// If sftp is created using `Sftp::from_session`, then calling this /// function would also await on `openssh::RemoteChild::wait` and /// `openssh::Session::close` and propagate their error in /// [`Sftp::close`]. pub async fn close(self) -> Result<(), Error> { let Self { handle, flush_task, read_task, } = self; let session = match &handle.get_auxiliary().auxiliary_data { #[cfg(feature = "openssh")] SftpAuxiliaryData::ArcedOpensshSession(session) => Some(Arc::clone(session)), _ => None, }; #[cfg(not(feature = "openssh"))] { // Help type inference determine the generic T in `Option<T>` for `session` let _: Option<()> = session; } // Drop handle. drop(handle); // Wait for responses for all requests buffered and sent. let read_task_error = read_task.await.flatten().err(); // read_task would order the shutdown of flush_task, // so we just need to wait for it here.
let flush_task_error = flush_task.await.flatten().err(); let session_error: Option = match session { #[cfg(feature = "openssh")] Some(session) => Arc::try_unwrap(session) .unwrap() .recover_session_err() .await .err(), #[cfg(not(feature = "openssh"))] Some(_) => unreachable!(), None => None, }; match (read_task_error, flush_task_error, session_error) { (Some(err1), Some(err2), Some(err3)) => Err(err1.error_on_cleanup3(err2, err3)), (Some(err1), Some(err2), None) | (Some(err1), None, Some(err2)) | (None, Some(err1), Some(err2)) => Err(err1.error_on_cleanup(err2)), (Some(err), None, None) | (None, Some(err), None) | (None, None, Some(err)) => Err(err), (None, None, None) => Ok(()), } } /// Return a new [`OpenOptions`] object. pub fn options(&self) -> OpenOptions { OpenOptions::new(self.handle.clone()) } /// Opens a file in write-only mode. /// /// This function will create a file if it does not exist, and will truncate /// it if it does. pub async fn create(&self, path: impl AsRef) -> Result { async fn inner(this: &Sftp, path: &Path) -> Result { this.options() .write(true) .create(true) .truncate(true) .open(path) .await } inner(self, path.as_ref()).await } /// Attempts to open a file in read-only mode. pub async fn open(&self, path: impl AsRef) -> Result { async fn inner(this: &Sftp, path: &Path) -> Result { this.options().read(true).open(path).await } inner(self, path.as_ref()).await } /// [`Fs`] defaults to the current working dir set by remote `sftp-server`, /// which usually is the home directory. pub fn fs(&self) -> Fs { Fs::new(self.handle.clone().write_end(), "".into()) } /// Check if the remote server supports the expand path extension. /// /// If it returns true, then [`Fs::canonicalize`] with expand path is supported. pub fn support_expand_path(&self) -> bool { self.handle .get_auxiliary() .extensions() .contains(Extensions::EXPAND_PATH) } /// Check if the remote server supports the fsync extension. /// /// If it returns true, then [`File::sync_all`] is supported. pub fn support_fsync(&self) -> bool { self.handle .get_auxiliary() .extensions() .contains(Extensions::FSYNC) } /// Check if the remote server supports the hardlink extension. /// /// If it returns true, then [`Fs::hard_link`] is supported. pub fn support_hardlink(&self) -> bool { self.handle .get_auxiliary() .extensions() .contains(Extensions::HARDLINK) } /// Check if the remote server supports the posix rename extension. /// /// If it returns true, then [`Fs::rename`] will use posix rename. pub fn support_posix_rename(&self) -> bool { self.handle .get_auxiliary() .extensions() .contains(Extensions::POSIX_RENAME) } /// Check if the remote server supports the copy data extension. /// /// If it returns true, then [`File::copy_to`] and [`File::copy_all_to`] are supported. pub fn support_copy(&self) -> bool { self.handle .get_auxiliary() .extensions() .contains(Extensions::COPY_DATA) } } #[cfg(feature = "__ci-tests")] impl Sftp { /// The maximum amount of bytes that can be written in one request. /// Writing more than that, then your write will be split into multiple requests /// /// If [`Sftp::max_buffered_write`] is less than [`max_atomic_write_len`], /// then the direct write is enabled and [`Sftp::max_write_len`] must be /// less than [`max_atomic_write_len`]. pub fn max_write_len(&self) -> u32 { self.handle.get_auxiliary().limits().write_len } /// The maximum amount of bytes that can be read in one request. 
/// Reading more than that means your read will be split into multiple requests. pub fn max_read_len(&self) -> u32 { self.handle.get_auxiliary().limits().read_len } /// Trigger flush task manually. pub fn manual_flush(&self) { self.handle.get_auxiliary().trigger_flushing() } } openssh-sftp-client-0.15.0/src/tasks.rs000064400000000000000000000177621046102023000161070ustar 00000000000000use super::{lowlevel::Extensions, Error, ReadEnd, SharedData}; use std::{ num::NonZeroUsize, pin::Pin, sync::atomic::{AtomicUsize, Ordering}, time::Duration, }; use bytes::Bytes; use scopeguard::defer; use tokio::{ io::{AsyncRead, AsyncWrite}, pin, sync::oneshot, task::{spawn, JoinHandle}, time, }; use tokio_io_utility::{write_all_bytes, ReusableIoSlices}; async fn flush( shared_data: &SharedData, writer: Pin<&mut (dyn AsyncWrite + Send)>, buffer: &mut Vec<Bytes>, reusable_io_slices: &mut ReusableIoSlices, ) -> Result<(), Error> { shared_data.queue().swap(buffer); #[cfg(feature = "tracing")] tracing::debug!( "Flushing out {} bytes, shared_data = {shared_data:p}", buffer.len() ); // The `Queue` implementation for `MpscQueue` already removes // all empty `Bytes`s so that the precondition of write_all_bytes // is satisfied. write_all_bytes(writer, buffer, reusable_io_slices).await?; Ok(()) } /// Return the size after subtraction. /// /// # Panic /// /// If it underflows, then it will panic. fn atomic_sub_assign(atomic: &AtomicUsize, val: usize) -> usize { atomic.fetch_sub(val, Ordering::Relaxed) - val } pub(super) fn create_flush_task<W: AsyncWrite + Send + 'static>( writer: W, shared_data: SharedData, write_end_buffer_size: NonZeroUsize, flush_interval: Duration, ) -> JoinHandle<Result<(), Error>> { #[cfg_attr( feature = "tracing", tracing::instrument(name = "flush_task", skip(writer, shared_data), err) )] async fn inner( mut writer: Pin<&mut (dyn AsyncWrite + Send)>, shared_data: SharedData, write_end_buffer_size: NonZeroUsize, flush_interval: Duration, ) -> Result<(), Error> { let mut interval = if !flush_interval.is_zero() { let mut interval = time::interval(flush_interval); interval.set_missed_tick_behavior(time::MissedTickBehavior::Delay); Some(interval) } else { None }; let auxiliary = shared_data.get_auxiliary(); let flush_end_notify = &auxiliary.flush_end_notify; let read_end_notify = &auxiliary.read_end_notify; let pending_requests = &auxiliary.pending_requests; let shutdown_stage = &auxiliary.shutdown_stage; let max_pending_requests = auxiliary.max_pending_requests(); let cancel_guard = auxiliary.cancel_token.clone().drop_guard(); let mut backup_queue_buffer = Vec::with_capacity(write_end_buffer_size.get()); let mut reusable_io_slices = ReusableIoSlices::new(write_end_buffer_size); loop { #[cfg(feature = "tracing")] tracing::debug!( "Flushing out the initial hello msg from shared_data = {shared_data:p}" ); // Flush the initial hello msg ASAP. let mut cnt = pending_requests.load(Ordering::Relaxed); loop { #[cfg(feature = "tracing")] tracing::debug!("Trigger read_task from shared_data = {shared_data:p}"); read_end_notify.notify_one(); // Wait until another thread is done or cancelled flushing // and try to flush it again just in case the flushing is cancelled flush( &shared_data, writer.as_mut(), &mut backup_queue_buffer, &mut reusable_io_slices, ) .await?; cnt = atomic_sub_assign(pending_requests, cnt); if cnt < max_pending_requests { break; } } if shutdown_stage.load(Ordering::Relaxed) == 2 { #[cfg(feature = "tracing")] tracing::info!("flush_task graceful shutdown, shared_data = {shared_data:p}"); // The read task has read in all responses, thus // the write task can exit now.
// // Since the sftp-server implementation from openssh-portable // will quit once it cannot read anything, we have to keep // the write task alive until all requests have been processed // by sftp-server and each response is handled by the read task. debug_assert_eq!(cnt, 0); cancel_guard.disarm(); break Ok(()); } flush_end_notify.notified().await; if let Some(interval) = interval.as_mut() { tokio::select! { biased; _ = interval.tick() => (), // tokio::sync::Notify is cancel safe, however // cancelling it would lose the place in the queue. // // However, since flush_task is the only one who // calls `flush_immediately.notified()`, it // is totally fine to cancel here. _ = auxiliary.flush_immediately.notified() => (), }; } } } spawn(async move { pin!(writer); inner(writer, shared_data, write_end_buffer_size, flush_interval).await }) } pub(super) fn create_read_task<R: AsyncRead + Send + 'static>( stdout: R, read_end_buffer_size: NonZeroUsize, shared_data: SharedData, ) -> (oneshot::Receiver<Extensions>, JoinHandle<Result<(), Error>>) { #[cfg_attr( feature = "tracing", tracing::instrument(name = "read_task", skip(stdout, tx, shared_data), err) )] async fn inner( stdout: Pin<&mut (dyn AsyncRead + Send)>, read_end_buffer_size: NonZeroUsize, shared_data: SharedData, tx: oneshot::Sender<Extensions>, ) -> Result<(), Error> { let read_end = ReadEnd::new(stdout, read_end_buffer_size, shared_data.clone()); let auxiliary = shared_data.get_auxiliary(); let read_end_notify = &auxiliary.read_end_notify; let requests_to_read = &auxiliary.requests_to_read; let shutdown_stage = &auxiliary.shutdown_stage; let cancel_guard = auxiliary.cancel_token.clone().drop_guard(); pin!(read_end); defer! { #[cfg(feature = "tracing")] tracing::info!( "Requesting graceful shutdown of flush_task from read_task, shared_data = {shared_data:p}" ); // Order the shutdown of flush_task. auxiliary.shutdown_stage.store(2, Ordering::Relaxed); auxiliary.flush_immediately.notify_one(); auxiliary.flush_end_notify.notify_one(); } #[cfg(feature = "tracing")] tracing::debug!("Receiving version and extensions, shared_data = {shared_data:p}"); // Receive version and extensions let extensions = read_end.as_mut().receive_server_hello_pinned().await?; tx.send(extensions).unwrap(); loop { read_end_notify.notified().await; let mut cnt = requests_to_read.load(Ordering::Relaxed); #[cfg(feature = "tracing")] tracing::debug!( "Attempting to read {cnt} responses in read_task, shared_data = {shared_data:p}" ); while cnt != 0 { // If we attempt to read in more than `new_requests_submit`, then // `read_in_one_packet` might block forever. for _ in 0..cnt { read_end.as_mut().read_in_one_packet_pinned().await?; } cnt = atomic_sub_assign(requests_to_read, cnt); } if shutdown_stage.load(Ordering::Relaxed) == 1 { // All responses are read in and there is no // write_end/shared_data left. cancel_guard.disarm(); break Ok(()); } } } let (tx, rx) = oneshot::channel(); let handle = spawn(async move { pin!(stdout); inner(stdout, read_end_buffer_size, shared_data, tx).await }); (rx, handle) } openssh-sftp-client-0.15.0/src/unix_timestamp.rs000064400000000000000000000025161046102023000200230ustar 00000000000000use super::{lowlevel, UnixTimeStampError}; use std::time::{Duration, SystemTime}; /// Default value is 1970-01-01 00:00:00 UTC. /// /// UnixTimeStamp stores the number of seconds elapsed since 1970-01-01 00:00:00 UTC /// as `u32`. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Hash)] #[repr(transparent)] pub struct UnixTimeStamp(pub(crate) lowlevel::UnixTimeStamp); impl UnixTimeStamp { /// Create a new unix timestamp from `system_time`.
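/// /// An illustrative sketch (assumes the second-resolution storage documented above): /// /// ```rust,ignore /// use openssh_sftp_client::UnixTimeStamp; /// /// let ts = UnixTimeStamp::new(std::time::SystemTime::now())?; /// assert_eq!( /// ts.as_system_time(), /// std::time::SystemTime::UNIX_EPOCH + ts.as_duration(), /// ); /// ```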
pub fn new(system_time: SystemTime) -> Result { lowlevel::UnixTimeStamp::new(system_time).map(Self) } /// Return unix epoch, same as [`UnixTimeStamp::default`] pub const fn unix_epoch() -> Self { Self(lowlevel::UnixTimeStamp::unix_epoch()) } /// Return `None` if [`std::time::SystemTime`] cannot hold the timestamp. pub fn from_raw(elapsed: u32) -> Option { lowlevel::UnixTimeStamp::from_raw(elapsed).map(Self) } /// Into `u32` which is used to internally store the timestamp in seconds. pub fn into_raw(self) -> u32 { self.0.into_raw() } /// Convert timestamp to [`Duration`]. pub fn as_duration(self) -> Duration { self.0.as_duration() } /// Convert timestamp back to [`SystemTime`]. pub fn as_system_time(self) -> SystemTime { self.0.as_system_time() } } openssh-sftp-client-0.15.0/src/utils.rs000064400000000000000000000016501046102023000161130ustar 00000000000000use std::convert::identity; use crate::error::{Error, RecursiveError, RecursiveError3}; pub(super) trait ErrorExt { fn error_on_cleanup(self, occuring_error: Self) -> Self; fn error_on_cleanup3(self, err2: Self, err3: Self) -> Self; } impl ErrorExt for Error { fn error_on_cleanup(self, occuring_error: Error) -> Self { Error::RecursiveErrors(Box::new(RecursiveError { original_error: self, occuring_error, })) } fn error_on_cleanup3(self, err2: Self, err3: Self) -> Self { Error::RecursiveErrors3(Box::new(RecursiveError3 { err1: self, err2, err3, })) } } pub(super) trait ResultExt { fn flatten(self) -> Result; } impl ResultExt for Result, E2> where E: From, { fn flatten(self) -> Result { self.map_err(E::from).and_then(identity) } } openssh-sftp-client-0.15.0/tests/highlevel.rs000064400000000000000000000503051046102023000172760ustar 00000000000000use std::{ borrow::Cow, cmp::{max, min}, convert::{identity, TryInto}, env, fs, future::ready, io::IoSlice, num::{NonZeroU32, NonZeroU64, NonZeroUsize}, path::Path, path::PathBuf, stringify, time::Duration, }; use bytes::BytesMut; use futures_util::StreamExt; use openssh::{KnownHosts, Session, SessionBuilder}; use openssh_sftp_client::*; use pretty_assertions::assert_eq; use sftp_test_common::*; use tokio::{ io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}, time::sleep, }; use tokio_io_utility::write_vectored_all; async fn connect(options: SftpOptions) -> (process::Child, Sftp) { let (child, stdin, stdout) = launch_sftp().await; (child, Sftp::new(stdin, stdout, options).await.unwrap()) } fn gen_path(func: &str) -> PathBuf { let mut path = get_path_for_tmp_files().join("highlevel"); fs::create_dir_all(&path).unwrap(); path.push(func); fs::remove_dir_all(&path).ok(); fs::remove_file(&path).ok(); path } #[tokio::test] async fn sftp_init() { let (mut child, sftp) = connect(Default::default()).await; sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test creating new file, truncating and opening existing file, /// basic read, write and removal. async fn sftp_file_basics() { let path = gen_path("sftp_file_basics"); let content = b"HELLO, WORLD!\n".repeat(200); let (mut child, sftp) = connect(SftpOptions::new().flush_interval(Duration::from_secs(0))).await; let content = &content[..min(sftp.max_write_len() as usize, content.len())]; { let mut fs = sftp.fs(); // Create new file (fail if already exists) and write to it. 
debug_assert_eq!( sftp.options() .write(true) .create_new(true) .open(&path) .await .unwrap() .write(content) .await .unwrap(), content.len() ); debug_assert_eq!(&*fs.read(&path).await.unwrap(), &*content); // Create new file with Trunc and write to it. // // Sftp::Create opens the file truncated. debug_assert_eq!( sftp.create(&path) .await .unwrap() .write(content) .await .unwrap(), content.len() ); debug_assert_eq!(&*fs.read(&path).await.unwrap(), &*content); // remove the file fs.remove_file(path).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } /// Return `SftpOptions` that has `max_rw_len` set to `200`. fn sftp_options_with_max_rw_len() -> SftpOptions { let max_rw_len = NonZeroU32::new(200).unwrap(); SftpOptions::new() .max_write_len(max_rw_len) .max_read_len(max_rw_len) } macro_rules! def_write_all_test { ($fname:ident, $sftp_options:expr, $file_converter:expr, $file_var:ident, $content_var:ident , $test_block:block) => { #[tokio::test] async fn $fname() { let path = gen_path(stringify!($fname)); for (msg, (mut child, sftp)) in vec![("Test direct write", connect($sftp_options).await)] { let max_len = max(sftp.max_write_len(), sftp.max_read_len()) as usize; let content = b"HELLO, WORLD!\n".repeat(max_len / 8); { let file = sftp .options() .write(true) .read(true) .create(true) .open(&path) .await .map($file_converter) .unwrap(); tokio::pin!(file); let len = content.len(); eprintln!("{}", msg); { let $file_var = &mut file; let $content_var = &content; $test_block; } eprintln!("Verifing the write"); file.rewind().await.unwrap(); let buffer = file .as_mut() .as_mut_file() .read_all(len, BytesMut::with_capacity(len)) .await .unwrap(); assert_eq!(&*buffer, &content); } sftp.fs().remove_file(&path).await.unwrap(); eprintln!("Closing sftp and child"); sftp.close().await.unwrap(); // TODO: somehow sftp-server hangs here child.kill().await.unwrap(); } } }; } def_write_all_test!( sftp_file_write_all, SftpOptions::new(), identity, file, content, { file.write_all(content).await.unwrap(); } ); def_write_all_test!( sftp_file_write_all_vectored, sftp_options_with_max_rw_len(), identity, file, content, { let len = content.len(); file.write_all_vectorized( [ IoSlice::new(&content[..len / 2]), IoSlice::new(&content[len / 2..]), ] .as_mut_slice(), ) .await .unwrap(); } ); def_write_all_test!( sftp_file_write_all_zero_copy, sftp_options_with_max_rw_len(), identity, file, content, { let len = content.len(); file.write_all_zero_copy( [ BytesMut::from(&content[..len / 2]).freeze(), BytesMut::from(&content[len / 2..]).freeze(), ] .as_mut_slice(), ) .await .unwrap(); } ); #[tokio::test] /// Test creating new TokioCompatFile, truncating and opening existing file, /// basic read, write and removal. async fn sftp_tokio_compact_file_basics() { let path = gen_path("sftp_tokio_compact_file_basics"); let content = b"HELLO, WORLD!\n".repeat(200); let (mut child, sftp) = connect(Default::default()).await; let content = &content[..min(sftp.max_write_len() as usize, content.len())]; let read_entire_file = || async { let mut buffer = Vec::with_capacity(content.len()); let file = sftp .open(&path) .await .map(file::TokioCompatFile::from) .unwrap(); tokio::pin!(file); file.read_to_end(&mut buffer).await.unwrap(); buffer }; { let mut fs = sftp.fs(); let file = sftp .options() .write(true) .create_new(true) .open(&path) .await .map(file::TokioCompatFile::from) .unwrap(); tokio::pin!(file); // Create new file (fail if already exists) and write to it. 
debug_assert_eq!(file.write(content).await.unwrap(), content.len()); file.flush().await.unwrap(); debug_assert_eq!(&*read_entire_file().await, &*content); // Create new file with Trunc and write to it. // // Sftp::Create opens the file truncated. let file = sftp .create(&path) .await .map(file::TokioCompatFile::from) .unwrap(); tokio::pin!(file); debug_assert_eq!(file.write(content).await.unwrap(), content.len()); // Flush the internal future buffers, but using a // different implementation from `TokioCompatFile::poll_flush` // since it is executed in async context. file.flush().await.unwrap(); debug_assert_eq!(&*read_entire_file().await, &*content); // remove the file fs.remove_file(&path).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } def_write_all_test!( sftp_tokio_compact_file_write_all, sftp_options_with_max_rw_len(), file::TokioCompatFile::from, file, content, { file.write_all(content).await.unwrap(); } ); def_write_all_test!( sftp_tokio_compact_file_write_vectored_all, sftp_options_with_max_rw_len(), file::TokioCompatFile::from, file, content, { let len = content.len(); write_vectored_all( file, [ IoSlice::new(&content[..len / 2]), IoSlice::new(&content[len / 2..]), ] .as_mut_slice(), ) .await .unwrap(); } ); #[tokio::test] /// Test File::{set_len, set_permissions, metadata}. async fn sftp_file_metadata() { let path = gen_path("sftp_file_metadata"); let (mut child, sftp) = connect(sftp_options_with_max_rw_len()).await; { let mut file = sftp .options() .read(true) .write(true) .create(true) .truncate(true) .open(&path) .await .unwrap(); assert_eq!(file.metadata().await.unwrap().len().unwrap(), 0); file.set_len(28802).await.unwrap(); assert_eq!(file.metadata().await.unwrap().len().unwrap(), 28802); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test File::sync_all. async fn sftp_file_sync_all() { let path = gen_path("sftp_file_sync_all"); let (mut child, sftp) = connect(sftp_options_with_max_rw_len()).await; sftp.create(path).await.unwrap().sync_all().await.unwrap(); // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test creating, removing and iterating over dir, as well /// as removing file. async fn sftp_dir_basics() { let path = gen_path("sftp_dir_basics"); let (mut child, sftp) = connect(Default::default()).await; { let mut fs = sftp.fs(); fs.create_dir(&path).await.unwrap(); fs.create_dir(&path.join("dir")).await.unwrap(); sftp.create(&path.join("file")).await.unwrap(); fs.open_dir(&path) .await .unwrap() .read_dir() .for_each(|res| { let entry = res.unwrap(); let filename = entry.filename().as_os_str(); if filename == "." || filename == ".." 
{ return ready(()); } else if filename == "dir" { assert!(entry.file_type().unwrap().is_dir()); } else if filename == "file" { assert!(entry.file_type().unwrap().is_file()); } else { unreachable!("Unreachable!"); } ready(()) }) .await; fs.remove_file(&path.join("file")).await.unwrap(); fs.remove_dir(&path.join("dir")).await.unwrap(); fs.remove_dir(&path).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test creation of symlink and canonicalize/read_link async fn sftp_fs_symlink() { let filename = gen_path("sftp_fs_symlink_file"); let symlink = gen_path("sftp_fs_symlink_symlink"); let content = b"hello, world!\n"; let (mut child, sftp) = connect(Default::default()).await; { let mut fs = sftp.fs(); fs.write(&filename, content).await.unwrap(); fs.symlink(&filename, &symlink).await.unwrap(); assert_eq!(&*fs.read(&symlink).await.unwrap(), content); assert_eq!(fs.canonicalize(&filename).await.unwrap(), filename); assert_eq!(fs.canonicalize(&symlink).await.unwrap(), filename); assert_eq!(fs.read_link(&symlink).await.unwrap(), filename); fs.remove_file(&symlink).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test creation of hard_link and canonicalize async fn sftp_fs_hardlink() { let filename = gen_path("sftp_fs_hard_link_file"); let hardlink = gen_path("sftp_fs_hard_link_hardlink"); let content = b"hello, world!\n"; let (mut child, sftp) = connect(Default::default()).await; { let mut fs = sftp.fs(); fs.write(&filename, content).await.unwrap(); fs.hard_link(&filename, &hardlink).await.unwrap(); assert_eq!(&*fs.read(&hardlink).await.unwrap(), content); assert_eq!(fs.canonicalize(&filename).await.unwrap(), filename); assert_eq!(fs.canonicalize(&hardlink).await.unwrap(), hardlink); fs.remove_file(&hardlink).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test creation of rename and canonicalize async fn sftp_fs_rename() { let filename = gen_path("sftp_fs_rename_file"); let renamed = gen_path("sftp_fs_rename_renamed"); let content = b"hello, world!\n"; let (mut child, sftp) = connect(Default::default()).await; { let mut fs = sftp.fs(); fs.write(&filename, content).await.unwrap(); fs.rename(&filename, &renamed).await.unwrap(); fs.read(&filename).await.unwrap_err(); assert_eq!(&*fs.read(&renamed).await.unwrap(), content); assert_eq!(fs.canonicalize(&renamed).await.unwrap(), renamed); fs.remove_file(&renamed).await.unwrap(); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test Fs::{metadata, set_metadata}. 
async fn sftp_fs_metadata() { let path = gen_path("sftp_fs_metadata"); let content = b"hello, world!\n"; let (mut child, sftp) = connect(sftp_options_with_max_rw_len()).await; { let mut fs = sftp.fs(); fs.write(&path, content).await.unwrap(); assert_eq!( fs.metadata(&path).await.unwrap().len().unwrap(), content.len().try_into().unwrap() ); fs.set_metadata(&path, metadata::MetaDataBuilder::new().len(2834).create()) .await .unwrap(); assert_eq!(fs.metadata(&path).await.unwrap().len().unwrap(), 2834); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } #[tokio::test] /// Test File::copy_to async fn sftp_file_copy_to() { let path = gen_path("sftp_file_copy_to"); let content = b"hello, world!\n"; let (mut child, sftp) = connect(sftp_options_with_max_rw_len()).await; sftp.fs().create_dir(&path).await.unwrap(); { let mut file0 = sftp .options() .read(true) .write(true) .create(true) .open(path.join("file0")) .await .unwrap(); file0.write_all(content).await.unwrap(); file0.rewind().await.unwrap(); let mut file1 = sftp .options() .read(true) .write(true) .create(true) .open(path.join("file1")) .await .unwrap(); file0 .copy_to( &mut file1, NonZeroU64::new(content.len().try_into().unwrap()).unwrap(), ) .await .unwrap(); file1.rewind().await.unwrap(); assert_eq!( &*file1 .read_all(content.len(), BytesMut::new()) .await .unwrap(), content ); } // close sftp and child sftp.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); } // Test of `Sftp::from_session` fn addr() -> Cow<'static, str> { std::env::var("TEST_HOST") .map(Cow::Owned) .unwrap_or(Cow::Borrowed("ssh://test-user@127.0.0.1:2222")) } fn get_known_hosts_path() -> PathBuf { let mut path = env::var_os("XDG_RUNTIME_DIR") .map(PathBuf::from) .unwrap_or_else(|| "/tmp".into()); path.push("openssh-rs/known_hosts"); path } async fn connects_with_name() -> Vec<(Session, &'static str)> { let mut sessions = Vec::with_capacity(2); let mut builder = SessionBuilder::default(); builder .user_known_hosts_file(get_known_hosts_path()) .known_hosts_check(KnownHosts::Accept); sessions.push((builder.connect(&addr()).await.unwrap(), "process-mux")); sessions.push((builder.connect_mux(&addr()).await.unwrap(), "native-mux")); sessions } #[tokio::test] async fn sftp_test_from_sessions() { let path = Path::new("sftp_test_from_sessions"); let content = b"HELLO, WORLD!\n".repeat(200); for (session, _name) in connects_with_name().await { let sftp = Sftp::from_session(session, Default::default()) .await .unwrap(); let content = &content[..min(sftp.max_write_len() as usize, content.len())]; { let mut fs = sftp.fs(); // Create new file (fail if already exists) and write to it. debug_assert_eq!( sftp.options() .write(true) .create_new(true) .open(&path) .await .unwrap() .write(content) .await .unwrap(), content.len() ); debug_assert_eq!(&*fs.read(&path).await.unwrap(), content); // Create new file with Trunc and write to it. // // Sftp::Create opens the file truncated. 
debug_assert_eq!( sftp.create(&path) .await .unwrap() .write(content) .await .unwrap(), content.len() ); debug_assert_eq!(&*fs.read(&path).await.unwrap(), content); // remove the file fs.remove_file(&path).await.unwrap(); } sftp.close().await.unwrap(); } } #[tokio::test] /// Test write buffer limit for tokio compat file async fn sftp_tokio_compact_file_write_buffer_limit() { let path = gen_path("sftp_tokio_compact_file_write_buffer_limit"); let content = b"HELLO, WORLD!\n".repeat(100); let option = SftpOptions::new() .flush_interval(Duration::from_secs(10000)) .tokio_compat_file_write_limit(NonZeroUsize::new(1000).unwrap()); let (mut child, sftp) = connect(option).await; let (mut child2, sftp2) = connect(Default::default()).await; let content = &content[..content.len().min(sftp.max_write_len() as usize)]; assert!(content.len() > 1000 && content.len() < 2000); let read_entire_file = || async { let mut buffer = Vec::with_capacity(content.len()); let file = sftp2 .open(&path) .await .map(file::TokioCompatFile::from) .unwrap(); tokio::pin!(file); file.read_to_end(&mut buffer).await.unwrap(); buffer }; { let mut fs = sftp.fs(); sftp.manual_flush(); let file = sftp .options() .write(true) .create_new(true) .open(&path) .await .map(file::TokioCompatFile::from) .unwrap(); tokio::pin!(file); let len = 1400; let (content1, content2) = content.split_at(len / 2); // Create new file (fail if already exists) and write to it. file.write_all(content1).await.unwrap(); debug_assert_eq!(read_entire_file().await.len(), 0); file.write_all(content2).await.unwrap(); sleep(Duration::from_millis(100)).await; debug_assert_eq!(read_entire_file().await.len(), 1000); file.flush().await.unwrap(); sleep(Duration::from_millis(100)).await; debug_assert_eq!(read_entire_file().await.len(), 1400); // remove the file sftp.manual_flush(); fs.remove_file(&path).await.unwrap(); } // close sftp and child sftp.manual_flush(); sftp.close().await.unwrap(); sftp2.close().await.unwrap(); assert!(child.wait().await.unwrap().success()); assert!(child2.wait().await.unwrap().success()); } openssh-sftp-client-0.15.0/wait_for_sshd_start_up.sh000075500000000000000000000001321046102023000207230ustar 00000000000000#!/bin/bash set -euxo pipefail until ssh-keyscan -p 2222 localhost; do sleep 1 done