openssh-0.11.0/.cargo_vcs_info.json0000644000000001360000000000100125720ustar { "git": { "sha1": "da6fc29ae5d297748931046769ca9e9aaadcad89" }, "path_in_vcs": "" }openssh-0.11.0/.github/dependabot.yml000064400000000000000000000004361046102023000155550ustar 00000000000000version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "daily" - package-ecosystem: "cargo" directory: "/" schedule: interval: "daily" openssh-0.11.0/.github/workflows/coverage.yml000064400000000000000000000044701046102023000173020ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: coverage concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true env: # makes all the ignored tests not ignored RUSTFLAGS: --cfg=ci jobs: test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install toolchain run: | rustup toolchain install stable --no-self-update --profile minimal --component llvm-tools-preview - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - run: | # Wait for startup of openssh-server timeout 15 ./wait_for_sshd_start_up.sh chmod 600 .test-key mkdir /tmp/openssh-rs ssh -i .test-key -v -p 2222 -l test-user localhost -o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/tmp/openssh-rs/known_hosts whoami name: Test ssh connectivity - run: | eval $(ssh-agent) echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV cat .test-key | ssh-add - name: Set up ssh-agent - name: Generate code coverage run: cargo llvm-cov --all-features --lcov --output-path lcov.info env: # we cannot use 127.0.0.1 (the default here) # since we are running from a different container TEST_HOST: ssh://test-user@localhost:2222 XDG_RUNTIME_DIR: /tmp - name: Upload to codecov.io uses: codecov/codecov-action@v4 with: fail_ci_if_error: true services: openssh: image: linuxserver/openssh-server:amd64-latest ports: - 2222:2222 env: USER_NAME: test-user PUBLIC_KEY: |- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 DOCKER_MODS: linuxserver/mods:openssh-server-ssh-tunnel openssh-0.11.0/.github/workflows/features.yml000064400000000000000000000017521046102023000173250ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: cargo hack concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true env: # makes all the ignored tests not ignored RUSTFLAGS: --cfg=ci jobs: check: runs-on: ubuntu-latest steps: - name: Install toolchain run: | rustup toolchain install stable --no-self-update --profile minimal - uses: actions/checkout@v4 - uses: taiki-e/install-action@cargo-hack - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - name: cargo hack run: | cargo hack --feature-powerset check --all-targets 
openssh-0.11.0/.github/workflows/minimal.yml000064400000000000000000000041351046102023000171330ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: With dependencies at minimal versions concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true env: # makes all the ignored tests not ignored RUSTFLAGS: --cfg=ci jobs: test: runs-on: ubuntu-latest steps: - name: Install toolchain run: | rustup toolchain install stable --no-self-update --profile minimal rustup toolchain install nightly --no-self-update --profile minimal - uses: actions/checkout@v4 - name: cargo update -Zminimal-versions run: cargo +nightly -Zminimal-versions update - uses: Swatinem/rust-cache@v2 - name: Compile tests run: cargo test --all-features --workspace --no-run - run: | # Wait for startup of openssh-server timeout 15 ./wait_for_sshd_start_up.sh chmod 600 .test-key mkdir /tmp/openssh-rs ssh -i .test-key -v -p 2222 -l test-user 127.0.0.1 -o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/tmp/openssh-rs/known_hosts whoami name: Test ssh connectivity - run: | eval $(ssh-agent) echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV cat .test-key | ssh-add - name: Set up ssh-agent - name: cargo test run: | cargo test --all-features env: XDG_RUNTIME_DIR: /tmp services: openssh: image: linuxserver/openssh-server:amd64-latest ports: - 2222:2222 env: USER_NAME: test-user PUBLIC_KEY: |- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 DOCKER_MODS: linuxserver/mods:openssh-server-ssh-tunnel openssh-0.11.0/.github/workflows/msrv.yml000064400000000000000000000020321046102023000164660ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: Minimum Supported Rust Version concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true jobs: check: runs-on: ubuntu-latest steps: - name: Install toolchain run: | rustup toolchain install 1.63 --no-self-update --profile minimal rustup toolchain install nightly --no-self-update --profile minimal rustup override set 1.63 rustup default 1.63 - uses: actions/checkout@v4 - name: cargo update -Zminimal-versions run: cargo +nightly -Zminimal-versions update - uses: Swatinem/rust-cache@v2 - name: cargo +1.63.0 check run: | cargo +1.63 check openssh-0.11.0/.github/workflows/os-check.yml000064400000000000000000000017111046102023000171760ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: os check concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true jobs: os-check: runs-on: ${{ matrix.os }} name: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [macos-latest] steps: - name: Install toolchain run: | rustup toolchain install stable --no-self-update --profile minimal - uses: actions/checkout@v4 - 
name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - name: cargo test run: cargo test --all-features --all-targets openssh-0.11.0/.github/workflows/release-plz.yml000064400000000000000000000011241046102023000177230ustar 00000000000000name: Release-plz permissions: pull-requests: write contents: write on: push: branches: - master jobs: release-plz: name: Release-plz runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable - name: Run release-plz uses: MarcoIeni/release-plz-action@v0.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} openssh-0.11.0/.github/workflows/shellcheck.yml000064400000000000000000000003261046102023000176100ustar 00000000000000on: push: branches: [master] pull_request: name: shellcheck jobs: shellcheck: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: shellcheck run: shellcheck *.sh openssh-0.11.0/.github/workflows/style.yml000064400000000000000000000032501046102023000166420ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: lint concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true jobs: style: runs-on: ubuntu-latest name: ${{ matrix.toolchain }} strategy: fail-fast: false matrix: toolchain: [stable, beta] steps: - name: Install toolchain run: | rustup toolchain install ${{ matrix.toolchain }} --no-self-update --profile minimal --component rustfmt,clippy rustup override set ${{ matrix.toolchain }} rustup default ${{ matrix.toolchain }} - uses: actions/checkout@v4 - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - run: cargo fmt --check - name: cargo clippy uses: actions-rs/clippy-check@v1 if: always() with: token: ${{ secrets.GITHUB_TOKEN }} doc: runs-on: ubuntu-latest env: RUSTDOCFLAGS: --cfg docsrs steps: - name: Install toolchain run: | rustup toolchain install nightly --no-self-update --profile minimal rustup override set nightly rustup default nightly - uses: actions/checkout@v4 - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - run: cargo +nightly doc --no-deps --all-features openssh-0.11.0/.github/workflows/test.yml000064400000000000000000000051101046102023000164560ustar 00000000000000on: push: branches: [master] paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' pull_request: paths-ignore: - 'build_doc.sh' - 'check.sh' - 'run_ci_tests.sh' - 'start_sshd.sh' - 'stop_sshd.sh' name: cargo test concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: true env: # makes all the ignored tests not ignored RUSTFLAGS: --cfg=ci jobs: test: runs-on: ubuntu-latest name: ${{ matrix.toolchain }} strategy: matrix: toolchain: [stable, beta, nightly] steps: - name: Install toolchain run: | rustup toolchain install ${{ matrix.toolchain }} --no-self-update --profile minimal rustup override set ${{ matrix.toolchain }} rustup default ${{ matrix.toolchain }} - uses: actions/checkout@v4 - name: Create Cargo.lock for caching run: cargo update - uses: Swatinem/rust-cache@v2 - name: Compile tests run: cargo test 
--all-features --workspace --no-run - run: | # Wait for startup of openssh-server timeout 15 ./wait_for_sshd_start_up.sh chmod 600 .test-key mkdir /tmp/openssh-rs ssh -i .test-key -v -p 2222 -l test-user 127.0.0.1 -o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/tmp/openssh-rs/known_hosts whoami name: Test ssh connectivity - run: | eval $(ssh-agent) echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV cat .test-key | ssh-add - name: Set up ssh-agent - run: cargo test --all-features env: XDG_RUNTIME_DIR: /tmp - run: docker logs $(docker ps | grep openssh-server | awk '{print $1}') name: ssh container log if: ${{ failure() }} - run: docker exec $(docker ps | grep openssh-server | awk '{print $1}') ls -R /config/logs/ if: ${{ failure() }} - run: docker exec $(docker ps | grep openssh-server | awk '{print $1}') cat /config/logs/openssh/current name: ssh server log if: ${{ failure() }} services: openssh: image: linuxserver/openssh-server:amd64-latest ports: - 2222:2222 env: USER_NAME: test-user PUBLIC_KEY: |- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 DOCKER_MODS: linuxserver/mods:openssh-server-ssh-tunnel openssh-0.11.0/.gitignore000064400000000000000000000000571046102023000133540ustar 00000000000000/target /ci-target Cargo.lock .idea/ .DS_Store openssh-0.11.0/.test-key000064400000000000000000000006331046102023000131320ustar 00000000000000-----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW QyNTUxOQAAACBsx7ytqSrUpWVz/bT2DjgW/8Z9CIgvYizEt+eQGFDwuwAAAJjouprb6Lqa 2wAAAAtzc2gtZWQyNTUxOQAAACBsx7ytqSrUpWVz/bT2DjgW/8Z9CIgvYizEt+eQGFDwuw AAAEDTnuB9lLA0WslBBEjIBwvrwvX/gI5L/cMS9tv1Rl53x2zHvK2pKtSlZXP9tPYOOBb/ xn0IiC9iLMS355AYUPC7AAAAEmpvbkBkZWZlbmVzdHJhdGlvbgECAw== -----END OPENSSH PRIVATE KEY----- openssh-0.11.0/.test-key.pub000064400000000000000000000001321046102023000137110ustar 00000000000000ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGzHvK2pKtSlZXP9tPYOOBb/xn0IiC9iLMS355AYUPC7 test-key openssh-0.11.0/CHANGELOG.md000064400000000000000000000020371046102023000131750ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] ## [0.11.0](https://github.com/openssh-rust/openssh/compare/v0.10.5...v0.10.6) - 2024-08-10 - Remove dep tokio-pipe (#156) - Remove deprecated functions (#156) - Replace `From` with `TryFrom`, since the converison is falliable (#156) - Remove `IntoRawFd` for `Child*` since the conversion is falliable (#156) ## [0.10.5](https://github.com/openssh-rust/openssh/compare/v0.10.4...v0.10.5) - 2024-08-10 ### Other - Fix release-plz.yml - Add missing feature doc for `Session::new*` ([#153](https://github.com/openssh-rust/openssh/pull/153)) - Create release-plz.yml for auto-release ([#151](https://github.com/openssh-rust/openssh/pull/151)) The changelog for this crate is kept in the project's Rust documentation in the changelog module. openssh-0.11.0/Cargo.lock0000644000000520340000000000100105510ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "addr2line" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "arc-swap" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "autocfg" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "awaitable" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70af449c9a763cb655c6a1e5338b42d99c67190824ff90658c1e30be844c0775" dependencies = [ "awaitable-error", "cfg-if", ] [[package]] name = "awaitable-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5b3469636cdf8543cceab175efca534471f36eee12fb8374aba00eb5e7e7f8a" [[package]] name = "backtrace" version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "504bdec147f2cc13c8b57ed9401fd8a147cc66b67ad5cb241394244f2c947549" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "concurrent_arena" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c529c2d4ecc249ae15d317c9a8b9e7d86f87e80d4417de6cfa8f4d6030f37daf" dependencies = [ "arc-swap", "parking_lot", "triomphe", ] [[package]] name = "derive_destructure2" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64b697ac90ff296f0fc031ee5a61c7ac31fb9fff50e3fb32873b09223613fc0c" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "errno" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "fastrand" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "futures-core" version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-sink" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "gimli" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "libc" version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "linux-raw-sys" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miniz_oxide" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", ] [[package]] name = "non-zero-byte-slice" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89daa1daa11c9df05d1181bcd0936d8066f8543144d77b09808eb78d65e38024" dependencies = [ "serde", ] [[package]] name = "num-derive" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "object" version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssh" version = "0.11.0" dependencies = [ "libc", "once_cell", "openssh-mux-client", "openssh-sftp-client", "regex", "shell-escape", "tempfile", "thiserror", "tokio", ] [[package]] name = "openssh-mux-client" version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f56c1f51de60268d69b883d7daef8d3c7865e8a3861b470c833d58bb2bb6dce" dependencies = [ "cfg-if", 
"non-zero-byte-slice", "once_cell", "openssh-mux-client-error", "sendfd", "serde", "ssh_format", "tokio", "tokio-io-utility", "typed-builder", ] [[package]] name = "openssh-mux-client-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "015d49e592f4d2a456033e6ec48036588e8e58c8908424b1bc40994de58ae648" dependencies = [ "ssh_format_error", "thiserror", ] [[package]] name = "openssh-sftp-client" version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f472c9c38ea60bc161f8b5df4d04c79057003cdc12572eaad7f6dcc74e6fca5" dependencies = [ "bytes", "derive_destructure2", "futures-core", "once_cell", "openssh-sftp-client-lowlevel", "openssh-sftp-error", "pin-project", "scopeguard", "tokio", "tokio-io-utility", "tokio-util", ] [[package]] name = "openssh-sftp-client-lowlevel" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a84f1a52761901fcf5b10885544085348a872e57294531ec9188145d9a83042" dependencies = [ "awaitable", "bytes", "concurrent_arena", "derive_destructure2", "openssh-sftp-error", "openssh-sftp-protocol", "pin-project", "tokio", "tokio-io-utility", ] [[package]] name = "openssh-sftp-error" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61a5aea093d714df10186f481a6003e3f906f6fc8360c026737a841f4f182996" dependencies = [ "awaitable-error", "openssh-sftp-protocol-error", "ssh_format_error", "thiserror", "tokio", ] [[package]] name = "openssh-sftp-protocol" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf38532d784978966f95d241226223823f351d5bb2a4bebcf6b20b9cb1e393e0" dependencies = [ "bitflags", "num-derive", "num-traits", "openssh-sftp-protocol-error", "serde", "ssh_format", "vec-strings", ] [[package]] name = "openssh-sftp-protocol-error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0719269eb3f037866ae07ec89cb44ed2c1d63b72b2390cef8e1aa3016a956ff8" dependencies = [ "serde", "thiserror", "vec-strings", ] [[package]] name = "parking_lot" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets", ] [[package]] name = "pin-project" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ 
"unicode-ident", ] [[package]] name = "quote" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sendfd" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604b71b8fc267e13bb3023a2c901126c8f349393666a6d98ac1ae5729b701798" dependencies = [ "libc", "tokio", ] [[package]] name = "serde" version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "shell-escape" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f" [[package]] name = "signal-hook-registry" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "ssh_format" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"24ab31081d1c9097c327ec23550858cb5ffb4af6b866c1ef4d728455f01f3304" dependencies = [ "bytes", "serde", "ssh_format_error", ] [[package]] name = "ssh_format_error" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be3c6519de7ca611f71ef7e8a56eb57aa1c818fecb5242d0a0f39c83776c210c" dependencies = [ "serde", ] [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "thin-vec" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "tokio" version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-io-utility" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d672654d175710e52c7c41f6aec77c62b3c0954e2a7ebce9049d1e94ed7c263" dependencies = [ "bytes", "tokio", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "tokio-util" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "triomphe" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" dependencies = [ "arc-swap", "serde", "stable_deref_trait", ] [[package]] name = "typed-builder" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a06fbd5b8de54c5f7c91f6fe4cebb949be2125d7758e630bb58b1d831dbce600" dependencies = [ "typed-builder-macro", ] [[package]] name = "typed-builder-macro" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9534daa9fd3ed0bd911d462a37f172228077e7abf18c18a5f67199d959205f8" dependencies = [ "proc-macro2", "quote", "syn 2.0.72", ] [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "vec-strings" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8509489e2a7ee219522238ad45fd370bec6808811ac15ac6b07453804e77659" dependencies = [ "serde", "thin-vec", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 
openssh-0.11.0/Cargo.toml0000644000000044220000000000100105720ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.63.0" name = "openssh" version = "0.11.0" authors = ["Jon Gjengset "] build = false exclude = [ "ci-target", "*.sh", ] autobins = false autoexamples = false autotests = false autobenches = false description = "SSH through OpenSSH" documentation = "https://docs.rs/openssh" readme = "README.md" keywords = [ "ssh", "remote", "openssh", "orchestration", ] categories = [ "network-programming", "api-bindings", ] license = "MIT OR Apache-2.0" repository = "https://github.com/openssh-rust/openssh.git" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "openssh" path = "src/lib.rs" [[example]] name = "native-mux_tsp" path = "examples/native-mux_tsp.rs" required-features = ["native-mux"] [[example]] name = "tsp" path = "examples/tsp.rs" required-features = ["process-mux"] [[test]] name = "openssh" path = "tests/openssh.rs" [dependencies.libc] version = "0.2.137" [dependencies.once_cell] version = "1.8.0" [dependencies.openssh-mux-client] version = "0.17.0" optional = true [dependencies.shell-escape] version = "0.1.5" [dependencies.tempfile] version = "3.9.0" [dependencies.thiserror] version = "1.0.30" [dependencies.tokio] version = "1.36.0" features = [ "process", "io-util", "macros", "net", ] [dev-dependencies.openssh-sftp-client] version = "0.14.0" [dev-dependencies.regex] version = "1" [dev-dependencies.tokio] version = "1" features = ["full"] [features] default = ["process-mux"] native-mux = ["openssh-mux-client"] process-mux = [] [badges.azure-devops] build = "23" pipeline = "openssh" project = "jonhoo/jonhoo" [badges.codecov] branch = "master" repository = "jonhoo/openssh-rs" service = "github" [badges.maintenance] status = "experimental" openssh-0.11.0/Cargo.toml.orig000064400000000000000000000030731046102023000142540ustar 00000000000000[package] name = "openssh" version = "0.11.0" authors = ["Jon Gjengset "] edition = "2021" rust-version = "1.63.0" license = "MIT OR Apache-2.0" readme = "README.md" description = "SSH through OpenSSH" repository = "https://github.com/openssh-rust/openssh.git" documentation = "https://docs.rs/openssh" keywords = ["ssh","remote","openssh","orchestration"] categories = ["network-programming", "api-bindings"] exclude = ["ci-target", "*.sh"] [badges] azure-devops = { project = "jonhoo/jonhoo", pipeline = "openssh", build = "23" } codecov = { repository = "jonhoo/openssh-rs", branch = "master", service = "github" } maintenance = { status = "experimental" } # docs.rs-specific configuration, shamelessly copied from # https://stackoverflow.com/a/61417700/8375400. 
# # To test locally, use ` ./build_doc.sh` [package.metadata.docs.rs] # document all features all-features = true # defines the configuration attribute `docsrs` rustdoc-args = ["--cfg", "docsrs"] [features] default = ["process-mux"] process-mux = [] native-mux = ["openssh-mux-client"] [dependencies] tempfile = "3.9.0" shell-escape = "0.1.5" thiserror = "1.0.30" tokio = { version = "1.36.0", features = [ "process", "io-util", "macros", "net" ] } once_cell = "1.8.0" openssh-mux-client = { version = "0.17.0", optional = true } libc = "0.2.137" [dev-dependencies] regex = "1" tokio = { version = "1", features = [ "full" ] } openssh-sftp-client = "0.14.0" [[example]] name = "native-mux_tsp" required-features = ["native-mux"] [[example]] name = "tsp" required-features = ["process-mux"] openssh-0.11.0/LICENSE-APACHE000064400000000000000000000261161046102023000133140ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2020 Jon Gjengset Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. openssh-0.11.0/LICENSE-MIT000064400000000000000000000020551046102023000130200ustar 00000000000000MIT License Copyright (c) 2020 Jon Gjengset Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. openssh-0.11.0/README.md000064400000000000000000000037241046102023000126470ustar 00000000000000[![Crates.io](https://img.shields.io/crates/v/openssh.svg)](https://crates.io/crates/openssh) [![Documentation](https://docs.rs/openssh/badge.svg)](https://docs.rs/openssh/) [![Codecov](https://codecov.io/github/openssh-rust/openssh/coverage.svg?branch=master)](https://codecov.io/gh/openssh-rust/openssh) Scriptable SSH through OpenSSH. This crate wraps the OpenSSH remote login client (`ssh` on most machines), and provides a convenient mechanism for running commands on remote hosts. Since all commands are executed through the `ssh` command, all your existing configuration (e.g., in `.ssh/config`) should continue to work as expected. The library's API is modeled closely after that of [`std::process::Command`], since `ssh` also attempts to make the remote process seem as much as possible like a local command. ## License Licensed under either of * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ### Run integration tests Requires `docker` and [`cargo-hack`]. 
Check [getting Docker guide](https://docs.docker.com/get-docker/) on how to install docker, and use `cargo install cargo-hack` to install [`cargo-hack`]. ``` ./run_ci_tests.sh ``` It will create a container which runs sshd, setup ssh-agent, and environment variables that are required to run the integration tests. It will also test different combination of feature flags to ensure they all compile without error. [`cargo-hack`]: https://github.com/taiki-e/cargo-hack ### Build documentation Requires nightly cargo. To install nightly cargo, run `rustup toolchain install nightly`. ``` ./build_doc.sh ``` openssh-0.11.0/codecov.yml000064400000000000000000000004341046102023000135300ustar 00000000000000# Hold ourselves to a high bar coverage: range: 85..100 round: down precision: 2 status: project: default: threshold: 1% # Tests aren't important for coverage ignore: - "tests" # Make less noisy comments comment: layout: "files" require_changes: yes openssh-0.11.0/contributing.md000064400000000000000000000020301046102023000144060ustar 00000000000000## License Licensed under either of * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ### Run integration tests Requires `docker` and [`cargo-hack`]. Check [getting Docker guide](https://docs.docker.com/get-docker/) on how to install docker, and use `cargo install cargo-hack` to install [`cargo-hack`]. ``` ./run_ci_tests.sh ``` It will create a container which runs sshd, set up an ssh-agent, and set environment variables that are required to run the integration tests. It will also test different combination of feature flags to ensure they all compile without error. 
[`cargo-hack`]: https://github.com/taiki-e/cargo-hack openssh-0.11.0/examples/native-mux_tsp.rs000064400000000000000000000007771046102023000165440ustar 00000000000000use openssh::*; #[tokio::main] async fn main() { let session = Session::connect_mux("ssh://jon@ssh.thesquareplanet.com:222", KnownHosts::Strict) .await .unwrap(); let ls = session.command("ls").output().await.unwrap(); eprintln!( "{}", String::from_utf8(ls.stdout).expect("server output was not valid UTF-8") ); let whoami = session.command("whoami").output().await.unwrap(); assert_eq!(whoami.stdout, b"jon\n"); session.close().await.unwrap(); } openssh-0.11.0/examples/tsp.rs000064400000000000000000000007731046102023000143630ustar 00000000000000use openssh::*; #[tokio::main] async fn main() { let session = Session::connect("ssh://jon@ssh.thesquareplanet.com:222", KnownHosts::Strict) .await .unwrap(); let ls = session.command("ls").output().await.unwrap(); eprintln!( "{}", String::from_utf8(ls.stdout).expect("server output was not valid UTF-8") ); let whoami = session.command("whoami").output().await.unwrap(); assert_eq!(whoami.stdout, b"jon\n"); session.close().await.unwrap(); } openssh-0.11.0/src/builder.rs000064400000000000000000000477121046102023000141600ustar 00000000000000use super::{Error, Session}; use std::borrow::Cow; use std::ffi::OsString; use std::iter::IntoIterator; use std::ops::Deref; use std::path::{Path, PathBuf}; use std::process::Stdio; use std::str; use std::{fs, io}; use once_cell::sync::OnceCell; use tempfile::{Builder, TempDir}; use tokio::process; #[cfg(not(windows))] fn state_dir() -> Option { fn get_absolute_path(path: OsString) -> Option { let path = PathBuf::from(path); path.is_absolute().then_some(path) } #[allow(deprecated)] if let Some(xdg) = std::env::var_os("XDG_STATE_HOME") { get_absolute_path(xdg) } else if let Some(home) = std::env::home_dir() { Some(get_absolute_path(home.into())?.join(".local/state")) } else { None } } #[cfg(windows)] fn state_dir() -> Option { None } /// The returned `&'static Path` can be coreced to any lifetime. fn get_default_control_dir<'a>() -> Result<&'a Path, Error> { static DEFAULT_CONTROL_DIR: OnceCell>> = OnceCell::new(); DEFAULT_CONTROL_DIR .get_or_try_init(|| { if let Some(state_dir) = state_dir() { fs::create_dir_all(&state_dir).map_err(Error::Connect)?; Ok(Some(state_dir.into_boxed_path())) } else { Ok(None) } }) .map(|default_control_dir| { default_control_dir .as_deref() .unwrap_or_else(|| Path::new("./")) }) } fn clean_history_control_dir(socketdir: &Path, prefix: &str) -> io::Result<()> { // Read the entries in the parent directory fs::read_dir(socketdir)? // Filter out and keep only the valid entries .filter_map(Result::ok) // Filter the entries to only include files that start with prefix .filter(|entry| { if let Ok(file_type) = entry.file_type() { file_type.is_dir() && entry.file_name().to_string_lossy().starts_with(prefix) } else { false } }) // For each matching entry, remove the directory .for_each(|entry| { let _ = fs::remove_dir_all(entry.path()); }); Ok(()) } /// Build a [`Session`] with options. 
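///
/// # Example
///
/// A minimal illustrative sketch of chaining the setter methods defined on this
/// builder; the user name, port, timeout, and host-key policy below are arbitrary
/// placeholder values. A configured builder is then used to establish a
/// [`Session`] (see the crate documentation for the available connection methods).
///
/// ```no_run
/// use std::time::Duration;
///
/// // Start from the default configuration and adjust only what is needed.
/// let mut builder = openssh::SessionBuilder::default();
/// builder
///     .user("test-user".to_owned())
///     .port(2222)
///     .connect_timeout(Duration::from_secs(10))
///     .known_hosts_check(openssh::KnownHosts::Strict);
/// ```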
#[derive(Debug, Clone)] pub struct SessionBuilder { user: Option, port: Option, keyfile: Option, connect_timeout: Option, server_alive_interval: Option, known_hosts_check: KnownHosts, control_dir: Option, control_persist: ControlPersist, clean_history_control_dir: bool, config_file: Option, compression: Option, jump_hosts: Vec>, user_known_hosts_file: Option>, ssh_auth_sock: Option>, } impl Default for SessionBuilder { fn default() -> Self { Self { user: None, port: None, keyfile: None, connect_timeout: None, server_alive_interval: None, known_hosts_check: KnownHosts::Add, control_dir: None, control_persist: ControlPersist::Forever, clean_history_control_dir: false, config_file: None, compression: None, jump_hosts: Vec::new(), user_known_hosts_file: None, ssh_auth_sock: None, } } } impl SessionBuilder { /// Return the user set in builder. pub fn get_user(&self) -> Option<&str> { self.user.as_deref() } /// Return the port set in builder. pub fn get_port(&self) -> Option<&str> { self.port.as_deref() } /// Set the ssh user (`ssh -l`). /// /// Defaults to `None`. pub fn user(&mut self, user: String) -> &mut Self { self.user = Some(user); self } /// Set the port to connect on (`ssh -p`). /// /// Defaults to `None`. pub fn port(&mut self, port: u16) -> &mut Self { self.port = Some(format!("{}", port)); self } /// Set the keyfile to use (`ssh -i`). /// /// Defaults to `None`. pub fn keyfile(&mut self, p: impl AsRef) -> &mut Self { self.keyfile = Some(p.as_ref().to_path_buf()); self } /// See [`KnownHosts`]. /// /// Default `KnownHosts::Add`. pub fn known_hosts_check(&mut self, k: KnownHosts) -> &mut Self { self.known_hosts_check = k; self } /// Set the connection timeout (`ssh -o ConnectTimeout`). /// /// This value is specified in seconds. Any sub-second duration remainder will be ignored. /// Defaults to `None`. pub fn connect_timeout(&mut self, d: std::time::Duration) -> &mut Self { self.connect_timeout = Some(d.as_secs().to_string()); self } /// Set the timeout interval after which if no data has been received from the server, ssh /// will request a response from the server (`ssh -o ServerAliveInterval`). /// /// This value is specified in seconds. Any sub-second duration remainder will be ignored. /// Defaults to `None`. pub fn server_alive_interval(&mut self, d: std::time::Duration) -> &mut Self { self.server_alive_interval = Some(d.as_secs()); self } /// Set the directory in which the temporary directory containing the control socket will /// be created. /// /// If not set, openssh will try to use `$XDG_STATE_HOME`, `$HOME/.local/state` on unix, and fallback to /// `./` (the current directory) if it failed. /// #[cfg(not(windows))] #[cfg_attr(docsrs, doc(cfg(not(windows))))] pub fn control_directory(&mut self, p: impl AsRef) -> &mut Self { self.control_dir = Some(p.as_ref().to_path_buf()); self } /// Clean up the temporary directories with the `.ssh-connection` prefix /// in directory specified by [`SessionBuilder::control_directory`], created by /// previous `openssh::Session` that is not cleaned up for some reasons /// (e.g. process getting killed, abort on panic, etc) /// /// Use this with caution, do not enable this if you don't understand /// what it does, #[cfg(not(windows))] #[cfg_attr(docsrs, doc(cfg(not(windows))))] pub fn clean_history_control_directory(&mut self, clean: bool) -> &mut Self { self.clean_history_control_dir = clean; self } /// Set the ControlPersist option to configure how long the controlling /// ssh session should stay alive. 
/// /// Defaults to `ControlPersist::Forever`. /// pub fn control_persist(&mut self, value: ControlPersist) -> &mut Self { self.control_persist = value; self } /// Set an alternative per-user configuration file. /// /// By default, ssh uses `~/.ssh/config`. This is equivalent to `ssh -F <config_file>
`. /// /// Defaults to `None`. pub fn config_file(&mut self, p: impl AsRef) -> &mut Self { self.config_file = Some(p.as_ref().to_path_buf()); self } /// Enable or disable compression (including stdin, stdout, stderr, data /// for forwarded TCP and unix-domain connections, sftp and scp /// connections). /// /// Note that the ssh server can forcibly disable the compression. /// /// By default, ssh uses configure value set in `~/.ssh/config`. /// /// If `~/.ssh/config` does not enable compression, then it is disabled /// by default. pub fn compression(&mut self, compression: bool) -> &mut Self { self.compression = Some(compression); self } /// Specify one or multiple jump hosts. /// /// Connect to the target host by first making a ssh connection to the /// jump host described by destination and then establishing a TCP /// forwarding to the ultimate destination from there. /// /// Multiple jump hops may be specified. /// This is a shortcut to specify a ProxyJump configuration directive. /// /// Note that configuration directives specified by [`SessionBuilder`] /// do not apply to the jump hosts. /// /// Use ~/.ssh/config to specify configuration for jump hosts. pub fn jump_hosts>(&mut self, hosts: impl IntoIterator) -> &mut Self { self.jump_hosts = hosts .into_iter() .map(|s| s.as_ref().to_string().into_boxed_str()) .collect(); self } /// Specify the path to the `known_hosts` file. /// /// The path provided may use tilde notation (`~`) to refer to the user's /// home directory. /// /// The default is `~/.ssh/known_hosts` and `~/.ssh/known_hosts2`. pub fn user_known_hosts_file(&mut self, user_known_hosts_file: impl AsRef) -> &mut Self { self.user_known_hosts_file = Some(user_known_hosts_file.as_ref().to_owned().into_boxed_path()); self } /// Specify the path to the ssh-agent. /// /// The path provided may use tilde notation (`~`) to refer to the user's /// home directory. /// /// The default is `None`. pub fn ssh_auth_sock(&mut self, ssh_auth_sock: impl AsRef) -> &mut Self { self.ssh_auth_sock = Some(ssh_auth_sock.as_ref().to_owned().into_boxed_path()); self } /// Connect to the host at the given `host` over SSH using process impl, which will /// spawn a new ssh process for each `Child` created. /// /// The format of `destination` is the same as the `destination` argument to `ssh`. It may be /// specified as either `[user@]hostname` or a URI of the form `ssh://[user@]hostname[:port]`. /// A username or port that is specified in the connection string overrides the one set in the /// builder (but does not change the builder). /// /// If connecting requires interactive authentication based on `STDIN` (such as reading a /// password), the connection will fail. Consider setting up keypair-based authentication /// instead. #[cfg(feature = "process-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "process-mux")))] pub async fn connect>(&self, destination: S) -> Result { self.connect_impl(destination.as_ref(), Session::new_process_mux) .await } /// Connect to the host at the given `host` over SSH using native mux, which will /// create a new local socket connection for each `Child` created. /// /// See the crate-level documentation for more details on the difference between native and process-based mux. /// /// The format of `destination` is the same as the `destination` argument to `ssh`. It may be /// specified as either `[user@]hostname` or a URI of the form `ssh://[user@]hostname[:port]`. 
/// A username or port that is specified in the connection string overrides the one set in the /// builder (but does not change the builder). /// /// If connecting requires interactive authentication based on `STDIN` (such as reading a /// password), the connection will fail. Consider setting up keypair-based authentication /// instead. #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] pub async fn connect_mux>(&self, destination: S) -> Result { self.connect_impl(destination.as_ref(), Session::new_native_mux) .await } async fn connect_impl( &self, destination: &str, f: fn(TempDir) -> Session, ) -> Result { let (builder, destination) = self.resolve(destination); let tempdir = builder.launch_master(destination).await?; Ok(f(tempdir)) } /// [`SessionBuilder`] support for `destination` parsing. /// The format of `destination` is the same as the `destination` argument to `ssh`. /// /// # Examples /// /// ```rust /// use openssh::SessionBuilder; /// let b = SessionBuilder::default(); /// let (b, d) = b.resolve("ssh://test-user@127.0.0.1:2222"); /// assert_eq!(b.get_port().as_deref(), Some("2222")); /// assert_eq!(b.get_user().as_deref(), Some("test-user")); /// assert_eq!(d, "127.0.0.1"); /// ``` pub fn resolve<'a, 'b>(&'a self, mut destination: &'b str) -> (Cow<'a, Self>, &'b str) { // the "new" ssh://user@host:port form is not supported by all versions of ssh, // so we always translate it into the option form. let mut user = None; let mut port = None; if destination.starts_with("ssh://") { destination = &destination[6..]; if let Some(at) = destination.rfind('@') { // specified a username -- extract it: user = Some(&destination[..at]); destination = &destination[(at + 1)..]; } if let Some(colon) = destination.rfind(':') { let p = &destination[(colon + 1)..]; if let Ok(p) = p.parse() { // user specified a port -- extract it: port = Some(p); destination = &destination[..colon]; } } } if user.is_none() && port.is_none() { return (Cow::Borrowed(self), destination); } let mut with_overrides = self.clone(); if let Some(user) = user { with_overrides.user(user.to_owned()); } if let Some(port) = port { with_overrides.port(port); } (Cow::Owned(with_overrides), destination) } /// Create ssh master session and return [`TempDir`] which /// contains the ssh control socket. pub async fn launch_master(&self, destination: &str) -> Result { let socketdir = if let Some(socketdir) = self.control_dir.as_ref() { socketdir } else { get_default_control_dir()? 
}; let prefix = ".ssh-connection"; if self.clean_history_control_dir { let _ = clean_history_control_dir(socketdir, prefix); } let dir = Builder::new() .prefix(prefix) .tempdir_in(socketdir) .map_err(Error::Master)?; let log = dir.path().join("log"); let mut init = process::Command::new("ssh"); init.stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::null()) .arg("-E") .arg(&log) .arg("-S") .arg(dir.path().join("master")) .arg("-M") .arg("-f") .arg("-N") .arg("-o") .arg(self.control_persist.as_option().deref()) .arg("-o") .arg("BatchMode=yes") .arg("-o") .arg(self.known_hosts_check.as_option()); if let Some(ref timeout) = self.connect_timeout { init.arg("-o").arg(format!("ConnectTimeout={}", timeout)); } if let Some(ref interval) = self.server_alive_interval { init.arg("-o") .arg(format!("ServerAliveInterval={}", interval)); } if let Some(ref port) = self.port { init.arg("-p").arg(port); } if let Some(ref user) = self.user { init.arg("-l").arg(user); } if let Some(ref k) = self.keyfile { // if the user gives a keyfile, _only_ use that keyfile init.arg("-o").arg("IdentitiesOnly=yes"); init.arg("-i").arg(k); } if let Some(ref config_file) = self.config_file { init.arg("-F").arg(config_file); } if let Some(compression) = self.compression { let arg = if compression { "yes" } else { "no" }; init.arg("-o").arg(format!("Compression={}", arg)); } if let Some(ssh_auth_sock) = self.ssh_auth_sock.as_deref() { init.env("SSH_AUTH_SOCK", ssh_auth_sock); } let mut it = self.jump_hosts.iter(); if let Some(jump_host) = it.next() { let s = jump_host.to_string(); let dest = it.fold(s, |mut s, jump_host| { s.push(','); s.push_str(jump_host); s }); init.arg("-J").arg(&dest); } if let Some(user_known_hosts_file) = &self.user_known_hosts_file { let mut option: OsString = "UserKnownHostsFile=".into(); option.push(&**user_known_hosts_file); init.arg("-o").arg(option); } init.arg(destination); // we spawn and immediately wait, because the process is supposed to fork. let status = init.status().await.map_err(Error::Connect)?; if !status.success() { let output = fs::read_to_string(log).map_err(Error::Connect)?; Err(Error::interpret_ssh_error(&output)) } else { Ok(dir) } } } /// Specifies how long the controlling ssh process should stay alive. #[derive(Clone, Debug, Default)] #[non_exhaustive] pub enum ControlPersist { /// Will stay alive indefinitely. #[default] Forever, /// Will be closed after the initial connection is closed ClosedAfterInitialConnection, /// If the ssh control server has been idle for specified duration /// (in seconds), it will exit. IdleFor(std::num::NonZeroUsize), } impl ControlPersist { fn as_option(&self) -> Cow<'_, str> { match self { ControlPersist::Forever => Cow::Borrowed("ControlPersist=yes"), ControlPersist::ClosedAfterInitialConnection => Cow::Borrowed("ControlPersist=no"), ControlPersist::IdleFor(d) => Cow::Owned(format!("ControlPersist={}s", d.get())), } } } /// Specifies how the host's key fingerprint should be handled. #[derive(Debug, Clone)] pub enum KnownHosts { /// The host's fingerprint must match what is in the known hosts file. /// /// If the host is not in the known hosts file, the connection is rejected. /// /// This corresponds to `ssh -o StrictHostKeyChecking=yes`. Strict, /// Strict, but if the host is not already in the known hosts file, it will be added. /// /// This corresponds to `ssh -o StrictHostKeyChecking=accept-new`. Add, /// Accept whatever key the server provides and add it to the known hosts file. 
/// /// This corresponds to `ssh -o StrictHostKeyChecking=no`. Accept, } impl KnownHosts { fn as_option(&self) -> &'static str { match *self { KnownHosts::Strict => "StrictHostKeyChecking=yes", KnownHosts::Add => "StrictHostKeyChecking=accept-new", KnownHosts::Accept => "StrictHostKeyChecking=no", } } } #[cfg(test)] mod tests { use super::SessionBuilder; #[test] fn resolve() { let b = SessionBuilder::default(); let (b, d) = b.resolve("ssh://test-user@127.0.0.1:2222"); assert_eq!(b.port.as_deref(), Some("2222")); assert_eq!(b.user.as_deref(), Some("test-user")); assert_eq!(d, "127.0.0.1"); let b = SessionBuilder::default(); let (b, d) = b.resolve("ssh://test-user@opensshtest:2222"); assert_eq!(b.port.as_deref(), Some("2222")); assert_eq!(b.user.as_deref(), Some("test-user")); assert_eq!(d, "opensshtest"); let b = SessionBuilder::default(); let (b, d) = b.resolve("ssh://opensshtest:2222"); assert_eq!(b.port.as_deref(), Some("2222")); assert_eq!(b.user.as_deref(), None); assert_eq!(d, "opensshtest"); let b = SessionBuilder::default(); let (b, d) = b.resolve("ssh://test-user@opensshtest"); assert_eq!(b.port.as_deref(), None); assert_eq!(b.user.as_deref(), Some("test-user")); assert_eq!(d, "opensshtest"); let b = SessionBuilder::default(); let (b, d) = b.resolve("ssh://opensshtest"); assert_eq!(b.port.as_deref(), None); assert_eq!(b.user.as_deref(), None); assert_eq!(d, "opensshtest"); let b = SessionBuilder::default(); let (b, d) = b.resolve("opensshtest"); assert_eq!(b.port.as_deref(), None); assert_eq!(b.user.as_deref(), None); assert_eq!(d, "opensshtest"); } } openssh-0.11.0/src/changelog.rs000064400000000000000000000200001046102023000144360ustar 00000000000000#[allow(unused_imports)] use crate::*; /// TODO: RENAME THIS INTO THE NEXT VERSION BEFORE RELEASE #[doc(hidden)] pub mod unreleased {} /// # Changed /// - Remove dep tokio-pipe (#156) /// - Remove deprecated functions (#156) /// - Replace `From` /// with `TryFrom`, since the converison is falliable (#156) /// - Remove `IntoRawFd` for `Child*` since the conversion is falliable (#156) pub mod v0_11_0 {} /// ## Changed /// - Add missing feature doc for `Session::new*` pub mod v0_10_5 {} /// ## Changed /// - Added new fn [`Session::control_persist`] to set the `ControlPersist` option of /// the master ssh connection. pub mod v0_10_4 {} /// ## Changed /// - Removed dependency on MPL licensed dirs-sys in favor of local implementation pub mod v0_10_3 {} /// ## Changed /// - Use `str::rfind` to locate the `@` in connection string in case the username contains `@` pub mod v0_10_2 {} /// ## Added /// - Add new fns [`Session::arc_command`], [`Session::arc_raw_command`], /// [`Session::to_command`], and [`Session::to_raw_command`] to support /// session-owning commands /// - Add generic [`crate::OwningCommand`], to support session-owning /// commands. /// - Add [`crate::child::Child`] as a generic version of [`RemoteChild`] /// to support session-owning commands /// ## Changed /// - Change [`RemoteChild`] to be an alias to [`crate::child::Child`] /// owning a session references. /// - Change [`Command`] to be an alias to [`OwningCommand`] owning a /// session reference. /// - Change [`OverSsh::over_ssh`] to be generic and support owned /// sessions. 
/// ## Removed #[doc(hidden)] pub mod v0_10_1 {} /// ## Added /// - [`Session::new_process_mux`] /// - [`Session::new_native_mux`] /// - [`SessionBuilder::get_user`] /// - [`SessionBuilder::get_port`] /// - [`SessionBuilder::resolve`] /// - [`SessionBuilder::launch_master`] /// - [`SessionBuilder::clean_history_control_directory`] /// - [`OverSsh`] for converting [`std::process::Command`], /// [`tokio::process::Command`] or other custom types to /// [`Command`]. /// ## Changed /// - [`Socket::TcpSocket`] now contains `host: Cow<'_, str>` and `port: u16` /// instead of an already resolved `SocketAddr`. /// Since the socket could be opened on remote host, which might has /// different dns configuration, it's better to delay resolution and perform /// it on remote instead. /// - [`Socket::new`] now takes `host: Cow<'_, str>` and `port: u16` for the /// same reason as above. pub mod v0_10_0 {} /// ## Added /// - Add new fn `SessionBuilder::ssh_auth_sock` pub mod v0_9_9 {} /// ## Added /// - `impl From for Stdio` /// - Add new fn `Stdio::from_raw_fd_owned` /// ## Changed /// - Mark `FromRawFd` impl for `Stdio` as deprecated /// - Mark `From` for `Stdio` as deprecated /// - Mark `From` for `Stdio` as deprecated /// ## Fixed /// - [`wait_with_output` + `native-mux` cuts off stdout output](https://github.com/openssh-rust/openssh/issues/103) pub mod v0_9_8 {} /// ## Changed /// - Bumped minimum version of `openssh-mux-client` to 0.15.1 pub mod v0_9_7 {} /// ## Added /// - [`SessionBuilder::jump_hosts`] pub mod v0_9_6 {} /// ## Added /// - `From for Socket<'static>` /// - `From> for Socket<'a>` /// - `From<&'a Path> for Socket<'a>` /// - `From for Socket<'static>` /// - `From> for Socket<'static>` /// - `From<(IpAddr, u16)> for Socket<'static>` /// - `From<(Ipv4Addr, u16)> for Socket<'static>` /// - `From<(Ipv6Addr, u16)> for Socket<'static>` /// /// ## Changed /// - [`Session::request_port_forward`] now takes `impl Into<...>` /// to make it much easier to use. /// - [`Socket::new`] now returns `Socket<'static>` pub mod v0_9_5 {} /// ## Added /// - [`Session::resume`] /// - [`Session::resume_mux`] /// - [`Session::detach`] pub mod v0_9_3 {} /// ## Changed /// - Removed `impl From for Stdio` as it was an unintentional part of the public API. /// This is technically a breaking change, but should in practice affect no-one. pub mod v0_9_2 {} /// ## Added /// - [`Session::subsystem`] pub mod v0_9_1 {} /// No changes since 0.9.0-rc4. pub mod v0_9_0 {} /// ## Fixed /// - Remove accidentally exposed `TryFrom` /// implementation for [`ChildStdin`]. /// - Remove accidentally exposed `TryFrom` /// implementation for [`ChildStdout`]. /// - Remove accidentally exposed `TryFrom` /// implementation for [`ChildStdout`]. /// - Remove accidentally exposed `TryFrom` /// implementation for [`ChildStderr`]. /// - Remove accidentally exposed `TryFrom` /// implementation for [`ChildStderr`]. /// /// ## Changed /// - Make [`Session::check`] available only on unix. /// - Make [`Socket::UnixSocket`] available only on unix. /// - Make [`SessionBuilder::control_directory`] available only on unix. pub mod v0_9_0_rc4 {} /// ## Fixed /// - Fixed changelog entry for rc2 not being visible pub mod v0_9_0_rc3 {} /// ## Fixed /// - Fixed crate level doc /// /// ## Added /// - Added changelog /// - Associated function [`SessionBuilder::compression`] /// - Associated function [`SessionBuilder::user_known_hosts_file`] /// - Associated function [`Session::control_socket`] for non-Windows platform. 
/// /// ## Changed /// - Make [`ChildStdin`] an opaque type. /// - Make [`ChildStdout`] an opaque type. /// - Make [`ChildStderr`] an opaque type. /// /// ## Removed /// - Type `Sftp`. /// - Type `Mode`. /// - Type `RemoteFile`. /// - Associated function `Session::sftp`. pub mod v0_9_0_rc2 {} /// ## Added /// - Feature flag `native-mux`, an alternative backend that communicates /// with the ssh multiplex server directly through control socket as opposed /// `process-mux` implementation that spawns a process to communicate with /// the ssh multiplex server. /// /// Compared to `process-mux`, `native-mux` provides more robust error /// reporting, better performance and reduced memory usage. /// /// `process-mux` checks the exit status of `ssh` for indication of error, /// then parse the output of it and the output of the ssh multiplex master /// to return an error. /// /// This method is obviously not so robust as `native-mux`, which directly /// communicates with ssh multiplex master through its [multiplex protocol]. /// /// - Feature flag `process-mux` (enabled by default) to disable the old /// backend if desired. /// - API [`Session::connect_mux`] for the new `native-mux` backend, /// which is used to create a [`Session`] backed by `native-mux` /// implementation. /// - API [`SessionBuilder::connect_mux`] for the new `native-mux` backend, /// which is used to create a [`Session`] backed by `native-mux` /// implementation. /// - [`Session::request_port_forward`] for local/remote forwarding /// of tcp or unix stream sockets, along with [`ForwardType`] and /// [`Socket`], which is used to setup port forwarding. /// - A new module [`process`] is added to provide interfaces more similar to /// [`std::process`]. /// - New variants are added to [`Error`]. /// /// ## Changed /// - A new type [`Stdio`] is used for setting stdin/stdout/stderr. /// - [`ChildStdin`], [`ChildStdout`] and [`ChildStderr`] are now aliases /// for [`tokio_pipe::PipeRead`] and [`tokio_pipe::PipeWrite`]. /// - [`Command::spawn`] and [`Command::status`] now conforms to /// [`std::process::Command`] and [`tokio::process::Command`], in which /// stdin, stdout and stderr are inherit by default. /// - [`Command::spawn`] is now an `async` method. /// - [`RemoteChild::wait`] now takes `self` by value. /// - [`Error`] is now marked `#[non_exhaustive]`. /// /// [multiplex protocol]: https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.mux pub mod v0_9_0_rc_1 {} openssh-0.11.0/src/child.rs000064400000000000000000000171071046102023000136100ustar 00000000000000use super::{ChildStderr, ChildStdin, ChildStdout, Error}; use std::io; use std::process::{ExitStatus, Output}; use tokio::io::AsyncReadExt; use tokio::try_join; #[derive(Debug)] pub(crate) enum RemoteChildImp { #[cfg(feature = "process-mux")] ProcessImpl(super::process_impl::RemoteChild), #[cfg(feature = "native-mux")] NativeMuxImpl(super::native_mux_impl::RemoteChild), } #[cfg(feature = "process-mux")] impl From for RemoteChildImp { fn from(imp: super::process_impl::RemoteChild) -> Self { RemoteChildImp::ProcessImpl(imp) } } #[cfg(feature = "native-mux")] impl From for RemoteChildImp { fn from(imp: super::native_mux_impl::RemoteChild) -> Self { RemoteChildImp::NativeMuxImpl(imp) } } macro_rules! 
delegate { ($impl:expr, $var:ident, $then:block) => {{ match $impl { #[cfg(feature = "process-mux")] RemoteChildImp::ProcessImpl($var) => $then, #[cfg(feature = "native-mux")] RemoteChildImp::NativeMuxImpl($var) => $then, } }}; } /// Representation of a running or exited remote child process. /// /// This structure is used to represent and manage remote child /// processes. A remote child process is created via the /// [`OwningCommand`](crate::OwningCommand) struct through /// [`Session::command`](crate::Session::command) or one of its /// variants, which configures the spawning process and can itself be /// constructed using a builder-style interface. /// /// Calling [`wait`](Child::wait) (or other functions that wrap around it) will make the /// parent process wait until the child has actually exited before continuing. /// /// Unlike [`std::process::Child`], `Child` *does* implement [`Drop`], and will terminate the /// local `ssh` process corresponding to the remote process when it goes out of scope. Note that /// this does _not_ terminate the remote process. If you want to do that, you will need to kill it /// yourself by executing a remote command like `pkill` to kill it on the remote side. /// /// As a result, `Child` cannot expose `stdin`, `stdout`, and `stderr` as fields for /// split-borrows like [`std::process::Child`] does. Instead, it exposes /// [`stdin`](Child::stdin), [`stdout`](Child::stdout), /// and [`stderr`](Child::stderr) as methods. Callers can call `.take()` to get the same /// effect as a split borrow and use multiple streams concurrently. Note that for the streams to be /// available,`Stdio::piped()` should be passed to the corresponding method on /// [`OwningCommand`](crate::OwningCommand). /// /// NOTE that once `Child` is dropped, any data written to `stdin` will not be sent to the /// remote process and `stdout` and `stderr` will yield EOF immediately. /// /// ```rust,no_run /// # async fn foo() { /// # let child: openssh::RemoteChild<'static> = unimplemented!(); /// let stdin = child.stdin().take().unwrap(); /// let stdout = child.stdout().take().unwrap(); /// tokio::io::copy(&mut stdout, &mut stdin).await; /// # } /// ``` #[derive(Debug)] pub struct Child { session: S, imp: RemoteChildImp, stdin: Option, stdout: Option, stderr: Option, } impl Child { pub(crate) fn new( session: S, (imp, stdin, stdout, stderr): ( RemoteChildImp, Option, Option, Option, ), ) -> Self { Self { session, stdin, stdout, stderr, imp, } } /// Disconnect from this given remote child process. /// /// Note that disconnecting does _not_ kill the remote process, it merely kills the local /// handle to that remote process. pub async fn disconnect(self) -> io::Result<()> { delegate!(self.imp, imp, { imp.disconnect().await }) } /// Waits for the remote child to exit completely, returning the status that it exited with. /// /// This function will continue to have the same return value after it has been called at least /// once. /// /// The stdin handle to the child process, if any, will be closed before waiting. This helps /// avoid deadlock: it ensures that the child does not block waiting for input from the parent, /// while the parent waits for the child to exit. pub async fn wait(mut self) -> Result { // Close stdin so that if the remote process is reading stdin, // it would return EOF and the remote process can exit. 
self.stdin().take(); delegate!(self.imp, imp, { imp.wait().await }) } /// Simultaneously waits for the remote child to exit and collect all remaining output on the /// stdout/stderr handles, returning an `Output` instance. /// /// The stdin handle to the child process, if any, will be closed before waiting. This helps /// avoid deadlock: it ensures that the child does not block waiting for input from the parent, /// while the parent waits for the child to exit. /// /// By default, stdin, stdout and stderr are inherited from the parent. In order to capture the /// output into this `Result` it is necessary to create new pipes between parent and /// child. Use `stdout(Stdio::piped())` or `stderr(Stdio::piped())`, respectively. pub async fn wait_with_output(mut self) -> Result { let child_stdout = self.stdout.take(); let stdout_read = async move { let mut stdout = Vec::new(); if let Some(mut child_stdout) = child_stdout { child_stdout .read_to_end(&mut stdout) .await .map_err(Error::ChildIo)?; } Ok::<_, Error>(stdout) }; let child_stderr = self.stderr.take(); let stderr_read = async move { let mut stderr = Vec::new(); if let Some(mut child_stderr) = child_stderr { child_stderr .read_to_end(&mut stderr) .await .map_err(Error::ChildIo)?; } Ok::<_, Error>(stderr) }; // Execute them concurrently to avoid the pipe buffer being filled up // and cause the remote process to block forever. let (stdout, stderr) = try_join!(stdout_read, stderr_read)?; Ok(Output { // The self.wait() future terminates the stdout and stderr futures // when it resolves, even if there may still be more data arriving // from the server. // // Therefore, we wait for them first, and only once they're complete // do we wait for the process to have terminated. status: self.wait().await?, stdout, stderr, }) } /// Access the handle for reading from the remote child's standard input (stdin), if requested. pub fn stdin(&mut self) -> &mut Option { &mut self.stdin } /// Access the handle for reading from the remote child's standard output (stdout), if /// requested. pub fn stdout(&mut self) -> &mut Option { &mut self.stdout } /// Access the handle for reading from the remote child's standard error (stderr), if requested. pub fn stderr(&mut self) -> &mut Option { &mut self.stderr } } impl Child { /// Access the SSH session that this remote process was spawned from. pub fn session(&self) -> S { self.session.clone() } } openssh-0.11.0/src/command.rs000064400000000000000000000341501046102023000141400ustar 00000000000000use crate::escape::escape; use super::child::Child; use super::stdio::TryFromChildIo; use super::Stdio; use super::{Error, Session}; use std::borrow::Cow; use std::ffi::OsStr; use std::ops::Deref; use std::process; #[derive(Debug)] pub(crate) enum CommandImp { #[cfg(feature = "process-mux")] ProcessImpl(super::process_impl::Command), #[cfg(feature = "native-mux")] NativeMuxImpl(super::native_mux_impl::Command), } #[cfg(feature = "process-mux")] impl From for CommandImp { fn from(imp: super::process_impl::Command) -> Self { CommandImp::ProcessImpl(imp) } } #[cfg(feature = "native-mux")] impl From for CommandImp { fn from(imp: super::native_mux_impl::Command) -> Self { CommandImp::NativeMuxImpl(imp) } } #[cfg(any(feature = "process-mux", feature = "native-mux"))] macro_rules! 
delegate { ($impl:expr, $var:ident, $then:block) => {{ match $impl { #[cfg(feature = "process-mux")] CommandImp::ProcessImpl($var) => $then, #[cfg(feature = "native-mux")] CommandImp::NativeMuxImpl($var) => $then, } }}; } #[cfg(not(any(feature = "process-mux", feature = "native-mux")))] macro_rules! delegate { ($impl:expr, $var:ident, $then:block) => {{ unreachable!("Neither feature process-mux nor native-mux is enabled") }}; } /// If a command is `OverSsh` then it can be executed over an SSH session. /// /// Primarily a way to allow `std::process::Command` to be turned directly into an `openssh::Command`. pub trait OverSsh { /// Given an ssh session, return a command that can be executed over that ssh session. /// /// ### Notes /// /// The command to be executed on the remote machine should not explicitly /// set environment variables or the current working directory. It errors if the source command /// has environment variables or a current working directory set, since `openssh` doesn't (yet) have /// a method to set environment variables and `ssh` doesn't support setting a current working directory /// outside of `bash/dash/zsh` (which is not always available). /// /// ### Examples /// /// 1. Consider the implementation of `OverSsh` for `std::process::Command`. Let's build a /// `ls -l -a -h` command and execute it over an SSH session. /// /// ```no_run /// # #[tokio::main(flavor = "current_thread")] /// async fn main() -> Result<(), Box> { /// use std::process::Command; /// use openssh::{Session, KnownHosts, OverSsh}; /// /// let session = Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?; /// let ls = /// Command::new("ls") /// .arg("-l") /// .arg("-a") /// .arg("-h") /// .over_ssh(&session)? /// .output() /// .await?; /// /// assert!(String::from_utf8(ls.stdout).unwrap().contains("total")); /// # Ok(()) /// } /// /// ``` /// 2. Building a command with environment variables or a current working directory set will /// results in an error. /// /// ```no_run /// # #[tokio::main(flavor = "current_thread")] /// async fn main() -> Result<(), Box> { /// use std::process::Command; /// use openssh::{Session, KnownHosts, OverSsh}; /// /// let session = Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?; /// let echo = /// Command::new("echo") /// .env("MY_ENV_VAR", "foo") /// .arg("$MY_ENV_VAR") /// .over_ssh(&session); /// assert!(matches!(echo, Err(openssh::Error::CommandHasEnv))); /// /// # Ok(()) /// } /// /// ``` fn over_ssh + Clone>( &self, session: S, ) -> Result, crate::Error>; } impl OverSsh for std::process::Command { fn over_ssh + Clone>( &self, session: S, ) -> Result, crate::Error> { // I'd really like `!self.get_envs().is_empty()` here, but that's // behind a `exact_size_is_empty` feature flag. 
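// (`Command::get_envs()` also yields variables removed via `env_remove`, so any
// explicit environment manipulation at all is treated as unsupported here.)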
if self.get_envs().len() > 0 { return Err(crate::Error::CommandHasEnv); } if self.get_current_dir().is_some() { return Err(crate::Error::CommandHasCwd); } let program_escaped: Cow<'_, OsStr> = escape(self.get_program()); let mut command = Session::to_raw_command(session, program_escaped); let args = self.get_args().map(escape); command.raw_args(args); Ok(command) } } impl OverSsh for tokio::process::Command { fn over_ssh + Clone>( &self, session: S, ) -> Result, crate::Error> { self.as_std().over_ssh(session) } } impl OverSsh for &S where S: OverSsh, { fn over_ssh + Clone>( &self, session: U, ) -> Result, crate::Error> { ::over_ssh(self, session) } } impl OverSsh for &mut S where S: OverSsh, { fn over_ssh + Clone>( &self, session: U, ) -> Result, crate::Error> { ::over_ssh(self, session) } } /// A remote process builder, providing fine-grained control over how a new remote process should /// be spawned. /// /// A default configuration can be generated using [`Session::command(program)`](Session::command) /// or [`Session::arc_command(program)`](Session::arc_command), where `program` gives a path to /// the program to be executed. Additional builder methods allow the configuration to be changed /// (for example, by adding arguments) prior to spawning. The interface is almost identical to /// that of [`std::process::Command`]. /// /// `OwningCommand` can be reused to spawn multiple remote processes. The builder methods change /// the command without needing to immediately spawn the process. Similarly, you can call builder /// methods after spawning a process and then spawn a new process with the modified settings. /// /// # Environment variables and current working directory. /// /// You'll notice that unlike its `std` counterpart, `OwningCommand` does not have any methods for /// setting environment variables or the current working directory for the remote command. This is /// because the SSH protocol does not support this (at least not in its standard configuration). /// For more details on this, see the `ENVIRONMENT` section of [`ssh(1)`]. To work around this, /// give [`env(1)`] a try. If the remote shell supports it, you can also prefix your command with /// `["cd", "dir", "&&"]` to run the rest of the command in some directory `dir`. /// /// # Exit status /// /// The `ssh` command generally forwards the exit status of the remote process. The exception is if /// a protocol-level error occured, in which case it will return with exit status 255. Since the /// remote process _could_ also return with exit status 255, we have no reliable way to distinguish /// between remote errors and errors from `ssh`, but this library _assumes_ that 255 means the /// error came from `ssh`, and acts accordingly. /// /// [`ssh(1)`]: https://linux.die.net/man/1/ssh /// [`env(1)`]: https://linux.die.net/man/1/env #[derive(Debug)] pub struct OwningCommand { session: S, imp: CommandImp, stdin_set: bool, stdout_set: bool, stderr_set: bool, } impl OwningCommand { pub(crate) fn new(session: S, imp: CommandImp) -> Self { Self { session, imp, stdin_set: false, stdout_set: false, stderr_set: false, } } /// Adds an argument to pass to the remote program. /// /// Before it is passed to the remote host, `arg` is escaped so that special characters aren't /// evaluated by the remote shell. If you do not want this behavior, use /// [`raw_arg`](Self::raw_arg). /// /// Only one argument can be passed per use. 
So instead of: /// /// ```no_run /// # fn foo(c: &mut openssh::Command<'_>) { c /// .arg("-C /path/to/repo") /// # ; } /// ``` /// /// usage would be: /// /// ```no_run /// # fn foo(c: &mut openssh::Command<'_>) { c /// .arg("-C") /// .arg("/path/to/repo") /// # ; } /// ``` /// /// To pass multiple arguments see [`args`](Self::args). pub fn arg>(&mut self, arg: A) -> &mut Self { self.raw_arg(&*shell_escape::unix::escape(Cow::Borrowed(arg.as_ref()))) } /// Adds an argument to pass to the remote program. /// /// Unlike [`arg`](Self::arg), this method does not shell-escape `arg`. The argument is passed as written /// to `ssh`, which will pass it again as an argument to the remote shell. Since the remote /// shell may do argument parsing, characters such as spaces and `*` may be interpreted by the /// remote shell. /// /// To pass multiple unescaped arguments see [`raw_args`](Self::raw_args). pub fn raw_arg>(&mut self, arg: A) -> &mut Self { delegate!(&mut self.imp, imp, { imp.raw_arg(arg.as_ref()); }); self } /// Adds multiple arguments to pass to the remote program. /// /// Before they are passed to the remote host, each argument in `args` is escaped so that /// special characters aren't evaluated by the remote shell. If you do not want this behavior, /// use [`raw_args`](Self::raw_args). /// /// To pass a single argument see [`arg`](Self::arg). pub fn args(&mut self, args: I) -> &mut Self where I: IntoIterator, A: AsRef, { for arg in args { self.arg(arg); } self } /// Adds multiple arguments to pass to the remote program. /// /// Unlike [`args`](Self::args), this method does not shell-escape `args`. The arguments are passed as /// written to `ssh`, which will pass them again as arguments to the remote shell. However, /// since the remote shell may do argument parsing, characters such as spaces and `*` may be /// interpreted by the remote shell. /// /// To pass a single argument see [`raw_arg`](Self::raw_arg). pub fn raw_args(&mut self, args: I) -> &mut Self where I: IntoIterator, A: AsRef, { for arg in args { self.raw_arg(arg); } self } /// Configuration for the remote process's standard input (stdin) handle. /// /// Defaults to [`inherit`] when used with `spawn` or `status`, and /// defaults to [`null`] when used with `output`. /// /// [`inherit`]: struct.Stdio.html#method.inherit /// [`null`]: struct.Stdio.html#method.null pub fn stdin>(&mut self, cfg: T) -> &mut Self { delegate!(&mut self.imp, imp, { imp.stdin(cfg.into()); }); self.stdin_set = true; self } /// Configuration for the remote process's standard output (stdout) handle. /// /// Defaults to [`inherit`] when used with `spawn` or `status`, and /// defaults to [`piped`] when used with `output`. /// /// [`inherit`]: struct.Stdio.html#method.inherit /// [`piped`]: struct.Stdio.html#method.piped pub fn stdout>(&mut self, cfg: T) -> &mut Self { delegate!(&mut self.imp, imp, { imp.stdout(cfg.into()); }); self.stdout_set = true; self } /// Configuration for the remote process's standard error (stderr) handle. /// /// Defaults to [`inherit`] when used with `spawn` or `status`, and /// defaults to [`piped`] when used with `output`. 
/// /// [`inherit`]: struct.Stdio.html#method.inherit /// [`piped`]: struct.Stdio.html#method.piped pub fn stderr>(&mut self, cfg: T) -> &mut Self { delegate!(&mut self.imp, imp, { imp.stderr(cfg.into()); }); self.stderr_set = true; self } } impl OwningCommand { async fn spawn_impl(&mut self) -> Result, Error> { Ok(Child::new( self.session.clone(), delegate!(&mut self.imp, imp, { let (imp, stdin, stdout, stderr) = imp.spawn().await?; ( imp.into(), stdin.map(TryFromChildIo::try_from).transpose()?, stdout.map(TryFromChildIo::try_from).transpose()?, stderr.map(TryFromChildIo::try_from).transpose()?, ) }), )) } /// Executes the remote command without waiting for it, returning a handle to it /// instead. /// /// By default, stdin, stdout and stderr are inherited. pub async fn spawn(&mut self) -> Result, Error> { if !self.stdin_set { self.stdin(Stdio::inherit()); } if !self.stdout_set { self.stdout(Stdio::inherit()); } if !self.stderr_set { self.stderr(Stdio::inherit()); } self.spawn_impl().await } /// Executes the remote command, waiting for it to finish and collecting all of its output. /// /// By default, stdout and stderr are captured (and used to provide the resulting /// output) and stdin is set to `Stdio::null()`. pub async fn output(&mut self) -> Result { if !self.stdin_set { self.stdin(Stdio::null()); } if !self.stdout_set { self.stdout(Stdio::piped()); } if !self.stderr_set { self.stderr(Stdio::piped()); } self.spawn_impl().await?.wait_with_output().await } /// Executes the remote command, waiting for it to finish and collecting its exit status. /// /// By default, stdin, stdout and stderr are inherited. pub async fn status(&mut self) -> Result { self.spawn().await?.wait().await } } openssh-0.11.0/src/error.rs000064400000000000000000000230311046102023000136470ustar 00000000000000use std::io; /// Errors that occur when interacting with a remote process. #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum Error { /// The master connection failed. #[error("the master connection failed")] Master(#[source] io::Error), /// Failed to establish initial connection to the remote host. #[error("failed to connect to the remote host")] Connect(#[source] io::Error), /// Failed to run the `ssh` command locally. #[cfg(feature = "process-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "process-mux")))] #[error("the local ssh command could not be executed")] Ssh(#[source] io::Error), /// Failed to connect to the ssh multiplex server. #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] #[error("failed to connect to the ssh multiplex server")] SshMux(#[source] openssh_mux_client::Error), /// Invalid command that contains null byte. #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] #[error("invalid command: Command contains null byte.")] InvalidCommand, /// The remote process failed. #[error("the remote command could not be executed")] Remote(#[source] io::Error), /// The connection to the remote host was severed. /// /// Note that for the process impl, this is a best-effort error, and it _may_ instead /// signify that the remote process exited with an error code of 255. /// /// You should call [`Session::check`](crate::Session::check) to verify if you get /// this error back. #[error("the connection was terminated")] Disconnected, /// Remote process is terminated. /// /// It is likely to be that the process is terminated by signal. 
/// /// **NOTE that due to a fundamental design flaw in the ssh multiplex protocol, /// there is no way to tell `RemoteProcessTerminated` from `Disconnect`.** /// /// If you really need to identify `Disconnect`, you can call `session.check()` /// after `wait()` returns `RemoteProcessTerminated`; however, the ssh multiplex master /// could exit right after `wait()`, meaning the remote process actually was terminated /// rather than `Disconnect`ed. /// /// It is thus recommended to create your own workaround for your particular use cases. #[error("the remote process has terminated")] RemoteProcessTerminated, /// Failed to remove the temporary dir where the ssh socket and output are stored. #[error("failed to remove temporary ssh session directory")] Cleanup(#[source] io::Error), /// IO Error when creating/reading/writing from ChildStdin, ChildStdout, ChildStderr. #[error("failure while accessing standard i/o of remote process")] ChildIo(#[source] io::Error), /// The command has some env variables that it expects to carry over ssh. /// However, OverSsh does not support passing env variables over ssh. #[error("rejected running a command over ssh that expects env variables to be carried over to remote.")] CommandHasEnv, /// The command expects to be in a specific working directory on the remote. /// However, OverSsh does not support setting a working directory for commands to be executed over ssh. #[error("rejected running a command over ssh that expects a specific working directory to be carried over to remote.")] CommandHasCwd, } #[cfg(feature = "native-mux")] impl From<openssh_mux_client::Error> for Error { fn from(err: openssh_mux_client::Error) -> Self { use io::ErrorKind; match &err { openssh_mux_client::Error::IOError(ioerr) => match ioerr.kind() { ErrorKind::NotFound | ErrorKind::ConnectionReset // If the listener of a unix socket exits without removing the socket // file, then attempt to connect to the file results in // `ConnectionRefused`. | ErrorKind::ConnectionRefused | ErrorKind::ConnectionAborted | ErrorKind::NotConnected => Error::Disconnected, _ => Error::SshMux(err), }, _ => Error::SshMux(err), } } } impl Error { pub(crate) fn interpret_ssh_error(stderr: &str) -> Self { // we want to turn the string-only ssh error into something a little more "handleable". // we do this by trying to interpret the output from `ssh`. this is error-prone, but // the best we can do. if you find ways to improve this, even just through heuristics, // please file an issue or PR :) // // format is: // // ssh: <ssh error>: <io error> let mut stderr = stderr.trim(); stderr = stderr.strip_prefix("ssh: ").unwrap_or(stderr); if stderr.starts_with("Warning: Permanently added ") { // added to hosts file -- let's ignore that message stderr = stderr.split_once('\n').map(|x| x.1.trim()).unwrap_or(""); } let mut kind = io::ErrorKind::ConnectionAborted; let mut err = stderr.splitn(2, ": "); if let Some(ssh_error) = err.next() { if ssh_error.starts_with("Could not resolve") { // match what `std` gives: https://github.com/rust-lang/rust/blob/a5de254862477924bcd8b9e1bff7eadd6ffb5e2a/src/libstd/sys/unix/net.rs#L40 // we _could_ match on "Name or service not known" from io_error, // but my guess is that the ssh error is more stable.
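// (`std` maps a failed host name lookup to a generic `ErrorKind::Other` error,
// which is why we fall back to `Other` for "Could not resolve" as well.)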
kind = io::ErrorKind::Other; } if let Some(io_error) = err.next() { match io_error { "Network is unreachable" => { kind = io::ErrorKind::Other; } "Connection refused" => { kind = io::ErrorKind::ConnectionRefused; } e if ssh_error.starts_with("connect to host") && e == "Connection timed out" => { kind = io::ErrorKind::TimedOut; } e if ssh_error.starts_with("connect to host") && e == "Operation timed out" => { // this is the macOS version of "connection timed out" kind = io::ErrorKind::TimedOut; } e if ssh_error.starts_with("connect to host") && e == "Permission denied" => { // this is the macOS version of "network is unreachable". kind = io::ErrorKind::Other; } e if e.contains("Permission denied (") => { kind = io::ErrorKind::PermissionDenied; } _ => {} } } } // NOTE: we may want to provide more structured connection errors than just io::Error? // NOTE: can we re-use this method for non-connect cases? Error::Connect(io::Error::new(kind, stderr)) } } #[cfg(test)] mod tests { use super::{io, Error}; #[test] fn parse_error() { let err = "ssh: Warning: Permanently added \'login.csail.mit.edu,128.52.131.0\' (ECDSA) to the list of known hosts.\r\nopenssh-tester@login.csail.mit.edu: Permission denied (publickey,gssapi-keyex,gssapi-with-mic,password,keyboard-interactive)."; let err = Error::interpret_ssh_error(err); let target = io::Error::new(io::ErrorKind::PermissionDenied, "openssh-tester@login.csail.mit.edu: Permission denied (publickey,gssapi-keyex,gssapi-with-mic,password,keyboard-interactive)."); if let Error::Connect(e) = err { assert_eq!(e.kind(), target.kind()); assert_eq!(format!("{}", e), format!("{}", target)); } else { unreachable!("{:?}", err); } } #[test] fn error_sanity() { use std::error::Error as _; let ioe = || io::Error::new(io::ErrorKind::Other, "test"); let expect = ioe(); let e = Error::Master(ioe()); assert!(!format!("{}", e).is_empty()); let e = e .source() .expect("source failed") .downcast_ref::() .expect("source not io"); assert_eq!(e.kind(), expect.kind()); assert_eq!(format!("{}", e), format!("{}", expect)); let e = Error::Connect(ioe()); assert!(!format!("{}", e).is_empty()); let e = e .source() .expect("source failed") .downcast_ref::() .expect("source not io"); assert_eq!(e.kind(), expect.kind()); assert_eq!(format!("{}", e), format!("{}", expect)); #[cfg(feature = "process-mux")] { let e = Error::Ssh(ioe()); assert!(!format!("{}", e).is_empty()); let e = e .source() .expect("source failed") .downcast_ref::() .expect("source not io"); assert_eq!(e.kind(), expect.kind()); assert_eq!(format!("{}", e), format!("{}", expect)); } let e = Error::Remote(ioe()); assert!(!format!("{}", e).is_empty()); let e = e .source() .expect("source failed") .downcast_ref::() .expect("source not io"); assert_eq!(e.kind(), expect.kind()); assert_eq!(format!("{}", e), format!("{}", expect)); let e = Error::Disconnected; assert!(!format!("{}", e).is_empty()); assert!(e.source().is_none()); } } openssh-0.11.0/src/escape.rs000064400000000000000000000060261046102023000137630ustar 00000000000000//! Escape characters that may have special meaning in a shell, including spaces. //! This is a modified version of the [`shell-escape::unix`] module of [`shell-escape`] crate. //! //! [`shell-escape`]: https://crates.io/crates/shell-escape //! 
[`shell-escape::unix`]: https://docs.rs/shell-escape/latest/src/shell_escape/lib.rs.html#101 use std::{ borrow::Cow, ffi::{OsStr, OsString}, os::unix::ffi::OsStrExt, os::unix::ffi::OsStringExt, }; fn allowed(byte: u8) -> bool { matches!(byte, b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'-' | b'_' | b'=' | b'/' | b',' | b'.' | b'+') } /// Escape characters that may have special meaning in a shell, including spaces. /// /// **Note**: This function is an adaptation of [`shell-escape::unix::escape`]. /// This function exists only for type compatibility and the implementation is /// almost exactly the same as [`shell-escape::unix::escape`]. /// /// [`shell-escape::unix::escape`]: https://docs.rs/shell-escape/latest/src/shell_escape/lib.rs.html#101 /// pub(crate) fn escape(s: &OsStr) -> Cow<'_, OsStr> { let as_bytes = s.as_bytes(); let all_allowed = as_bytes.iter().copied().all(allowed); if !as_bytes.is_empty() && all_allowed { return Cow::Borrowed(s); } let mut escaped = Vec::with_capacity(as_bytes.len() + 2); escaped.push(b'\''); for &b in as_bytes { match b { b'\'' | b'!' => { escaped.reserve(4); escaped.push(b'\''); escaped.push(b'\\'); escaped.push(b); escaped.push(b'\''); } _ => escaped.push(b), } } escaped.push(b'\''); OsString::from_vec(escaped).into() } #[cfg(test)] mod tests { use super::*; fn test_escape_case(input: &str, expected: &str) { test_escape_from_bytes(input.as_bytes(), expected.as_bytes()) } fn test_escape_from_bytes(input: &[u8], expected: &[u8]) { let input_os_str = OsStr::from_bytes(input); let observed_os_str = escape(input_os_str); let expected_os_str = OsStr::from_bytes(expected); assert_eq!(observed_os_str, expected_os_str); } // These tests are courtesy of the `shell-escape` crate. #[test] fn test_escape() { test_escape_case( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_=/,.+", "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_=/,.+", ); test_escape_case("--aaa=bbb-ccc", "--aaa=bbb-ccc"); test_escape_case( "linker=gcc -L/foo -Wl,bar", r#"'linker=gcc -L/foo -Wl,bar'"#, ); test_escape_case(r#"--features="default""#, r#"'--features="default"'"#); test_escape_case(r#"'!\$`\\\n "#, r#"''\'''\!'\$`\\\n '"#); test_escape_case("", r#"''"#); test_escape_case(" ", r#"' '"#); test_escape_case("*", r#"'*'"#); test_escape_from_bytes( &[0x66, 0x6f, 0x80, 0x6f], &[b'\'', 0x66, 0x6f, 0x80, 0x6f, b'\''], ); } } openssh-0.11.0/src/lib.rs000064400000000000000000000217031046102023000132700ustar 00000000000000//! Scriptable SSH through OpenSSH (**only works on unix**). //! //! This crate wraps the OpenSSH remote login client (`ssh` on most machines), and provides //! a convenient mechanism for running commands on remote hosts. Since all commands are executed //! through the `ssh` command, all your existing configuration (e.g., in `.ssh/config`) should //! continue to work as expected. //! //! # Executing remote processes //! //! The library's API is modeled closely after that of [`std::process::Command`], since `ssh` also //! attempts to make the remote process seem as much as possible like a local command. However, //! there are some differences. //! //! First of all, all remote commands are executed in the context of a single ssh //! [session](Session). Authentication happens once when the session is //! [established](Session::connect), and subsequent command invocations re-use the same connection. //! //! Note that the maximum number of multiplexed remote commands is 10 by default. This value can be //! 
increased by changing the `MaxSessions` setting in [`sshd_config`]. //! //! Much like with [`std::process::Command`], you have multiple //! options when it comes to launching a remote command. You can //! [spawn](Command::spawn) the remote command, which just gives you a //! handle to the running process, you can run the command and wait //! for its [output](Command::output), or you can run it and just //! extract its [exit status](Command::status). Unlike its `std` //! counterpart though, these methods on [`OwningCommand`] can fail //! even if the remote command executed successfully, since there is a //! fallible network separating you from it. //! //! Also unlike its `std` counterpart, [`spawn`](OwningCommand::spawn) gives you a [`Child`] rather //! than a [`std::process::Child`]. Behind the scenes, a remote child is really just a process //! handle to the _local_ `ssh` instance corresponding to the spawned remote command. The behavior //! of the methods of [`RemoteChild`] therefore matches the behavior of `ssh`, rather than that of //! the remote command directly. Usually, these are the same, though not always, as highlighted in //! the documentation of the individual methods. See also the section below on Remote Shells. //! //! # Connection modes //! //! This library provides two ways to connect to the [`ControlMaster`]: //! //! One is to spawn a new process, the other is to connect to //! the control socket directly. //! //! The process implementation executes remote commands by invoking //! the ssh command locally with arguments that make the invocation //! reuse the connections set up by the control master. //! //! This maximizes compatibility with OpenSSH, but loses out on some fidelity //! in information about execution, since only the exit code and the output of //! the ssh command are available to inspect. //! //! The native mux implementation, on the other hand, connects directly to //! the ssh control master and executes commands and retrieves the exit codes and //! the output of the remote process over its native protocol. //! //! This gives better access to error information at the cost of introducing //! more non-OpenSSH code into the call path. //! //! The former parses the stdout/stderr of the ssh control master to retrieve the error //! for any failed operations, while the latter retrieves the error from the control socket //! directly. //! //! Thus, the error handling in the latter is more robust. //! //! Also, the former requires one process to be spawned for every connection while the latter only //! needs to create one socket, so the latter has better performance and consumes fewer resources. //! //! Behind the scenes, the crate uses ssh's [`ControlMaster`] feature to multiplex the channels for //! the different remote commands. Because of this, each remote command is tied to the lifetime of //! the [`Session`] that spawned it. When the session is [closed](Session::close), the connection //! is severed, and there can be no outstanding remote clients. //! //! # Authentication //! //! This library supports only password-less authentication schemes. If running `ssh` to a target //! host requires you to provide input on standard input (`STDIN`), then this crate will not work //! for you. You should set up keypair-based authentication instead. //! //! # Errors //! //! Since we are wrapping `ssh`, which in turn runs a remote command that we do not control, we //! do not have a reliable way to tell the difference between what is a failure of the SSH
connection itself, and what is a program error from the remote host. We do our best with some //! heuristics (like `ssh` exiting with status code 255 if a connection error occurs), but the //! errors from this crate will almost necessarily be worse than those of a native SSH //! implementation. Sorry in advance :) //! //! This also means that you may see strange errors when the remote process is terminated by a //! signal (such as through `kill` or `pkill`). When this happens, all the local ssh program sees //! is that the remote process disappeared, and so it returns with an error. It does not //! communicate that the process exited due to a signal. In cases like this, your call will return //! [`Error::Disconnected`], because the connection to _that_ remote process was disconnected. The //! ssh connection as a whole is likely still intact. //! //! To check if the connection has truly failed, use [`Session::check`]. It will return `Ok` if the //! master connection is still operational, and _may_ provide you with more information than you //! got from the failing command (that is, just [`Error::Disconnected`]) if it is not. //! //! # Remote Shells //! //! When you invoke a remote command through ssh, the remote command is executed by a shell on the //! remote end. That shell _interprets_ anything passed to it — it might evaluate words starting //! with `$` as variables, split arguments by whitespace, and other things a shell is wont to do. //! Since that is _usually_ not what you expect to happen (`.arg("a b")` should pass a _single_ //! argument with the value `a b`), `openssh` _escapes_ every argument (and the command itself) by //! default using [`shell-escape`]. This works well in most cases, but might run into issues when //! the remote shell (generally the remote user's login shell) has a different syntax than the //! shell `shell-escape` targets (bash). For example, Windows shells have different escaping syntax //! than bash does. //! //! If this applies to you, you can use [`raw_arg`](Command::raw_arg), //! [`raw_args`](Command::raw_args), and [`raw_command`](Session::raw_command) to bypass the //! escaping that `openssh` normally does for you. //! //! # Sftp subsystem //! //! For sftp and other ssh subsystems, check [`Session::subsystem`] for more information. //! //! # Examples //! //! ```rust,no_run //! # #[cfg(feature = "native-mux")] //! # #[tokio::main] //! # async fn main() -> Result<(), openssh::Error> { //! use openssh::{Session, KnownHosts}; //! //! let session = Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?; //! //! let ls = session.command("ls").output().await?; //! eprintln!("{}", String::from_utf8(ls.stdout).expect("server output was not valid UTF-8")); //! //! let whoami = session.command("whoami").output().await?; //! assert_eq!(whoami.stdout, b"me\n"); //! //! session.close().await?; //! # Ok(()) } //! ``` //! //! [`ControlMaster`]: https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing //! [`sshd_config`]: https://linux.die.net/man/5/sshd_config //!
[`shell-escape`]: https://crates.io/crates/shell-escape #![warn( missing_docs, missing_debug_implementations, rustdoc::broken_intra_doc_links, rust_2018_idioms, unreachable_pub )] #![cfg_attr( not(any(feature = "process-mux", feature = "native-mux")), allow(unused_variables, unreachable_code, unused_imports, dead_code) )] // only enables the nightly `doc_cfg` feature when // the `docsrs` configuration attribute is defined #![cfg_attr(docsrs, feature(doc_cfg))] #[cfg(not(unix))] compile_error!("This crate can only be used on unix"); mod stdio; pub use stdio::{ChildStderr, ChildStdin, ChildStdout, Stdio}; mod session; pub use session::Session; mod builder; pub use builder::{ControlPersist, KnownHosts, SessionBuilder}; mod command; pub use command::{OverSsh, OwningCommand}; /// Convenience [`OwningCommand`] alias when working with a session reference. pub type Command<'s> = OwningCommand<&'s Session>; mod escape; mod child; pub use child::Child; /// Convenience [`Child`] alias when working with a session reference. pub type RemoteChild<'a> = Child<&'a Session>; mod error; pub use error::Error; #[cfg(feature = "process-mux")] pub(crate) mod process_impl; #[cfg(feature = "native-mux")] pub(crate) mod native_mux_impl; #[cfg(doc)] /// Changelog for this crate. pub mod changelog; mod port_forwarding; pub use port_forwarding::*; /// Types to create and interact with the Remote Process pub mod process { pub use super::{ChildStderr, ChildStdin, ChildStdout, Command, RemoteChild, Stdio}; } openssh-0.11.0/src/native_mux_impl/child.rs000064400000000000000000000035411046102023000170050ustar 00000000000000use super::Error; use std::io; use std::os::unix::process::ExitStatusExt; use std::process::ExitStatus; use openssh_mux_client::{EstablishedSession, SessionStatus}; #[derive(Debug)] pub(crate) struct RemoteChild { established_session: EstablishedSession, } impl RemoteChild { pub(crate) fn new(established_session: EstablishedSession) -> Self { Self { established_session, } } pub(crate) async fn disconnect(self) -> io::Result<()> { // ssh multiplex protocol does not specify any message type // that can be used to kill the remote process or properly shutdown // the connection. // // So here we just let the drop handler does its job to release // underlying resources such as unix stream socket and heap memory allocated, // the remote process is not killed. 
Ok(()) } pub(crate) async fn wait(self) -> Result { let session_status = self .established_session .wait() .await .map_err(|(err, _established_session)| err)?; match session_status { SessionStatus::TtyAllocFail(_established_session) => { unreachable!("native_mux_impl never allocates a tty") } SessionStatus::Exited { exit_value } => { if let Some(val) = exit_value { if val == 127 { Err(Error::Remote(io::Error::new( io::ErrorKind::NotFound, "remote command not found", ))) } else { Ok(ExitStatusExt::from_raw((val as i32) << 8)) } } else { Err(Error::RemoteProcessTerminated) } } } } } openssh-0.11.0/src/native_mux_impl/command.rs000064400000000000000000000044501046102023000173400ustar 00000000000000use super::Error; use super::RemoteChild; use super::{ChildStderr, ChildStdin, ChildStdout, Stdio}; use std::borrow::Cow; use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; use std::path::Path; use openssh_mux_client::{Connection, NonZeroByteSlice, Session}; #[derive(Debug)] pub(crate) struct Command { cmd: Vec, ctl: Box, subsystem: bool, stdin_v: Stdio, stdout_v: Stdio, stderr_v: Stdio, } impl Command { pub(crate) fn new(ctl: Box, cmd: Vec, subsystem: bool) -> Self { Self { cmd, ctl, subsystem, stdin_v: Stdio::inherit(), stdout_v: Stdio::inherit(), stderr_v: Stdio::inherit(), } } pub(crate) fn raw_arg>(&mut self, arg: S) { self.cmd.push(b' '); self.cmd.extend_from_slice(arg.as_ref().as_bytes()); } pub(crate) fn stdin>(&mut self, cfg: T) { self.stdin_v = cfg.into(); } pub(crate) fn stdout>(&mut self, cfg: T) { self.stdout_v = cfg.into(); } pub(crate) fn stderr>(&mut self, cfg: T) { self.stderr_v = cfg.into(); } pub(crate) async fn spawn( &mut self, ) -> Result< ( RemoteChild, Option, Option, Option, ), Error, > { let (stdin, child_stdin) = self.stdin_v.to_stdin()?; let (stdout, child_stdout) = self.stdout_v.to_stdout()?; let (stderr, child_stderr) = self.stderr_v.to_stderr()?; let stdios = [ stdin.as_raw_fd_or_null_fd()?, stdout.as_raw_fd_or_null_fd()?, stderr.as_raw_fd_or_null_fd()?, ]; let cmd = NonZeroByteSlice::new(&self.cmd).ok_or(Error::InvalidCommand)?; let session = Session::builder() .cmd(Cow::Borrowed(cmd)) .subsystem(self.subsystem) .build(); let established_session = Connection::connect(&self.ctl) .await? 
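            // Hand the mux master the session request (command and subsystem flag)
            // together with the three raw fds gathered above; the master wires them
            // up as the stdin/stdout/stderr of the newly opened remote session.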
.open_new_session(&session, &stdios) .await?; Ok(( RemoteChild::new(established_session), child_stdin, child_stdout, child_stderr, )) } } openssh-0.11.0/src/native_mux_impl/mod.rs000064400000000000000000000004551046102023000165020ustar 00000000000000use super::{Error, Stdio}; pub(crate) use openssh_mux_client::{ForwardType, Socket}; mod stdio; pub(crate) use stdio::{ChildStderr, ChildStdin, ChildStdout}; mod command; pub(crate) use command::Command; mod child; pub(crate) use child::RemoteChild; mod session; pub(crate) use session::Session; openssh-0.11.0/src/native_mux_impl/session.rs000064400000000000000000000054531046102023000174110ustar 00000000000000use super::{Command, Error}; use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; use std::path::Path; use openssh_mux_client::{shutdown_mux_master, Connection}; use tempfile::TempDir; #[derive(Debug)] pub(crate) struct Session { /// TempDir will automatically removes the temporary dir on drop tempdir: Option, ctl: Box, } impl Session { pub(crate) fn new(dir: TempDir) -> Self { let ctl = dir.path().join("master").into_boxed_path(); Self { tempdir: Some(dir), ctl, } } pub(crate) fn resume(ctl: Box, _master_log: Option>) -> Self { Self { tempdir: None, ctl } } pub(crate) async fn check(&self) -> Result<(), Error> { Connection::connect(&self.ctl) .await? .send_alive_check() .await?; Ok(()) } pub(crate) fn ctl(&self) -> &Path { &self.ctl } pub(crate) fn raw_command>(&self, program: S) -> Command { Command::new(self.ctl.clone(), program.as_ref().as_bytes().into(), false) } pub(crate) fn subsystem>(&self, program: S) -> Command { Command::new(self.ctl.clone(), program.as_ref().as_bytes().into(), true) } pub(crate) async fn request_port_forward( &self, forward_type: crate::ForwardType, listen_socket: crate::Socket<'_>, connect_socket: crate::Socket<'_>, ) -> Result<(), Error> { Connection::connect(&self.ctl) .await? .request_port_forward( forward_type.into(), &listen_socket.into(), &connect_socket.into(), ) .await?; Ok(()) } async fn close_impl(&self) -> Result<(), Error> { Connection::connect(&self.ctl) .await? .request_stop_listening() .await?; Ok(()) } pub(crate) async fn close(mut self) -> Result, Error> { // Take self.tempdir so that drop would do nothing let tempdir = self.tempdir.take(); self.close_impl().await?; Ok(tempdir) } pub(crate) fn detach(mut self) -> (Box, Option>) { ( self.ctl.clone(), self.tempdir.take().map(TempDir::into_path).map(|mut path| { path.push("log"); path.into_boxed_path() }), ) } } impl Drop for Session { fn drop(&mut self) { // Keep tempdir alive until the shutdown request is sent let _tempdir = match self.tempdir.take() { Some(tempdir) => tempdir, // return since close must have already been called. None => return, }; let _ = shutdown_mux_master(&self.ctl); } } openssh-0.11.0/src/native_mux_impl/stdio.rs000064400000000000000000000072051046102023000170450ustar 00000000000000use crate::{stdio::StdioImpl, Error, Stdio}; use std::{ fs::{File, OpenOptions}, io, os::unix::io::{AsRawFd, OwnedFd, RawFd}, }; use libc::{c_int, fcntl, F_GETFL, F_SETFL, O_NONBLOCK}; use once_cell::sync::OnceCell; use tokio::net::unix::pipe::{pipe, Receiver as PipeReader, Sender as PipeWriter}; fn create_pipe() -> Result<(PipeReader, PipeWriter), Error> { pipe().map_err(Error::ChildIo).map(|(w, r)| (r, w)) } /// Open "/dev/null" with RW. 
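/// The file is opened once and cached in a static `OnceCell` for the lifetime of the
/// process, so callers should not close the returned fd.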
fn get_null_fd() -> Result { static NULL_FD: OnceCell = OnceCell::new(); let res = NULL_FD.get_or_try_init(|| { OpenOptions::new() .read(true) .write(true) .open("/dev/null") .map_err(Error::ChildIo) }); res.map(AsRawFd::as_raw_fd) } pub(crate) enum Fd { Owned(OwnedFd), Borrowed(RawFd), Null, } fn cvt(ret: c_int) -> io::Result { if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(ret) } } fn set_blocking_inner(fd: RawFd) -> io::Result<()> { let flags = cvt(unsafe { fcntl(fd, F_GETFL) })?; cvt(unsafe { fcntl(fd, F_SETFL, flags & (!O_NONBLOCK)) })?; Ok(()) } fn set_blocking(fd: RawFd) -> Result<(), Error> { set_blocking_inner(fd).map_err(Error::ChildIo) } impl Fd { pub(crate) fn as_raw_fd_or_null_fd(&self) -> Result { use Fd::*; match self { Owned(owned_fd) => Ok(owned_fd.as_raw_fd()), Borrowed(rawfd) => Ok(*rawfd), Null => get_null_fd(), } } } impl TryFrom for Fd { type Error = Error; fn try_from(pipe_reader: PipeReader) -> Result { pipe_reader .into_blocking_fd() .map_err(Error::ChildIo) .map(Fd::Owned) } } impl TryFrom for Fd { type Error = Error; fn try_from(pipe_writer: PipeWriter) -> Result { pipe_writer .into_blocking_fd() .map_err(Error::ChildIo) .map(Fd::Owned) } } impl Stdio { pub(crate) fn to_stdin(&self) -> Result<(Fd, Option), Error> { match &self.0 { StdioImpl::Inherit => Ok((Fd::Borrowed(io::stdin().as_raw_fd()), None)), StdioImpl::Null => Ok((Fd::Null, None)), StdioImpl::Pipe => { let (read, write) = create_pipe()?; Ok((read.try_into()?, Some(write))) } StdioImpl::Fd(fd) => { let raw_fd = fd.as_raw_fd(); set_blocking(raw_fd)?; Ok((Fd::Borrowed(raw_fd), None)) } } } fn to_output( &self, get_inherit_rawfd: fn() -> RawFd, ) -> Result<(Fd, Option), Error> { match &self.0 { StdioImpl::Inherit => Ok((Fd::Borrowed(get_inherit_rawfd()), None)), StdioImpl::Null => Ok((Fd::Null, None)), StdioImpl::Pipe => { let (read, write) = create_pipe()?; Ok((write.try_into()?, Some(read))) } StdioImpl::Fd(fd) => { let raw_fd = fd.as_raw_fd(); set_blocking(raw_fd)?; Ok((Fd::Borrowed(raw_fd), None)) } } } pub(crate) fn to_stdout(&self) -> Result<(Fd, Option), Error> { self.to_output(|| io::stdout().as_raw_fd()) } pub(crate) fn to_stderr(&self) -> Result<(Fd, Option), Error> { self.to_output(|| io::stderr().as_raw_fd()) } } pub(crate) type ChildStdin = PipeWriter; pub(crate) type ChildStdout = PipeReader; pub(crate) type ChildStderr = PipeReader; openssh-0.11.0/src/port_forwarding.rs000064400000000000000000000072021046102023000157260ustar 00000000000000#[cfg(feature = "native-mux")] use super::native_mux_impl; #[cfg(feature = "process-mux")] use std::ffi::OsStr; use std::borrow::Cow; use std::fmt; use std::net::{self, SocketAddr}; use std::path::{Path, PathBuf}; /// Type of forwarding #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum ForwardType { /// Forward requests to a port on the local machine to remote machine. Local, /// Forward requests to a port on the remote machine to local machine. Remote, } #[cfg(feature = "native-mux")] impl From for native_mux_impl::ForwardType { fn from(fwd_type: ForwardType) -> Self { use native_mux_impl::ForwardType::*; match fwd_type { ForwardType::Local => Local, ForwardType::Remote => Remote, } } } /// TCP/Unix socket #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum Socket<'a> { /// Unix socket. #[cfg(unix)] #[cfg_attr(docsrs, doc(cfg(unix)))] UnixSocket { /// Filesystem path path: Cow<'a, Path>, }, /// Tcp socket. TcpSocket { /// Hostname. host: Cow<'a, str>, /// Port. 
port: u16, }, } impl From for Socket<'static> { fn from(addr: SocketAddr) -> Self { Socket::TcpSocket { host: addr.ip().to_string().into(), port: addr.port(), } } } macro_rules! impl_from_addr { ($ip:ty) => { impl From<($ip, u16)> for Socket<'static> { fn from((ip, port): ($ip, u16)) -> Self { SocketAddr::new(ip.into(), port).into() } } }; } impl_from_addr!(net::IpAddr); impl_from_addr!(net::Ipv4Addr); impl_from_addr!(net::Ipv6Addr); impl<'a> From> for Socket<'a> { fn from(path: Cow<'a, Path>) -> Self { Socket::UnixSocket { path } } } impl<'a> From<&'a Path> for Socket<'a> { fn from(path: &'a Path) -> Self { Socket::UnixSocket { path: Cow::Borrowed(path), } } } impl From for Socket<'static> { fn from(path: PathBuf) -> Self { Socket::UnixSocket { path: Cow::Owned(path), } } } impl From> for Socket<'static> { fn from(path: Box) -> Self { Socket::UnixSocket { path: Cow::Owned(path.into()), } } } impl Socket<'_> { /// Create a new TcpSocket pub fn new<'a, S>(host: S, port: u16) -> Socket<'a> where S: Into>, { Socket::TcpSocket { host: host.into(), port, } } #[cfg(feature = "process-mux")] pub(crate) fn as_os_str(&self) -> Cow<'_, OsStr> { match self { #[cfg(unix)] Socket::UnixSocket { path } => Cow::Borrowed(path.as_os_str()), Socket::TcpSocket { host, port } => Cow::Owned(format!("{host}:{port}").into()), } } } #[cfg(feature = "native-mux")] impl<'a> From> for native_mux_impl::Socket<'a> { fn from(socket: Socket<'a>) -> Self { use native_mux_impl::Socket::*; match socket { #[cfg(unix)] Socket::UnixSocket { path } => UnixSocket { path }, Socket::TcpSocket { host, port } => TcpSocket { host, port: port as u32, }, } } } impl<'a> fmt::Display for Socket<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { #[cfg(unix)] Socket::UnixSocket { path } => { write!(f, "{}", path.display()) } Socket::TcpSocket { host, port } => write!(f, "{host}:{port}"), } } } openssh-0.11.0/src/process_impl/child.rs000064400000000000000000000021431046102023000163010ustar 00000000000000use super::Error; use std::io; use std::process::ExitStatus; use tokio::process; // Disconnects the ssh session at drop, but does not kill the remote process. #[derive(Debug)] pub(crate) struct RemoteChild { channel: process::Child, } impl RemoteChild { /// * `channel` - Must be created with `process::Command::kill_on_drop(true)`. pub(crate) fn new(channel: process::Child) -> Self { Self { channel } } pub(crate) async fn disconnect(mut self) -> io::Result<()> { // this disconnects, but does not kill the remote process self.channel.kill().await?; Ok(()) } pub(crate) async fn wait(mut self) -> Result { match self.channel.wait().await { Err(e) => Err(Error::Remote(e)), Ok(w) => match w.code() { Some(255) => Err(Error::RemoteProcessTerminated), Some(127) => Err(Error::Remote(io::Error::new( io::ErrorKind::NotFound, "remote command not found", ))), _ => Ok(w), }, } } } openssh-0.11.0/src/process_impl/command.rs000064400000000000000000000027671046102023000166500ustar 00000000000000use super::Error; use super::RemoteChild; use super::{ChildStderr, ChildStdin, ChildStdout}; use std::ffi::OsStr; use std::process::Stdio; use tokio::process; #[derive(Debug)] pub(crate) struct Command { builder: process::Command, } impl Command { pub(crate) fn new(mut builder: process::Command) -> Self { // Disconnects the ssh session at `RemoteChild::drop`, but does // not kill the remote process. 
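        // (`kill_on_drop` only terminates the local `ssh` process backing this
        // channel when the handle is dropped; the remote command keeps running.)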
builder.kill_on_drop(true); Self { builder } } } impl Command { pub(crate) fn raw_arg>(&mut self, arg: S) { self.builder.arg(arg); } pub(crate) fn stdin>(&mut self, cfg: T) { self.builder.stdin(cfg); } pub(crate) fn stdout>(&mut self, cfg: T) { self.builder.stdout(cfg); } pub(crate) fn stderr>(&mut self, cfg: T) { self.builder.stderr(cfg); } pub(crate) async fn spawn( &mut self, ) -> Result< ( RemoteChild, Option, Option, Option, ), Error, > { let mut channel = self.builder.spawn().map_err(Error::Ssh)?; let child_stdin = channel.stdin.take(); let child_stdout = channel.stdout.take(); let child_stderr = channel.stderr.take(); Ok(( RemoteChild::new(channel), child_stdin, child_stdout, child_stderr, )) } } openssh-0.11.0/src/process_impl/mod.rs000064400000000000000000000003761046102023000160030ustar 00000000000000use super::{Error, ForwardType, Socket}; pub(crate) use tokio::process::{ChildStderr, ChildStdin, ChildStdout}; mod session; pub(crate) use session::Session; mod command; pub(crate) use command::Command; mod child; pub(crate) use child::RemoteChild; openssh-0.11.0/src/process_impl/session.rs000064400000000000000000000155731046102023000167140ustar 00000000000000use super::{Command, Error, ForwardType, Socket}; use std::ffi::OsStr; use std::fs; use std::io; use std::path::Path; use std::process::Stdio; use tokio::process; use tempfile::TempDir; #[derive(Debug)] pub(crate) struct Session { tempdir: Option, ctl: Box, master_log: Option>, } impl Session { pub(crate) fn new(tempdir: TempDir) -> Self { let log = tempdir.path().join("log").into_boxed_path(); let ctl = tempdir.path().join("master").into_boxed_path(); Self { tempdir: Some(tempdir), ctl, master_log: Some(log), } } pub(crate) fn resume(ctl: Box, master_log: Option>) -> Self { Self { tempdir: None, ctl, master_log, } } fn new_std_cmd(&self, args: &[impl AsRef]) -> std::process::Command { let mut cmd = std::process::Command::new("ssh"); cmd.stdin(Stdio::null()) .arg("-S") .arg(&*self.ctl) .arg("-o") .arg("BatchMode=yes") .args(args) // ssh does not care about the addr as long as we have passed // `-S &*self.ctl`. // It is tested on OpenSSH 8.2p1, 8.9p1, 9.0p1 .arg("none"); cmd } fn new_cmd(&self, args: &[impl AsRef]) -> process::Command { self.new_std_cmd(args).into() } pub(crate) async fn check(&self) -> Result<(), Error> { let check = self .new_cmd(&["-O", "check"]) .output() .await .map_err(Error::Ssh)?; if let Some(255) = check.status.code() { if let Some(master_error) = self.discover_master_error() { Err(master_error) } else { Err(Error::Disconnected) } } else { Ok(()) } } pub(crate) fn ctl(&self) -> &Path { &self.ctl } pub(crate) fn raw_command>(&self, program: S) -> Command { // XXX: Should we do a self.check() here first? // NOTE: we pass -p 9 nine here (the "discard" port) to ensure that ssh does not // succeed in establishing a _new_ connection if the master connection has failed. let mut cmd = self.new_cmd(&["-T", "-p", "9"]); cmd.arg("--").arg(program); Command::new(cmd) } pub(crate) fn subsystem>(&self, program: S) -> Command { // XXX: Should we do a self.check() here first? // NOTE: we pass -p 9 nine here (the "discard" port) to ensure that ssh does not // succeed in establishing a _new_ connection if the master connection has failed. 
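        // `-s` asks ssh to treat `program` as an SSH subsystem (e.g. `sftp`)
        // rather than as a command line to execute.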
let mut cmd = self.new_cmd(&["-T", "-p", "9", "-s"]); cmd.arg("--").arg(program); Command::new(cmd) } pub(crate) async fn request_port_forward( &self, forward_type: ForwardType, listen_socket: Socket<'_>, connect_socket: Socket<'_>, ) -> Result<(), Error> { let flag = match forward_type { ForwardType::Local => OsStr::new("-L"), ForwardType::Remote => OsStr::new("-R"), }; let mut forwarding = listen_socket.as_os_str().into_owned(); forwarding.push(":"); forwarding.push(connect_socket.as_os_str()); let port_forwarding = self .new_cmd(&[OsStr::new("-fNT"), flag, &*forwarding]) .output() .await .map_err(Error::Ssh)?; if port_forwarding.status.success() { Ok(()) } else { let exit_err = String::from_utf8_lossy(&port_forwarding.stderr); let err = exit_err.trim(); if err.is_empty() { if let Some(master_error) = self.discover_master_error() { return Err(master_error); } } Err(Error::Ssh(io::Error::new(io::ErrorKind::Other, err))) } } async fn close_impl(&self) -> Result<(), Error> { let exit = self .new_cmd(&["-O", "exit"]) .output() .await .map_err(Error::Ssh)?; if let Some(master_error) = self.discover_master_error() { return Err(master_error); } // let's get this case straight: // we tried to tell the master to exit. // the -o exit command failed. // the master exited, but did not produce an error. // what could cause that? // // If the remote sshd process is accidentally killed, then the local // ssh multiplex server would exit without anything printed to the log, // and the -o exit command failed to connect to the multiplex server. // // Check `broken_connection` test in `tests/openssh.rs` for an example // of this scenario. if !exit.status.success() { let exit_err = String::from_utf8_lossy(&exit.stderr); let err = exit_err.trim(); return Err(Error::Ssh(io::Error::new( io::ErrorKind::ConnectionAborted, err, ))); } Ok(()) } pub(crate) async fn close(mut self) -> Result, Error> { // Take self.tempdir so that drop would do nothing let tempdir = self.tempdir.take(); self.close_impl().await?; Ok(tempdir) } pub(crate) fn detach(mut self) -> (Box, Option>) { self.tempdir.take().map(TempDir::into_path); (self.ctl.clone(), self.master_log.take()) } fn discover_master_error(&self) -> Option { let err = match fs::read_to_string(self.master_log.as_ref()?) { Ok(err) => err, Err(e) => return Some(Error::Master(e)), }; let mut stderr = err.trim(); stderr = stderr.strip_prefix("ssh: ").unwrap_or(stderr); if stderr.starts_with("Warning: Permanently added ") { // added to hosts file -- let's ignore that message stderr = stderr.split_once('\n').map(|x| x.1.trim()).unwrap_or(""); } if stderr.is_empty() { return None; } let kind = if stderr.contains("Connection to") && stderr.contains("closed by remote host") { io::ErrorKind::ConnectionAborted } else { io::ErrorKind::Other }; Some(Error::Master(io::Error::new(kind, stderr))) } } impl Drop for Session { fn drop(&mut self) { // Keep tempdir alive until the connection is established let _tempdir = match self.tempdir.take() { Some(tempdir) => tempdir, // return since close must have already been called. 
None => return, }; let _ = self .new_std_cmd(&["-O", "exit"]) .stdout(Stdio::null()) .stderr(Stdio::null()) .status(); } } openssh-0.11.0/src/session.rs000064400000000000000000000474221046102023000142130ustar 00000000000000use super::{Error, ForwardType, KnownHosts, OwningCommand, SessionBuilder, Socket}; #[cfg(feature = "process-mux")] use super::process_impl; #[cfg(feature = "native-mux")] use super::native_mux_impl; use std::borrow::Cow; use std::ffi::OsStr; use std::ops::Deref; use std::path::Path; use tempfile::TempDir; #[derive(Debug)] pub(crate) enum SessionImp { #[cfg(feature = "process-mux")] ProcessImpl(process_impl::Session), #[cfg(feature = "native-mux")] NativeMuxImpl(native_mux_impl::Session), } #[cfg(any(feature = "process-mux", feature = "native-mux"))] macro_rules! delegate { ($impl:expr, $var:ident, $then:block) => {{ match $impl { #[cfg(feature = "process-mux")] SessionImp::ProcessImpl($var) => $then, #[cfg(feature = "native-mux")] SessionImp::NativeMuxImpl($var) => $then, } }}; } #[cfg(not(any(feature = "process-mux", feature = "native-mux")))] macro_rules! delegate { ($impl:expr, $var:ident, $then:block) => {{ unreachable!("Neither feature process-mux nor native-mux is enabled") }}; } /// A single SSH session to a remote host. /// /// You can use [`command`](Session::command) to start a new command on the connected machine. /// /// When the `Session` is dropped, the connection to the remote host is severed, and any errors /// silently ignored. To disconnect and be alerted to errors, use [`close`](Session::close). #[derive(Debug)] pub struct Session(SessionImp); // TODO: UserKnownHostsFile for custom known host fingerprint. impl Session { /// The method for creating a [`Session`] and externally control the creation of TempDir. /// /// By using the built-in [`SessionBuilder`] in openssh, or a custom SessionBuilder, /// create a TempDir. /// /// # Examples /// /// ```rust,no_run /// # use std::error::Error; /// # #[cfg(feature = "process-mux")] /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// /// use openssh::{Session, Stdio, SessionBuilder}; /// use openssh_sftp_client::Sftp; /// /// let builder = SessionBuilder::default(); /// let (builder, destination) = builder.resolve("ssh://jon@ssh.thesquareplanet.com:222"); /// let tempdir = builder.launch_master(destination).await?; /// /// let session = Session::new_process_mux(tempdir); /// /// let mut child = session /// .subsystem("sftp") /// .stdin(Stdio::piped()) /// .stdout(Stdio::piped()) /// .spawn() /// .await?; /// /// Sftp::new( /// child.stdin().take().unwrap(), /// child.stdout().take().unwrap(), /// Default::default(), /// ) /// .await? /// .close() /// .await?; /// /// # Ok(()) } /// ``` #[cfg(feature = "process-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "process-mux")))] pub fn new_process_mux(tempdir: TempDir) -> Self { Self(SessionImp::ProcessImpl(process_impl::Session::new(tempdir))) } /// The method for creating a [`Session`] and externally control the creation of TempDir. /// /// By using the built-in [`SessionBuilder`] in openssh, or a custom SessionBuilder, /// create a TempDir. 
/// /// # Examples /// /// ```rust,no_run /// # use std::error::Error; /// # #[cfg(feature = "native-mux")] /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// /// use openssh::{Session, Stdio, SessionBuilder}; /// use openssh_sftp_client::Sftp; /// /// let builder = SessionBuilder::default(); /// let (builder, destination) = builder.resolve("ssh://jon@ssh.thesquareplanet.com:222"); /// let tempdir = builder.launch_master(destination).await?; /// /// let session = Session::new_native_mux(tempdir); /// let mut child = session /// .subsystem("sftp") /// .stdin(Stdio::piped()) /// .stdout(Stdio::piped()) /// .spawn() /// .await?; /// /// Sftp::new( /// child.stdin().take().unwrap(), /// child.stdout().take().unwrap(), /// Default::default(), /// ) /// .await? /// .close() /// .await?; /// /// # Ok(()) } /// ``` #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] pub fn new_native_mux(tempdir: TempDir) -> Self { Self(SessionImp::NativeMuxImpl(native_mux_impl::Session::new( tempdir, ))) } /// Resume the connection using path to control socket and /// path to ssh multiplex output log. /// /// If you do not use `-E` option (or redirection) to write /// the log of the ssh multiplex master to the disk, you can /// simply pass `None` to `master_log`. /// /// [`Session`] created this way will not be terminated on drop, /// but can be forced terminated by [`Session::close`]. /// /// This connects to the ssh multiplex master using process mux impl. #[cfg(feature = "process-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "process-mux")))] pub fn resume(ctl: Box, master_log: Option>) -> Self { Self(SessionImp::ProcessImpl(process_impl::Session::resume( ctl, master_log, ))) } /// Same as [`Session::resume`] except that it connects to /// the ssh multiplex master using native mux impl. #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] pub fn resume_mux(ctl: Box, master_log: Option>) -> Self { Self(SessionImp::NativeMuxImpl(native_mux_impl::Session::resume( ctl, master_log, ))) } /// Connect to the host at the given `host` over SSH using process impl, which will /// spawn a new ssh process for each `Child` created. /// /// The format of `destination` is the same as the `destination` argument to `ssh`. It may be /// specified as either `[user@]hostname` or a URI of the form `ssh://[user@]hostname[:port]`. /// /// If connecting requires interactive authentication based on `STDIN` (such as reading a /// password), the connection will fail. Consider setting up keypair-based authentication /// instead. /// /// For more options, see [`SessionBuilder`]. #[cfg(feature = "process-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "process-mux")))] pub async fn connect>(destination: S, check: KnownHosts) -> Result { SessionBuilder::default() .known_hosts_check(check) .connect(destination) .await } /// Connect to the host at the given `host` over SSH using native mux impl, which /// will create a new socket connection for each `Child` created. /// /// See the crate-level documentation for more details on the difference between native and process-based mux. /// /// The format of `destination` is the same as the `destination` argument to `ssh`. It may be /// specified as either `[user@]hostname` or a URI of the form `ssh://[user@]hostname[:port]`. /// /// If connecting requires interactive authentication based on `STDIN` (such as reading a /// password), the connection will fail. Consider setting up keypair-based authentication /// instead. 
/// /// For more options, see [`SessionBuilder`]. #[cfg(feature = "native-mux")] #[cfg_attr(docsrs, doc(cfg(feature = "native-mux")))] pub async fn connect_mux>( destination: S, check: KnownHosts, ) -> Result { SessionBuilder::default() .known_hosts_check(check) .connect_mux(destination) .await } /// Check the status of the underlying SSH connection. #[cfg(not(windows))] #[cfg_attr(docsrs, doc(cfg(not(windows))))] pub async fn check(&self) -> Result<(), Error> { delegate!(&self.0, imp, { imp.check().await }) } /// Get the SSH connection's control socket path. #[cfg(not(windows))] #[cfg_attr(docsrs, doc(cfg(not(windows))))] pub fn control_socket(&self) -> &Path { delegate!(&self.0, imp, { imp.ctl() }) } /// Constructs a new [`OwningCommand`] for launching the program at path `program` on the remote /// host. /// /// Before it is passed to the remote host, `program` is escaped so that special characters /// aren't evaluated by the remote shell. If you do not want this behavior, use /// [`raw_command`](Session::raw_command). /// /// The returned `OwningCommand` is a builder, with the following default configuration: /// /// * No arguments to the program /// * Empty stdin and discard stdout/stderr for `spawn` or `status`, but create output pipes for /// `output` /// /// Builder methods are provided to change these defaults and otherwise configure the process. /// /// If `program` is not an absolute path, the `PATH` will be searched in an OS-defined way on /// the host. pub fn command<'a, S: Into>>(&self, program: S) -> OwningCommand<&'_ Self> { Self::to_command(self, program) } /// Constructs a new [`OwningCommand`] for launching the program at path `program` on the remote /// host. /// /// Unlike [`command`](Session::command), this method does not shell-escape `program`, so it may be evaluated in /// unforeseen ways by the remote shell. /// /// The returned `OwningCommand` is a builder, with the following default configuration: /// /// * No arguments to the program /// * Empty stdin and dsicard stdout/stderr for `spawn` or `status`, but create output pipes for /// `output` /// /// Builder methods are provided to change these defaults and otherwise configure the process. /// /// If `program` is not an absolute path, the `PATH` will be searched in an OS-defined way on /// the host. 
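    ///
    /// # Examples
    ///
    /// A minimal sketch (the hostname is a placeholder); note that `$HOME` reaches the remote
    /// shell verbatim and is expanded there, because no escaping is applied:
    ///
    /// ```rust,no_run
    /// # #[cfg(feature = "native-mux")]
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), openssh::Error> {
    /// use openssh::{KnownHosts, Session};
    ///
    /// let session = Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?;
    ///
    /// // The command string is passed to the remote shell unescaped.
    /// let home = session.raw_command("echo $HOME").output().await?;
    /// println!("{}", String::from_utf8_lossy(&home.stdout));
    ///
    /// session.close().await?;
    /// # Ok(()) }
    /// ```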
pub fn raw_command>(&self, program: S) -> OwningCommand<&'_ Self> { Self::to_raw_command(self, program) } /// Version of [`command`](Self::command) which stores an /// `Arc` instead of a reference, making the resulting /// [`OwningCommand`] independent from the source [`Session`] and /// simplifying lifetime management and concurrent usage: /// /// ```rust,no_run /// # use std::sync::Arc; /// # use tokio::io::AsyncReadExt; /// # use openssh::{Session, KnownHosts}; /// # #[cfg(feature = "native-mux")] /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// /// let session = Arc::new(Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?); /// /// let mut log = session.arc_command("less").arg("+F").arg("./some-log-file").spawn().await?; /// # let t: tokio::task::JoinHandle> = /// tokio::spawn(async move { /// // can move the child around /// let mut stdout = log.stdout().take().unwrap(); /// let mut buf = vec![0;100]; /// loop { /// let n = stdout.read(&mut buf).await?; /// if n == 0 { /// return Ok(()) /// } /// println!("read {:?}", &buf[..n]); /// } /// }); /// # t.await??; /// # Ok(()) } pub fn arc_command<'a, P: Into>>( self: std::sync::Arc, program: P, ) -> OwningCommand> { Self::to_command(self, program) } /// Version of [`raw_command`](Self::raw_command) which stores an /// `Arc`, similar to [`arc_command`](Self::arc_command). pub fn arc_raw_command>( self: std::sync::Arc, program: P, ) -> OwningCommand> { Self::to_raw_command(self, program) } /// Version of [`command`](Self::command) which stores an /// arbitrary shared-ownership smart pointer to a [`Session`], /// more generic but less convenient than /// [`arc_command`](Self::arc_command). pub fn to_command<'a, S, P>(session: S, program: P) -> OwningCommand where P: Into>, S: Deref + Clone, { Self::to_raw_command(session, &*shell_escape::unix::escape(program.into())) } /// Version of [`raw_command`](Self::raw_command) which stores an /// arbitrary shared-ownership smart pointer to a [`Session`], /// more generic but less convenient than /// [`arc_raw_command`](Self::arc_raw_command). pub fn to_raw_command(session: S, program: P) -> OwningCommand where P: AsRef, S: Deref + Clone, { let session_impl = delegate!(&session.0, imp, { imp.raw_command(program.as_ref()).into() }); OwningCommand::new(session, session_impl) } /// Constructs a new [`OwningCommand`] for launching subsystem `program` on the remote /// host. /// /// Unlike [`command`](Session::command), this method does not shell-escape `program`, so it may be evaluated in /// unforeseen ways by the remote shell. /// /// The returned `OwningCommand` is a builder, with the following default configuration: /// /// * No arguments to the program /// * Empty stdin and dsicard stdout/stderr for `spawn` or `status`, but create output pipes for /// `output` /// /// Builder methods are provided to change these defaults and otherwise configure the process. 
/// /// ## Sftp subsystem /// /// To use the sftp subsystem, you'll want to use [`openssh-sftp-client`], /// then use the following code to construct a sftp instance: /// /// [`openssh-sftp-client`]: https://crates.io/crates/openssh-sftp-client /// /// ```rust,no_run /// # use std::error::Error; /// # #[cfg(feature = "native-mux")] /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// /// use openssh::{Session, KnownHosts, Stdio}; /// use openssh_sftp_client::Sftp; /// /// let session = Session::connect_mux("me@ssh.example.com", KnownHosts::Strict).await?; /// /// let mut child = session /// .subsystem("sftp") /// .stdin(Stdio::piped()) /// .stdout(Stdio::piped()) /// .spawn() /// .await?; /// /// Sftp::new( /// child.stdin().take().unwrap(), /// child.stdout().take().unwrap(), /// Default::default(), /// ) /// .await? /// .close() /// .await?; /// /// # Ok(()) } /// ``` pub fn subsystem>(&self, program: S) -> OwningCommand<&'_ Self> { Self::to_subsystem(self, program) } /// Version of [`subsystem`](Self::subsystem) which stores an /// arbitrary shared-ownership pointer to a session making the /// resulting [`OwningCommand`] independent from the source /// [`Session`] and simplifying lifetime management and concurrent /// usage: pub fn to_subsystem(session: S, program: P) -> OwningCommand where P: AsRef, S: Deref + Clone, { let session_impl = delegate!(&session.0, imp, { imp.subsystem(program.as_ref()).into() }); OwningCommand::new(session, session_impl) } /// Constructs a new [`OwningCommand`] that runs the provided shell command on the remote host. /// /// The provided command is passed as a single, escaped argument to `sh -c`, and from that /// point forward the behavior is up to `sh`. Since this executes a shell command, keep in mind /// that you are subject to the shell's rules around argument parsing, such as whitespace /// splitting, variable expansion, and other funkyness. I _highly_ recommend you read /// [this article] if you observe strange things. /// /// While the returned `OwningCommand` is a builder, like for [`command`](Session::command), you should not add /// additional arguments to it, since the arguments are already passed within the shell /// command. /// /// # Non-standard Remote Shells /// /// It is worth noting that there are really _two_ shells at work here: the one that sshd /// launches for the session, and that launches are command; and the instance of `sh` that we /// launch _in_ that session. This method tries hard to ensure that the provided `command` is /// passed exactly as-is to `sh`, but this is complicated by the presence of the "outer" shell. /// That outer shell may itself perform argument splitting, variable expansion, and the like, /// which might produce unintuitive results. For example, the outer shell may try to expand a /// variable that is only defined in the inner shell, and simply produce an empty string in the /// variable's place by the time it gets to `sh`. /// /// To counter this, this method assumes that the remote shell (the one launched by `sshd`) is /// [POSIX compliant]. This is more or less equivalent to "supports `bash` syntax" if you don't /// look too closely. It uses [`shell-escape`] to escape `command` before sending it to the /// remote shell, with the expectation that the remote shell will only end up undoing that one /// "level" of escaping, thus producing the original `command` as an argument to `sh`. This /// works _most of the time_. 
/// /// With sufficiently complex or weird commands, the escaping of `shell-escape` may not fully /// match the "un-escaping" of the remote shell. This will manifest as escape characters /// appearing in the `sh` command that you did not intend to be there. If this happens, try /// changing the remote shell if you can, or fall back to [`command`](Session::command) /// and do the escaping manually instead. /// /// [POSIX compliant]: https://pubs.opengroup.org/onlinepubs/9699919799/xrat/V4_xcu_chap02.html /// [this article]: https://mywiki.wooledge.org/Arguments /// [`shell-escape`]: https://crates.io/crates/shell-escape pub fn shell>(&self, command: S) -> OwningCommand<&'_ Self> { let mut cmd = self.command("sh"); cmd.arg("-c").arg(command.as_ref()); cmd } /// Request to open a local/remote port forwarding. /// The `Socket` can be either a unix socket or a tcp socket. /// /// If `forward_type` == Local, then `listen_socket` on local machine will be /// forwarded to `connect_socket` on remote machine. /// /// Otherwise, `listen_socket` on the remote machine will be forwarded to `connect_socket` /// on the local machine. /// /// Currently, there is no way of stopping a port forwarding due to the fact that /// openssh multiplex server/master does not support this. pub async fn request_port_forward( &self, forward_type: impl Into, listen_socket: impl Into>, connect_socket: impl Into>, ) -> Result<(), Error> { delegate!(&self.0, imp, { imp.request_port_forward( forward_type.into(), listen_socket.into(), connect_socket.into(), ) .await }) } /// Terminate the remote connection. /// /// This destructor terminates the ssh multiplex server /// regardless of how it was created. pub async fn close(self) -> Result<(), Error> { let res: Result, Error> = delegate!(self.0, imp, { imp.close().await }); res?.map(TempDir::close) .transpose() .map_err(Error::Cleanup) .map(|_| ()) } /// Detach the lifetime of underlying ssh multiplex master /// from this `Session`. /// /// Return (path to control socket, path to ssh multiplex output log) pub fn detach(self) -> (Box, Option>) { delegate!(self.0, imp, { imp.detach() }) } } openssh-0.11.0/src/stdio.rs000064400000000000000000000172361046102023000136520ustar 00000000000000use super::Error; #[cfg(feature = "native-mux")] use super::native_mux_impl; use std::fs::File; use std::io; use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd, RawFd}; use std::pin::Pin; use std::process; use std::task::{Context, Poll}; use tokio::{ io::{AsyncRead, AsyncWrite, ReadBuf}, net::unix::pipe::{Receiver as PipeReader, Sender as PipeWriter}, }; #[derive(Debug)] pub(crate) enum StdioImpl { /// Read/Write to /dev/null Null, /// Read/Write to a newly created pipe Pipe, /// Read/Write to custom fd Fd(OwnedFd), /// Inherit stdin/stdout/stderr Inherit, } /// Describes what to do with a standard I/O stream for a remote child process /// when passed to the stdin, stdout, and stderr methods of Command. #[derive(Debug)] pub struct Stdio(pub(crate) StdioImpl); impl Stdio { /// A new pipe should be arranged to connect the parent and remote child processes. pub const fn piped() -> Self { Self(StdioImpl::Pipe) } /// This stream will be ignored. /// This is the equivalent of attaching the stream to /dev/null. pub const fn null() -> Self { Self(StdioImpl::Null) } /// The child inherits from the corresponding parent descriptor. 
/// /// NOTE that the stdio fd must be in blocking mode, otherwise /// ssh might not flush all output since it considers /// (`EAGAIN`/`EWOULDBLOCK`) as an error pub const fn inherit() -> Self { Self(StdioImpl::Inherit) } /// `Stdio::from_raw_fd_owned` takes ownership of the fd passed in /// and closes the fd on drop. /// /// NOTE that the fd will be put into blocking mode, then it will be /// closed when `Stdio` is dropped. /// /// # Safety /// /// * `fd` - must be a valid fd and must give its ownership to `Stdio`. pub unsafe fn from_raw_fd_owned(fd: RawFd) -> Self { Self(StdioImpl::Fd(OwnedFd::from_raw_fd(fd))) } } impl From for process::Stdio { fn from(stdio: Stdio) -> Self { match stdio.0 { StdioImpl::Null => process::Stdio::null(), StdioImpl::Pipe => process::Stdio::piped(), StdioImpl::Inherit => process::Stdio::inherit(), StdioImpl::Fd(fd) => process::Stdio::from(fd), } } } impl From for Stdio { fn from(fd: OwnedFd) -> Self { Self(StdioImpl::Fd(fd)) } } macro_rules! impl_from_for_stdio { ($type:ty) => { impl From<$type> for Stdio { fn from(arg: $type) -> Self { Self(StdioImpl::Fd(arg.into())) } } }; } macro_rules! impl_try_from_for_stdio { ($type:ty) => { impl TryFrom<$type> for Stdio { type Error = Error; fn try_from(arg: $type) -> Result { Ok(Self(StdioImpl::Fd( arg.into_owned_fd().map_err(Error::ChildIo)?, ))) } } }; } impl_from_for_stdio!(process::ChildStdin); impl_from_for_stdio!(process::ChildStdout); impl_from_for_stdio!(process::ChildStderr); impl_try_from_for_stdio!(ChildStdin); impl_try_from_for_stdio!(ChildStdout); impl_try_from_for_stdio!(ChildStderr); impl_from_for_stdio!(File); macro_rules! impl_try_from_tokio_process_child_for_stdio { ($type:ident) => { impl TryFrom for Stdio { type Error = Error; fn try_from(arg: tokio::process::$type) -> Result { arg.into_owned_fd().map_err(Error::ChildIo).map(Into::into) } } }; } impl_try_from_tokio_process_child_for_stdio!(ChildStdin); impl_try_from_tokio_process_child_for_stdio!(ChildStdout); impl_try_from_tokio_process_child_for_stdio!(ChildStderr); /// Input for the remote child. #[derive(Debug)] pub struct ChildStdin(PipeWriter); /// Stdout for the remote child. #[derive(Debug)] pub struct ChildStdout(PipeReader); /// Stderr for the remote child. #[derive(Debug)] pub struct ChildStderr(PipeReader); pub(crate) trait TryFromChildIo: Sized { type Error; fn try_from(arg: T) -> Result; } macro_rules! impl_from_impl_child_io { (process, $type:ident, $inner:ty) => { impl TryFromChildIo for $type { type Error = Error; fn try_from(arg: tokio::process::$type) -> Result { let fd = arg.into_owned_fd().map_err(Error::ChildIo)?; <$inner>::from_owned_fd(fd) .map(Self) .map_err(Error::ChildIo) } } }; (native_mux, $type:ident) => { #[cfg(feature = "native-mux")] impl TryFromChildIo for $type { type Error = Error; fn try_from(arg: native_mux_impl::$type) -> Result { Ok(Self(arg)) } } }; } impl_from_impl_child_io!(process, ChildStdin, PipeWriter); impl_from_impl_child_io!(process, ChildStdout, PipeReader); impl_from_impl_child_io!(process, ChildStderr, PipeReader); impl_from_impl_child_io!(native_mux, ChildStdin); impl_from_impl_child_io!(native_mux, ChildStdout); impl_from_impl_child_io!(native_mux, ChildStderr); macro_rules! 
impl_child_stdio { (AsRawFd, $type:ty) => { impl AsRawFd for $type { fn as_raw_fd(&self) -> RawFd { self.0.as_raw_fd() } } }; (AsFd, $type:ty) => { impl AsFd for $type { fn as_fd(&self) -> BorrowedFd<'_> { self.0.as_fd() } } }; (into_owned_fd, $type:ty) => { impl $type { /// Convert into an owned fd, it'd be deregisted from tokio and in blocking mode. pub fn into_owned_fd(self) -> io::Result { self.0.into_blocking_fd() } } }; (AsyncRead, $type:ty) => { impl_child_stdio!(AsRawFd, $type); impl_child_stdio!(AsFd, $type); impl_child_stdio!(into_owned_fd, $type); impl AsyncRead for $type { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } } }; (AsyncWrite, $type: ty) => { impl_child_stdio!(AsRawFd, $type); impl_child_stdio!(AsFd, $type); impl_child_stdio!(into_owned_fd, $type); impl AsyncWrite for $type { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.0).poll_write(cx, buf) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.0).poll_write_vectored(cx, bufs) } fn is_write_vectored(&self) -> bool { self.0.is_write_vectored() } } }; } impl_child_stdio!(AsyncWrite, ChildStdin); impl_child_stdio!(AsyncRead, ChildStdout); impl_child_stdio!(AsyncRead, ChildStderr); openssh-0.11.0/tests/openssh.rs000064400000000000000000000773031046102023000145630ustar 00000000000000use once_cell::sync::Lazy; use regex::Regex; use std::{ env, io::{self, Write}, net::IpAddr, path::PathBuf, process, time::Duration, }; use tempfile::tempdir; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::{UnixListener, UnixStream}, time::sleep, }; use openssh::*; // TODO: how do we test the connection actually _failing_ so that the master reports an error? 
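// These tests expect a reachable sshd at `TEST_HOST` (defaulting to
// ssh://test-user@127.0.0.1:2222); most of them are ignored unless the test
// suite is compiled with `--cfg ci`.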
fn addr() -> String { std::env::var("TEST_HOST").unwrap_or_else(|_| "ssh://test-user@127.0.0.1:2222".to_string()) } fn loopback() -> IpAddr { "127.0.0.1".parse().unwrap() } fn get_known_hosts_path() -> PathBuf { let mut path = env::var_os("XDG_RUNTIME_DIR") .map(PathBuf::from) .unwrap_or_else(|| "/tmp".into()); path.push("openssh-rs/known_hosts"); path } async fn session_builder_connect(mut builder: SessionBuilder, addr: &str) -> Vec { let mut sessions = Vec::with_capacity(2); builder.user_known_hosts_file(get_known_hosts_path()); #[cfg(feature = "process-mux")] { sessions.push(builder.connect(addr).await.unwrap()); } #[cfg(feature = "native-mux")] { sessions.push(builder.connect_mux(addr).await.unwrap()); } sessions } async fn connects_with_name() -> Vec<(Session, &'static str)> { let mut sessions = Vec::with_capacity(2); let mut builder = SessionBuilder::default(); builder .user_known_hosts_file(get_known_hosts_path()) .known_hosts_check(KnownHosts::Accept); #[cfg(feature = "process-mux")] { sessions.push((builder.connect(&addr()).await.unwrap(), "process-mux")); } #[cfg(feature = "native-mux")] { sessions.push((builder.connect_mux(&addr()).await.unwrap(), "native-mux")); } sessions } async fn connects() -> Vec { connects_with_name() .await .into_iter() .map(|(session, _name)| session) .collect() } async fn connects_err(host: &str) -> Vec { session_builder_connects_err(host, SessionBuilder::default()).await } async fn session_builder_connects_err(host: &str, mut builder: SessionBuilder) -> Vec { builder .user_known_hosts_file(get_known_hosts_path()) .known_hosts_check(KnownHosts::Accept); let mut errors = Vec::with_capacity(2); #[cfg(feature = "process-mux")] { errors.push(builder.connect(host).await.unwrap_err()); } #[cfg(feature = "native-mux")] { errors.push(builder.connect_mux(host).await.unwrap_err()); } errors } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn it_connects() { for session in connects().await { session.check().await.unwrap(); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn control_dir() { let dirname = std::path::Path::new("control-test"); assert!(!dirname.exists()); std::fs::create_dir(dirname).unwrap(); let mut session_builder = SessionBuilder::default(); session_builder.control_directory(&dirname); for session in session_builder_connect(session_builder, &addr()).await { session.check().await.unwrap(); let mut iter = std::fs::read_dir(&dirname).unwrap(); assert!(iter.next().is_some()); session.close().await.unwrap(); } std::fs::remove_dir(&dirname).unwrap(); } #[derive(Default, Debug, PartialEq, Eq)] struct ProtoUserHostPort<'a> { proto: Option<&'a str>, user: Option<&'a str>, host: Option<&'a str>, port: Option<&'a str>, } fn parse_user_host_port(s: &str) -> Option { static SSH_REGEX: Lazy = Lazy::new(|| { Regex::new( r"(?x)^((?P[[:alpha:]]+)://)?((?P.*?)@)?(?P.*?)(:(?P\d+))?$", ) .unwrap() }); SSH_REGEX.captures(s).map(|cap| ProtoUserHostPort { proto: cap.name("proto").map(|m| m.as_str()), user: cap.name("user").map(|m| m.as_str()), host: cap.name("host").map(|m| m.as_str()), port: cap.name("port").map(|m| m.as_str()), }) } #[test] fn test_parse_proto_user_host_port() { let addr = "ssh://test-user@127.0.0.1:2222"; let parsed_addr = parse_user_host_port(addr).unwrap(); assert_eq!("ssh", parsed_addr.proto.unwrap()); assert_eq!("test-user", parsed_addr.user.unwrap()); assert_eq!("127.0.0.1", parsed_addr.host.unwrap()); assert_eq!("2222", parsed_addr.port.unwrap()); } #[test] fn test_parse_user_host_port() { let addr = 
"test-user@127.0.0.1:2222"; let parsed_addr = parse_user_host_port(addr).unwrap(); assert!(parsed_addr.proto.is_none()); assert_eq!("test-user", parsed_addr.user.unwrap()); assert_eq!("127.0.0.1", parsed_addr.host.unwrap()); assert_eq!("2222", parsed_addr.port.unwrap()); } #[test] fn test_parse_user_host() { let addr = "test-user@127.0.0.1"; let parsed_addr = parse_user_host_port(addr).unwrap(); assert!(parsed_addr.proto.is_none()); assert_eq!("test-user", parsed_addr.user.unwrap()); assert_eq!("127.0.0.1", parsed_addr.host.unwrap()); assert!(parsed_addr.port.is_none()); } #[test] fn test_parse_host_port() { let addr = "127.0.0.1:2222"; let parsed_addr = parse_user_host_port(addr).unwrap(); assert!(parsed_addr.proto.is_none()); assert!(parsed_addr.user.is_none()); assert_eq!("127.0.0.1", parsed_addr.host.unwrap()); assert_eq!("2222", parsed_addr.port.unwrap()); } #[test] fn test_parse_host() { let addr = "127.0.0.1"; let parsed_addr = parse_user_host_port(addr).unwrap(); assert!(parsed_addr.proto.is_none()); assert!(parsed_addr.user.is_none()); assert_eq!("127.0.0.1", parsed_addr.host.unwrap()); assert!(parsed_addr.port.is_none()); } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn config_file() { let dirname = std::path::Path::new("config-file-test"); let ssh_config_file = dirname.join("alternate_ssh_config"); assert!(!dirname.exists()); assert!(!ssh_config_file.exists()); std::fs::create_dir(dirname).unwrap(); let addr = addr(); let parsed_addr = parse_user_host_port(&addr).unwrap(); let ssh_config_contents = format!( r#"Host config-file-test User {} HostName {} Port {}"#, parsed_addr.user.unwrap_or("test-user"), parsed_addr.host.unwrap_or("127.0.0.1"), parsed_addr.port.unwrap_or("2222") ); let mut ssh_config_handle = std::fs::File::create(&ssh_config_file).unwrap(); ssh_config_handle .write_all(ssh_config_contents.as_bytes()) .unwrap(); let mut session_builder = SessionBuilder::default(); session_builder .known_hosts_check(KnownHosts::Accept) .config_file(&ssh_config_file); // this host name is resolved by the custom ssh_config. for session in session_builder_connect(session_builder, "config-file-test").await { session.check().await.unwrap(); session.close().await.unwrap(); } std::fs::remove_dir_all(&dirname).unwrap(); } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn terminate_on_drop() { let mut builder = SessionBuilder::default(); builder .user_known_hosts_file(get_known_hosts_path()) .known_hosts_check(KnownHosts::Add); #[cfg(feature = "process-mux")] { drop(builder.connect(&addr()).await.unwrap()); } #[cfg(feature = "native-mux")] { drop(builder.connect_mux(&addr()).await.unwrap()); } // NOTE: how do we test that it actually killed the master here? 
} #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn stdout() { for session in connects().await { let child = session.command("echo").arg("foo").output().await.unwrap(); assert_eq!(child.stdout, b"foo\n"); let child = session .command("echo") .arg("foo") .raw_arg(">") .arg("/dev/stderr") .output() .await .unwrap(); assert!(child.stdout.is_empty()); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn over_session_ok() { for session in connects().await { let mut command = std::process::Command::new("echo") .arg("foo") .over_ssh(&session) .expect("No env vars or current working dir is set."); let child = command.output().await.unwrap(); assert_eq!(child.stdout, b"foo\n"); let child = session .command("echo") .arg("foo") .raw_arg(">") .arg("/dev/stderr") .output() .await .unwrap(); assert!(child.stdout.is_empty()); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn over_session_ok_require_escaping_arguments() { for session in connects().await { let mut command = std::process::Command::new("echo") .arg("\"\'\' foo \'\'\"") .over_ssh(&session) .expect("No env vars or current working dir is set."); let child = command.output().await.unwrap(); assert_eq!(child.stdout, b"\"\'\' foo \'\'\"\n"); let child = session .command("echo") .arg("foo") .raw_arg(">") .arg("/dev/stderr") .output() .await .unwrap(); assert!(child.stdout.is_empty()); session.close().await.unwrap(); } } /// Test that `over_ssh` errors if the source command has env vars specified. #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn over_session_err_because_env_var() { for session in connects().await { let command_with_env = std::process::Command::new("printenv") .arg("MY_ENV_VAR") .env("MY_ENV_VAR", "foo") .over_ssh(&session); assert!(matches!( command_with_env, Err(openssh::Error::CommandHasEnv) )); } } /// Test that `over_ssh` errors if the source command has a `current_dir` specified. 
#[tokio::test] #[cfg_attr(not(ci), ignore)] async fn over_session_err_because_cwd() { for session in connects().await { let command_with_current_dir = std::process::Command::new("echo") .arg("foo") .current_dir("/tmp") .over_ssh(&session); assert!(matches!( command_with_current_dir, Err(openssh::Error::CommandHasCwd) )); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn shell() { for session in connects().await { let child = session.shell("echo $USER").output().await.unwrap(); assert_eq!(child.stdout, b"test-user\n"); let child = session .shell(r#"touch "$USER Documents""#) .status() .await .unwrap(); assert!(child.success()); let child = session .shell(r#"rm test-user\ Documents"#) .output() .await .unwrap(); eprintln!("shell: {:#?}", child); assert!(child.status.success()); let child = session.shell("echo \\$SHELL").output().await.unwrap(); assert_eq!(child.stdout, b"$SHELL\n"); let child = session .shell(r#"echo $USER | grep -c test"#) .status() .await .unwrap(); eprintln!("shell: {:#?}", child); assert!(child.success()); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn stderr() { for session in connects().await { let child = session.command("echo").arg("foo").output().await.unwrap(); assert!(child.stderr.is_empty()); let child = session .command("echo") .arg("foo") .raw_arg(">") .arg("/dev/stderr") .output() .await .unwrap(); assert_eq!(child.stderr, b"foo\n"); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn stdin() { for session in connects().await { let mut child = session .command("cat") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .await .unwrap(); // write something to standard in and send EOF let mut stdin = child.stdin().take().unwrap(); stdin.write_all(b"hello world").await.unwrap(); drop(stdin); // cat should print it back on stdout let mut stdout = child.stdout().take().unwrap(); let mut out = String::new(); stdout.read_to_string(&mut out).await.unwrap(); assert_eq!(out, "hello world"); drop(stdout); // cat should now have terminated let status = child.wait().await.unwrap(); // ... successfully assert!(status.success()); session.close().await.unwrap(); } } macro_rules! assert_remote_kind { ($e:expr, $kind:expr) => { let e = $e; assert!( matches!(e, Error::Remote(ref e) if e.kind() == $kind), "{:?}", e ); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn bad_remote_command() { for session in connects().await { // a bad remote command should result in a _local_ error. 
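        // (The remote shell exits with status 127 when the command cannot be found,
        // which both mux implementations map to `io::ErrorKind::NotFound`.)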
let failed = session .command("no such program") .output() .await .unwrap_err(); eprintln!("{:?}", failed); assert_remote_kind!(failed, io::ErrorKind::NotFound); // no matter how you run it let failed = session .command("no such program") .status() .await .unwrap_err(); eprintln!("{:?}", failed); assert_remote_kind!(failed, io::ErrorKind::NotFound); // even if you spawn first let child = session.command("no such program").spawn().await.unwrap(); let failed = child.wait().await.unwrap_err(); eprintln!("{:?}", failed); assert_remote_kind!(failed, io::ErrorKind::NotFound); // of if you want output let child = session.command("no such program").spawn().await.unwrap(); let failed = child.wait_with_output().await.unwrap_err(); eprintln!("{:?}", failed); assert_remote_kind!(failed, io::ErrorKind::NotFound); session.close().await.unwrap(); } } #[tokio::test] async fn connect_timeout() { use std::time::{Duration, Instant}; let mut sb = SessionBuilder::default(); sb.connect_timeout(Duration::from_secs(1)) .user_known_hosts_file(get_known_hosts_path()); let host = "192.0.0.8"; // Test process_impl #[cfg(feature = "process-mux")] { let t = Instant::now(); let res = sb.connect(host).await; let duration = t.elapsed(); let failed = res.unwrap_err(); assert!(duration > Duration::from_secs(1)); assert!(duration < Duration::from_secs(2)); eprintln!("{:?}", failed); assert!(matches!(failed, Error::Connect(ref e) if e.kind() == io::ErrorKind::TimedOut)); } // Test native-mux_impl #[cfg(feature = "native-mux")] { let t = Instant::now(); let res = sb.connect_mux(host).await; let duration = t.elapsed(); let failed = res.unwrap_err(); assert!(duration > Duration::from_secs(1)); assert!(duration < Duration::from_secs(2)); eprintln!("{:?}", failed); assert!(matches!(failed, Error::Connect(ref e) if e.kind() == io::ErrorKind::TimedOut)); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn spawn_and_wait() { use std::time::{Duration, Instant}; for session in connects().await { let t = Instant::now(); let sleeping1 = session.command("sleep").arg("1").spawn().await.unwrap(); let sleeping2 = sleeping1 .session() .command("sleep") .arg("2") .spawn() .await .unwrap(); sleeping1.wait_with_output().await.unwrap(); assert!(t.elapsed() > Duration::from_secs(1)); sleeping2.wait_with_output().await.unwrap(); assert!(t.elapsed() > Duration::from_secs(2)); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn escaping() { for session in connects().await { let status = dbg!(session .command("printf") .arg("%d %d") .arg("1") .arg("2") .output() .await .unwrap()) .status; assert!(status.success()); let status = dbg!(session .command("printf") .args(vec!["%d %d", "1", "2"]) .output() .await .unwrap()) .status; assert!(status.success()); let status = dbg!(session .command("printf") .arg("%d %d") .raw_arg("1 2") .output() .await .unwrap()) .status; assert!(status.success()); let status = dbg!(session .command("printf") .arg("%d %d") .raw_args(std::iter::once("1 2")) .output() .await .unwrap()) .status; assert!(status.success()); let status = dbg!(session .raw_command("printf '%d %d'") .arg("1") .arg("2") .output() .await .unwrap()) .status; assert!(status.success()); session.close().await.unwrap(); } } #[tokio::test] #[cfg_attr(not(ci), ignore)] async fn process_exit_on_signal() { for session in connects().await { let sleeping = session.command("sleep").arg("5566").spawn().await.unwrap(); // give it some time to make sure it starts tokio::time::sleep(std::time::Duration::from_secs(1)).await; // 
#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn process_exit_on_signal() {
    for session in connects().await {
        let sleeping = session.command("sleep").arg("5566").spawn().await.unwrap();

        // give it some time to make sure it starts
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        // Now stop that process.
        //
        // We use `pkill -f` to match on the number rather than the `sleep` command, since other tests
        // may use `sleep`. We use `-o` to ensure that we don't accidentally kill the ssh connection
        // itself, but instead match the _oldest_ matching command.
        let killed = session
            .command("pkill")
            .arg("-f")
            .arg("-o")
            .arg("5566")
            .output()
            .await
            .unwrap();
        eprintln!("process_exit_on_signal: {:?}", killed);
        assert!(killed.status.success());

        // await that process -- this will yield "RemoteProcessTerminated", since the remote process disappeared
        let failed = sleeping.wait().await.unwrap_err();
        eprintln!("{:?}", failed);
        assert!(matches!(failed, Error::RemoteProcessTerminated));

        // the connection should still work though
        session.check().await.unwrap();
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn broken_connection() {
    for session in connects().await {
        let sleeping = session.command("sleep").arg("1000").spawn().await.unwrap();

        // get ID of remote ssh process
        let ppid = session
            .command("echo")
            .raw_arg("$PPID")
            .output()
            .await
            .unwrap();
        eprintln!("ppid: {:?}", ppid);

        let ppid: u32 = String::from_utf8(ppid.stdout)
            .unwrap()
            .trim()
            .parse()
            .unwrap();

        // and kill it -- this kills the master connection
        let killed = session
            .command("kill")
            .arg("-9")
            .arg(&format!("{}", ppid))
            .output()
            .await
            .unwrap_err();
        eprintln!("{:?}", killed);
        assert!(matches!(killed, Error::RemoteProcessTerminated));

        // this fails because the master connection is gone
        let failed = session
            .command("echo")
            .arg("foo")
            .output()
            .await
            .unwrap_err();
        eprintln!("{:?}", failed);
        assert!(matches!(
            failed,
            Error::RemoteProcessTerminated | Error::Disconnected
        ));

        // so does this
        let failed = session
            .command("echo")
            .arg("foo")
            .status()
            .await
            .unwrap_err();
        eprintln!("{:?}", failed);
        assert!(matches!(
            failed,
            Error::RemoteProcessTerminated | Error::Disconnected
        ));

        // the spawned child we're waiting for must also have failed
        let failed = sleeping.wait_with_output().await.unwrap_err();
        eprintln!("{:?}", failed);
        assert!(matches!(failed, Error::RemoteProcessTerminated));

        // check should obviously fail
        let failed = session.check().await.unwrap_err();
        assert!(matches!(failed, Error::Disconnected), "{:?}", failed);

        // Since the ssh multiplex server has exited due to the remote sshd process
        // being forcibly killed, `session.close()` should fail here.
        session.close().await.unwrap_err();
    }
}
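// A sketch (not one of the tests in this suite) of how an application might react
// to the failure modes `broken_connection` demonstrates: treat `Disconnected` and
// `RemoteProcessTerminated` as "rebuild the session" and bubble everything else up.
// The `destination` string and `KnownHosts::Add` policy are placeholder choices.
#[allow(dead_code)]
#[cfg(feature = "process-mux")]
async fn reconnect_if_dead(session: Session, destination: &str) -> Result<Session, Error> {
    let health = session.check().await;
    match health {
        Ok(()) => Ok(session),
        Err(Error::Disconnected) | Err(Error::RemoteProcessTerminated) => {
            // The dead session is simply dropped; as the test above shows,
            // `close()` would fail anyway once the master is gone.
            drop(session);
            Session::connect(destination, openssh::KnownHosts::Add).await
        }
        Err(e) => Err(e),
    }
}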
#[tokio::test]
async fn cannot_resolve() {
    for err in connects_err("bad-host").await {
        match err {
            Error::Connect(e) => {
                eprintln!("{:?}", e);
                assert_eq!(e.kind(), io::ErrorKind::Other);
            }
            e => unreachable!("{:?}", e),
        }
    }
}

#[tokio::test]
async fn no_route() {
    let mut builder = SessionBuilder::default();
    builder.connect_timeout(Duration::from_secs(1));

    for err in session_builder_connects_err("192.0.2.1", builder).await {
        match err {
            Error::Connect(e) => {
                eprintln!("{:?}", e);
                assert_eq!(e.kind(), io::ErrorKind::TimedOut);
            }
            e => unreachable!("{:?}", e),
        }
    }
}

#[tokio::test]
async fn connection_refused() {
    for err in connects_err("ssh://127.0.0.1:9").await {
        match err {
            Error::Connect(e) => {
                eprintln!("{:?}", e);
                assert_eq!(e.kind(), io::ErrorKind::ConnectionRefused);
            }
            e => unreachable!("{:?}", e),
        }
    }
}

#[tokio::test]
async fn auth_failed() {
    let addr = if cfg!(ci) {
        // prefer the known-accessible test server when available
        addr().replace("test-user", "bad-user")
    } else {
        String::from("ssh://openssh-tester@login.csail.mit.edu")
    };

    for err in connects_err(&addr).await {
        match err {
            Error::Connect(e) => {
                eprintln!("{:?}", e);
                assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
            }
            e => unreachable!("{:?}", e),
        }
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn remote_socket_forward() {
    let sessions = connects().await;
    for (session, port) in sessions.iter().zip(&[1234, 1233]) {
        let dir = tempdir().unwrap();
        let unix_socket = dir.path().join("unix_socket_listener");

        let output_listener = UnixListener::bind(&unix_socket).unwrap();

        eprintln!("Requesting port forward");
        session
            .request_port_forward(ForwardType::Remote, (loopback(), *port), &*unix_socket)
            .await
            .unwrap();

        eprintln!("Creating remote process");
        let cmd = format!(
            "echo -e '0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n' | nc localhost {} >/dev/stderr",
            port
        );
        let child = session
            .raw_command(cmd)
            .stderr(Stdio::piped())
            .spawn()
            .await
            .unwrap();

        eprintln!("Waiting for connection");
        let (mut output, _addr) = output_listener.accept().await.unwrap();

        eprintln!("Reading");
        const DATA: &[u8] = "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n".as_bytes();

        let mut buffer = [0_u8; DATA.len()];
        output.read_exact(&mut buffer).await.unwrap();

        assert_eq!(DATA, &buffer);

        drop(output);
        drop(output_listener);

        eprintln!("Waiting for session to end");
        let output = child.wait_with_output().await.unwrap();
        eprintln!("remote_socket_forward: {:#?}", output);

        assert!(output.status.success());
    }
}
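// A sketch (not one of the tests in this suite): `request_port_forward` also takes
// a TCP endpoint on both ends, so the same call used with Unix sockets above can
// expose a remote TCP service on a local port. The port numbers are placeholders,
// and `loopback()` is the same helper the surrounding tests use.
#[allow(dead_code)]
async fn tcp_local_forward_sketch(session: &Session) -> Result<(), Error> {
    // Listen on the local loopback port 8080 and tunnel each connection to
    // port 80 as seen from the remote host.
    session
        .request_port_forward(ForwardType::Local, (loopback(), 8080), (loopback(), 80))
        .await
}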
#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn local_socket_forward() {
    let sessions = connects().await;
    for (session, port) in sessions.iter().zip([1433, 1432]) {
        eprintln!("Creating remote process");
        let cmd = format!(
            "echo -e '0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n' | nc -l -p {} >/dev/stderr",
            port
        );
        let child = session
            .raw_command(cmd)
            .stderr(Stdio::piped())
            .spawn()
            .await
            .unwrap();

        sleep(Duration::from_secs(1)).await;

        eprintln!("Requesting port forward");
        let dir = tempdir().unwrap();
        let unix_socket = dir.path().join("unix_socket_forwarded");

        session
            .request_port_forward(ForwardType::Local, &*unix_socket, (loopback(), port))
            .await
            .unwrap();

        eprintln!("Connecting to forwarded socket");
        let mut output = UnixStream::connect(&unix_socket).await.unwrap();

        eprintln!("Reading");
        const DATA: &[u8] = "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n".as_bytes();

        let mut buffer = [0_u8; DATA.len()];
        output.read_exact(&mut buffer).await.unwrap();

        assert_eq!(DATA, buffer);

        drop(output);

        eprintln!("Waiting for session to end");
        let output = child.wait_with_output().await.unwrap();
        eprintln!("local_socket_forward: {:#?}", output);

        assert!(output.status.success());
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
#[cfg(feature = "process-mux")]
async fn test_detach_and_resume_process_mux() {
    for session1 in connects().await {
        session1.check().await.unwrap();

        // First detach
        let (ctl1, master_log1) = session1.detach();

        // First resume
        let session2 = Session::resume(ctl1, master_log1);
        session2.check().await.unwrap();

        // Second detach to ensure detach handles tempdir
        // set to None correctly.
        let (ctl2, master_log2) = session2.detach();

        // Second resume to ensure close handles tempdir set to None correctly
        let session3 = Session::resume(ctl2, master_log2);
        session3.check().await.unwrap();
        session3.close().await.unwrap();
    }

    // test close
    for session1 in connects().await {
        session1.check().await.unwrap();

        let (ctl1, master_log1) = session1.detach();
        let ctl = ctl1.clone();

        let session2 = Session::resume(ctl1, master_log1);
        session2.check().await.unwrap();
        session2.close().await.unwrap();

        // Wait for ssh multiplex master to clean up and exit.
        sleep(Duration::from_secs(3)).await;

        assert!(!ctl.exists());
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
#[cfg(feature = "native-mux")]
async fn test_detach_and_resume_native_mux() {
    for session1 in connects().await {
        session1.check().await.unwrap();

        // First detach
        let (ctl1, master_log1) = session1.detach();

        // First resume_mux
        let session2 = Session::resume_mux(ctl1, master_log1);
        session2.check().await.unwrap();

        // Second detach to ensure detach handles tempdir
        // set to None correctly.
        let (ctl2, master_log2) = session2.detach();

        // Second resume_mux to ensure close handles tempdir set to None correctly
        let session3 = Session::resume_mux(ctl2, master_log2);
        session3.check().await.unwrap();
        session3.close().await.unwrap();
    }

    // test close
    for session1 in connects().await {
        session1.check().await.unwrap();

        let (ctl1, master_log1) = session1.detach();
        let ctl = ctl1.clone();

        let session2 = Session::resume_mux(ctl1, master_log1);
        session2.check().await.unwrap();
        session2.close().await.unwrap();

        // Wait for ssh multiplex master to clean up and exit.
        sleep(Duration::from_secs(3)).await;

        assert!(!ctl.exists());
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn test_sftp_subsystem() {
    use openssh_sftp_client::Sftp;

    let content = b"This is a test case for the openssh-rust/openssh crate.\n";

    for session in connects().await {
        let mut child = session
            .subsystem("sftp")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .await
            .unwrap();

        let sftp = Sftp::new(
            child.stdin().take().unwrap(),
            child.stdout().take().unwrap(),
            Default::default(),
        )
        .await
        .unwrap();

        let file_path = "/tmp/openssh-rust-test-sftp-subsystem";

        {
            let mut fs = sftp.fs();

            fs.write(file_path, content).await.unwrap();
            assert_eq!(&*sftp.fs().read(file_path).await.unwrap(), content);
        }

        sftp.close().await.unwrap();
    }
}
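// A sketch (not one of the tests in this suite) of the detach/resume pattern
// exercised by `test_detach_and_resume_process_mux` above: `detach` hands back
// the control-socket handle (plus an optional master log) without shutting the
// multiplex master down, and `Session::resume` re-attaches to it later.
#[allow(dead_code)]
#[cfg(feature = "process-mux")]
async fn detach_resume_sketch(session: Session) -> Result<(), Error> {
    // Keep the master alive but give up the in-process handle ...
    let (ctl, master_log) = session.detach();

    // ... then pick it up again; `check` verifies the master still responds.
    let resumed = Session::resume(ctl, master_log);
    resumed.check().await?;
    resumed.close().await
}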
#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn test_read_large_file_bug() {
    for (session, name) in connects_with_name().await {
        eprintln!("Testing {name} implementation");

        let bs = 1024;
        let count = 20480;

        let process::Output { status, stdout, .. } = session
            .shell(format!("dd if=/dev/zero bs={bs} count={count}"))
            .output()
            .await
            .unwrap();

        assert!(status.success());

        stdout.iter().copied().for_each(|byte| assert_eq!(byte, 0));
        assert_eq!(stdout.len(), bs * count);
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn test_session_arc_command() {
    for session in connects().await {
        let session = std::sync::Arc::new(session);

        let mut child = session
            .clone()
            .arc_command("cat")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .await
            .unwrap();

        drop(session);

        // write something to standard in and send EOF
        let mut stdin = child.stdin().take().unwrap();
        stdin.write_all(b"hello world").await.unwrap();
        drop(stdin);

        // cat should print it back on stdout
        let mut stdout = child.stdout().take().unwrap();
        let mut out = String::new();
        stdout.read_to_string(&mut out).await.unwrap();
        assert_eq!(out, "hello world");
        drop(stdout);

        // cat should now have terminated
        let status = child.wait().await.unwrap();

        // ... successfully
        assert!(status.success());
    }
}

#[tokio::test]
#[cfg_attr(not(ci), ignore)]
async fn test_session_to_command() {
    for session in connects().await {
        test_to_command(&session).await;
    }

    for session in connects().await {
        test_to_command(std::rc::Rc::new(session)).await;
    }

    for session in connects().await {
        test_to_command(std::sync::Arc::new(session)).await;
    }

    async fn test_to_command<S>(session: S)
    where
        S: Clone + std::ops::Deref<Target = Session>,
    {
        let mut child = Session::to_command(session, "cat")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()
            .await
            .unwrap();

        // write something to standard in and send EOF
        let mut stdin = child.stdin().take().unwrap();
        stdin.write_all(b"hello world").await.unwrap();
        drop(stdin);

        // cat should print it back on stdout
        let mut stdout = child.stdout().take().unwrap();
        let mut out = String::new();
        stdout.read_to_string(&mut out).await.unwrap();
        assert_eq!(out, "hello world");
        drop(stdout);

        // cat should now have terminated
        let status = child.wait().await.unwrap();

        // ... successfully
        assert!(status.success());
    }
}
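// A sketch (not one of the tests in this suite): `test_session_arc_command` drops
// its `Arc<Session>` right after spawning, which relies on `arc_command` keeping
// the session alive inside the command it returns. The same property lets
// independent tasks each hold a clone and run commands concurrently over one
// multiplexed session, as in this illustrative helper.
#[allow(dead_code)]
async fn concurrent_arc_command_sketch(session: std::sync::Arc<Session>) -> Result<(), Error> {
    let session_a = session.clone();
    let a = tokio::spawn(async move { session_a.arc_command("echo").arg("a").output().await });

    let session_b = session;
    let b = tokio::spawn(async move { session_b.arc_command("echo").arg("b").output().await });

    // Each task owns its clone of the session, so both commands run concurrently.
    let (a, b) = (a.await.unwrap()?, b.await.unwrap()?);
    assert_eq!(a.stdout, b"a\n");
    assert_eq!(b.stdout, b"b\n");

    Ok(())
}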