filespooler-1.2.3/.cargo/config.toml

[target.aarch64-unknown-linux-musl]
linker = "/usr/bin/aarch64-linux-gnu-ld"
ar = "/usr/aarch64-linux-gnu/bin/ar"
rustflags = ["-L", "/usr/lib/gcc-cross/aarch64-linux-gnu/10"]

[target.armv7-unknown-linux-musleabihf]
linker = "/usr/bin/arm-linux-gnueabihf-ld"
ar = "/usr/arm-linux-gnueabihf/bin/ar"
rustflags = ["-L", "/usr/lib/gcc-cross/arm-linux-gnueabihf/10"]

filespooler-1.2.3/.cargo_vcs_info.json

{
  "git": {
    "sha1": "a96910b5ae1b00d598a9a1960825557356f30816"
  },
  "path_in_vcs": ""
}

filespooler-1.2.3/.gitignore

/target

filespooler-1.2.3/.gitlab-ci.yml

image: "rust:bullseye"

variables:
  CARGO_HOME: $CI_PROJECT_DIR/cargo

generate_release_executables:
  stage: build
  before_script:
    - echo "This is job $CI_JOB_ID"
    - echo "GE_JOB_ID=$CI_JOB_ID" >> generate_release_executables.env
  script:
    - if [ -f target-cache.tar ]; then echo "Extracting cache"; tar -xpf target-cache.tar; rm target-cache.tar; fi
    - rustc --version && cargo --version
    - cargo build --release
    - tar -cpf target-cache.tar target cargo
  artifacts:
    paths:
      - target/release/fspl
    reports:
      dotenv: generate_release_executables.env
  rules:
    - if: $CI_COMMIT_TAG
  cache:
    key: prod
    paths:
      - target/release/fspl
      - target-cache.tar

generate_release_executables_aarch64:
  stage: build
  before_script:
    - echo "This is job $CI_JOB_ID"
    - echo "GEAARCH64_JOB_ID=$CI_JOB_ID" >> generate_release_executables.env
  script:
    - rustup target add aarch64-unknown-linux-musl
    - apt-get update && apt-get -y install crossbuild-essential-arm64
    - if [ -f target-cache-aarch64.tar ]; then echo "Extracting cache"; tar -xpf target-cache-aarch64.tar; rm target-cache-aarch64.tar; fi
    - rustc --version && cargo --version
    - cargo build --release --target aarch64-unknown-linux-musl
    - tar -cpf target-cache-aarch64.tar target cargo
  artifacts:
    paths:
      - target/aarch64-unknown-linux-musl/release/fspl
    reports:
      dotenv: generate_release_executables.env
  rules:
    - if: $CI_COMMIT_TAG
  cache:
    key: aarch64
    paths:
      - target-cache-aarch64.tar

generate_release_executables_armhf:
  stage: build
  before_script:
    - echo "This is job $CI_JOB_ID"
    - echo "GEARMHF_JOB_ID=$CI_JOB_ID" >> generate_release_executables.env
  script:
    - rustup target add armv7-unknown-linux-musleabihf
    - apt-get update && apt-get -y install crossbuild-essential-armhf
    - if [ -f target-cache-armhf.tar ]; then echo "Extracting cache"; tar -xpf target-cache-armhf.tar; rm target-cache-armhf.tar; fi
    - rustc --version && cargo --version
    - cargo build --release --target armv7-unknown-linux-musleabihf
    - tar -cpf target-cache-armhf.tar target cargo
  artifacts:
    paths:
      - target/armv7-unknown-linux-musleabihf/release/fspl
    reports:
      dotenv: generate_release_executables.env
  rules:
    - if: $CI_COMMIT_TAG
  cache:
    key: armhf
    paths:
      - target-cache-armhf.tar

release:
  stage: deploy
  image: registry.gitlab.com/gitlab-org/release-cli:latest
  needs:
    - job: generate_release_executables
      artifacts: true
    - job: generate_release_executables_aarch64
      artifacts: true
    - job: generate_release_executables_armhf
      artifacts: true
  script:
    - echo "Running release for $CI_COMMIT_TAG, job id $CI_JOB_ID"
    - echo "x86_64 artifact job id $GE_JOB_ID"
    - echo "aarch64 artifact job id $GEAARCH64_JOB_ID"
    - echo "armhf artifact job id $GEARMHF_JOB_ID"
  rules:
    - if: $CI_COMMIT_TAG
  release:
    name: 'Release $CI_COMMIT_TAG'
    description: 'Created by git-tag'
    tag_name: '$CI_COMMIT_TAG'
    ref: '$CI_COMMIT_TAG'
    assets:
      links:
        - name: 'Executable (Linux x86_64, release, unstripped)'
          url: 'https://salsa.debian.org/jgoerzen/filespooler/-/jobs/${GE_JOB_ID}/artifacts/raw/target/release/fspl'
        - name: 'Executable (Linux aarch64/arm64, release, unstripped, static musl)'
          url: 'https://salsa.debian.org/jgoerzen/filespooler/-/jobs/${GEAARCH64_JOB_ID}/artifacts/raw/target/aarch64-unknown-linux-musl/release/fspl'
        - name: 'Executable (Linux armhf/armv7, release, unstripped, static musl)'
          url: 'https://salsa.debian.org/jgoerzen/filespooler/-/jobs/${GEARMHF_JOB_ID}/artifacts/raw/target/armv7-unknown-linux-musleabihf/release/fspl'

test:
  cache:
    key: test
    paths:
      - target-debug-cache.tar
  script:
    - if [ -f target-debug-cache.tar ]; then echo "Extracting cache"; tar -xpf target-debug-cache.tar; rm target-debug-cache.tar; fi
    - rustc --version && cargo --version
    - cargo test --workspace --verbose
    - tar -cpf target-debug-cache.tar target cargo

filespooler-1.2.3/Cargo.lock

# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "anyhow" version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" dependencies = [ "backtrace", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum =
"670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "libc", "num-integer", "num-traits", "time", "winapi", ] [[package]] name = "clap" version = "4.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42dfd32784433290c51d92c438bb72ea5063797fc3cc9a21a8c4346bebbb2098" dependencies = [ "bitflags 2.4.0", "clap_derive", "clap_lex", "once_cell", "terminal_size", ] [[package]] name = "clap_derive" version = "4.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fddf67631444a3a3e3e5ac51c36a5e01335302de677bd78759eaa90ab1f46644" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", "syn", ] [[package]] name = "clap_lex" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646" dependencies = [ "os_str_bytes", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "errno" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ "errno-dragonfly", "libc", "winapi", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fastrand" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" dependencies = [ "instant", ] [[package]] name = "fd-lock" version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46e245f4c8ec30c6415c56cb132c07e69e74f1942f6b4a4061da748b49f486ca" dependencies = [ "cfg-if", "rustix 0.34.8", "windows-sys 0.30.0", ] [[package]] name = "filespooler" version = "1.2.3" dependencies = [ "anyhow", "bytes", "chrono", "clap", "crc32fast", "fd-lock", "rmp-serde", "serde", "tempfile", "tracing", "tracing-subscriber", "uuid", "wait-timeout", ] [[package]] name = "getrandom" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "heck" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9448015e586b611e5d322f6703812bbca2f1e709d5773ecd38ddb4e3bb649504" [[package]] name = "io-lifetimes" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074" [[package]] 
name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "linux-raw-sys" version = "0.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "miniz_oxide" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", ] [[package]] name = "num-integer" version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", ] [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "object" version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "os_str_bytes" version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" [[package]] name = "paste" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" [[package]] name = "pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "proc-macro-error" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", "syn", "version_check", ] [[package]] name = "proc-macro-error-attr" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", "version_check", ] [[package]] name = "proc-macro2" version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "remove_dir_all" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ "winapi", ] [[package]] name = "rmp" version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44519172358fd6d58656c86ab8e7fbc9e1490c3e8f14d35ed78ca0dd07403c9f" dependencies = [ "byteorder", "num-traits", "paste", ] [[package]] name = "rmp-serde" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25786b0d276110195fa3d6f3f31299900cf71dfbd6c28450f3f58a0e7f7a347e" dependencies = [ "byteorder", "rmp", "serde", ] [[package]] name = "rustc-demangle" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustix" version = "0.34.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2079c267b8394eb529872c3cf92e181c378b41fea36e68130357b52493701d2e" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes 0.6.1", "libc", "linux-raw-sys", "winapi", ] [[package]] name = "rustix" version = "0.35.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes 0.7.5", "libc", "linux-raw-sys", "windows-sys 0.36.1", ] [[package]] name = "serde" version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sharded-slab" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "syn" version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", "fastrand", "libc", "redox_syscall", "remove_dir_all", "winapi", ] [[package]] name = "terminal_size" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ca90c434fd12083d1a6bdcbe9f92a14f96c8a1ba600ba451734ac334521f7a" dependencies = [ "rustix 0.35.9", "windows-sys 0.42.0", ] [[package]] name = "thread_local" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] [[package]] name = "time" version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", "wasi", "winapi", ] [[package]] name = "tracing" version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tracing-core" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ "lazy_static", "valuable", ] [[package]] name = "tracing-subscriber" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" dependencies = [ "sharded-slab", "thread_local", "tracing-core", ] [[package]] name = "unicode-ident" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" [[package]] name = "uuid" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93bbc61e655a4833cf400d0d15bf3649313422fa7572886ad6dab16d79886365" dependencies = [ "getrandom", ] [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "wait-timeout" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ "libc", ] [[package]] name = "wasi" version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "030b7ff91626e57a05ca64a07c481973cbb2db774e4852c9c7ca342408c6a99a" dependencies = [ "windows_aarch64_msvc 0.30.0", "windows_i686_gnu 0.30.0", "windows_i686_msvc 0.30.0", "windows_x86_64_gnu 0.30.0", "windows_x86_64_msvc 0.30.0", ] [[package]] name = "windows-sys" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc 0.36.1", "windows_i686_gnu 0.36.1", "windows_i686_msvc 0.36.1", "windows_x86_64_gnu 0.36.1", "windows_x86_64_msvc 0.36.1", ] [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29277a4435d642f775f63c7d1faeb927adba532886ce0287bd985bffb16b6bca" [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1145e1989da93956c68d1864f32fb97c8f561a8f89a5125f6a2b7ea75524e4b8" [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4a09e3a0d4753b73019db171c1339cd4362c8c44baf1bcea336235e955954a6" [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ca64fcb0220d58db4c119e050e7af03c69e6f4f415ef69ec1773d9aab422d5a" [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" version = "0.30.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "08cabc9f0066848fef4bc6a1c1668e6efce38b661d2aeec75d18d8617eebb5f1" [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" filespooler-1.2.3/Cargo.toml0000644000000034540000000000100113660ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "filespooler" version = "1.2.3" authors = ["John Goerzen "] description = "Sequential, distributed, POSIX-style job queue processing" homepage = "https://www.complete.org/filespooler/" readme = "README.md" keywords = [ "shell", "processes", "distributed", "queues", ] categories = [ "command-line-utilities", "filesystem", "data-structures", ] license = "GPL-3.0+" repository = "https://salsa.debian.org/jgoerzen/filespooler" [[bin]] name = "fspl" path = "src/main.rs" [dependencies.anyhow] version = "1.0.57" features = ["backtrace"] [dependencies.bytes] version = "1.1.0" [dependencies.chrono] version = "0.4.19" [dependencies.clap] version = "4.1" features = [ "derive", "std", "help", "usage", "error-context", "wrap_help", ] default-features = false [dependencies.crc32fast] version = "1.3.2" [dependencies.fd-lock] version = "3.0.5" [dependencies.rmp-serde] version = "1.1.0" [dependencies.serde] version = "1.0.137" features = ["derive"] [dependencies.tempfile] version = "3.3.0" [dependencies.tracing] version = "0.1.34" [dependencies.tracing-subscriber] version = "0.3.11" features = [ "std", "fmt", ] default-features = false [dependencies.uuid] version = "1.0.0" features = ["v4"] [dependencies.wait-timeout] version = "0.2.0" filespooler-1.2.3/Cargo.toml.orig000064400000000000000000000022271046102023000150440ustar 00000000000000[package] name = "filespooler" version = "1.2.3" edition = "2021" authors = ["John Goerzen "] license = "GPL-3.0+" description = "Sequential, distributed, POSIX-style job queue processing" homepage = "https://www.complete.org/filespooler/" repository = "https://salsa.debian.org/jgoerzen/filespooler" readme = "README.md" keywords = ["shell", "processes", "distributed", "queues"] categories = ["command-line-utilities", "filesystem", "data-structures"] [[bin]] name = "fspl" path = "src/main.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] clap = { version = "4.1", default-features = false, features = ["derive", "std", "help", "usage", "error-context", "wrap_help"] } bytes = "1.1.0" anyhow = {version = "1.0.57", features = ["backtrace"] } tracing = "0.1.34" tracing-subscriber = { version = "0.3.11", default-features = false, features = ["std", "fmt"] } serde = { version = "1.0.137", features = ["derive"] } crc32fast = "1.3.2" rmp-serde = "1.1.0" fd-lock = "3.0.5" tempfile = "3.3.0" uuid = { version = "1.0.0", features = 
["v4"] } chrono = "0.4.19" wait-timeout = "0.2.0" filespooler-1.2.3/GPL-3000064400000000000000000001045131046102023000126630ustar 00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. 
The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
filespooler-1.2.3/LICENSE000064400000000000000000000012431046102023000131570ustar 00000000000000Copyright (C) 2022-2023 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . filespooler-1.2.3/README.md000064400000000000000000000103641046102023000134350ustar 00000000000000# Filespooler: CLI & Library for Sequential, Distributed, POSIX-style job queue processing ![build](https://salsa.debian.org/jgoerzen/filespooler/badges/main/pipeline.svg) ![docs](https://docs.rs/filespooler/badge.svg) ## Quick Links - The [Filespooler homepage](https://www.complete.org/filespooler/), complete with extensive documentation, examples of many integrations, and tutorials. - The [detailed manpage reference](./doc/fspl.1.md), which includes installation instructions - The [releases page](https://salsa.debian.org/jgoerzen/filespooler/-/releases), which includes prebuilt binaries for Linux x86_64, aarch64, and armhf (for Raspberry Pi) # Introduction Filespooler is a Unix-style tool that facilitates local or remote command execution, complete with stdin capture, with easy integration with various tools. I will decode what that means below. For now, here's a brief Filespooler feature list: - It can easily use tools such as S3, Dropbox, Syncthing, NNCP, ssh, UUCP, USB drives, CDs, etc. as transport. - Translation: you can use basically anything that is a filesystem as a transport - It can use arbitrary decoder command pipelines (eg, zcat, stdcat, gpg, age, etc) to pre-process stored packets. - It can send and receive packets by pipes. - Its storage format is simple on-disk files with locking. - It supports one-to-one and one-to-many configurations. - Locking is unnecessary when writing new jobs to the queue, and many arbitrary tools (eg, Syncthing, Dropbox, etc) can safely write directly to the queue without any assistance. - Queue processing is strictly ordered based on the order on the creation machine, even if job files are delivered out of order to the destination. - stdin can be piped into the job creation tool, and piped to a later executor at process time on a remote machine. - The file format is lightweight; less than 100 bytes overhead unless large extra parameters are given. - The queue format is lightweight; having 1000 different queues on a Raspberry Pi would be easy. - Processing is stream-based throughout; arbitrarily-large packets are fine and sizes in the TB range are no problem. - The Filespooler command, fspl, is extremely lightweight, consuming less than 10MB of RAM on x86_64. - Filespooler has extensive documentation. Filespooler consists of a command-line tool (fspl) for interacting with queues. It also consists of a Rust library that is used by fspl. main.rs for fspl is just a few lines long. ## Use Cases Imagine for a moment that you want to send incremental backups from one machine to your backup server. You might run something like this: tar --incremental -cSpf - ... | ssh backupsvr tar -xvSpf - -C /backups That will work when all is good. 
But when the network between the two machines drops, now what? Probably data loss. What we want is a way to reliably execute things, in order, with reordering in case of out-of-order data. This turns out to be useful in many situations: Git repository syncing, backups, etc. Now, say you do something like this: tar --incremental -cSpf - ... | fspl prepare -s ~/statefile -i - > ~/syncedpath/fspl-`uuid`.fspl At this point, a tool like Syncthing or Dropbox will sync this syncedpath to the `~/queue/jobs/` directory under the queue on the backup server. Now you can run this (from cron, systemd, etc) on the backup serer: fspl queue-process -q ~/queue tar -- -xvSpf - -C /backups Boom. Done. queue-process will (by default) delete jobs that finish successfully. It will keep track of which jobs have been completed and process them in order. # Copyright Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . filespooler-1.2.3/doc/Makefile000064400000000000000000000001221046102023000143520ustar 00000000000000ALL: fspl.1 %.1: %.1.md pandoc -f markdown-smart --standalone --to man $< -o $@ filespooler-1.2.3/doc/fspl.1000064400000000000000000000734401046102023000137550ustar 00000000000000.\" Automatically generated by Pandoc 2.9.2.1 .\" .TH "fspl" "1" "May 2022" "John Goerzen" "fspl Manual" .hy .SH NAME .PP fspl - sequential, distributed job queue processing .SH SYNOPSIS .PP \f[B]fspl\f[R] [ \f[I]OPTIONS\f[R] ] \f[B]COMMAND\f[R] [ \f[I]command_options\f[R] ] .SH OVERVIEW .PP \f[B]fspl\f[R] is the CLI part of the Filespooler (https://www.complete.org/filespooler) package. .PP \f[B]fspl\f[R] is a Unix-style tool that facilitates local or remote command execution, complete with stdin capture, with easy integration with various tools. Here\[aq]s a brief Filespooler feature list: .IP \[bu] 2 It can easily use tools such as S3, Dropbox, Syncthing, NNCP, ssh, UUCP, USB drives, CDs, etc. as transport. .RS 2 .IP \[bu] 2 Translation: you can use basically anything that is a filesystem as a transport .RE .IP \[bu] 2 It can use arbitrary decoder command pipelines (eg, zcat, stdcat, gpg, age, etc) to pre-process stored packets. .IP \[bu] 2 It can send and receive packets by pipes. .IP \[bu] 2 Its storage format is simple on-disk files with locking. .IP \[bu] 2 It supports one-to-one and one-to-many configurations. .IP \[bu] 2 Locking is unnecessary when writing new jobs to the queue, and many arbitrary tools (eg, Syncthing, Dropbox, etc) can safely write directly to the queue without any assistance. .IP \[bu] 2 Queue processing is strictly ordered based on the order on the creation machine, even if job files are delivered out of order to the destination. .IP \[bu] 2 stdin can be piped into the job creation tool, and piped to a later executor at process time on a remote machine. .IP \[bu] 2 The file format is lightweight; less than 100 bytes overhead unless large extra parameters are given. 
.IP \[bu] 2 The queue format is lightweight; having 1000 different queues on a Raspberry Pi would be easy. .IP \[bu] 2 Processing is stream-based throughout; arbitrarily-large packets are fine and sizes in the TB range are no problem. .IP \[bu] 2 The Filespooler command, fspl, is extremely lightweight, consuming less than 10MB of RAM on x86_64. .IP \[bu] 2 Filespooler has extensive documentation. .PP Filespooler consists of a command-line tool (fspl) for interacting with queues. It also consists of a Rust library that is used by fspl. main.rs for fspl is just a few lines long. .SH A WORD ABOUT DOCUMENTATION .PP This manual is the reference for fspl. The filespooler homepage, contains many examples, instructions on how to integrate with everything from file syncers to encryption tools, and so forth. Please refer to it for further information. .SH BASIC OPERATION .PP The basic idea is this: .IP \[bu] 2 Before starting, on the receiving end, you run \f[B]fspl queue-init\f[R] to prepare a queue directory. .IP \[bu] 2 On the sending end, you use \f[B]fspl prepare\f[R] to prepare a job file (packet). This packet is written to stdout. From there, you can pipe it to \f[B]fspl queue-write\f[R] to inject it into a local queue, or use various kinds of transport to get it to a remote machine. .IP \[bu] 2 You use \f[B]fspl queue-process\f[R] to execute packets. .RS 2 .IP \[bu] 2 Alternatively, the \f[B]fspl stdin-\f[R] series of commands let you have more manual control over queue processing, accepting job packets in stdin. They can let you completely ignore the built-in queue mechanism if you so desire. .RE .SH ON-DISK FORMATS .PP The key way to ensure the ordered processing of the job queue is with a sequence number. This is a 64-bit unsigned integer. It is stored in a \f[I]seqfile\f[R] on both the sending and the receiving side. On the sending side, the seqfile is standalone; there is only an accompanying \f[C].lock\f[R] file for it. On the receiving side, the seqfile and its accompanying lock file live within the queue directory. .PP When the seqfile is referenced on the sending side, it will be created and initialized with the value \f[B]1\f[R] if it does not already exist. On the receiving side, it is created as part of \f[B]fspl queue-init\f[R]. .PP In either case, the seqfile consists of one newline-terminated line, containing the next number to process. On the sending side, this is used by \f[B]fspl prepare\f[R] as the sequence number for the next generated packet. On the receiving side, it is used by \f[B]fspl queue-process\f[R] to determine which job to process next (unless changed by \f[B]--order-by\f[R]). .SS THE QUEUE .PP The queue has this general layout: .IP .nf \f[C] queuedir/ Top-level queue directory nextseq Sequence file nextseq.lock Lock file jobs/ Job files stored here \f[R] .fi .PP When passing the \f[B]--queuedir\f[R] to one of the \f[B]fspl queue-\f[R] commands, you give it the path to the top-level queuedir as shown here. .PP You are free to create additional directories within the \f[B]queuedir\f[R] so long as they don\[aq]t use one of the names listed above. This can be helpful for receiving queue contents in certain situations. .SS Append-Only Queues .PP You can specify \f[B]--append-only\f[R] to \f[B]fspl queue-init\f[R], which will cause the \f[B]nextseq\f[R] and \f[B]nextseq.lock\f[R] files to be omitted. This has the effect of making the queue write-only.
This can be useful if you are synchronizing the \f[B]jobs\f[R] subdirectory between machines, but still want to be able to use \f[B]fspl queue-write\f[R] to add jobs to that folder. It will prevent \f[B]fspl queue-process\f[R] from running. You can still inspect an append-only queue with commands like \f[B]fspl queue-ls\f[R] and \f[B]fspl queue-info\f[R]. .SS JOB FILES .PP Job files live within \f[B]queuedir/jobs\f[R]. They all must follow this naming pattern: .IP .nf \f[C] fspl-*.fspl \f[R] .fi .PP This pattern is specifically designed to facilitate safe injection of job files into the queue by other tools. Many other tools prepend or append a temporary string to a filename to signify that it has not yet been fully transferred. The Filespooler assumption is that once a file appears in \f[B]jobs/\f[R] with a name matching this pattern, then it has been fully transferred and can be processed at any time. .PP So long as the filename begins with \f[B]fspl-\f[R] and ends with \f[B].fspl\f[R], you are free to put whatever string you like in the middle. The only other requirement, of course, is that each job must have a unique filename within the directory. To simplify things, you can pipe a job file to \f[B]fspl queue-write\f[R] and let that command take care of naming. Or, you can generate a random (or non-random) string yourself in a shell script. .PP The job file itself consists of a small binary header, which is CRC32-checked. This header is normally less than 100 bytes and the length of it is encoded within the file. Following the header, if \f[B]--input\f[R] was given to \f[B]fspl prepare\f[R], whatever was piped to \f[B]prepare\f[R] is included as the \[dq]payload\[dq]. This will be piped to the executor command when run by \f[B]fspl queue-process\f[R] or \f[B]fspl stdin-process\f[R]. The payload is not validated by CRC or length by Filespooler, since this is assumed to be the role of the transport layer. The website contains examples of using GPG or other tools to ensure integrity. .PP There are three types of job files: .IP \[bu] 2 Command, created by \f[B]fspl prepare\f[R]. This is the typical kind of job file, and is used to request the execution of a command by the processor. .IP \[bu] 2 NOP, created by \f[B]fspl prepare-nop\f[R]. This is a \[dq]no-op\[dq] job file, which does not run a command but is considered to always succeed. .IP \[bu] 2 Fail, created by \f[B]fspl prepare-fail\f[R]. This is a \[dq]fail\[dq] job file, which does not run a command but is considered to always fail. This could be useful, for instance, to create a \[dq]barrier\[dq] to prevent a queue processor from continuing to execute commands past that point without human intervention. .SS ADDING FILES TO THE QUEUE .PP To expand slightly on the discussion above about adding files to the queue: .PP A common way to do this if your transport tool doesn\[aq]t use a nice temporary name is to transport the file to an adjacent directory, and then use \f[B]mv(1)\f[R] or, better, make a hard link with \f[B]ln(1)\f[R] to get the file into the jobs/ directory. Note that in both cases, you must take care that you are not crossing a filesystem boundary; on some platforms such as Linux, mv will revert to copy instead of rename if you cross the boundary and then the assumptions about completeness are violated. .SS JOB FILE ENCODING AND DECODING .PP Job files are, by default, stored exactly as laid out above. However, in many cases, it may be desirable to store them \[dq]encoded\[dq] - compressed or encrypted.
In this case, the output from \f[B]fspl prepare\f[R] can be piped through, say, \f[B]gzip\f[R] and the resulting packet can still be stored in \f[B]jobs/\f[R] by \f[B]fspl queue-write\f[R] or any related tool. .PP Now, however, we arrive at the question: how can Filespooler process a queue containing files that have been compressed, encrypted, or so forth? .PP Every \f[B]fspl queue\f[R] command takes an optional \f[B]--decoder\f[R] (or \f[B]-d\f[R]) parameter, which is a command string that will be executed by the shell. This decoder command will receive the entire job file (not just the payload) piped to it on stdin, and is expected to write the decoded file to stdout. .PP The \f[B]fspl stdin\f[R] pairs to the queue commands do not accept a decoder parameter, since it is assumed you would do that in the pipeline on the way to the stdin command. .PP For instance: .IP .nf \f[C] date | fspl prepare -s \[ti]/state -i - | gzip | fspl queue-write -q \[ti]/queue fspl queue-ls -q \[ti]/queue -d zcat ID creation timestamp filename 48 2022-05-07T21:07:02-05:00 fspl-48aa52ad-c65c-478a-9d37-123d4bebcb30.fspl \f[R] .fi .PP Normally, \f[B]fspl\f[R] ignores files that fail to decode the header. If you omit the \f[B]--decoder\f[R], it may just look like your queue is empty. (Using \f[B]--log-level=debug\f[R] will illuminate what is happening.) .SH DISTRIBUTED NATURE OF FILESPOOLER .PP As mentioned, Filespooler is designed to be used as a distributed, asynchronous, ordered command queue. The homepage contains many more examples. Here is one simple example of using ssh as a transport to get commands to a remote queue: .IP .nf \f[C] tar -cpf - /usr/local | fspl prepare -s \[ti]/state -i - | ssh remote fspl queue-write -q \[ti]/queue \f[R] .fi .SH INSTALLATION .PP \f[B]fspl\f[R] is a Rust program. If you don\[aq]t already have Rust installed, it can be easily installed from . .PP Once Rust is installed, Filespooler can be installed with this command: .IP .nf \f[C] cargo install filespooler \f[R] .fi .PP From a checked-out source tree, it can be built by running \f[B]\f[CB]cargo build --release\f[B]\f[R]. The executable will then be placed in \f[B]target/release/fspl\f[R]. .PP You can also obtain pre-built binaries for x86_64 Linux from . .SH ENVIRONMENT .PP \f[B]fspl prepare\f[R] will save certain environment variables to the packet, which will be set later at process time. \f[B]fspl {queue,stdin}-process\f[R] will set a number of useful environment variables in the execution environment. \f[B]fspl {queue,stdin}-info\f[R] will show the environment that will be passed to the commands. See each of these for further discussion. .SH EXIT CODE .PP In general, the commands exit with 0 on success and nonzero on failure. The concept of success and failure can be complicated in some situations; see the discussion of the process command. .PP These situations explicitly cause a nonzero (error) exit code: .IP \[bu] 2 Failure to obtain a lock (see \[dq]locking and concurrency\[dq] below), but only if a lock is required; for many commands, no lock is needed. .IP \[bu] 2 An I/O error .IP \[bu] 2 For commands that require a specific job ID (eg, \f[B]fspl queue-info\f[R]), no job with that ID can be located .IP \[bu] 2 While processing, the executed command returns a nonzero exit status and \f[B]--on-error\f[R] is set to \f[B]Retry\f[R] (the default) .IP \[bu] 2 In some cases, the presence of multiple files in the queuedir with the same sequence number.
The presence of this condition with commands that take a \f[B]-j ID\f[R] option, or with \f[B]queue-process\f[R] in its standard configuration, will cause an error. .RS 2 .IP \[bu] 2 However, this condition is acceptable for \f[B]queue-ls\f[R] and \f[B]queue-process --order-by=Timestamp\f[R]. .RE .PP These situations explicitly terminate with success (0): .IP \[bu] 2 While processing, the \f[B]--maxjobs\f[R] limit is reached before some other error causes an abnormal exit .IP \[bu] 2 An error while running a command while \f[B]--on-error\f[R] is set to \f[B]Delete\f[R] or \f[B]Leave\f[R] .IP \[bu] 2 Files are encountered in the queuedir/jobs directory with unparsable headers. \f[B]fspl\f[R] detects and logs (subject to \f[B]--log-level\f[R]) this condition, but does not consider it an error, on the grounds that the presence of extra data should not prevent the proper functioning of the queue. This may manifest itself in the queue appearing to have nothing to do, \f[B]queue-ls\f[R] showing fewer jobs than there are files, etc. A common cause of this may be an incorrect \f[B]--decoder\f[R]. .IP \[bu] 2 Zero jobs in the queue, or zero jobs available to process. .SH LOCKING AND CONCURRENCY .PP Next to every \f[B]seqfile\f[R] on both the sender and within the queue on the recipient is a file named \f[I]seqfile\f[R].lock. An exclusive lock is held on this file during the following conditions: .IP \[bu] 2 On the sender with \f[B]fspl prepare\f[R] and related functions, briefly while obtaining the next sequence number. Once this is done, the lock is released, even if the process of consuming stdin takes a long time. .IP \[bu] 2 On the recipient, when processing the queue with \f[B]fspl queue-process\f[R] or other commands that access the seqfile (eg, \f[B]fspl queue-set-next\f[R]). .PP fspl will exit with an error code if it cannot obtain the lock when it needs it. .PP These are situations that explicitly do \f[I]NOT\f[R] obtain a lock: .IP \[bu] 2 \f[B]fspl queue-write\f[R] or other non-fspl method of injecting packets into the queue .IP \[bu] 2 The \f[B]fspl stdin-\f[R] series of commands .IP \[bu] 2 Commands that scan the queue without accessing the state of the seqfile. Examples include \f[B]queue-ls\f[R], \f[B]queue-info\f[R], and \f[B]queue-payload\f[R]. .PP Note that if the queue is being actively processed while a \f[B]queue-ls\f[R] is in process, a race condition is possible if a file disappears between the readdir() call and the time the file is opened for reading, which could potentially cause queue-ls to fail. queue-ls intentionally does not attempt to acquire the lock, however, because it would \f[I]always\f[R] fail while the queue is being processed in that case, preventing one from being able to list the queue at all while long-running jobs are in process. .PP Note that \f[B]fspl queue-write\f[R] does not need to obtain a lock. The \f[B]fspl stdin-\f[R] series of commands also do not obtain a lock. .PP Taken together, this means that any given queue is intended to be processed sequentially, not in parallel. However, if parallel processing is desired, it is trivial to iterate over the jobs and use \f[B]fspl stdin-process\f[R] in whatever custom manner you would like. Also, since queues are so lightweight, there is no problem with creating thousands of them. .SH INVOCATION: GLOBAL OPTIONS .PP These options may be specified for any command, and must be given before the command on the command line. 
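.PP For instance, to raise the logging verbosity for a single queue listing, an invocation might look like the following (a hypothetical example; the queue path is illustrative only, and the options used are documented elsewhere in this manual): .IP .nf \f[C] fspl --log-level debug queue-ls -q \[ti]/queue \f[R] .fi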
.TP \f[B]-l\f[R], \f[B]--log-level\f[R] \f[I]LEVEL\f[R] Information about the progress of \f[B]fspl\f[R] is written to stderr. This parameter controls how much information is written. In order from most to least information, the options are: trace, debug, info, warn, error. The default is info. .TP \f[B]-V\f[R], \f[B]--version\f[R] Print version information and exit .TP \f[B]-h\f[R], \f[B]--help\f[R] Print help information and exit. Can also be given after a subcommand, in which case it displays more detailed information about that subcommand. .TP \f[I]COMMAND\f[R] The subcommand which will be executed. Required unless using \f[B]--version\f[R] or \f[B]--help\f[R]. .SH INVOCATION: SUBCOMMANDS .PP Every subcommand accepts \f[B]--help\f[R] to display a brief summary of options, invoked as: \f[B]fspl\f[R] \f[I]SUBCOMMAND\f[R] \f[B]--help\f[R] . .SS fspl ... prepare .PP Generates a packet (job file data) and writes it to stdout. This file can be piped to other programs (particularly \f[B]fspl queue-write\f[R]) or saved directly to disk. .PP Usage: .PP \f[B]fspl\f[R] \f[B]prepare\f[R] [ \f[I]OPTIONS\f[R] ] \f[B]-s\f[R] \f[I]FILE\f[R] [ \f[B]-- PARAMS...\f[R] ] .TP \f[B]-s\f[R], \f[B]--seqfile\f[R] \f[I]FILE\f[R] Path to the local seqfile. If it does not already exist, it will be created. If set to \[dq]-\[dq], then no sequence file is used and the sequence emitted will always be 1. .TP \f[B]-i\f[R], \f[B]--input\f[R] \f[I]INPUT\f[R] By default, prepare will not read anything as payload. If \f[I]INPUT\f[R] is set to \[dq]-\[dq], then prepare will read standard input (stdin) and use it as input. Otherwise, if \f[I]INPUT\f[R] is anything other than \[dq]-\[dq], it is assumed to be a filename, which is opened and read for input. .TP \f[B]-- \f[BI]PARAMS\f[B]...\f[R] If a \[dq]--\[dq] is present on the command line, everything after it is taken as parameters to be added to the generated job packet. When the packet is later processed, if \f[B]--allow-job-params\f[R] is given to \f[B]queue-process\f[R] or \f[B]stdin-process\f[R], then these parameters will be appended to the command line of the executed command. .PP In addition to these options, any environment variable beginning with \f[B]FSPL_SET_\f[R] will be saved in the packet and will be set in the execution environment at processing time. .SS fspl ... prepare-fail, prepare-nop .PP These commands create a non-command packet, one which is either considered to always fail or to always succeed (nop). These two commands take only one option, which is required: .TP \f[B]-s\f[R], \f[B]--seqfile\f[R] \f[I]FILE\f[R] Path to the local seqfile. Required. If set to \[dq]-\[dq], then no sequence file is used and the sequence emitted will always be 1. .SS fspl ... prepare-get-next .PP Prints the sequence number that will be used by the next prepare command. .PP Usage: .PP \f[B]fspl\f[R] \f[B]prepare-get-next\f[R] \f[B]-s\f[R] \f[I]FILE\f[R] .TP \f[B]-s\f[R], \f[B]--seqfile\f[R] \f[I]FILE\f[R] Path to the local seqfile. Required. If set to \[dq]-\[dq], then no sequence file is used and the sequence emitted will always be 1. .SS fspl ... prepare-set-next .PP Changes the sequence number that will be used by the next prepare command. .PP Usage: .PP \f[B]fspl\f[R] \f[B]prepare-set-next\f[R] \f[B]-s\f[R] \f[I]FILE\f[R] \f[I]ID\f[R] .TP \f[B]-s\f[R], \f[B]--seqfile\f[R] \f[I]FILE\f[R] Path to the local seqfile. Required. \f[I]ID\f[R] The numeric ID to set the seqfile to. .SS fspl ...
stdin-info, queue-info .PP These two commands display information about a given packet. This information is printed to stdout in a style that is similar to how the shell sets environment variables. In fact, it shows precisely the environment variables that will be set by a corresponding \f[B]process\f[R] command. .PP stdin-info expects the packet to be piped in to stdin; queue-info will find it in the given queue. .PP This command will not attempt to read the payload of the file; it will only read the header. (Note that this is not a guarantee that some layer of the system may not try to read a few KB past the header, merely a note that running this command will not try to read all of a 1TB packet.) .PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-info\f[R] [ \f[I]OPTIONS\f[R] ] \f[B]-q\f[R] \f[I]DIR\f[R] \f[B]-j\f[R] \f[I]ID\f[R] .PP \f[B]fspl\f[R] \f[B]stdin-info\f[R] .PP Options (valid for \f[B]queue-info\f[R] only): .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .TP \f[B]-j\f[R], \f[B]--job\f[R] \f[I]ID\f[R] Numeric job ID to process. See \f[B]fspl queue-ls\f[R] to determine this. Required. .TP \f[B]-d\f[R], \f[B]--decoder\f[R] \f[I]DECODECMD\f[R] Decoder command to run. This string is passed to \f[B]$SHELL -c\f[R]. See the above conversation about decoders. Optional. .PP Example: .IP .nf \f[C] fspl queue-info -q /tmp/queue -j 45 -d zcat FSPL_SEQ=45 FSPL_CTIME_SECS=1651970311 FSPL_CTIME_NANOS=425412511 FSPL_CTIME_RFC3339_UTC=2022-05-08T00:38:31Z FSPL_CTIME_RFC3339_LOCAL=2022-05-07T19:38:31-05:00 FSPL_JOB_FILENAME=fspl-29342606-02a0-438c-81f2-efdfb80afbe9.fspl FSPL_JOB_QUEUEDIR=/tmp/bar FSPL_JOB_FULLPATH=/tmp/bar/jobs/fspl-29342606-02a0-438c-81f2-efdfb80afbe9.fspl FSPL_PARAM_1=hithere FSPL_SET_FOO=bar \f[R] .fi .PP Some notes on these variables: .IP \[bu] 2 The \f[B]FSPL_JOB_FILENAME\f[R] is relative to the jobs subdirectory of the queue directory. .IP \[bu] 2 The \f[B]FSPL_JOB_FULLPATH\f[R] is relative to the current working directory; that is, it is what was given by \f[B]-q\f[R] plus the path within that directory to the filename. It is not guaranteed to be absolute. .IP \[bu] 2 \f[B]FSPL_PARAM_n\f[R] will be set to the optional parameters passed to \f[B]fspl prepare\f[R], with n starting at 1. .IP \[bu] 2 \f[B]FSPL_SET_x\f[R] will reflect any \f[B]FSPL_SET_x\f[R] parameters that were in the environment when \f[B]fspl prepare\f[R] was run. .IP \[bu] 2 Filespooler does not enforce limits to environment variable content. If you want to do something like embed newlines in variable content, Filespooler will happily accept this (since it is valid POSIX) and handle it properly - but your shell scripts may not be so lucky. It is advisable that you avoid this and other weird constructions for your sanity in working with things outside Filespooler - though Filespooler won\[aq]t prevent you from doing it. .SS fspl ... stdin-payload, queue-payload .PP These two commands extract the payload (if any) from the given packet. This is written to stdout. No header or other information is written to stdout. .PP stdin-payload expects the packet to be piped in to stdin; queue-payload will find it in the given queue. .PP The payload will be piped to the command started by the process commands. The payload will be 0-bytes if \f[B]-i\f[R] was not passed to \f[B]fspl prepare\f[R], or if an empty payload was given to it.
.PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-payload\f[R] [ \f[I]OPTIONS\f[R] ] \f[B]-q\f[R] \f[I]DIR\f[R] \f[B]-j\f[R] \f[I]ID\f[R] .PP \f[B]fspl\f[R] \f[B]stdin-payload\f[R] .PP Options (valid for \f[B]queue-payload\f[R] only): .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .TP \f[B]-j\f[R], \f[B]--job\f[R] \f[I]ID\f[R] Numeric job ID to process. See \f[B]fspl queue-ls\f[R] to determine this. Required. .TP \f[B]-d\f[R], \f[B]--decoder\f[R] \f[I]DECODECMD\f[R] Decoder command to run. This string is passed to \f[B]$SHELL -c\f[R]. See the above conversation about decoders. Optional. .SS fspl ... stdin-process, queue-process .PP Process packet(s). stdin-process will process exactly one packet on stdin. queue-process will process zero or more packets, depending on the content of the queue and options given. .PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-process\f[R] [ \f[I]OPTIONS\f[R] ] \f[B]-q\f[R] \f[I]DIR\f[R] \f[I]COMMAND\f[R] [ \f[B]-- PARAMS...\f[R] ] .PP \f[B]fspl\f[R] \f[B]stdin-process\f[R] [ \f[I]OPTIONS\f[R] ] \f[I]COMMAND\f[R] [ \f[B]-- PARAMS...\f[R] ] .PP Common options: .TP \f[B]--allow-job-params\f[R] Specifies that optional parameters given to \f[B]fspl prepare\f[R] will be passed on the command line to this command .TP \f[B]--ignore-payload\f[R] Ignores the payload; does not pipe it to the command. .TP \f[B]--timeout\f[R] \f[I]SECONDS\f[R] Specifies a timeout, in seconds, for the command. If the command has not exited within that timeframe, SIGKILL is sent to the process. Failing to exit within the timeout is considered an error for Filespooler\[aq]s purposes. .TP \f[I]COMMAND\f[R] The command to run. This is \f[I]not\f[R] passed to the shell, so it must point to an executable. This command will not be run for NOP or Fail packets. .TP \f[B]-- \f[BI]PARAMS\f[B]...\f[R] If a \[dq]--\[dq] is present on the command line, everything after it is taken as parameters to be sent to the given command. If \f[B]--allow-job-params\f[R] is given, then those parameters will be sent after these. .PP Options valid only for \f[B]queue-process\f[R]: .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .TP \f[B]-d\f[R], \f[B]--decoder\f[R] \f[I]DECODECMD\f[R] Decoder command to run. This string is passed to \f[B]$SHELL -c\f[R]. See the above conversation about decoders. Optional. .TP \f[B]-n\f[R], \f[B]--maxjobs\f[R] \f[I]JOBS\f[R] The maximum number of jobs to process. There is no limit by default. .TP \f[B]--never-delete\f[R] Never delete the job file after processing in any circumstance, regardless of other options. .TP \f[B]--order-by\f[R] \f[I]ORDERING\f[R] In what order to process the queue. When \f[B]Sequence\f[R], which is the default, process the queue in order of sequence number. When set to \f[B]Timestamp\f[R], process the queue in order of the creation timestamp as it appears in the job header. Note that when set to \f[B]Timestamp\f[R], the seqfile within the queue is neither used nor changed. \f[B]Timestamp\f[R] implies that you do not care about a strict sequential ordering of items in cases where items arrive out of order. .TP \f[B]--on-error\f[R] \f[I]ONERROR\f[R] What to do when the supplied command fails (is a fail packet or a command exits with a nonzero status). If set to \f[B]Retry\f[R], abort processing with a nonzero error code and leave the packet in the queue to be tried again by a later invocation of \f[B]queue-process\f[R]. 
If set to \f[B]Delete\f[R], delete the packet from the queue (unless \f[B]--never-delete\f[R] is given), increment the next job counter, and continue processing the queue normally. If set to \f[B]Leave\f[R], then leave the packet on disk, increment the next job counter, and continue processing the rest of the queue normally. \f[B]Retry\f[R] is the only option that will cause a failure to not increment the next job counter. \f[B]Retry\f[R] is the default. .TP \f[B]--output-to\f[R] \f[I]DEST\f[R] What to do with the stdout and stderr of the invoked command. If set to \f[B]PassBoth\f[R], then they are simply written to the stdout/stderr of \f[B]fspl queue-process\f[R]. If set to \f[B]SaveBoth\f[R], then both are added to a file in the queue\[aq]s jobs directory named \f[B]filename.out\f[R]. This file is up to you to process whenever you wish. The default is \f[B]PassBoth\f[R]. .PP The environment is set as described above. Note that since no queue directory or filename is relevant with the \f[B]stdin-process\f[R] flavor, those variables are unset under \f[B]stdin-process\f[R]. .PP To skip a failing job at the head of the queue, you can use \f[B]fspl queue-set-next\f[R], or alternatively, \f[B]fspl queue-process --on-error Delete --maxjobs 1\f[R] to cause it to be deleted. You would probably not wish to combine this with timestamp ordering. .SS fspl ... queue-set-next .PP Changes the sequence number that will be used by the next \f[B]fspl queue-process\f[R] command. .PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-set-next\f[R] \f[B]-q\f[R] \f[I]DIR\f[R] \f[I]ID\f[R] .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .TP \f[B]--append-only\f[R] Creates an append-only queue. \f[I]ID\f[R] The numeric ID to set the seqfile to. .SS fspl ... queue-write .PP Receives a packet on stdin and writes it to the queue. This command does not bother to decode, process, or validate the packet in any way. It simply writes it to the queue safely, using a temporary filename until completely written, at which point it is renamed to a **fspl-*.fspl** file with a random middle part. .PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-write\f[R] \f[B]-q\f[R] \f[I]DIR\f[R] .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .SS fspl ... queue-init .PP Creates the queue directory and the needed files and subdirectories within it. .PP Usage: .PP \f[B]fspl\f[R] \f[B]queue-init\f[R] \f[B]-q\f[R] \f[I]DIR\f[R] .TP \f[B]-q\f[R], \f[B]--queuedir\f[R] \f[I]DIR\f[R] Path to the local queue directory. Required. .SS fspl ... gen-filename .PP Generates a filename matching the \f[C]fspl-*.fspl\f[R] pattern, which will be valid for a job file in a Filespooler queue. This is often useful when generating a filename that will be used by a tool other than \f[B]fspl queue-write\f[R]. .PP Usage: .PP \f[B]fspl gen-filename\f[R] .PP Example: .IP .nf \f[C] fspl gen-filename fspl-b3bd6e63-f62c-49ee-8c46-6677069d2c58.fspl \f[R] .fi .SS fspl ... gen-uuid .PP Generates a random UUID and prints it to stdout. This is generated using the same algorithm as \f[B]fspl queue-write\f[R] uses. It can be used in scripts for making your own unique filenames. .PP Usage: .PP \f[B]fspl gen-uuid\f[R] .PP Example: .IP .nf \f[C] fspl gen-uuid 2896c849-37c5-4a6d-8b90-0cf63e3e9daa \f[R] .fi .SS fspl show-license .PP Displays the copyright and license information for fspl. 
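.PP As a further illustration of \f[B]gen-filename\f[R] and \f[B]gen-uuid\f[R] above, a hypothetical script fragment might store a prepared packet under a generated job filename in a directory that some other tool synchronizes into the queue; the seqfile path, staging path, and input file here are examples only, not part of fspl itself: .IP .nf \f[C] fspl prepare -s \[ti]/state -i somefile > /sync/staging/$(fspl gen-filename) \f[R] .fi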
.SH AUTHOR .PP John Goerzen .SH HOMEPAGE .PP .SH COPYRIGHT AND LICENSE .PP Copyright (C) 2022 John Goerzen .PP This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. .PP This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. .PP You should have received a copy of the GNU General Public License along with this program. If not, see . .SH AUTHORS John Goerzen. filespooler-1.2.3/doc/fspl.1.md000064400000000000000000000660621046102023000143560ustar 00000000000000% fspl(1) John Goerzen | fspl Manual % John Goerzen % May 2022 # NAME fspl - sequential, distributed job queue processing # SYNOPSIS **fspl** [ *OPTIONS* ] **COMMAND** [ *command_options* ] # OVERVIEW **fspl** is the CLI part of the [Filespooler](https://www.complete.org/filespooler) package. **fspl** is a Unix-style tool that facilitates local or remote command execution, complete with stdin capture, with easy integration with various tools. Here's a brief Filespooler feature list: - It can easily use tools such as S3, Dropbox, Syncthing, NNCP, ssh, UUCP, USB drives, CDs, etc. as transport. - Translation: you can use basically anything that is a filesystem as a transport - It can use arbitrary decoder command pipelines (eg, zcat, stdcat, gpg, age, etc) to pre-process stored packets. - It can send and receive packets by pipes. - Its storage format is simple on-disk files with locking. - It supports one-to-one and one-to-many configurations. - Locking is unnecessary when writing new jobs to the queue, and many arbitrary tools (eg, Syncthing, Dropbox, etc) can safely write directly to the queue without any assistance. - Queue processing is strictly ordered based on the order on the creation machine, even if job files are delivered out of order to the destination. - stdin can be piped into the job creation tool, and piped to a later executor at process time on a remote machine. - The file format is lightweight; less than 100 bytes overhead unless large extra parameters are given. - The queue format is lightweight; having 1000 different queues on a Raspberry Pi would be easy. - Processing is stream-based throughout; arbitrarily-large packets are fine and sizes in the TB range are no problem. - The Filespooler command, fspl, is extremely lightweight, consuming less than 10MB of RAM on x86_64. - Filespooler has extensive documentation. Filespooler consists of a command-line tool (fspl) for interacting with queues. It also consists of a Rust library that is used by fspl. main.rs for fspl is just a few lines long. # A WORD ABOUT DOCUMENTATION This manual is the reference for fspl. The filespooler homepage, contains many examples, instructions on how to integrate with everything from file syncers to encryption tools, and so forth. Please refer to it for further information. # BASIC OPERATION The basic idea is this: - Before starting, on the receiving end, you run **fspl queue-init** to prepare a queue directory. - On the sending end, you use **fspl prepare** to prepare a job file (packet). This packet is written to stdout. From there, you can pipe it to **fspl write** to inject it into a local queue, or use various kinds of transport to get it to a remote machine. 
- You use **fspl queue-process** to execute packets. - Alternatively, the **fspl stdin-** series of commands let you have more manual control over queue processing, accepting job packets in stdin. They can let you completely ignore the built-in queue mechanism if you so desire. # ON-DISK FORMATS The key way to ensure the ordered processing of the job queue is with a sequence number. This is a 64-bit unsigned integer. It is stored in a *seqfile* on both the sending and the receiving side. On the sending side, the seqfile is standalone; there is only an accompanying `.lock` file for it. On the receiving side, the seqfile and its accompanying lock file live within the queue directory. When the seqfile is referenced on the sending side, it will be created and initialized with the value **1** if it does not already exist. On the receiving side, it is created as part of **fspl queue-init**. In either case, the seqfile consists of one newline-terminated line, containing the next number to process. On the sending side, this is used by **fspl prepare** as the sequence number for the next generated packet. On the receiving side, it is used by **fspl queue-process** to determine which job to process next (unless changed by **--order-by**). ## THE QUEUE The queue has this general layout: queuedir/ Top-level queue directory nextseq Sequence file nextseq.lock Lock file jobs/ Job files stored here When passing the **--queuedir** to one of the **fspl queue-** commands, you give it the path to the top-level queuedir as shown here. You are free to create additional directories within the **queuedir** so long as they don't use one of the names listed above. This can be helpful for receiving queue contents in certain situations. ### Append-Only Queues You can specify **--append-only** to **fspl queue-init**, which will cause the **nextseq** and **nextseq.lock** files to be omitted. This has the effect of making the queue write-only. This can be useful if you are synchronizing the **jobs** subdirectory between machines, but still want to be able to use **fspl queue-write** to add jobs to that folder. It will prevent **fspl queue-process** from running. You can still inspect an append-only queue with commands like **fspl queue-ls** and **fspl queue-info**. ## JOB FILES Job files live within **queuedir/jobs**. They all must follow this naming pattern: fspl-*.fspl This pattern is specifically designed to facilitate safe injection of job files into the queue by other tools. Many other tools prepend or append a temporary string to a filename to signify that it has not yet been fully transferred. The Filespooler assumption is that once a file appears in **jobs/** with a name matching this pattern, then it has been fully transferred and can be processed at any time. So long as the filename begins with **fspl-** and ends with **.fspl**, you are free to put whatever string you like in the middle. The only other requirement, of course, is that each job must have a unique filename within the directory. To simplify things, you can pipe a job file to **fspl queue-write** and let that command take care of naming. Or, you can generate a random (or non-random) string yourself in a shell script. The job file itself consists of a small binary header, which is CRC32-checked. This header is normally less than 100 bytes and the length of it is encoded within the file. Following the header, if **--input** was given to **fspl prepare**, whatever was piped to **prepare** is included as the "payload".
This will be piped to the executor command when run by **fspl queue-process** or **fspl stdin-process**. The payload is not validated by CRC or length by Filespooler, since this is assumed to be the role of the transport layer. The website contains examples of using GPG or other tools to ensure integrity. There are three types of job files: - Command, created by **fspl prepare**. This is the typical kind of job file, and is used to request the execution of a command by the processor. - NOP, created by **fspl prepare-nop**. This is a "no-op" job file, which does not run a command but is considered to always succeed. - Fail, created by **fspl prepare-fail**. This is a "fail" job file, which does not run a command but is considered to always fail. This could be useful, for instance, to create a "barrier" to prevent a queue processor from continuing to execute commands past that point without human intervention. ## ADDING FILES TO THE QUEUE To expand slightly on the discussion above about adding files to the queue: A common way to do this if your transport tool doesn't use a nice temporary name is to transport the file to an adjacent directory, and then use **mv(1)** or, better, make a hard link with **ln(1)** to get the file into the jobs/ directory. Note that in both cases, you must take care that you are not crossing a filesystem boundary; on some platforms such as Linux, mv will revert to copy instead of rename if you cross the boundary and then the assumptions about completeness are violated. ## JOB FILE ENCODING AND DECODING Job files are, by default, stored exactly as laid out above. However, in many cases, it may be desirable to store them "encoded" - compressed or encrypted. In this case, the output from **fspl prepare** can be piped through, say, **gzip** and the resulting packet can still be stored in **jobs/** by **fspl queue-write** or any related tool. Now, however, we arrive at the question: how can Filespooler process a queue containing files that have been compressed, encrypted, or so forth? Every **fspl queue** command takes an optional **--decoder** (or **-d**) parameter, which is a command string that will be executed by the shell. This decoder command will receive the entire job file (not just the payload) piped to it on stdin, and is expected to write the decoded file to stdout. The **fspl stdin** pairs to the queue commands do not accept a decoder parameter, since it is assumed you would do that in the pipeline on the way to the stdin command. For instance: date | fspl prepare -s ~/state -i - | gzip | fspl queue-write -q ~/queue fspl queue-ls -q ~/queue -d zcat ID creation timestamp filename 48 2022-05-07T21:07:02-05:00 fspl-48aa52ad-c65c-478a-9d37-123d4bebcb30.fspl Normally, **fspl** ignores files that fail to decode the header. If you omit the **--decoder**, it may just look like your queue is empty. (Using **--log-level=debug** will illuminate what is happening.) # DISTRIBUTED NATURE OF FILESPOOLER As mentioned, Filespooler is designed to be used as a distributed, asynchronous, ordered command queue. The homepage contains many more examples. Here is one simple example of using ssh as a transport to get commands to a remote queue: tar -cpf - /usr/local | fspl prepare -s ~/state -i - | ssh remote fspl queue-write -q ~/queue # INSTALLATION **fspl** is a Rust program. If you don't already have Rust installed, it can be easily installed from .
Once Rust is installed, Filespooler can be installed with this command: cargo install filespooler From a checked-out source tree, it can be built by running **`cargo build --release`**. The executable will then be placed in **target/release/fspl**. You can also obtain pre-built binaries for x86_64 Linux from . # ENVIRONMENT **fspl prepare** will save certain environment variables to the packet, which will be set later at process time. **fspl {queue,stdin}-process** will set a number of useful environment variables in the execution environment. **fspl {queue,stdin}-info** will show the environment that will be passed to the commands. See each of these for further discussion. # EXIT CODE In general, the commands exit with 0 on success and nonzero on failure. The concept of success and failure can be complicated in some situations; see the discussion of the process command. These situations explicitly cause a nonzero (error) exit code: - Failure to obtain a lock (see "locking and concurrency" below), but only if a lock is required; for many commands, no lock is needed. - An I/O error - For commands that require a specific job ID (eg, **fspl queue-info**), no job with that ID can be located - While processing, the executed command returns a nonzero exit status and **--on-error** is set to **Retry** (the default) - In some cases, the presence of multiple files in the queuedir with the same sequence number. The presence of this condition with commands that take a **-j ID** option, or with **queue-process** in its standard configuration, will cause an error. - However, this condition is acceptable for **queue-ls** and **queue-process --order-by=Timestamp**. These situations explicitly terminate with success (0): - While processing, the **--maxjobs** limit is reached before some other error causes an abnormal exit - An error while running a command while **--on-error** is set to **Delete** or **Leave** - Files are encountered in the queuedir/jobs directory with unparsable headers. **fspl** detects and logs (subject to **--log-level**) this condition, but does not consider it an error, on the grounds that the presence of extra data should not prevent the proper functioning of the queue. This may manifest itself in the queue appearing to have nothing to do, **queue-ls** showing fewer jobs than there are files, etc. A common cause of this may be an incorrect **--decoder**. - Zero jobs in the queue, or zero jobs available to process. # LOCKING AND CONCURRENCY Next to every **seqfile** on both the sender and within the queue on the recipient is a file named *seqfile*.lock. An exclusive lock is held on this file during the following conditions: - On the sender with **fspl prepare** and related functions, briefly while obtaining the next sequence number. Once this is done, the lock is released, even if the process of consuming stdin takes a long time. - On the recipient, when processing the queue with **fspl queue-process** or other commands that access the seqfile (eg, **fspl queue-set-next**). fspl will exit with an error code if it cannot obtain the lock when it needs it. These are situations that explicitly do *NOT* obtain a lock: - **fspl queue-write** or other non-fspl method of injecting packets into the queue - The **fspl stdin-** series of commands - Commands that scan the queue without accessing the state of the seqfile. Examples include **queue-ls**, **queue-info**, and **queue-payload**.
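For illustration, the following inspection commands read the queue without taking the lock, so they can generally be run even while **fspl queue-process** holds the seqfile lock (the queue path and job ID are examples only; see the race-condition caveat in the next paragraph):

```
fspl queue-ls -q ~/queue
fspl queue-info -q ~/queue -j 48
fspl queue-payload -q ~/queue -j 48 > payload.bin
```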
Note that if the queue is being actively processed while a **queue-ls** is in process, a race condition is possible if a file disappears between the readdir() call and the time the file is opened for reading, which could potentially cause queue-ls to fail. queue-ls intentionally does not attempt to acquire the lock, however, because it would *always* fail while the queue is being processed in that case, preventing one from being able to list the queue at all while long-running jobs are in process. Note that **fspl queue-write** does not need to obtain a lock. The **fspl stdin-** series of commands also do not obtain a lock. Taken together, this means that any given queue is intended to be processed sequentially, not in parallel. However, if parallel processing is desired, it is trivial to iterate over the jobs and use **fspl stdin-process** in whatever custom manner you would like. Also, since queues are so lightweight, there is no problem with creating thousands of them. # INVOCATION: GLOBAL OPTIONS These options may be specified for any command, and must be given before the command on the command line. **-l**, **--log-level** *LEVEL* : Information about the progress of **fspl** is written to stderr. This parameter controls how much information is written. In order from most to least information, the options are: trace, debug, info, warn, error. The default is info. **-V**, **--version** : Print version information and exit **-h**, **--help** : Print help information and exit. Can also be given after a subcommand, in which case it displays more detailed information about that subcommand. *COMMAND* : The subcommand which will be executed. Required unless using **--version** or **--help**. # INVOCATION: SUBCOMMANDS Every subcommand accepts **--help** to display a brief summary of options, invoked as: **fspl** *SUBCOMMAND* **--help** . ## fspl ... prepare Generates a packet (job file data) and writes it to stdout. This file can be piped to other programs (particularly **fspl queue-write**) or saved directly to disk. Usage: **fspl** **prepare** [ *OPTIONS* ] **-s** *FILE* [ **-- PARAMS...** ] **-s**, **--seqfile** *FILE* : Path to the local seqfile. If it does not already exist, it will be created. If set to "-", then no sequence file is used and the sequence emitted will always be 1. **-i**, **--input** *INPUT* : By default, prepare will not read anything as payload. If *INPUT* is set to "-", then prepare will read standard input (stdin) and use it as input. Otherwise, if *INPUT* is anything other than "-", it is assumed to be a filename, which is opened and read for input. **-- *PARAMS*...** : If a "--" is present on the command line, everything after it is taken as parameters to be added to the generated job packet. When the packet is later processed, if **--allow-job-params** is given to **queue-process** or **stdin-process**, then these parameters will be appended to the command line of the executed command. In addition to these options, any environment variable beginning with **FSPL_SET_** will be saved in the packet and will be set in the execution environment at processing time. ## fspl ... prepare-fail, prepare-nop These commands create a non-command packet, one which is either considered to always fail or to always succeed (nop). These two commands take only one option, which is required: **-s**, **--seqfile** *FILE* : Path to the local seqfile. Required. If set to "-", then no sequence file is used and the sequence emitted will always be 1. ## fspl ...
prepare-get-next Prints the sequence number that will be used by the next prepare command. Usage: **fspl** **prepare-get-next** **-s** *FILE* **-s**, **--seqfile** *FILE* : Path to the local seqfile. Required. If set to "-", then no sequence file is used and the sequence emitted will always be 1. ## fspl ... prepare-set-next Changes the sequence number that will be used by the next prepare command. Usage: **fspl** **prepare-set-next** **-s** *FILE* *ID* **-s**, **--seqfile** *FILE* : Path to the local seqfile. Required. : *ID* : The numeric ID to set the seqfile to. ## fspl ... stdin-info, queue-info These two commands display information about a given packet. This information is printed to stdout in a style that is similar to how the shell sets environment variables. In fact, it shows precisely the environment variables that will be set by a corresponding **process** command. stdin-info expects the packet to be piped in to stdin; queue-info will find it in the given queue. This command will not attempt to read the payload of the file; it will only read the header. (Note that this is not a guarantee that some layer of the system may not try to read a few KB past the header, merely a note that running this command will not try to read all of a 1TB packet.) Usage: **fspl** **queue-info** [ *OPTIONS* ] **-q** *DIR* **-j** *ID* **fspl** **stdin-info** Options (valid for **queue-info** only): **-q**, **--queuedir** *DIR* : Path to the local queue directory. Required. **-j**, **--job** *ID* : Numeric job ID to process. See **fspl queue-ls** to determine this. Required. **-d**, **--decoder** *DECODECMD* : Decoder command to run. This string is passed to **$SHELL -c**. See the above conversation about decoders. Optional. Example: ``` fspl queue-info -q /tmp/queue -j 45 -d zcat FSPL_SEQ=45 FSPL_CTIME_SECS=1651970311 FSPL_CTIME_NANOS=425412511 FSPL_CTIME_RFC3339_UTC=2022-05-08T00:38:31Z FSPL_CTIME_RFC3339_LOCAL=2022-05-07T19:38:31-05:00 FSPL_JOB_FILENAME=fspl-29342606-02a0-438c-81f2-efdfb80afbe9.fspl FSPL_JOB_QUEUEDIR=/tmp/bar FSPL_JOB_FULLPATH=/tmp/bar/jobs/fspl-29342606-02a0-438c-81f2-efdfb80afbe9.fspl FSPL_PARAM_1=hithere FSPL_SET_FOO=bar ``` Some notes on these variables: - The **FSPL_JOB_FILENAME** is relative to the jobs subdirectory of the queue directory. - The **FSPL_JOB_FULLPATH** is relative to the current working directory; that is, it is what was given by **-q** plus the path within that directory to the filename. It is not guaranteed to be absolute. - **FSPL_PARAM_n** will be set to the optional parameters passed to **fspl prepare**, with n starting at 1. - **FSPL_SET_x** will reflect any **FSPL_SET_x** parameters that were in the environment when **fspl prepare** was run. - Filespooler does not enforce limits to environment variable content. If you want to do something like embed newlines in variable content, Filespooler will happily accept this (since it is valid POSIX) and handle it properly - but your shell scripts may not be so lucky. It is advisable that you avoid this and other weird constructions for your sanity in working with things outside Filespooler - though Filespooler won't prevent you from doing it. ## fspl ... stdin-payload, queue-payload These two commands extract the payload (if any) from the given packet. This is written to stdout. No header or other information is written to stdout. stdin-payload expects the packet to be piped in to stdin; queue-payload will find it in the given queue. The payload will be piped to the command started by the process commands.
## fspl ... stdin-payload, queue-payload

These two commands extract the payload (if any) from the given packet and write it to stdout. No header or other information is written to stdout. stdin-payload expects the packet to be piped in on stdin; queue-payload will find it in the given queue. The payload is the same data that will be piped to the command started by the process commands.

The payload will be zero bytes if **-i** was not passed to **fspl prepare**, or if an empty payload was given to it.

Usage:

**fspl** **queue-payload** [ *OPTIONS* ] **-q** *DIR* **-j** *ID*

**fspl** **stdin-payload**

Options (valid for **queue-payload** only):

**-q**, **--queuedir** *DIR*
: Path to the local queue directory. Required.

**-j**, **--job** *ID*
: Numeric job ID to process. See **fspl queue-ls** to determine this. Required.

**-d**, **--decoder** *DECODECMD*
: Decoder command to run. This string is passed to **$SHELL -c**. See the discussion of decoders above. Optional.

## fspl ... stdin-process, queue-process

Process packet(s). stdin-process will process exactly one packet read from stdin. queue-process will process zero or more packets, depending on the content of the queue and the options given.

Usage:

**fspl** **queue-process** [ *OPTIONS* ] **-q** *DIR* *COMMAND* [ **-- PARAMS...** ]

**fspl** **stdin-process** [ *OPTIONS* ] *COMMAND* [ **-- PARAMS...** ]

Common options:

**--allow-job-params**
: Specifies that optional parameters given to **fspl prepare** will be passed on the command line to the executed command.

**--ignore-payload**
: Ignores the payload; does not pipe it to the command.

**--timeout** *SECONDS*
: Specifies a timeout, in seconds, for the command. If the command has not exited within that timeframe, SIGKILL is sent to the process. Failing to exit within the timeout is considered an error for Filespooler's purposes.

*COMMAND*
: The command to run. This is _not_ passed to the shell, so it must point to an executable. This command will not be run for NOP or Fail packets.

**-- *PARAMS*...**
: If a "--" is present on the command line, everything after it is taken as parameters to be sent to the given command. If **--allow-job-params** is given, then those parameters will be sent after these.

Options valid only for **queue-process**:

**-q**, **--queuedir** *DIR*
: Path to the local queue directory. Required.

**-d**, **--decoder** *DECODECMD*
: Decoder command to run. This string is passed to **$SHELL -c**. See the discussion of decoders above. Optional.

**-n**, **--maxjobs** *JOBS*
: The maximum number of jobs to process. There is no limit by default.

**--never-delete**
: Never delete the job file after processing, in any circumstance, regardless of other options.

**--order-by** *ORDERING*
: In what order to process the queue. When set to **Sequence**, which is the default, process the queue in order of sequence number. When set to **Timestamp**, process the queue in order of the creation timestamp as it appears in the job header. Note that when set to **Timestamp**, the seqfile within the queue is neither used nor changed. **Timestamp** implies that you do not care about a strict sequential ordering of items in cases where items arrive out of order.

**--on-error** *ONERROR*
: What to do when the supplied command fails (the packet is a fail packet, or the command exits with a nonzero status). If set to **Retry**, abort processing with a nonzero error code and leave the packet in the queue to be tried again by a later invocation of **queue-process**. If set to **Delete**, delete the packet from the queue (unless **--never-delete** is given), increment the next job counter, and continue processing the queue normally. If set to **Leave**, leave the packet on disk, increment the next job counter, and continue processing the rest of the queue normally. **Retry** is the only option that will cause a failure to not increment the next job counter. **Retry** is the default.

**--output-to** *DEST*
: What to do with the stdout and stderr of the invoked command. If set to **PassBoth**, they are simply written to the stdout/stderr of **fspl queue-process**. If set to **SaveBoth**, both are written to a file in the queue's jobs directory named after the job file with **.out** appended. It is up to you to process this file whenever you wish. The default is **PassBoth**.

The environment is set as described above. Note that since no queue directory or filename is relevant with the **stdin-process** flavor, those variables are unset under **stdin-process**.

To skip a failing job at the head of the queue, you can use **fspl queue-set-next**; alternatively, **fspl queue-process --on-error Delete --maxjobs 1** will cause it to be deleted. You would probably not wish to combine this with timestamp ordering.
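Putting this together, a receiving side might drain its queue with something like the following. This is only a sketch; the queue path and handler command are hypothetical.

```
# Process up to 10 jobs from a hypothetical queue, passing --verbose to the handler.
fspl queue-process -q /var/spool/myqueue --maxjobs 10 --allow-job-params \
  /usr/local/bin/handler.sh -- --verbose
```

Because **--on-error** defaults to **Retry**, a failing job stops processing and remains in the queue for the next run.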
## fspl ... queue-set-next

Changes the sequence number that will be used by the next **fspl queue-process** command.

Usage: **fspl** **queue-set-next** **-q** *DIR* *ID*

**-q**, **--queuedir** *DIR*
: Path to the local queue directory. Required.

*ID*
: The numeric ID to set the seqfile to.

## fspl ... queue-write

Receives a packet on stdin and writes it to the queue. This command does not attempt to decode, process, or validate the packet in any way. It simply writes it to the queue safely, using a temporary filename until the packet is completely written, at which point it is renamed to a **fspl-*.fspl** file with a random middle part.

Usage: **fspl** **queue-write** **-q** *DIR*

**-q**, **--queuedir** *DIR*
: Path to the local queue directory. Required.

## fspl ... queue-init

Creates the queue directory and the needed files and subdirectories within it.

Usage: **fspl** **queue-init** **-q** *DIR*

**-q**, **--queuedir** *DIR*
: Path to the local queue directory. Required.

**--append-only**
: Creates an append-only queue.

## fspl ... gen-filename

Generates a filename matching the `fspl-*.fspl` pattern, which will be valid for a job file in a Filespooler queue. This is often useful when generating a filename that will be used by a tool other than **fspl queue-write**.

Usage: **fspl gen-filename**

Example:

```
fspl gen-filename
fspl-b3bd6e63-f62c-49ee-8c46-6677069d2c58.fspl
```

## fspl ... gen-uuid

Generates a random UUID and prints it to stdout. This is generated using the same algorithm as **fspl queue-write** uses. It can be used in scripts for making your own unique filenames.

Usage: **fspl gen-uuid**

Example:

```
fspl gen-uuid
2896c849-37c5-4a6d-8b90-0cf63e3e9daa
```

## fspl show-license

Displays the copyright and license information for fspl.

# AUTHOR

John Goerzen

# HOMEPAGE

# COPYRIGHT AND LICENSE

Copyright (C) 2022 John Goerzen

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program. If not, see .

filespooler-1.2.3/src/cmd/cmd_exec.rs000064400000000000000000000344331046102023000156300ustar 00000000000000/*!
Tools executing jobs */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use crate::cmd::cmd_queue::QueueOptsWithDecoder; use crate::exec; use crate::header::{CommandTypeV1, FSMetaV1}; use crate::jobfile; use crate::jobqueue::*; use crate::seqfile; use crate::seqfile::SeqFile; use crate::util::get_unbuffered_stdin; use anyhow::{anyhow, bail}; use clap::Args; use std::collections::BTreeMap; use std::ffi::OsStr; use std::ffi::OsString; use std::fmt; use std::fmt::Debug; use std::fs::{remove_file, File}; use std::os::unix::ffi::OsStrExt; use std::process::{ExitStatus, Stdio}; use std::str::FromStr; use tracing::*; /// Option for what to do if an error is encountered while processing a queue #[derive(Debug, Clone)] pub enum OnError { /// Delete the file and mark the job as done; do not return an error code. If --never-delete /// is given, this becomes the same as Leave. Delete, /// Leave the file and mark the job as done; do not return an error code. Leave, /// Abort processing with an error code and do not mark the job as done. It can be retried by a subsequent queue processing command. Retry, } // consider enum-utils fromstr derivation or others at // https://crates.io/search?q=enum_utils impl FromStr for OnError { type Err = anyhow::Error; fn from_str(s: &str) -> Result { match s.trim().to_lowercase().as_str() { "delete" => Ok(OnError::Delete), "leave" => Ok(OnError::Leave), "retry" => Ok(OnError::Retry), _ => Err(anyhow!( "on-error must be 'delete', 'leave', or 'retry'" )), } } } impl fmt::Display for OnError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } /// Option for what to do with output #[derive(Debug, Clone)] pub enum OutputTo { /// Don't capture; use the calling environment's stdout/stderr PassBoth, /// Write both stdout and stderr to a file SaveBoth, } // consider enum-utils fromstr derivation or others at // https://crates.io/search?q=enum_utils impl FromStr for OutputTo { type Err = anyhow::Error; fn from_str(s: &str) -> Result { match s.trim().to_lowercase().as_str() { "passboth" => Ok(OutputTo::PassBoth), "saveboth" => Ok(OutputTo::SaveBoth), _ => Err(anyhow!("output-to must be 'passboth' or 'saveboth'")), } } } impl fmt::Display for OutputTo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } #[derive(Debug, Args)] pub struct ProcessOpts { /// Allow the parameters from the job to be appended to the command's /// command line (not doing so by default) #[arg(long)] pub allow_job_params: bool, /// Ignore the payload; do not pass it to the command. #[arg(long)] pub ignore_payload: bool, /// A timeout, in seconds, for the command. #[arg(long, value_name = "SECONDS")] pub timeout: Option, /// The command to run #[arg(value_name = "COMMAND")] pub command: OsString, /// Optional parameters to pass to COMMAND, before those contained /// in the request file. 
#[arg(last = true)] pub params: Vec, } #[derive(Debug, Eq, PartialEq, Clone)] pub enum OrderBy { /// Order by sequence number (default) Sequence, /// Order by timestamp Timestamp, } // consider enum-utils fromstr derivation or others at // https://crates.io/search?q=enum_utils impl FromStr for OrderBy { type Err = anyhow::Error; fn from_str(s: &str) -> Result { match s.trim().to_lowercase().as_str() { "sequence" => Ok(OrderBy::Sequence), "timestamp" => Ok(OrderBy::Timestamp), _ => Err(anyhow!("order-by must be 'sequence' or 'timestamp'")), } } } impl fmt::Display for OrderBy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } #[derive(Debug, Args)] pub struct QueueProcessOpts { #[clap(flatten)] pub qdopts: QueueOptsWithDecoder, /// What to do with an error. Can be Delete, Leave, or Retry. #[arg(long, value_name="ONERROR", default_value_t = OnError::Retry)] pub on_error: OnError, /// In what order to process jobs. Can be Sequence or Timestamp. #[arg(long, value_name="ORDERING", default_value_t = OrderBy::Sequence)] pub order_by: OrderBy, /// Never delete processed job files in any circumstance #[arg(long)] pub never_delete: bool, /// Maximum number of jobs to process #[arg(long, value_name = "JOBS", short = 'n')] pub maxjobs: Option, /// What to do with stdout and stderr of the process. Can be PassBoth or SaveBoth. #[arg(long, value_name="DEST", default_value_t = OutputTo::PassBoth)] pub output_to: OutputTo, #[clap(flatten)] pub processopts: ProcessOpts, } /// The result type from running a command. #[derive(Debug, Eq, PartialEq)] pub enum CmdResult { Success, Failure, ExitResult(ExitStatus), } impl CmdResult { fn success(&self) -> bool { match self { CmdResult::Success => true, CmdResult::Failure => false, CmdResult::ExitResult(es) => es.success(), } } } /// Process some input to execute, optionally writing the output to a given File. /// If the input is None, use stdin. pub fn cmd_exec_generic( opts: &ProcessOpts, mut input: Option, inputfilename: Option<&OsString>, queuedir: Option<&OsString>, output: Option, ) -> Result { let mut stdindrop = None; let meta = match &mut input { Some(x) => x.with_read(|y| jobfile::read_jobfile_header(y))?, None => { // Standard stdin is buffered; bleh. Have to deal with that here. let mut stdin = get_unbuffered_stdin(); let ret = jobfile::read_jobfile_header(&mut stdin)?; stdindrop = Some(stdin); ret } }; let input = if opts.ignore_payload { // Just to be really clear, we will not be processing input // in this case! std::mem::drop(input); None } else { input }; let res = match meta.cmd_type { CommandTypeV1::NOP => Ok(CmdResult::Success), CommandTypeV1::Fail => Ok(CmdResult::Failure), CommandTypeV1::Command(ref copts) => { let mut params = opts.params.clone(); if opts.allow_job_params { let jobparams: Vec = copts .params .iter() .map(|x| OsStr::from_bytes(x).to_os_string()) .collect(); params.extend(jobparams); } else if !copts.params.is_empty() { warn!("Parameters given in job ignored because --allow-job-params not given"); } debug!( "Preparing to execute job {} from {:?}", meta.seq, &inputfilename ); let env = meta.get_envvars(inputfilename, queuedir); if let Some(outfile) = output { Ok(CmdResult::ExitResult(exec::exec_job( &opts.command, ¶ms, env, input, Stdio::from(outfile.try_clone()?), Stdio::from(outfile), opts.timeout, )?)) } else { Ok(CmdResult::ExitResult(exec::exec_job( &opts.command, ¶ms, env, input, Stdio::inherit(), Stdio::inherit(), opts.timeout, )?)) } } }; // Explicitly hold it until here. 
Not strictly necessary; just for clarity. std::mem::drop(stdindrop); res } /// Execute a job using a job file piped in on stdin. This is just a very /// thin wrapper around [`cmd_exec_generic`]. pub fn cmd_execstdin(opts: &ProcessOpts) -> Result<(), anyhow::Error> { let res = cmd_exec_generic(opts, None, None, None, None)?; if res.success() { Ok(()) } else { bail!("Command exited with non-success status {:?}", res); } } pub trait SeqIncrement { /// Increments an embedded seqf if we are a sequence-order iterator. fn increment_if_seq_order(&mut self) -> Result<(), anyhow::Error>; } /// An iterator over the jobs in sequence order. An external command /// must be used to increment the seqfile. pub struct JobsBySeqIter<'a> { seqf: SeqFile<'a>, map: BTreeMap, } impl<'a> Iterator for JobsBySeqIter<'a> { type Item = (OsString, FSMetaV1); fn next(&mut self) -> Option { self.map.remove(&self.seqf.get_next()) } } /// An iterator over the jobs in timestamp order. pub struct JobsByTimestampIter<'a> { #[allow(dead_code)] seqf: SeqFile<'a>, entries: Box>, } impl<'a> JobsByTimestampIter<'a> { pub fn new(seqf: SeqFile<'a>, mut entries: Vec<(OsString, FSMetaV1)>) -> Self { entries.sort_by(|(_, ma), (_, mb)| ma.ctime.cmp(&mb.ctime)); JobsByTimestampIter { seqf, entries: Box::new(entries.into_iter()), } } } impl<'a> Iterator for JobsByTimestampIter<'a> { type Item = (OsString, FSMetaV1); fn next(&mut self) -> Option { self.entries.next() } } impl<'a> SeqIncrement for JobsBySeqIter<'a> { fn increment_if_seq_order(&mut self) -> Result<(), anyhow::Error> { self.seqf.increment()?; Ok(()) } } impl<'a> SeqIncrement for JobsByTimestampIter<'a> { fn increment_if_seq_order(&mut self) -> Result<(), anyhow::Error> { Ok(()) } } pub trait IncrementJobsIter: Iterator + SeqIncrement {} impl<'a> IncrementJobsIter for JobsByTimestampIter<'a> {} impl<'a> IncrementJobsIter for JobsBySeqIter<'a> {} /// The main worker of the program; it executes a queue. pub fn cmd_execqueue(opts: &QueueProcessOpts) -> Result<(), anyhow::Error> { let seqfn = get_seqfile(&opts.qdopts.qopts.queuedir); let mut lock = seqfile::prepare_seqfile_lock(&seqfn, false)?; let seqf = SeqFile::open(&seqfn, &mut lock)?; let mut iter: Box = match opts.order_by { OrderBy::Sequence => { let qmap = scanqueue_map(&opts.qdopts.qopts.queuedir, &opts.qdopts.decoder)?; Box::new(JobsBySeqIter { seqf, map: qmap }) } OrderBy::Timestamp => { let entries: Vec<(OsString, FSMetaV1)> = scanqueue(&opts.qdopts.qopts.queuedir, &opts.qdopts.decoder)? 
.flatten() .collect(); Box::new(JobsByTimestampIter::new(seqf, entries)) } }; let mut jobsprocessed = 0; let maxjobs = opts.maxjobs.unwrap_or(u64::MAX); while let Some((filename, _meta)) = iter.next() { if jobsprocessed >= maxjobs { debug!( "Stopping processing as requested maximum jobs processed {} has been hit", maxjobs ); return Ok(()); } jobsprocessed += 1; let mut inputinfo = queue_openjob(&opts.qdopts.qopts.queuedir, &filename, &opts.qdopts.decoder)?; let inhandle = inputinfo.reader.take().unwrap(); let fullfilename = queue_genfilename(&opts.qdopts.qopts.queuedir, &filename); let res = match opts.output_to { OutputTo::PassBoth => cmd_exec_generic( &opts.processopts, Some(inhandle), Some(&filename.clone()), Some(&opts.qdopts.qopts.queuedir), None, )?, OutputTo::SaveBoth => { let mut savefilename = OsString::from(&fullfilename); savefilename.push(".out"); trace!( "Writing stdout and stderr to {:?} per request", savefilename ); let savefile = File::create(savefilename)?; cmd_exec_generic( &opts.processopts, Some(inhandle), Some(&filename.clone()), Some(&opts.qdopts.qopts.queuedir), Some(savefile), )? } }; match (res.success(), &opts.on_error) { (true, _) | (false, OnError::Delete) => { iter.increment_if_seq_order()?; if !opts.never_delete { // Now we can make sure to drop the decoder before deleting. Probably // harmless not to on POSIX - early versions didn't with no problems - // but it feels like good form to do so. std::mem::drop(inputinfo); debug!("Deleting {:?}", fullfilename); remove_file(fullfilename)?; } } (false, OnError::Retry) => bail!( "Aborting processing due to exit status {:?} from command", res ), (false, OnError::Leave) => { iter.increment_if_seq_order()?; } } } match opts.order_by { OrderBy::Sequence => debug!( "Sequence processing: Stopping processing as there is no job with the next sequence ID to process"), OrderBy::Timestamp => debug!( "Timestamp processing: Stopping processing as all jobs in queue have been processed"), }; Ok(()) } filespooler-1.2.3/src/cmd/cmd_info.rs000064400000000000000000000025341046102023000156340ustar 00000000000000/* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ use crate::jobfile; /// Displays info about a packet piped in on stdin pub fn cmd_infostdin() -> Result<(), anyhow::Error> { let stdin_base = std::io::stdin(); let mut stdin_fd = stdin_base.lock(); let meta = jobfile::read_jobfile_header(&mut stdin_fd)?; meta.render(None, None, &mut std::io::stdout())?; Ok(()) } /// Dumps the payload from a packet piped in on stdin pub fn cmd_payloadstdin() -> Result<(), anyhow::Error> { let stdin_base = std::io::stdin(); let mut stdin_fd = stdin_base.lock(); jobfile::read_jobfile_header(&mut stdin_fd)?; std::io::copy(&mut stdin_fd, &mut std::io::stdout())?; Ok(()) } filespooler-1.2.3/src/cmd/cmd_prepare.rs000064400000000000000000000133241046102023000163360ustar 00000000000000/* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use crate::header::{CommandDataV1, CommandTypeV1, FSMetaV1, ENV_PREFIX}; use crate::jobfile; use crate::seqfile; use anyhow; use clap::Args; use std::ffi::OsString; use std::os::unix::ffi::OsStrExt; /// The options relating to the state file #[derive(Debug, Args)] pub struct SFOpts { /// The path to the state file. Set to "-" to not use any state file at all. #[arg(short, long, value_name = "FILE")] pub seqfile: OsString, } /// The options for setting the next value in the sequence file #[derive(Debug, Args)] pub struct SFSetOpts { #[clap(flatten)] sfopts: SFOpts, /// The ID to set it to. #[arg(value_name = "ID")] pub nextid: u64, } /** The options passed to a prepare operation. These can be directly parsed from the CLI. */ #[derive(Debug, Args)] pub struct PrepareOpts { #[clap(flatten)] pub sfopts: SFOpts, /// Path to file to use for input. Set to "-" to use stdin. If not given, no input is used. #[arg(short, long, value_name = "INPUT")] pub input: Option, /// Additional parameters to send along, if any #[arg(last = true)] pub params: Vec, } /// Print the sequence number pub fn cmd_preparegetnext(opts: &SFOpts) -> Result<(), anyhow::Error> { let mut lock = seqfile::prepare_seqfile_lock(&opts.seqfile, true)?; let seqf = seqfile::SeqFile::open(&opts.seqfile, &mut lock)?; println!("{}", seqf.get_next()); Ok(()) } /// Set the next sequence number pub fn cmd_preparesetnext(opts: &SFSetOpts) -> Result<(), anyhow::Error> { let mut lock = seqfile::prepare_seqfile_lock(&opts.sfopts.seqfile, true)?; let mut seqf = seqfile::SeqFile::open(&opts.sfopts.seqfile, &mut lock)?; seqf.set(opts.nextid)?; Ok(()) } /// Prepare a NOP packet pub fn cmd_preparenop(opts: &SFOpts) -> Result<(), anyhow::Error> { let mut lock = None; let mut sf = None; if opts.seqfile != "-" { lock = Some(seqfile::prepare_seqfile_lock(&opts.seqfile, true)?); sf = Some(seqfile::SeqFile::open( &opts.seqfile, lock.as_mut().unwrap(), )?); }; let meta = FSMetaV1 { seq: match sf { None => 1, Some(ref mut x) => x.increment()?, }, cmd_type: CommandTypeV1::NOP, ..FSMetaV1::default() }; // Release the lock. It's already incremented, so why not? 
std::mem::drop(sf); std::mem::drop(lock); let mut fd_stdout = std::io::stdout(); jobfile::write_jobfile_header(meta, &mut fd_stdout) } /// Prepare a Fail packet pub fn cmd_preparefail(opts: &SFOpts) -> Result<(), anyhow::Error> { let mut lock = None; let mut sf = None; if opts.seqfile != "-" { lock = Some(seqfile::prepare_seqfile_lock(&opts.seqfile, true)?); sf = Some(seqfile::SeqFile::open( &opts.seqfile, lock.as_mut().unwrap(), )?); }; let meta = FSMetaV1 { seq: match sf { None => 1, Some(ref mut x) => x.increment()?, }, cmd_type: CommandTypeV1::Fail, ..FSMetaV1::default() }; // Release the lock. It's already incremented, so why not? std::mem::drop(sf); std::mem::drop(lock); let mut fd_stdout = std::io::stdout(); jobfile::write_jobfile_header(meta, &mut fd_stdout) } /// Prepare a job packet pub fn cmd_prepare(opts: &PrepareOpts) -> Result<(), anyhow::Error> { let mut lock = None; let mut sf = None; if opts.sfopts.seqfile != "-" { lock = Some(seqfile::prepare_seqfile_lock(&opts.sfopts.seqfile, true)?); sf = Some(seqfile::SeqFile::open( &opts.sfopts.seqfile, lock.as_mut().unwrap(), )?); }; let u8params: Vec> = opts .params .iter() .map(|x| Vec::from(x.as_bytes())) .collect(); let mut envtosend: Vec<(Vec, Vec)> = vec![]; for (key, val) in std::env::vars_os() { if key.as_bytes().starts_with(ENV_PREFIX) { envtosend.push(( Vec::from(&key.as_bytes()[ENV_PREFIX.len()..]), Vec::from(val.as_bytes()), )); } } let meta = FSMetaV1 { seq: match sf { None => 1, Some(ref mut x) => x.increment()?, }, cmd_type: CommandTypeV1::Command(CommandDataV1 { params: u8params, env: envtosend, }), ..FSMetaV1::default() }; // Release the lock. It's already incremented, so why not? std::mem::drop(sf); std::mem::drop(lock); let mut fd_stdout = std::io::stdout(); if let Some(inpath) = &opts.input { if inpath == "-" { let mut fd_stdin = std::io::stdin(); jobfile::write_jobfile_header_payload(meta, &mut fd_stdin, &mut fd_stdout)?; } else { let mut fd_input = std::fs::File::open(inpath)?; jobfile::write_jobfile_header_payload(meta, &mut fd_input, &mut fd_stdout)?; } } else { jobfile::write_jobfile_header(meta, &mut fd_stdout)?; } Ok(()) } filespooler-1.2.3/src/cmd/cmd_queue.rs000064400000000000000000000124771046102023000160340ustar 00000000000000/*! Tools for managing the queues */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use crate::header::FSMetaV1; use crate::jobfile; use crate::jobqueue; use crate::seqfile; use anyhow::bail; use clap::Args; use std::ffi::OsString; use std::io::Write; use std::os::unix::ffi::OsStringExt; /// Generic CLI options for all operations on queues #[derive(Debug, Args)] pub struct QueueOpts { /// The path to the queue directory #[arg(short, long, value_name = "DIR")] pub queuedir: OsString, } /// Options for creating a queue #[derive(Debug, Args)] pub struct QueueInitOpts { #[clap(flatten)] pub qopts: QueueOpts, /// If yes, create the queue in write-only mode. 
#[arg(long)] pub append_only: bool, } /// Generic CLI options for all operations on queues that read packets #[derive(Debug, Args)] pub struct QueueOptsWithDecoder { #[clap(flatten)] pub qopts: QueueOpts, /// Command to decode the files. Unlike other commands, this one is executed by a shell ($SHELL -c) #[arg(long, short, value_name = "DECODECMD")] pub decoder: Option, } #[derive(Debug, Args)] pub struct QueueJobOpts { #[clap(flatten)] pub qdopts: QueueOptsWithDecoder, /// The ID of the job to process. #[arg(short, long, value_name = "ID")] pub job: u64, } #[derive(Debug, Args)] pub struct QueueSetSeqOpts { #[clap(flatten)] pub qopts: QueueOpts, /// The ID to set it to. #[arg(value_name = "ID")] pub nextid: u64, } pub fn cmd_queueinit(opts: &QueueInitOpts) -> Result<(), anyhow::Error> { jobqueue::queueinit(&opts.qopts.queuedir, opts.append_only) } /// Writes a packet in stdin to the queue. Does not need to obtain a lock /// to do so. pub fn cmd_queuewrite(opts: &QueueOpts) -> Result<(), anyhow::Error> { let mut stdin_fd = std::io::stdin(); jobqueue::queuewrite(&mut stdin_fd, &opts.queuedir) } /// Prints a listing of items in the queue. pub fn cmd_queuels(opts: &QueueOptsWithDecoder) -> Result<(), anyhow::Error> { let mut entries: Vec<(OsString, FSMetaV1)> = jobqueue::scanqueue(&opts.qopts.queuedir, &opts.decoder)? .flatten() .collect(); entries.sort_by(|(_, ma), (_, mb)| ma.seq.cmp(&mb.seq)); let mut out = std::io::stdout(); println!("{:20} {:27} filename", "ID", "creation timestamp"); for (filename, meta) in entries.into_iter() { print!( "{:<20} {:27} ", meta.seq, meta.get_datestring_rfc3339_local(), ); out.write_all(&filename.into_vec())?; out.write_all(b"\n")?; } Ok(()) } /// Prints info about a specific job. pub fn cmd_queueinfo(opts: &QueueJobOpts) -> Result<(), anyhow::Error> { let qmap = jobqueue::scanqueue_map(&opts.qdopts.qopts.queuedir, &opts.qdopts.decoder)?; match qmap.get(&opts.job) { Some((filename, meta)) => { meta.render( Some(filename), Some(&opts.qdopts.qopts.queuedir), &mut std::io::stdout(), )?; } None => bail!( "Job {} not found in directory {:?}", opts.job, opts.qdopts.qopts.queuedir ), } Ok(()) } /// Render the payload of a specific job. 
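/// The job header is read and skipped, and the remaining payload bytes are
/// copied to stdout. If a decoder command was given, the job file is run
/// through it before the header is parsed.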
pub fn cmd_queuepayload(opts: &QueueJobOpts) -> Result<(), anyhow::Error> { let qmap = jobqueue::scanqueue_map(&opts.qdopts.qopts.queuedir, &opts.qdopts.decoder)?; match qmap.get(&opts.job) { Some((filename, _meta)) => { let mut inputinfo = jobqueue::queue_openjob( &opts.qdopts.qopts.queuedir, filename, &opts.qdopts.decoder, )?; let mut inhandle = inputinfo.reader.take().unwrap().as_read(); let _meta = jobfile::read_jobfile_header(&mut inhandle)?; std::io::copy(&mut inhandle, &mut std::io::stdout())?; } None => bail!( "Job {} not found in directory {:?}", opts.job, opts.qdopts.qopts.queuedir ), } Ok(()) } /// Print the next sequence number pub fn cmd_queuegetnext(opts: &QueueOpts) -> Result<(), anyhow::Error> { let seqfn = jobqueue::get_seqfile(&opts.queuedir); let mut lock = seqfile::prepare_seqfile_lock(&seqfn, false)?; let seqf = seqfile::SeqFile::open(&seqfn, &mut lock)?; println!("{}", seqf.get_next()); Ok(()) } /// Set the next sequence number pub fn cmd_queuesetnext(opts: &QueueSetSeqOpts) -> Result<(), anyhow::Error> { let seqfn = jobqueue::get_seqfile(&opts.qopts.queuedir); let mut lock = seqfile::prepare_seqfile_lock(&seqfn, false)?; let mut seqf = seqfile::SeqFile::open(&seqfn, &mut lock)?; seqf.set(opts.nextid)?; Ok(()) } filespooler-1.2.3/src/cmd.rs000064400000000000000000000105731046102023000140600ustar 00000000000000/*! Handling CLI commands */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ use crate::cmd::cmd_exec::{ProcessOpts, QueueProcessOpts}; use crate::cmd::cmd_prepare::{PrepareOpts, SFOpts, SFSetOpts}; use crate::cmd::cmd_queue::{ QueueInitOpts, QueueJobOpts, QueueOpts, QueueOptsWithDecoder, QueueSetSeqOpts, }; use clap::{Args, Parser, Subcommand}; use tracing::*; use uuid::Uuid; pub mod cmd_exec; pub mod cmd_info; pub mod cmd_prepare; pub mod cmd_queue; /// Sub-commands #[derive(Debug, Subcommand)] pub enum Command { /// Display a UUID to stdout GenUUID, /// Display a fspl-*.fspl filename to stdout GenFilename, /// Prepare a command for transmission, writing to stdout Prepare(PrepareOpts), /// Prepare a NOP packet for transmission, writing to stdout PrepareNOP(SFOpts), /// Prepare a fail packet for transmission, writing to stdout PrepareFail(SFOpts), /// Get the next sequence number for prepare PrepareGetNext(SFOpts), /// Set the next sequence number for prepare PrepareSetNext(SFSetOpts), /// Process a queue QueueProcess(QueueProcessOpts), /// Initialize a queue QueueInit(QueueInitOpts), /// Show information about a specific job ID in a queue QueueInfo(QueueJobOpts), /// Save a packet from stdin to a queue QueueWrite(QueueOpts), /// Gets information about the jobs in a queue QueueLs(QueueOptsWithDecoder), /// Prints the next sequence number for the queue QueueGetNext(QueueOpts), /// Sets the next sequence number for the queue QueueSetNext(QueueSetSeqOpts), /// Dump the payload of a job to stdout QueuePayload(QueueJobOpts), /// Get information about a packet piped to filespooler on stdin StdinInfo, /// Dumps the payload from the packet on stdin to stdout StdinPayload, /// Process a command from a job packet read from stdin StdinProcess(ProcessOpts), /// Show the license information ShowLicense, } #[derive(Debug, Args)] pub struct GlobalOpts { /// Gives the logging (to stderr) level. Default is INFO. Options /// in order from most to least info: TRACE, DEBUG, INFO, WARN, ERROR. #[arg(long, short, value_name = "LEVEL", default_value_t = Level::INFO)] pub log_level: Level, } #[derive(Parser, Debug)] #[command(author, version, about)] pub struct Cli { #[clap(flatten)] pub globalopts: GlobalOpts, #[clap(subcommand)] pub command: Command, } pub fn parse() -> Cli { Cli::parse() } pub static LICENSE: &str = "\ Filespooler Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . "; pub fn cmd_showlicense() -> Result<(), anyhow::Error> { println!("{}", LICENSE); Ok(()) } pub fn cmd_genuuid() -> Result<(), anyhow::Error> { let mut buf = Uuid::encode_buffer(); let uuid = Uuid::new_v4().hyphenated().encode_lower(&mut buf); println!("{}", uuid); Ok(()) } pub fn cmd_genfilename() -> Result<(), anyhow::Error> { let mut buf = Uuid::encode_buffer(); let uuid = Uuid::new_v4().hyphenated().encode_lower(&mut buf); println!("fspl-{}.fspl", uuid); Ok(()) } filespooler-1.2.3/src/exec.rs000064400000000000000000000063621046102023000142420ustar 00000000000000/*! 
Tools to execute (process) a job */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use anyhow; use std::ffi::{OsStr, OsString}; use std::os::unix::ffi::OsStrExt; use std::os::unix::process::ExitStatusExt; use std::process::{Command, ExitStatus, Stdio}; use std::time::Duration; use tracing::*; use wait_timeout::ChildExt; /** Low-level code to execute a job. See [`crate::cmd::cmd_exec`] for higher-level interfaces. */ pub fn exec_job>( command: &OsString, params: &[OsString], env: Vec<(Vec, Vec)>, payload: Option, stdoutput: Stdio, stderr: Stdio, timeoutsecs: Option, ) -> Result { debug!("Preparing to run {:?} with params {:?}", command, params); let mut command = Command::new(command); command .args(params) .envs( env.iter() .map(|(k, v)| (OsStr::from_bytes(k), OsStr::from_bytes(v))), ) .stdout(stdoutput) .stderr(stderr); if let Some(payload) = payload { command.stdin(payload); } let mut child = command.spawn()?; debug!("Command PID {} started successfully", child.id()); // Explicitly drop to release held FDs. std::mem::drop(command); let exitstatus = if let Some(timeout) = timeoutsecs { debug!("Waiting up to {} seconds for command to exit", timeout); match child.wait_timeout(Duration::from_secs(timeout))? { None => { // Timeout elapsed without the child exiting debug!("Command timed out; sending termination signal"); child.kill()?; // Now give it a chance to die std::thread::sleep(Duration::from_millis(500)); match child.try_wait()? { Some(r) => { debug!("Command terminated after termination signal"); r } None => { debug!("Command didn't terminate even after signal; proceeding with an error report anyway"); ExitStatus::from_raw(0x7f) // Fake an error exit status since the child didn't die } } } Some(r) => r, // Child did exit } } else { debug!("Waiting indefinitely for command to exit"); child.wait()? }; if exitstatus.success() { debug!("Command exited successfully with status {:?}", exitstatus); } else { error!("Command exited abnormally with status {:?}", exitstatus); } Ok(exitstatus) } filespooler-1.2.3/src/header.rs000064400000000000000000000264321046102023000145460ustar 00000000000000/*! The file header and metadata. See the description of the file format at [`FSPrefix`]. [`crate::jobfile`] has thin wrappers around some of these functions. */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ use anyhow::ensure; use bytes::{Buf, BufMut, Bytes, BytesMut}; use chrono::offset::LocalResult; use chrono::prelude::*; use crc32fast; use rmp_serde; use serde::{Deserialize, Serialize}; use std::ffi::OsString; use std::io::{Read, Write}; use std::os::unix::ffi::OsStrExt; /// The core metadata about file spooler jobs. /// /// A file on-disk containts a [`FSPrefix`] followed by metadata. #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct FSMetaV1 { /// The sequence number of this job. pub seq: u64, /// Timestamp at creation (Unix epoch based, (seconds, nanos)) pub ctime: (i64, u32), /// Command type pub cmd_type: CommandTypeV1, } /// The type of packet this is. #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub enum CommandTypeV1 { /// A regular command Command(CommandDataV1), /// A no-op (always succeeds) NOP, /// A failure (always fails) Fail, } /// Metadata for a command #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct CommandDataV1 { /// Parameters to pass to the command pub params: Vec>, /// Environment variables to set pub env: Vec<(Vec, Vec)>, } pub const ENV_PREFIX: &[u8] = b"FSPL_SET_"; impl FSMetaV1 { /// Renders in a shell-type format. Will include the filename /// if available. pub fn render( &self, filename: Option<&OsString>, queuedir: Option<&OsString>, out: &mut W, ) -> Result<(), anyhow::Error> { for (key, val) in self.get_envvars(filename, queuedir).into_iter() { out.write_all(&key)?; out.write_all(b"=")?; out.write_all(&val)?; out.write_all(b"\n")?; } Ok(()) } /// Get settings suitable for environment variables. Also will include /// a filename if it is available. pub fn get_envvars( &self, filename: Option<&OsString>, queuedir: Option<&OsString>, ) -> Vec<(Vec, Vec)> { fn push_vu8(v: &mut Vec<(Vec, Vec)>, key: &str, value: Vec) { v.push((key.as_bytes().to_vec(), value)); } fn push_str(v: &mut Vec<(Vec, Vec)>, key: &str, value: &str) { push_vu8(v, key, value.as_bytes().to_vec()); } let mut retval: Vec<(Vec, Vec)> = Vec::new(); push_str(&mut retval, "FSPL_SEQ", &self.seq.to_string()); push_str(&mut retval, "FSPL_CTIME_SECS", &self.ctime.0.to_string()); push_str(&mut retval, "FSPL_CTIME_NANOS", &self.ctime.1.to_string()); push_str( &mut retval, "FSPL_CTIME_RFC3339_UTC", &self.get_datestring_rfc3339_utc(), ); push_str( &mut retval, "FSPL_CTIME_RFC3339_LOCAL", &self.get_datestring_rfc3339_local(), ); if let Some(filename) = filename { push_vu8( &mut retval, "FSPL_JOB_FILENAME", Vec::from(filename.as_bytes()), ); } if let Some(queuedir) = queuedir { push_vu8( &mut retval, "FSPL_JOB_QUEUEDIR", Vec::from(queuedir.as_bytes()), ); } if let (Some(filename), Some(queuedir)) = (filename, queuedir) { let mut fullpath = Vec::from(queuedir.as_bytes()); fullpath.extend(b"/jobs/"); fullpath.extend(filename.as_bytes()); push_vu8(&mut retval, "FSPL_JOB_FULLPATH", fullpath); } if let CommandTypeV1::Command(cdata) = &self.cmd_type { for (param, num) in cdata.params.iter().zip(1..) 
{ push_vu8(&mut retval, &format!("FSPL_PARAM_{}", num), param.clone()); } for (param, val) in cdata.env.iter() { let mut key = Vec::from(ENV_PREFIX); key.append(&mut param.clone()); retval.push((key, val.clone())); } } retval } /// Gets a chrono DateTime for the timestamp pub fn get_datetime_utc(&self) -> DateTime { let nutc = NaiveDateTime::from_timestamp_opt(self.ctime.0, self.ctime.1) .expect("Invalid timestamp in struct"); DateTime::from_utc(nutc, Utc) } /// Gets a chrono Datetime for the timestamp pub fn get_datetime_local(&self) -> DateTime { match Local.timestamp_opt(self.ctime.0, self.ctime.1) { LocalResult::None => panic!("Invalid timestamp in struct"), LocalResult::Single(x) => x, LocalResult::Ambiguous(x, _) => x, // Ugh, is this even possible? } } /// Gets a chrono DateTime for the timestamp /// Renders the timestamp as RFC3339 in UTC pub fn get_datestring_rfc3339_utc(&self) -> String { let dtutc = self.get_datetime_utc(); dtutc.to_rfc3339_opts(SecondsFormat::Secs, true) } /// Gets the local time string for the timestamp pub fn get_datestring_rfc3339_local(&self) -> String { let dtlocal = self.get_datetime_local(); dtlocal.to_rfc3339_opts(SecondsFormat::Secs, false) } } impl Default for FSMetaV1 { fn default() -> Self { let ctime = Utc::now(); Self { seq: 1, cmd_type: CommandTypeV1::NOP, ctime: (ctime.timestamp(), ctime.timestamp_subsec_nanos()), } } } /// The very first bytes of the file. pub const FILETAG: &str = "*FILESPOOLER"; /** The on-disk first few bytes of the file, consisting of: - The [`FILETAG`] - A u16 version - A u64 length of the [`FSMetaV1`] - A CRC32 of the [`FSMetaV1`] - A CRC32 of this prefix (everything from start-of-file to immediately before this point) After this, the FSMetaV1 can be safely read and validated based on the length and CRC. Immediately after the FSMetaV1, the payload (if any) is present. */ #[derive(Debug, Eq, PartialEq, Clone)] pub struct FSPrefix { pub version: u16, pub meta: FSMetaV1, } impl FSPrefix { pub fn prefixlen() -> usize { FILETAG.as_bytes().len() + 2 + 8 + 4 + 4 } /// Encodes self, returning the encoded prefix and meta. /// You probably want [`FSPrefix::write`] instead. pub fn encode(&self) -> Result<(Bytes, Vec), anyhow::Error> { let bufsize = FSPrefix::prefixlen(); let crcsize = bufsize - 4; let mut prefix = BytesMut::with_capacity(bufsize); prefix.put_slice(FILETAG.as_bytes()); prefix.put_u16(self.version); let metavec = rmp_serde::encode::to_vec_named(&self.meta)?; prefix.put_u64(metavec.len() as u64); // Now the metacrc prefix.put_u32(crc32fast::hash(metavec.as_ref())); let prefixcrc = crc32fast::hash(&prefix[0..crcsize]); prefix.put_u32(prefixcrc); assert_eq!(bufsize, prefix.len()); Ok((prefix.freeze(), metavec)) } /// Reads, precisely, the prefix and metadata from the input reader. /// Does not consume any of the payload. 
pub fn read(input: &mut R) -> Result { let bufsize = FSPrefix::prefixlen(); let crcsize = bufsize - 4; let mut prefix = BytesMut::with_capacity(bufsize); prefix.resize(bufsize, 0); input.read_exact(&mut prefix)?; let mut prefix = prefix.freeze(); let crcpart = prefix.slice(0..crcsize); let taglen = FILETAG.as_bytes().len(); ensure!( *FILETAG.as_bytes() == prefix[0..taglen], "Input doesn't appear to be a filespooler file" ); prefix.advance(taglen); let version = prefix.get_u16(); ensure!(version == 1, "Input version {} != 1", version); let metalen = prefix.get_u64(); let metacrc = prefix.get_u32(); let prefixcrc = prefix.get_u32(); ensure!( prefixcrc == crc32fast::hash(&crcpart), "CRC-32 mismatch on file prefix" ); let mut metabuf = BytesMut::with_capacity(metalen as usize); metabuf.resize(metalen as usize, 0u8); input.read_exact(&mut metabuf)?; assert_eq!(metabuf.len(), metalen as usize); ensure!( metacrc == crc32fast::hash(&metabuf), "CRC-32 mismatch on metadata" ); let meta: FSMetaV1 = rmp_serde::decode::from_slice(&metabuf)?; Ok(Self { version: 1, meta }) } /// Precisely writes the prefix and metadata to the output. Does not /// emit any payload. pub fn write(&self, output: &mut W) -> Result<(), anyhow::Error> { let (header, metadata) = self.encode()?; output.write_all(&header)?; output.write_all(&metadata)?; Ok(()) } } impl From for FSPrefix { fn from(meta: FSMetaV1) -> Self { Self { version: 1, meta } } } impl From for FSMetaV1 { fn from(prefix: FSPrefix) -> Self { prefix.meta } } impl Default for FSPrefix { fn default() -> Self { Self { version: 1, meta: FSMetaV1::default(), } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_default() { let pref = FSPrefix::default(); let mut buf = vec![].writer(); pref.write(&mut buf).unwrap(); let buf = buf.into_inner(); let mut reader = buf.reader(); let pref2 = FSPrefix::read(&mut reader).unwrap(); assert_eq!(pref, pref2); } fn test_modified(offset: usize, value: u8, message: &str) { let pref = FSPrefix::default(); let mut buf = vec![].writer(); pref.write(&mut buf).unwrap(); let mut buf = buf.into_inner(); buf[offset] = value; let mut reader = buf.reader(); let err = FSPrefix::read(&mut reader).unwrap_err(); assert_eq!(message, format!("{}", err)); } #[test] fn test_header() { test_modified(0, 2, "Input doesn't appear to be a filespooler file"); } #[test] fn test_crc() { test_modified(11, 2, "Input doesn't appear to be a filespooler file"); test_modified(13, 2, "Input version 2 != 1"); test_modified(14, 2, "CRC-32 mismatch on file prefix"); test_modified(28, 2, "CRC-32 mismatch on file prefix"); test_modified(29, 2, "CRC-32 mismatch on file prefix"); test_modified(30, 2, "CRC-32 mismatch on metadata"); test_modified(31, 2, "CRC-32 mismatch on metadata"); } } filespooler-1.2.3/src/jobfile.rs000064400000000000000000000060211046102023000147200ustar 00000000000000/*! Tools for interacting with job files on disk. These are thin wrappers around [`FSMetaV1`] and [`FSPrefix`] impls. */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program. If not, see . */ use crate::header::{FSMetaV1, FSPrefix}; use anyhow::bail; use std::io::{Read, Write}; use tracing::*; /** Write a jobfile header. If there is no payload, this is all that is needed. */ pub fn write_jobfile_header(meta: FSMetaV1, writer: &mut W) -> Result<(), anyhow::Error> { let fsp = FSPrefix::from(meta); fsp.write(writer)?; Ok(()) } /// Write a header and payload. Returns the number of payload /// bytes written. pub fn write_jobfile_header_payload( meta: FSMetaV1, reader: &mut R, writer: &mut W, ) -> Result { write_jobfile_header(meta, writer)?; Ok(std::io::copy(reader, writer)?) } /** Read a jobfile. Stops before the payload. */ pub fn read_jobfile_header(input: &mut R) -> Result { let fsp = match FSPrefix::read(input) { Ok(f) => f, Err(e) => { debug!("Error reading FSPrefix: {}", e); bail!(e); } }; trace!( "Loaded header version {}, seq {}", fsp.version, fsp.meta.seq ); Ok(fsp.into()) } #[cfg(test)] mod tests { use super::*; use std::io::Read; #[test] fn basic() { let meta = FSMetaV1::default(); let mut file = Vec::new(); write_jobfile_header(meta.clone(), &mut file).unwrap(); let mut reader = file.as_slice(); let newone = read_jobfile_header(&mut reader).unwrap(); assert_eq!(meta, newone); // Make sure there's no payload. let mut buf = [0u8; 5]; assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn payload() { let payload = b"This is a test payload"; let mut reader = &payload[0..]; let meta = FSMetaV1::default(); let mut file = Vec::new(); write_jobfile_header_payload(meta.clone(), &mut reader, &mut file).unwrap(); let mut reader = file.as_slice(); let newone = read_jobfile_header(&mut reader).unwrap(); assert_eq!(meta, newone); // Verify the payload. let mut buf = Vec::new(); std::io::copy(&mut reader, &mut buf).unwrap(); assert_eq!(payload, buf.as_slice()); } } filespooler-1.2.3/src/jobqueue.rs000064400000000000000000000220461046102023000151320ustar 00000000000000/*! The on-disk job queue */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use crate::header::FSMetaV1; use crate::jobfile; use crate::seqfile; use anyhow::bail; use std::collections::BTreeMap; use std::ffi::OsString; use std::fs::{create_dir, DirEntry, File}; use std::io::{Read, Write}; use std::os::unix::ffi::OsStrExt; use std::path::PathBuf; use std::process::{Child, ChildStdout, Command, Stdio}; use tracing::*; use uuid::Uuid; /// Copies data from the given source to a new file in the queue. Does /// not need to obtain a lock to do so. 
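/// The data is first written to a `tmp-*.tmp` file inside the `jobs`
/// subdirectory, flushed and synced, and only then renamed to its final
/// `fspl-*.fspl` name, so readers never observe a partially written job.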
pub fn queuewrite(input: &mut R, queuedir: &OsString) -> Result<(), anyhow::Error> { let mut buf = Uuid::encode_buffer(); let uuid = Uuid::new_v4().hyphenated().encode_lower(&mut buf); let fnpart: String = format!("fspl-{}.fspl", uuid); let tmpfnpart: String = format!("tmp-{}.tmp", fnpart); let tmppath = PathBuf::from(queuedir).join("jobs").join(tmpfnpart); debug!("Copying source to temporary file {:?}", tmppath); let mut tmpfile = File::create(&tmppath)?; let bytes = std::io::copy(input, &mut tmpfile)?; tmpfile.flush()?; tmpfile.sync_all()?; // Force it to close and write. std::mem::drop(tmpfile); let finalpath = PathBuf::from(queuedir).join("jobs").join(fnpart); debug!( "Wrote {} bytes. Now renaming {:?} to {:?}", bytes, tmppath, finalpath ); std::fs::rename(tmppath, finalpath)?; Ok(()) } /// Gets the name of the seqfile, given the path to the queue. pub fn get_seqfile(queuedir: &OsString) -> OsString { let mut seqfile = queuedir.clone(); seqfile.push("/nextseq"); seqfile } /// Create a queue directory structure. pub fn queueinit(queuedir: &OsString, append_only: bool) -> Result<(), anyhow::Error> { let path = PathBuf::from(queuedir); create_dir(&path)?; create_dir(path.join("jobs"))?; if !append_only { let seqfn = get_seqfile(queuedir); let mut lock = seqfile::prepare_seqfile_lock(&seqfn, true)?; seqfile::SeqFile::open(&seqfn, &mut lock)?; } Ok(()) } /// Scan a jobs directory, yielding an iterator of (filename, metadata). Any /// files in the directory that do not match the pattern `fspl-*.fspl` will be /// ignored. Any files that do match the pattern but do not possess a valid /// [`crate::header::FSPrefix`] will be ignored. /// /// Multiple jobs with the same ID are acceptable here. pub fn scanqueue( queuedir: &OsString, decoder: &Option, ) -> Result>, anyhow::Error> { fn procdir( queuedir: &OsString, decoder: &Option, entry: std::io::Result, ) -> Result<(OsString, FSMetaV1), anyhow::Error> { let e = entry?; let filename = e.file_name(); if !(filename.as_bytes().starts_with(b"fspl-") && filename.as_bytes().ends_with(b".fspl")) { debug!("Ignoring file {:?}", filename); bail!("File {:?} doesn't match our specs", filename); } debug!( "Queue scan reading header from {:?}", queue_genfilename(queuedir, &filename) ); let mut input = queue_openjob(queuedir, &filename, decoder)?; let meta = jobfile::read_jobfile_header(&mut input.reader.take().unwrap().as_read())?; Ok((filename, meta)) } let dirpath = PathBuf::from(queuedir).join("jobs"); let dir = std::fs::read_dir(&dirpath)?; let queuedir = queuedir.clone(); let decoder = decoder.clone(); Ok(dir.map(move |e| procdir(&queuedir, &decoder, e))) } /// Scan a jobs directory, returning a map from job id to (filename, FSMetaV1). /// Calls [`scanqueue`] internally, and the comments about valid files there /// apply to this function as well. /// /// Multiple jobs with the same sequence ID are not acceptable here and will generate /// an Err return. #[instrument(level = "debug")] pub fn scanqueue_map( queuedir: &OsString, decoder: &Option, ) -> Result, anyhow::Error> { let mut retval = BTreeMap::new(); // flatten() here removes the Err cases. // Called because an error here could be permission denied, file we don't know about, etc. Just ignore. 
for (filename, meta) in scanqueue(queuedir, decoder)?.flatten() { if let Some(prev) = retval.insert(meta.seq, (filename.clone(), meta.clone())) { bail!( "Attempted to process {:?} with seq {}, which was already seen in {:?}", filename, meta.seq, prev.0 ); } } Ok(retval) } /// Given a queue directory and a filename from [`scanqueue] or [`scanqueue_map`], /// gives the ultimate filename to use for opening. pub fn queue_genfilename(queuedir: &OsString, filename: &OsString) -> PathBuf { PathBuf::from(queuedir).join("jobs").join(filename) } /// The decoder handle pub enum DecoderHandle { DHFile(Box), DHChildStdout(Box), } impl From for DecoderHandle { fn from(f: File) -> Self { DecoderHandle::DHFile(Box::new(f)) } } impl From for DecoderHandle { fn from(f: ChildStdout) -> Self { DecoderHandle::DHChildStdout(Box::new(f)) } } impl From for Stdio { fn from(dh: DecoderHandle) -> Stdio { match dh { DecoderHandle::DHFile(f) => (*f).into(), DecoderHandle::DHChildStdout(f) => (*f).into(), } } } impl DecoderHandle { /// Converts to a Read. pub fn as_read(self) -> Box { match self { DecoderHandle::DHFile(f) => f, DecoderHandle::DHChildStdout(f) => f, } } /// Passes an underlying [`Read`] to a closure without consuming self. pub fn with_read(&mut self, func: fn(&mut dyn Read) -> T) -> T { match self { DecoderHandle::DHFile(o) => func(&mut *o), DecoderHandle::DHChildStdout(o) => func(&mut *o), } } } /// A struct representing a possible decoder. When dropped, /// kills the decoder process and tries to wait for it to prevent /// zombies and runaway decoder processes. pub struct PossibleDecoder { pub child: Option, /// The reader handle. It's an option so you can take it and move it out of here. pub reader: Option, } impl Drop for PossibleDecoder { fn drop(&mut self) { if let Some(mut child) = self.child.take() { trace!("Killing decoder"); let _ = child.kill(); trace!("Waiting for decoder to terminate"); let res = child.wait(); trace!("Decoder termination status {:?}", res); } } } /// Given a queue directory and a filename, opens the file for reading and returns /// the file handle. This is the central function used by all others that /// read or scan the queue. pub fn queue_openjob( queuedir: &OsString, filename: &OsString, decoder: &Option, ) -> Result { let genfilename = queue_genfilename(queuedir, filename); trace!("Opening queue file at {:?}", genfilename); let file = File::open(genfilename)?; match decoder { None => Ok(PossibleDecoder { child: None, reader: Some(file.into()), }), Some(decodecmd) => { let (child, childstdout) = with_decoder(decodecmd, file)?; Ok(PossibleDecoder { child: Some(child), reader: Some(childstdout.into()), }) } } } /** Wrap up input in a decoder. Returns a Child to wait on, and a ChildStdout that holds the output. */ #[instrument(level = "debug", skip(input))] pub fn with_decoder>( decoder: &OsString, input: T, ) -> Result<(Child, ChildStdout), anyhow::Error> { let args = [OsString::from("-c"), decoder.clone()]; let shell = getshell(); debug!("Preparing to invoke decoder: {:?} {:?}", shell, args); let mut child = Command::new(shell) .args(args) .stdin(input) .stdout(Stdio::piped()) .spawn()?; debug!("Decoder PID {} started successfully", child.id()); let stdout = child.stdout.take().expect("Missing stdout in child"); Ok((child, stdout)) } /// Get the name of the user's shell. 
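/// Returns the value of `$SHELL`, falling back to `/bin/sh` when it is unset.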
pub fn getshell() -> OsString { match std::env::var_os("SHELL") { Some(x) => x, None => OsString::from("/bin/sh"), } } filespooler-1.2.3/src/lib.rs000064400000000000000000000015721046102023000140620ustar 00000000000000/*! Tools for managing a file-based job queue Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ pub mod cmd; pub mod exec; pub mod header; pub mod jobfile; pub mod jobqueue; pub mod logging; pub mod seqfile; pub mod util; filespooler-1.2.3/src/logging.rs000064400000000000000000000022411046102023000147340ustar 00000000000000/*! Logging initialization */ /* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ use tracing::*; use tracing_subscriber::FmtSubscriber; /// Initializes the Tracing logging. /// /// May panic of log setup fails (shouldn't be possible generally) pub fn init_tracing(max_level: Level) { let subscriber = FmtSubscriber::builder() .with_max_level(max_level) .with_writer(std::io::stderr) .without_time() .finish(); tracing::subscriber::set_global_default(subscriber).unwrap(); } filespooler-1.2.3/src/main.rs000064400000000000000000000044551046102023000142430ustar 00000000000000/* Copyright (C) 2022 John Goerzen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
filespooler-1.2.3/src/main.rs000064400000000000000000000044551046102023000142430ustar 00000000000000
/*
Copyright (C) 2022 John Goerzen

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

use filespooler::cmd;
use filespooler::cmd::cmd_exec::*;
use filespooler::cmd::cmd_info::*;
use filespooler::cmd::cmd_prepare::*;
use filespooler::cmd::cmd_queue::*;
use filespooler::logging::init_tracing;
use tracing::*;

fn main() -> Result<(), anyhow::Error> {
    let cliopts = cmd::parse();
    init_tracing(cliopts.globalopts.log_level);
    trace!("Parsed options are {:?}", cliopts);
    match cliopts.command {
        cmd::Command::Prepare(po) => cmd_prepare(&po),
        cmd::Command::PrepareNOP(po) => cmd_preparenop(&po),
        cmd::Command::PrepareFail(po) => cmd_preparefail(&po),
        cmd::Command::PrepareGetNext(po) => cmd_preparegetnext(&po),
        cmd::Command::PrepareSetNext(po) => cmd_preparesetnext(&po),
        cmd::Command::StdinInfo => cmd_infostdin(),
        cmd::Command::StdinPayload => cmd_payloadstdin(),
        cmd::Command::QueueInit(qopts) => cmd_queueinit(&qopts),
        cmd::Command::QueueWrite(qopts) => cmd_queuewrite(&qopts),
        cmd::Command::QueueLs(qopts) => cmd_queuels(&qopts),
        cmd::Command::QueueInfo(qopts) => cmd_queueinfo(&qopts),
        cmd::Command::QueuePayload(qopts) => cmd_queuepayload(&qopts),
        cmd::Command::StdinProcess(opts) => cmd_execstdin(&opts),
        cmd::Command::QueueProcess(opts) => cmd_execqueue(&opts),
        cmd::Command::QueueGetNext(opts) => cmd_queuegetnext(&opts),
        cmd::Command::QueueSetNext(opts) => cmd_queuesetnext(&opts),
        cmd::Command::ShowLicense => cmd::cmd_showlicense(),
        cmd::Command::GenUUID => cmd::cmd_genuuid(),
        cmd::Command::GenFilename => cmd::cmd_genfilename(),
    }
}
filespooler-1.2.3/src/seqfile.rs000064400000000000000000000136041046102023000147430ustar 00000000000000
/*! Handling the next-id sequence file

This code is used both for the queue and for the prepare step. */

/*
Copyright (C) 2022 John Goerzen

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

use anyhow::Context;
use fd_lock;
use std::ffi::OsString;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader, ErrorKind, Write};
use std::path::PathBuf;
use tracing::*;

/// The main file for recording sequence numbers.
///
/// Used on both the sender and recipient.
///
/// Contains one line, containing a decimal next-step counter terminated by \n.
#[derive(Debug)]
pub struct SeqFile<'a> {
    path: PathBuf,
    next: u64,

    #[allow(dead_code)]
    lock: fd_lock::RwLockWriteGuard<'a, File>,
}

/// Call this function to obtain a lock, which you can then pass to [`SeqFile::open`].
/// Due to how the lock works, it has to be held outside the SeqFile itself.
#[instrument(level = "debug")]
pub fn prepare_seqfile_lock(
    path: &OsString,
    create_if_missing: bool,
) -> Result<fd_lock::RwLock<File>, anyhow::Error> {
    let mut lockpath = path.clone();
    lockpath.push(".lock");
    debug!("Attempting to prepare lock at {:?}", lockpath);
    let lockfile = OpenOptions::new()
        .read(true)
        .write(true)
        .create(create_if_missing)
        .open(&lockpath)
        .context(format!(
            "Opening lock file at {:?}; if missing, this may be an append-only queue",
            lockpath
        ))?;
    let lock = fd_lock::RwLock::new(lockfile);
    Ok(lock)
}
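
// Illustrative sketch (not part of the original API; error handling kept
// minimal): the intended pattern is to create the lock first, keep it alive
// in the caller, and lend it to SeqFile::open for as long as the SeqFile is
// in use.
#[allow(dead_code)]
fn example_reserve_next_seq(path: &OsString) -> Result<u64, anyhow::Error> {
    let mut lock = prepare_seqfile_lock(path, true)?;
    let mut sf = SeqFile::open(path, &mut lock)?;
    // `increment` persists the new counter and returns the value just reserved.
    sf.increment()
}
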
impl<'a> SeqFile<'a> {
    /// Opens the seqfile. Pass a lock that has been created for it by
    /// [`prepare_seqfile_lock`].
    ///
    /// If the file does not exist, it will be created and set to `1`.
    ///
    /// If it does exist but cannot be parsed, there will be an error.
    ///
    /// If it does exist and can be parsed, its stored value is used as the next counter.
    #[instrument(level = "debug", skip(lock))]
    pub fn open(
        path: &OsString,
        lock: &'a mut fd_lock::RwLock<File>,
    ) -> Result<Self, anyhow::Error> {
        let path = PathBuf::from(path);
        debug!("Attempting to acquire write lock");
        let retval = Self {
            path,
            lock: lock.try_write()?,
            next: 1,
        };

        debug!("Attempting to open file {:?}", retval.path);
        let file = match File::open(&retval.path) {
            Err(e) => {
                if e.kind() == ErrorKind::NotFound {
                    retval.write()?;
                    return Ok(retval);
                } else {
                    return Err(anyhow::Error::from(e));
                }
            }
            Ok(f) => f,
        };

        let next = SeqFile::read(file)?;
        Ok(Self { next, ..retval })
    }

    /// Sets the next counter, returning its previous value.
    pub fn set(&mut self, next: u64) -> Result<u64, anyhow::Error> {
        let retval = self.next;
        self.next = next;
        self.write()?;
        Ok(retval)
    }

    /// Increments the next counter, returning its previous value.
    pub fn increment(&mut self) -> Result<u64, anyhow::Error> {
        self.set(self.next + 1)
    }

    /// Gets the current value of the counter.
    pub fn get_next(&self) -> u64 {
        self.next
    }

    fn read(file: File) -> Result<u64, anyhow::Error> {
        let mut br = BufReader::new(file);
        let mut buf = String::new();
        br.read_line(&mut buf)?;
        let res: u64 = buf.trim().parse()?;
        debug!("Read next ID {} from seqfile", res);
        Ok(res)
    }

    fn write(&self) -> Result<(), anyhow::Error> {
        let mut tmppath = OsString::from(self.path.clone());
        tmppath.push(".temp");
        trace!("Writing {} to {:?}", self.next, tmppath);
        let mut file = File::create(PathBuf::from(&tmppath))?;
        writeln!(file, "{}", self.next)?;
        file.flush()?;
        file.sync_all()?;
        std::mem::drop(file);

        trace!("Renaming {:?} to {:?}", &tmppath, self.path);
        std::fs::rename(tmppath, &self.path)?;
        debug!("Sequence {} written to {:?}", self.next, self.path);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn basic() {
        let dir = tempdir().unwrap();
        let path = OsString::from(dir.path().join("seqfile"));
        {
            let mut lock = prepare_seqfile_lock(&path, true).unwrap();
            let mut sf = SeqFile::open(&path, &mut lock).unwrap();
            assert_eq!(sf.get_next(), 1);
            sf.increment().unwrap();
            sf.increment().unwrap();
            assert_eq!(sf.get_next(), 3);

            // We shouldn't be able to access it while holding the lock.
            let mut lock2 = prepare_seqfile_lock(&path, false).unwrap();
            let _ = SeqFile::open(&path, &mut lock2).unwrap_err();
        }

        // OK, having dropped it, try again.
        {
            let mut lock = prepare_seqfile_lock(&path, false).unwrap();
            let sf = SeqFile::open(&path, &mut lock).unwrap();
            assert_eq!(sf.get_next(), 3);
        }

        // Truncate it and see what happens.
        File::create(&path).unwrap();
        {
            let mut lock = prepare_seqfile_lock(&path, false).unwrap();
            let _sf = SeqFile::open(&path, &mut lock).unwrap_err();
        }
    }
}
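
#[cfg(test)]
mod format_example {
    //! Illustrative example (not part of the original test suite): demonstrates
    //! the on-disk format described in the module documentation, namely a
    //! single decimal counter terminated by a newline.
    use super::*;
    use std::ffi::OsString;
    use tempfile::tempdir;

    #[test]
    fn seqfile_is_one_decimal_line() {
        let dir = tempdir().unwrap();
        let path = OsString::from(dir.path().join("seqfile"));
        let mut lock = prepare_seqfile_lock(&path, true).unwrap();
        let mut sf = SeqFile::open(&path, &mut lock).unwrap();
        sf.set(42).unwrap();
        // The file now holds exactly the counter plus a trailing newline.
        assert_eq!(std::fs::read_to_string(&path).unwrap(), "42\n");
    }
}
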
filespooler-1.2.3/src/util.rs000064400000000000000000000025461046102023000142710ustar 00000000000000
/*! Utility functions */

/*
Copyright (C) 2022 John Goerzen

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

use std::fs::File;
use std::io::stdin;
use std::os::unix::io::{AsRawFd, FromRawFd};

/// stdin is buffered by default in Rust, and you can't change it. Since
/// we need to precisely read the header before letting a subprocess
/// handle the payload in stdin-process, we have to use trickery. Bleh.
///
/// Take care not to let this value drop before spawning, because that would
/// cause stdin to be closed.
///
/// See: https://github.com/rust-lang/rust/issues/97855
pub fn get_unbuffered_stdin() -> File {
    let s = stdin();
    let locked = s.lock();
    let file = unsafe { File::from_raw_fd(locked.as_raw_fd()) };
    file
}
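
// Illustrative sketch (not part of the original module): splitting stdin
// between this process and a child. The 4-byte header size and the `cat`
// command are arbitrary examples; the crate's real header handling is done
// elsewhere.
#[allow(dead_code)]
fn example_split_stdin() -> Result<(), std::io::Error> {
    use std::io::Read;
    use std::process::{Command, Stdio};

    let mut stdin = get_unbuffered_stdin();

    // Read a small fixed-size header ourselves...
    let mut header = [0u8; 4];
    stdin.read_exact(&mut header)?;

    // ...then hand the same file descriptor to the child. Because the File is
    // unbuffered, nothing beyond the header has been consumed, so the child
    // picks up exactly where we stopped.
    let status = Command::new("cat")
        .stdin(Stdio::from(stdin))
        .stdout(Stdio::null())
        .status()?;
    assert!(status.success());
    Ok(())
}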