async-compression-0.4.13/.cargo_vcs_info.json0000644000000001360000000000100145750ustar { "git": { "sha1": "3337a1b8373bf5295a099e06628347163574d7a7" }, "path_in_vcs": "" }async-compression-0.4.13/.github/dependabot.yml000064400000000000000000000006611046102023000175600ustar 00000000000000# Dependabot dependency version checks / updates version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "daily" rebase-strategy: "disabled" - package-ecosystem: "cargo" directory: "/" versioning-strategy: "widen" schedule: interval: "daily" rebase-strategy: "disabled" async-compression-0.4.13/.github/workflows/base.yml000064400000000000000000000010441046102023000204160ustar 00000000000000name: base env: RUST_BACKTRACE: 1 jobs: test: name: cargo test runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/install-action@v2 with: { tool: cargo-nextest } - run: cargo --locked nextest run --workspace --all-features - run: cargo --locked test --workspace --doc --all-features on: merge_group: types: [checks_requested] pull_request: branches: [main] types: [opened, synchronize, reopened, ready_for_review] async-compression-0.4.13/.github/workflows/coverage.yml000064400000000000000000000011521046102023000212770ustar 00000000000000name: coverage env: RUST_BACKTRACE: 1 jobs: codecov: name: codecov runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/install-action@v2 with: { tool: cargo-tarpaulin } - run: cargo --locked tarpaulin --all-features -- --skip 'proptest::' - uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} on: push: branches: [main] pull_request: branches: [main] types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * 5' async-compression-0.4.13/.github/workflows/deny.yml000064400000000000000000000013441046102023000204460ustar 00000000000000name: deny env: RUST_BACKTRACE: 1 jobs: cargo-deny-advisories: name: cargo deny advisories runs-on: ubuntu-latest continue-on-error: true steps: - uses: actions/checkout@v4 - uses: EmbarkStudios/cargo-deny-action@v2 with: command: check advisories cargo-deny-licenses: name: cargo deny bans licenses sources runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Ignore dev-dependencies run: sed -i 's/\[dev-dependencies\]/[workaround-avoid-dev-deps]/g' Cargo.toml - uses: EmbarkStudios/cargo-deny-action@v2 with: command: check bans licenses sources on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.13/.github/workflows/docs.yml000064400000000000000000000007631046102023000204430ustar 00000000000000name: docs env: RUST_BACKTRACE: 1 jobs: docsrs: name: cargo doc --cfg docsrs runs-on: ubuntu-latest env: RUSTDOCFLAGS: '--cfg=docsrs -Dwarnings' steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly - run: cargo doc --all-features --no-deps on: merge_group: types: [checks_requested] pull_request: branches: [main] types: [opened, synchronize, reopened, ready_for_review] async-compression-0.4.13/.github/workflows/exhaustive.yml000064400000000000000000000071501046102023000216750ustar 00000000000000name: exhaustive env: RUST_BACKTRACE: 1 jobs: test: name: cargo test strategy: matrix: platform: - { toolchain: stable, target: i686-pc-windows-msvc, os: windows-latest } - { 
toolchain: stable, target: i686-unknown-linux-gnu, os: ubuntu-latest } - { toolchain: stable, target: x86_64-apple-darwin, os: macos-latest } - { toolchain: stable, target: x86_64-pc-windows-msvc, os: windows-latest } - { toolchain: stable, target: x86_64-unknown-linux-gnu, os: ubuntu-latest } runs-on: ${{ matrix.platform.os }} steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: ${{ matrix.platform.toolchain }} target: ${{ matrix.platform.target }} - uses: taiki-e/install-action@v2 with: { tool: cargo-nextest } - run: cargo --locked nextest run --workspace --all-features - run: cargo --locked test --workspace --doc --all-features min-versions-shallow: name: cargo test --shallow-minimal-versions runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: nightly } - name: Update to shallow minimal versions run: cargo update $( cargo metadata --all-features --format-version 1 | jq -r ' . as $root | .resolve.nodes[] | select(.id == $root.resolve.root) | .deps[].pkg | . as $dep | $root.packages[] | select(.id == $dep) | "-p", "\(.name):\(.version)" ' ) -Z minimal-versions - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: stable } - uses: taiki-e/install-action@v2 with: { tool: cargo-nextest } - run: cargo --locked nextest run --workspace --all-features - run: cargo --locked test --workspace --doc --all-features min-versions: name: cargo test minimal-versions runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: nightly } - name: Update to minimal versions run: cargo update -Z minimal-versions - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: stable } - uses: taiki-e/install-action@v2 with: { tool: cargo-nextest } - run: cargo --locked nextest run --workspace --all-features - run: cargo --locked test --workspace --doc --all-features check-features: name: cargo hack check --feature-powerset runs-on: ubuntu-latest env: RUSTFLAGS: -Dwarnings steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/install-action@v2 with: { tool: cargo-hack } - run: cargo hack check --workspace --feature-powerset --no-dev-deps --skip 'all,all-algorithms,all-implementations' check-test-features: name: cargo hack check --all-targets --feature-powerset runs-on: ubuntu-latest env: RUSTFLAGS: -Dwarnings steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/install-action@v2 with: { tool: cargo-hack } - run: cargo hack check --workspace --feature-powerset --all-targets --skip 'all,all-algorithms,all-implementations' on: merge_group: types: [checks_requested] pull_request: branches: [main] types: [opened, synchronize, reopened, ready_for_review] async-compression-0.4.13/.github/workflows/lint.yml000064400000000000000000000012751046102023000204600ustar 00000000000000name: lint env: RUST_BACKTRACE: 1 jobs: fmt: name: cargo fmt --check runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { components: rustfmt } - run: cargo fmt --all -- --check clippy: name: cargo clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { components: clippy } - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings on: merge_group: types: [checks_requested] pull_request: branches: 
[main] types: [opened, synchronize, reopened, ready_for_review] async-compression-0.4.13/.github/workflows/nightly.yml000064400000000000000000000020701046102023000211620ustar 00000000000000name: nightly env: RUST_BACKTRACE: 1 jobs: test: name: cargo +nightly test runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly - uses: taiki-e/install-action@v2 with: { tool: cargo-nextest } - run: cargo --locked nextest run --workspace --all-features - run: cargo --locked test --workspace --doc --all-features fmt: name: cargo +nightly fmt --check runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly components: rustfmt - run: cargo fmt --all -- --check clippy: name: cargo +nightly clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly components: clippy - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings on: schedule: - cron: '0 2 * * *' async-compression-0.4.13/.github/workflows/release.yml000064400000000000000000000011241046102023000211230ustar 00000000000000name: Release-plz permissions: pull-requests: write contents: write on: push: branches: - main jobs: release-plz: name: Release-plz runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable - name: Run release-plz uses: MarcoIeni/release-plz-action@v0.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} async-compression-0.4.13/.gitignore000064400000000000000000000003331046102023000153540ustar 00000000000000/target **/*.rs.bk # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock .DS_Store async-compression-0.4.13/.release-plz.toml000064400000000000000000000000341046102023000165600ustar 00000000000000[workspace] pr_draft = true async-compression-0.4.13/CHANGELOG.md000064400000000000000000000051101046102023000151730ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## Unreleased ## [0.4.13](https://github.com/Nullus157/async-compression/compare/v0.4.12...v0.4.13) - 2024-10-02 ### Feature - Update `brotli` dependency to to `7`. ## [0.4.12](https://github.com/Nullus157/async-compression/compare/v0.4.11...v0.4.12) - 2024-07-21 ### Feature - Enable customizing Zstd decoding parameters. ## [0.4.11](https://github.com/Nullus157/async-compression/compare/v0.4.10...v0.4.11) - 2024-05-30 ### Other - Expose total_in/total_out from underlying flate2 encoder types. 
## [0.4.10](https://github.com/Nullus157/async-compression/compare/v0.4.9...v0.4.10) - 2024-05-09 ### Other - *(deps)* update brotli requirement from 5.0 to 6.0 ([#274](https://github.com/Nullus157/async-compression/pull/274)) - Fix pipeline doc: Warn on unexpected cfgs instead of error ([#276](https://github.com/Nullus157/async-compression/pull/276)) - Update name of release-pr.yml - Create release.yml - Create release-pr.yml ## 0.4.9 - bump dep brotli from 4.0 to 5.0 ## 0.4.8 - bump dep brotli from 3.3 to 4.0 ## 0.4.7 - Flush available data in decoder even when there's no incoming input. ## 0.4.6 - Return errors instead of panicking in all encode and decode operations. ## 0.4.5 - Add `{Lzma, Xz}Decoder::with_mem_limit()` methods. ## 0.4.4 - Update `zstd` dependency to `0.13`. ## 0.4.3 - Implement `Default` for `brotli::EncoderParams`. ## 0.4.2 - Add top-level `brotli` module containing stable `brotli` crate wrapper types. - Add `BrotliEncoder::with_quality_and_params()` constructors. - Add `Deflate64Decoder` behind new crate feature `deflate64`. ## 0.4.1 - 2023-07-10 - Add `Zstd{Encoder,Decoder}::with_dict()` constructors. - Add `zstdmt` crate feature that enables `zstd-safe/zstdmt`, allowing multi-threaded functionality to work as expected. ## 0.4.0 - 2023-05-10 - `Level::Precise` variant now takes a `i32` instead of `u32`. - Add top-level `zstd` module containing stable `zstd` crate wrapper types. - Add `ZstdEncoder::with_quality_and_params()` constructors. - Update `zstd` dependency to `0.12`. - Remove deprecated `stream`, `futures-bufread` and `futures-write` crate features. - Remove Tokio 0.2.x and 0.3.x support (`tokio-02` and `tokio-03` crate features). ## 0.3.15 - 2022-10-08 - `Level::Default::into_zstd()` now returns zstd's default value `3`. - Fix endianness when reading the `extra` field of a gzip header. async-compression-0.4.13/Cargo.lock0000644000000601760000000000100125620ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "addr2line" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] name = "adler2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "alloc-no-stdlib" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] [[package]] name = "async-compression" version = "0.4.13" dependencies = [ "brotli", "bytes", "bzip2", "deflate64", "flate2", "futures", "futures-core", "futures-io", "futures-test", "memchr", "ntest", "pin-project-lite", "proptest", "proptest-derive", "rand", "tokio", "tokio-util", "xz2", "zstd", "zstd-safe", ] [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "windows-targets", ] [[package]] name = "bit-set" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "brotli" version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", "brotli-decompressor", ] [[package]] name = "brotli-decompressor" version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" dependencies = [ "jobserver", "libc", "shlex", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crc32fast" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "deflate64" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da692b8d1080ea3045efaab14434d40468c3d8657e42abddfffca87b428f4c1b" [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "fastrand" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "flate2" version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", "syn 2.0.79", ] [[package]] name = "futures-sink" version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-test" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce388237b32ac42eca0df1ba55ed3bbda4eaf005d7d4b5dbc0b20ab962928ac9" dependencies = [ "futures-core", "futures-executor", "futures-io", "futures-macro", "futures-sink", "futures-task", "futures-util", "pin-project", "pin-utils", ] [[package]] name = "futures-util" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "hashbrown" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "indexmap" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "jobserver" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libm" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "linux-raw-sys" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lzma-sys" version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miniz_oxide" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ "adler2", ] 
[[package]] name = "ntest" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb183f0a1da7a937f672e5ee7b7edb727bf52b8a52d531374ba8ebb9345c0330" dependencies = [ "ntest_test_cases", "ntest_timeout", ] [[package]] name = "ntest_test_cases" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16d0d3f2a488592e5368ebbe996e7f1d44aa13156efad201f5b4d84e150eaa93" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "ntest_timeout" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc7c92f190c97f79b4a332f5e81dcf68c8420af2045c936c9be0bc9de6f63b5" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", ] [[package]] name = "object" version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" dependencies = [ "portable-atomic", ] [[package]] name = "pin-project" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", "syn 2.0.79", ] [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portable-atomic" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ "zerocopy", ] [[package]] name = "proc-macro-crate" version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", "bitflags", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", "regex-syntax", "rusty-fork", "tempfile", "unarray", ] [[package]] name = "proptest-derive" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", "syn 2.0.79", ] [[package]] name = "quick-error" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ "rand_core", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rusty-fork" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "tokio" version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "pin-project-lite", "tokio-macros", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.79", ] [[package]] name = "tokio-util" version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", "winnow", ] [[package]] name = "unarray" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "wait-timeout" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ "libc", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 
[[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] [[package]] name = "xz2" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" dependencies = [ "lzma-sys", ] [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn 2.0.79", ] [[package]] name = "zstd" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", ] async-compression-0.4.13/Cargo.toml0000644000000101610000000000100125720ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). 
# See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "async-compression" version = "0.4.13" authors = [ "Wim Looman ", "Allen Bui ", ] build = false autobins = false autoexamples = false autotests = false autobenches = false description = """ Adaptors between compression crates and Rust's modern asynchronous IO types. """ readme = "README.md" keywords = [ "compression", "gzip", "zstd", "brotli", "async", ] categories = [ "compression", "asynchronous", ] license = "MIT OR Apache-2.0" repository = "https://github.com/Nullus157/async-compression" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "async_compression" path = "src/lib.rs" [[example]] name = "zlib_tokio_write" path = "examples/zlib_tokio_write.rs" required-features = [ "zlib", "tokio", ] [[example]] name = "zstd_gzip" path = "examples/zstd_gzip.rs" required-features = [ "zstd", "gzip", "tokio", ] [[test]] name = "brotli" path = "tests/brotli.rs" required-features = ["brotli"] [[test]] name = "bzip2" path = "tests/bzip2.rs" required-features = ["bzip2"] [[test]] name = "deflate" path = "tests/deflate.rs" required-features = ["deflate"] [[test]] name = "gzip" path = "tests/gzip.rs" required-features = ["gzip"] [[test]] name = "lzma" path = "tests/lzma.rs" required-features = ["lzma"] [[test]] name = "proptest" path = "tests/proptest.rs" [[test]] name = "xz" path = "tests/xz.rs" required-features = ["xz"] [[test]] name = "zlib" path = "tests/zlib.rs" required-features = ["zlib"] [[test]] name = "zstd" path = "tests/zstd.rs" required-features = ["zstd"] [[test]] name = "zstd-dict" path = "tests/zstd-dict.rs" required-features = [ "zstd", "tokio", ] [[test]] name = "zstd-window-size" path = "tests/zstd-window-size.rs" required-features = [ "zstd", "tokio", ] [dependencies.brotli] version = "7.0" optional = true [dependencies.bzip2] version = "0.4.4" optional = true [dependencies.deflate64] version = "0.1.5" optional = true [dependencies.flate2] version = "1.0.13" optional = true [dependencies.futures-core] version = "0.3" default-features = false [dependencies.futures-io] version = "0.3" features = ["std"] optional = true default-features = false [dependencies.libzstd] version = "0.13.1" optional = true default-features = false package = "zstd" [dependencies.memchr] version = "2" [dependencies.pin-project-lite] version = "0.2" [dependencies.tokio] version = "1.24.2" optional = true default-features = false [dependencies.xz2] version = "0.1.6" optional = true [dependencies.zstd-safe] version = "7" optional = true default-features = false [dev-dependencies.bytes] version = "1" [dev-dependencies.futures] version = "0.3.5" [dev-dependencies.futures-test] version = "0.3.5" [dev-dependencies.ntest] version = "0.9" [dev-dependencies.proptest] version = "1" [dev-dependencies.proptest-derive] version = "0.5" [dev-dependencies.rand] version = "0.8.5" [dev-dependencies.tokio] version = "1.24.2" features = [ "io-util", "macros", "rt-multi-thread", "io-std", ] default-features = false [dev-dependencies.tokio-util] version = "0.7" features = ["io"] default-features = false [features] all = [ "all-implementations", "all-algorithms", ] all-algorithms = [ "brotli", "bzip2", "deflate", "gzip", "lzma", "xz", "zlib", "zstd", "deflate64", ] all-implementations = [ "futures-io", "tokio", ] deflate = ["flate2"] deflate64 = ["dep:deflate64"] gzip = ["flate2"] lzma = ["xz2"] xz = ["xz2"] zlib = ["flate2"] zstd = [ "libzstd", "zstd-safe", ] zstdmt = [ "zstd", "zstd-safe/zstdmt", ] 
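The `[features]` table above is the crate's whole configuration surface: `all-implementations` selects the async IO ecosystems (`futures-io`, `tokio`), `all-algorithms` selects the compression backends, and each algorithm feature simply forwards to the matching backend crate (for example `gzip = ["flate2"]`). A minimal sketch of how a downstream crate might depend on it (the consuming manifest is hypothetical; only the feature names and the version come from this package):

```toml
# Hypothetical consumer Cargo.toml: enable just the tokio adaptors and gzip,
# rather than the catch-all `all` feature, to keep the dependency tree small.
[dependencies]
async-compression = { version = "0.4.13", features = ["tokio", "gzip"] }
```

The `all` feature (the union of `all-implementations` and `all-algorithms`) is what the README's `cargo test --all-features` exercises during development; leaf crates normally enable only the implementation and algorithms they actually use.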
async-compression-0.4.13/Cargo.toml.orig000064400000000000000000000052171046102023000162610ustar 00000000000000[package] name = "async-compression" version = "0.4.13" authors = ["Wim Looman ", "Allen Bui "] edition = "2018" license = "MIT OR Apache-2.0" keywords = ["compression", "gzip", "zstd", "brotli", "async"] categories = ["compression", "asynchronous"] repository = "https://github.com/Nullus157/async-compression" description = """ Adaptors between compression crates and Rust's modern asynchronous IO types. """ [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [features] # groups all = ["all-implementations", "all-algorithms"] all-implementations = ["futures-io", "tokio"] all-algorithms = ["brotli", "bzip2", "deflate", "gzip", "lzma", "xz", "zlib", "zstd", "deflate64"] # algorithms deflate = ["flate2"] gzip = ["flate2"] lzma = ["xz2"] xz = ["xz2"] zlib = ["flate2"] zstd = ["libzstd", "zstd-safe"] zstdmt = ["zstd", "zstd-safe/zstdmt"] deflate64 = ["dep:deflate64"] [dependencies] brotli = { version = "7.0", optional = true } bzip2 = { version = "0.4.4", optional = true } flate2 = { version = "1.0.13", optional = true } futures-core = { version = "0.3", default-features = false } futures-io = { version = "0.3", default-features = false, features = ["std"], optional = true } libzstd = { package = "zstd", version = "0.13.1", optional = true, default-features = false } memchr = "2" pin-project-lite = "0.2" tokio = { version = "1.24.2", optional = true, default-features = false } xz2 = { version = "0.1.6", optional = true } zstd-safe = { version = "7", optional = true, default-features = false } deflate64 = { version = "0.1.5", optional = true } [dev-dependencies] bytes = "1" futures = "0.3.5" futures-test = "0.3.5" ntest = "0.9" proptest = "1" proptest-derive = "0.5" rand = "0.8.5" tokio = { version = "1.24.2", default-features = false, features = ["io-util", "macros", "rt-multi-thread", "io-std"] } tokio-util = { version = "0.7", default-features = false, features = ["io"] } [[test]] name = "brotli" required-features = ["brotli"] [[test]] name = "bzip2" required-features = ["bzip2"] [[test]] name = "deflate" required-features = ["deflate"] [[test]] name = "gzip" required-features = ["gzip"] [[test]] name = "lzma" required-features = ["lzma"] [[test]] name = "xz" required-features = ["xz"] [[test]] name = "zlib" required-features = ["zlib"] [[test]] name = "zstd" required-features = ["zstd"] [[test]] name = "zstd-dict" required-features = ["zstd", "tokio"] [[test]] name = "zstd-window-size" required-features = ["zstd", "tokio"] [[example]] name = "zlib_tokio_write" required-features = ["zlib", "tokio"] [[example]] name = "zstd_gzip" required-features = ["zstd", "gzip", "tokio"] async-compression-0.4.13/LICENSE-APACHE000064400000000000000000000251371046102023000153210ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. async-compression-0.4.13/LICENSE-MIT000064400000000000000000000021031046102023000150150ustar 00000000000000The MIT License (MIT) Copyright (c) 2018 the rustasync developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. async-compression-0.4.13/README.md000064400000000000000000000033461046102023000146520ustar 00000000000000# async-compression [![crates.io version][1]][2] ![build status][3] [![downloads][5]][6] [![docs.rs docs][7]][8] ![MIT or Apache 2.0 licensed][9] [![dependency status][10]][11] This crate provides adaptors between compression crates and Rust's modern asynchronous IO types. - [Documentation][8] - [Crates.io][2] - [Releases][releases] ## Development When developing you will need to enable appropriate features for the different test cases to run, the simplest is `cargo test --all-features`, but you can enable different subsets of features as appropriate for the code you are testing to avoid compiling all dependencies, e.g. `cargo test --features tokio,gzip`. ## License Licensed under either of - [Apache License, Version 2.0](LICENSE-APACHE) - [MIT license](LICENSE-MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you shall be dual licensed as above, without any additional terms or conditions. [1]: https://img.shields.io/crates/v/async-compression.svg?style=flat-square [2]: https://crates.io/crates/async-compression [3]: https://img.shields.io/github/actions/workflow/status/Nullus157/async-compression/base.yml?style=flat-square [5]: https://img.shields.io/crates/d/async-compression.svg?style=flat-square [6]: https://crates.io/crates/async-compression [7]: https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square [8]: https://docs.rs/async-compression [9]: https://img.shields.io/crates/l/async-compression.svg?style=flat-square [10]: https://deps.rs/crate/async-compression/0.4.8/status.svg?style=flat-square [11]: https://deps.rs/crate/async-compression/0.4.8 [releases]: https://github.com/Nullus157/async-compression/releases async-compression-0.4.13/deny.toml000064400000000000000000000003331046102023000152200ustar 00000000000000[advisories] ignore = [ ] [licenses] allow = [ "MIT", "Apache-2.0", "BSD-3-Clause", "Unicode-DFS-2016", ] [bans] multiple-versions = "warn" skip = [ ] skip-tree = [ { name = "proptest", version = "1.0" }, ] async-compression-0.4.13/examples/zlib_tokio_write.rs000064400000000000000000000021021046102023000211230ustar 00000000000000//! Run this example with the following command in a terminal: //! //! ```console //! $ cargo run --example zlib_tokio_write --features="tokio,zlib" //! "example" //! 
```
use std::io::Result;

use async_compression::tokio::write::{ZlibDecoder, ZlibEncoder};
use tokio::io::AsyncWriteExt as _; // for `write_all` and `shutdown`

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
    let data = b"example";
    let compressed_data = compress(data).await?;
    let de_compressed_data = decompress(&compressed_data).await?;
    assert_eq!(de_compressed_data, data);
    println!("{:?}", String::from_utf8(de_compressed_data).unwrap());
    Ok(())
}

async fn compress(in_data: &[u8]) -> Result<Vec<u8>> {
    let mut encoder = ZlibEncoder::new(Vec::new());
    encoder.write_all(in_data).await?;
    encoder.shutdown().await?;
    Ok(encoder.into_inner())
}

async fn decompress(in_data: &[u8]) -> Result<Vec<u8>> {
    let mut decoder = ZlibDecoder::new(Vec::new());
    decoder.write_all(in_data).await?;
    decoder.shutdown().await?;
    Ok(decoder.into_inner())
}
async-compression-0.4.13/examples/zstd_gzip.rs000064400000000000000000000024661046102023000175740ustar 00000000000000//! Run this example with the following command in a terminal:
//!
//! ```console
//! $ echo -n 'example' | zstd | cargo run --example zstd_gzip --features="tokio,zstd,gzip" | gunzip -c
//! 7example
//! ```
//!
//! Note that the "7" prefix (input length) is printed to stderr but will likely show up mixed into
//! the terminal output as shown above. This is not an encoding error; see the code in `main`.

use std::io::Result;

use async_compression::tokio::{bufread::ZstdDecoder, write::GzipEncoder};
use tokio::io::{stderr, stdin, stdout, BufReader};
use tokio::io::{
    AsyncReadExt as _,  // for `read_to_end`
    AsyncWriteExt as _, // for `write_all` and `shutdown`
};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
    // Read zstd encoded data from stdin and decode
    let mut reader = ZstdDecoder::new(BufReader::new(stdin()));
    let mut x: Vec<u8> = vec![];
    reader.read_to_end(&mut x).await?;

    // print to stderr the length of the decoded data
    let mut error = stderr();
    error.write_all(x.len().to_string().as_bytes()).await?;
    error.shutdown().await?;

    // gzip encode the decoded data and print it to stdout
    let mut writer = GzipEncoder::new(stdout());
    writer.write_all(&x).await?;
    writer.shutdown().await?;

    // flush stdout
    let mut res = writer.into_inner();
    res.flush().await?;
    Ok(())
}
async-compression-0.4.13/src/brotli.rs000064400000000000000000000051401046102023000160150ustar 00000000000000//! This module contains Brotli-specific types for async-compression.

use brotli::enc::backward_references::{BrotliEncoderMode, BrotliEncoderParams};

/// Brotli compression parameters builder. This is a stable wrapper around Brotli's own encoder
/// params type, to abstract over different versions of the Brotli library.
///
/// See the [Brotli documentation](https://www.brotli.org/encode.html#a9a8) for more information on
/// these parameters.
///
/// # Examples
///
/// ```
/// use async_compression::brotli;
///
/// let params = brotli::EncoderParams::default()
///     .window_size(12)
///     .text_mode();
/// ```
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct EncoderParams {
    window_size: Option<i32>,
    block_size: Option<i32>,
    size_hint: Option<usize>,
    mode: Option<BrotliEncoderMode>,
}

impl EncoderParams {
    /// Sets window size in bytes (as a power of two).
    ///
    /// Used as Brotli's `lgwin` parameter.
    ///
    /// `window_size` is clamped to `0 <= window_size <= 24`.
    pub fn window_size(mut self, window_size: i32) -> Self {
        self.window_size = Some(window_size.clamp(0, 24));
        self
    }

    /// Sets input block size in bytes (as a power of two).
    ///
    /// Used as Brotli's `lgblock` parameter.
    ///
    /// `block_size` is clamped to `16 <= block_size <= 24`.
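    ///
    /// # Examples
    ///
    /// A minimal sketch of setting the block size (it assumes the `brotli` feature is
    /// enabled, like the builder example above); out-of-range values are clamped:
    ///
    /// ```
    /// use async_compression::brotli;
    ///
    /// // 10 is below the minimum of 16, so the encoder will use 16.
    /// let params = brotli::EncoderParams::default().block_size(10);
    /// # let _ = params;
    /// ```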
pub fn block_size(mut self, block_size: i32) -> Self { self.block_size = Some(block_size.clamp(16, 24)); self } /// Sets hint for size of data to be compressed. pub fn size_hint(mut self, size_hint: usize) -> Self { self.size_hint = Some(size_hint); self } /// Sets encoder to text mode. /// /// If input data is known to be UTF-8 text, this allows the compressor to make assumptions and /// optimizations. /// /// Used as Brotli's `mode` parameter. pub fn text_mode(mut self) -> Self { self.mode = Some(BrotliEncoderMode::BROTLI_MODE_TEXT); self } pub(crate) fn as_brotli(&self) -> BrotliEncoderParams { let mut params = BrotliEncoderParams::default(); let Self { window_size, block_size, size_hint, mode, } = self; if let Some(window_size) = window_size { params.lgwin = *window_size; } if let Some(block_size) = block_size { params.lgblock = *block_size; } if let Some(size_hint) = size_hint { params.size_hint = *size_hint; } if let Some(mode) = mode { params.mode = *mode; } params } } async-compression-0.4.13/src/codec/brotli/decoder.rs000064400000000000000000000065301046102023000205030ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::{fmt, io}; use brotli::{enc::StandardAlloc, BrotliDecompressStream, BrotliResult, BrotliState}; pub struct BrotliDecoder { // `BrotliState` is very large (over 2kb) which is why we're boxing it. state: Box>, } impl BrotliDecoder { pub(crate) fn new() -> Self { Self { state: Box::new(BrotliState::new( StandardAlloc::default(), StandardAlloc::default(), StandardAlloc::default(), )), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let in_buf = input.unwritten(); let mut out_buf = output.unwritten_mut(); let mut input_len = 0; let mut output_len = 0; let status = match BrotliDecompressStream( &mut in_buf.len(), &mut input_len, in_buf, &mut out_buf.len(), &mut output_len, out_buf, &mut 0, &mut self.state, ) { BrotliResult::ResultFailure => { return Err(io::Error::new(io::ErrorKind::Other, "brotli error")) } status => status, }; input.advance(input_len); output.advance(output_len); Ok(status) } } impl Decode for BrotliDecoder { fn reinit(&mut self) -> io::Result<()> { self.state = Box::new(BrotliState::new( StandardAlloc::default(), StandardAlloc::default(), StandardAlloc::default(), )); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode(input, output)? { BrotliResult::ResultSuccess => Ok(true), BrotliResult::NeedsMoreOutput | BrotliResult::NeedsMoreInput => Ok(false), BrotliResult::ResultFailure => unreachable!(), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode(&mut PartialBuffer::new(&[][..]), output)? { BrotliResult::ResultSuccess | BrotliResult::NeedsMoreInput => Ok(true), BrotliResult::NeedsMoreOutput => Ok(false), BrotliResult::ResultFailure => unreachable!(), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode(&mut PartialBuffer::new(&[][..]), output)? 
{ BrotliResult::ResultSuccess => Ok(true), BrotliResult::NeedsMoreOutput => Ok(false), BrotliResult::NeedsMoreInput => Err(io::Error::new( io::ErrorKind::UnexpectedEof, "reached unexpected EOF", )), BrotliResult::ResultFailure => unreachable!(), } } } impl fmt::Debug for BrotliDecoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BrotliDecoder") .field("decompress", &"") .finish() } } async-compression-0.4.13/src/codec/brotli/encoder.rs000064400000000000000000000050641046102023000205160ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::{fmt, io}; use brotli::enc::{ backward_references::BrotliEncoderParams, encode::{BrotliEncoderOperation, BrotliEncoderStateStruct}, StandardAlloc, }; pub struct BrotliEncoder { state: BrotliEncoderStateStruct, } impl BrotliEncoder { pub(crate) fn new(params: BrotliEncoderParams) -> Self { let mut state = BrotliEncoderStateStruct::new(StandardAlloc::default()); state.params = params; Self { state } } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, op: BrotliEncoderOperation, ) -> io::Result<()> { let in_buf = input.unwritten(); let mut out_buf = output.unwritten_mut(); let mut input_len = 0; let mut output_len = 0; if !self.state.compress_stream( op, &mut in_buf.len(), in_buf, &mut input_len, &mut out_buf.len(), out_buf, &mut output_len, &mut None, &mut |_, _, _, _| (), ) { return Err(io::Error::new(io::ErrorKind::Other, "brotli error")); } input.advance(input_len); output.advance(output_len); Ok(()) } } impl Encode for BrotliEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result<()> { self.encode( input, output, BrotliEncoderOperation::BROTLI_OPERATION_PROCESS, ) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { self.encode( &mut PartialBuffer::new(&[][..]), output, BrotliEncoderOperation::BROTLI_OPERATION_FLUSH, )?; Ok(!self.state.has_more_output()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { self.encode( &mut PartialBuffer::new(&[][..]), output, BrotliEncoderOperation::BROTLI_OPERATION_FINISH, )?; Ok(self.state.is_finished()) } } impl fmt::Debug for BrotliEncoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BrotliEncoder") .field("compress", &"") .finish() } } async-compression-0.4.13/src/codec/brotli/mod.rs000064400000000000000000000001421046102023000176460ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::BrotliDecoder, encoder::BrotliEncoder}; async-compression-0.4.13/src/codec/bzip2/decoder.rs000064400000000000000000000057361046102023000202450ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::{fmt, io}; use bzip2::{Decompress, Status}; pub struct BzDecoder { decompress: Decompress, } impl fmt::Debug for BzDecoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "BzDecoder {{total_in: {}, total_out: {}}}", self.decompress.total_in(), self.decompress.total_out() ) } } impl BzDecoder { pub(crate) fn new() -> Self { Self { decompress: Decompress::new(false), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let prior_in = self.decompress.total_in(); let prior_out = self.decompress.total_out(); let status = self .decompress .decompress(input.unwritten(), output.unwritten_mut()) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; 
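// bzip2 only exposes cumulative totals, so the number of bytes consumed and
// produced by this call is recovered as the difference from the totals
// captured above in `prior_in` / `prior_out`.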
input.advance((self.decompress.total_in() - prior_in) as usize); output.advance((self.decompress.total_out() - prior_out) as usize); Ok(status) } } impl Decode for BzDecoder { fn reinit(&mut self) -> io::Result<()> { self.decompress = Decompress::new(false); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode(input, output)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(false), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // THe Run action on compression went ok. Status::RunOk => unreachable!(), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => Ok(true), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { self.decode(&mut PartialBuffer::new(&[][..]), output)?; loop { let old_len = output.written().len(); self.decode(&mut PartialBuffer::new(&[][..]), output)?; if output.written().len() == old_len { break; } } Ok(!output.unwritten().is_empty()) } fn finish( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { Ok(true) } } async-compression-0.4.13/src/codec/bzip2/encoder.rs000064400000000000000000000127331046102023000202520ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::{fmt, io}; use bzip2::{Action, Compress, Compression, Status}; pub struct BzEncoder { compress: Compress, } impl fmt::Debug for BzEncoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "BzEncoder {{total_in: {}, total_out: {}}}", self.compress.total_in(), self.compress.total_out() ) } } impl BzEncoder { /// Creates a new stream prepared for compression. /// /// The `work_factor` parameter controls how the compression phase behaves /// when presented with worst case, highly repetitive, input data. If /// compression runs into difficulties caused by repetitive data, the /// library switches from the standard sorting algorithm to a fallback /// algorithm. The fallback is slower than the standard algorithm by perhaps /// a factor of three, but always behaves reasonably, no matter how bad the /// input. /// /// Lower values of `work_factor` reduce the amount of effort the standard /// algorithm will expend before resorting to the fallback. You should set /// this parameter carefully; too low, and many inputs will be handled by /// the fallback algorithm and so compress rather slowly, too high, and your /// average-to-worst case compression times can become very large. The /// default value of 30 gives reasonable behaviour over a wide range of /// circumstances. /// /// Allowable values range from 0 to 250 inclusive. 0 is a special case, /// equivalent to using the default value of 30. 
pub(crate) fn new(level: Compression, work_factor: u32) -> Self { Self { compress: Compress::new(level, work_factor), } } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, action: Action, ) -> io::Result { let prior_in = self.compress.total_in(); let prior_out = self.compress.total_out(); let status = self .compress .compress(input.unwritten(), output.unwritten_mut(), action) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; input.advance((self.compress.total_in() - prior_in) as usize); output.advance((self.compress.total_out() - prior_out) as usize); Ok(status) } } impl Encode for BzEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result<()> { match self.encode(input, output, Action::Run)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(()), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // The Run action on compression went ok. Status::RunOk => Ok(()), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => unreachable!(), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Flush)? { // Decompression went fine, nothing much to report. Status::Ok => unreachable!(), // The Flush action on a compression went ok. Status::FlushOk => Ok(false), // The Run action on compression went ok. Status::RunOk => Ok(true), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => unreachable!(), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Finish)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(false), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // The Run action on compression went ok. Status::RunOk => unreachable!(), // The Finish action on compression went ok. Status::FinishOk => Ok(false), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => Ok(true), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. 
Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } } async-compression-0.4.13/src/codec/bzip2/mod.rs000064400000000000000000000001321046102023000174000ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::BzDecoder, encoder::BzEncoder}; async-compression-0.4.13/src/codec/deflate/decoder.rs000064400000000000000000000017361046102023000206170ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[derive(Debug)] pub struct DeflateDecoder { inner: crate::codec::FlateDecoder, } impl DeflateDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(false), } } } impl crate::codec::Decode for DeflateDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/deflate/encoder.rs000064400000000000000000000016651046102023000206320ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; use flate2::Compression; #[derive(Debug)] pub struct DeflateEncoder { inner: crate::codec::FlateEncoder, } impl DeflateEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, false), } } } impl Encode for DeflateEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/deflate/mod.rs000064400000000000000000000001441046102023000177610ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::DeflateDecoder, encoder::DeflateEncoder}; async-compression-0.4.13/src/codec/deflate64/decoder.rs000064400000000000000000000037051046102023000207670ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::{Error, ErrorKind, Result}; use deflate64::InflaterManaged; #[derive(Debug)] pub struct Deflate64Decoder { inflater: Box, } impl Deflate64Decoder { pub(crate) fn new() -> Self { Self { inflater: Box::new(InflaterManaged::new()), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let result = self .inflater .inflate(input.unwritten(), output.unwritten_mut()); input.advance(result.bytes_consumed); output.advance(result.bytes_written); if result.data_error { Err(Error::new(ErrorKind::InvalidData, "invalid data")) } else { Ok(self.inflater.finished() && self.inflater.available_output() == 0) } } } impl Decode for Deflate64Decoder { fn reinit(&mut self) -> Result<()> { self.inflater = Box::new(InflaterManaged::new()); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.decode(&mut PartialBuffer::new([]), output)?; loop { let old_len = output.written().len(); self.decode(&mut PartialBuffer::new([]), output)?; if output.written().len() 
== old_len { break; } } Ok(!output.unwritten().is_empty()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.decode(&mut PartialBuffer::new([]), output) } } async-compression-0.4.13/src/codec/deflate64/mod.rs000064400000000000000000000000761046102023000201370ustar 00000000000000mod decoder; pub(crate) use self::decoder::Deflate64Decoder; async-compression-0.4.13/src/codec/flate/decoder.rs000064400000000000000000000052351046102023000203040ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io; use flate2::{Decompress, FlushDecompress, Status}; #[derive(Debug)] pub struct FlateDecoder { zlib_header: bool, decompress: Decompress, } impl FlateDecoder { pub(crate) fn new(zlib_header: bool) -> Self { Self { zlib_header, decompress: Decompress::new(zlib_header), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, flush: FlushDecompress, ) -> io::Result { let prior_in = self.decompress.total_in(); let prior_out = self.decompress.total_out(); let status = self.decompress .decompress(input.unwritten(), output.unwritten_mut(), flush)?; input.advance((self.decompress.total_in() - prior_in) as usize); output.advance((self.decompress.total_out() - prior_out) as usize); Ok(status) } } impl Decode for FlateDecoder { fn reinit(&mut self) -> io::Result<()> { self.decompress.reset(self.zlib_header); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode(input, output, FlushDecompress::None)? { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::Sync, )?; loop { let old_len = output.written().len(); self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::None, )?; if output.written().len() == old_len { break; } } Ok(!output.unwritten().is_empty()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { match self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::Finish, )? 
{ Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), } } } async-compression-0.4.13/src/codec/flate/encoder.rs000064400000000000000000000057061046102023000203210ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io; use flate2::{Compress, Compression, FlushCompress, Status}; #[derive(Debug)] pub struct FlateEncoder { compress: Compress, flushed: bool, } impl FlateEncoder { pub(crate) fn new(level: Compression, zlib_header: bool) -> Self { Self { compress: Compress::new(level, zlib_header), flushed: true, } } pub(crate) fn get_ref(&self) -> &Compress { &self.compress } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, flush: FlushCompress, ) -> io::Result { let prior_in = self.compress.total_in(); let prior_out = self.compress.total_out(); let status = self .compress .compress(input.unwritten(), output.unwritten_mut(), flush)?; input.advance((self.compress.total_in() - prior_in) as usize); output.advance((self.compress.total_out() - prior_out) as usize); Ok(status) } } impl Encode for FlateEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result<()> { self.flushed = false; match self.encode(input, output, FlushCompress::None)? { Status::Ok => Ok(()), Status::StreamEnd => unreachable!(), Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { // We need to keep track of whether we've already flushed otherwise we'll just keep writing // out sync blocks continuously and probably never complete flushing. if self.flushed { return Ok(true); } self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::Sync, )?; loop { let old_len = output.written().len(); self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::None, )?; if output.written().len() == old_len { break; } } self.flushed = true; Ok(!output.unwritten().is_empty()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { self.flushed = false; match self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::Finish, )? 
{ Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(io::Error::new(io::ErrorKind::Other, "unexpected BufError")), } } } async-compression-0.4.13/src/codec/flate/mod.rs000064400000000000000000000001401046102023000174440ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::FlateDecoder, encoder::FlateEncoder}; async-compression-0.4.13/src/codec/gzip/decoder.rs000064400000000000000000000113111046102023000201520ustar 00000000000000use crate::{ codec::{ gzip::header::{self, Header}, Decode, }, util::PartialBuffer, }; use std::io::{Error, ErrorKind, Result}; use flate2::Crc; #[derive(Debug)] enum State { Header(header::Parser), Decoding, Footer(PartialBuffer>), Done, } #[derive(Debug)] pub struct GzipDecoder { inner: crate::codec::FlateDecoder, crc: Crc, state: State, header: Header, } fn check_footer(crc: &Crc, input: &[u8]) -> Result<()> { if input.len() < 8 { return Err(Error::new( ErrorKind::InvalidData, "Invalid gzip footer length", )); } let crc_sum = crc.sum().to_le_bytes(); let bytes_read = crc.amount().to_le_bytes(); if crc_sum != input[0..4] { return Err(Error::new( ErrorKind::InvalidData, "CRC computed does not match", )); } if bytes_read != input[4..8] { return Err(Error::new( ErrorKind::InvalidData, "amount of bytes read does not match", )); } Ok(()) } impl GzipDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(false), crc: Crc::new(), state: State::Header(header::Parser::default()), header: Header::default(), } } fn process, O: AsRef<[u8]> + AsMut<[u8]>>( &mut self, input: &mut PartialBuffer, output: &mut PartialBuffer, inner: impl Fn(&mut Self, &mut PartialBuffer, &mut PartialBuffer) -> Result, ) -> Result { loop { match &mut self.state { State::Header(parser) => { if let Some(header) = parser.input(input)? 
{ self.header = header; self.state = State::Decoding; } } State::Decoding => { let prior = output.written().len(); let res = inner(self, input, output); if (output.written().len() > prior) { // update CRC even if there was an error self.crc.update(&output.written()[prior..]); } let done = res?; if done { self.state = State::Footer(vec![0; 8].into()) } } State::Footer(footer) => { footer.copy_unwritten_from(input); if footer.unwritten().is_empty() { check_footer(&self.crc, footer.written())?; self.state = State::Done } } State::Done => {} }; if let State::Done = self.state { return Ok(true); } if input.unwritten().is_empty() || output.unwritten().is_empty() { return Ok(false); } } } } impl Decode for GzipDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; self.crc = Crc::new(); self.state = State::Header(header::Parser::default()); self.header = Header::default(); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.process(input, output, |this, input, output| { this.inner.decode(input, output) }) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { loop { match self.state { State::Header(_) | State::Footer(_) | State::Done => return Ok(true), State::Decoding => { let prior = output.written().len(); let done = self.inner.flush(output)?; self.crc.update(&output.written()[prior..]); if done { return Ok(true); } } }; if output.unwritten().is_empty() { return Ok(false); } } } fn finish( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // Because of the footer we have to have already flushed all the data out before we get here if let State::Done = self.state { Ok(true) } else { Err(Error::new( ErrorKind::UnexpectedEof, "unexpected end of file", )) } } } async-compression-0.4.13/src/codec/gzip/encoder.rs000064400000000000000000000106661046102023000202000ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io; use flate2::{Compression, Crc}; #[derive(Debug)] enum State { Header(PartialBuffer>), Encoding, Footer(PartialBuffer>), Done, } #[derive(Debug)] pub struct GzipEncoder { inner: crate::codec::FlateEncoder, crc: Crc, state: State, } fn header(level: Compression) -> Vec { let level_byte = if level.level() >= Compression::best().level() { 0x02 } else if level.level() <= Compression::fast().level() { 0x04 } else { 0x00 }; vec![0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, level_byte, 0xff] } impl GzipEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, false), crc: Crc::new(), state: State::Header(header(level).into()), } } fn footer(&mut self) -> Vec { let mut output = Vec::with_capacity(8); output.extend(&self.crc.sum().to_le_bytes()); output.extend(&self.crc.amount().to_le_bytes()); output } } impl Encode for GzipEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result<()> { loop { match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } } State::Encoding => { let prior_written = input.written().len(); self.inner.encode(input, output)?; self.crc.update(&input.written()[prior_written..]); } State::Footer(_) | State::Done => { return Err(io::Error::new( io::ErrorKind::Other, "encode after complete", )); } }; if input.unwritten().is_empty() || output.unwritten().is_empty() { return Ok(()); } } } fn flush( &mut self, output: &mut 
PartialBuffer + AsMut<[u8]>>, ) -> io::Result { loop { let done = match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } false } State::Encoding => self.inner.flush(output)?, State::Footer(footer) => { output.copy_unwritten_from(&mut *footer); if footer.unwritten().is_empty() { self.state = State::Done; true } else { false } } State::Done => true, }; if done { return Ok(true); } if output.unwritten().is_empty() { return Ok(false); } } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { loop { match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } } State::Encoding => { if self.inner.finish(output)? { self.state = State::Footer(self.footer().into()); } } State::Footer(footer) => { output.copy_unwritten_from(&mut *footer); if footer.unwritten().is_empty() { self.state = State::Done; } } State::Done => {} }; if let State::Done = self.state { return Ok(true); } if output.unwritten().is_empty() { return Ok(false); } } } } async-compression-0.4.13/src/codec/gzip/header.rs000064400000000000000000000117601046102023000200050ustar 00000000000000use crate::util::PartialBuffer; use std::io; #[derive(Debug, Default)] struct Flags { ascii: bool, crc: bool, extra: bool, filename: bool, comment: bool, } #[derive(Debug, Default)] pub(super) struct Header { flags: Flags, } #[derive(Debug)] enum State { Fixed(PartialBuffer<[u8; 10]>), ExtraLen(PartialBuffer<[u8; 2]>), Extra(PartialBuffer>), Filename(Vec), Comment(Vec), Crc(PartialBuffer<[u8; 2]>), Done, } impl Default for State { fn default() -> Self { State::Fixed(<_>::default()) } } #[derive(Debug, Default)] pub(super) struct Parser { state: State, header: Header, } impl Header { fn parse(input: &[u8; 10]) -> io::Result { if input[0..3] != [0x1f, 0x8b, 0x08] { return Err(io::Error::new( io::ErrorKind::InvalidData, "Invalid gzip header", )); } let flag = input[3]; let flags = Flags { ascii: (flag & 0b0000_0001) != 0, crc: (flag & 0b0000_0010) != 0, extra: (flag & 0b0000_0100) != 0, filename: (flag & 0b0000_1000) != 0, comment: (flag & 0b0001_0000) != 0, }; Ok(Header { flags }) } } impl Parser { pub(super) fn input( &mut self, input: &mut PartialBuffer>, ) -> io::Result> { loop { match &mut self.state { State::Fixed(data) => { data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.header = Header::parse(&data.take().into_inner())?; self.state = State::ExtraLen(<_>::default()); } else { return Ok(None); } } State::ExtraLen(data) => { if !self.header.flags.extra { self.state = State::Filename(<_>::default()); continue; } data.copy_unwritten_from(input); if data.unwritten().is_empty() { let len = u16::from_le_bytes(data.take().into_inner()); self.state = State::Extra(vec![0; usize::from(len)].into()); } else { return Ok(None); } } State::Extra(data) => { data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.state = State::Filename(<_>::default()); } else { return Ok(None); } } State::Filename(data) => { if !self.header.flags.filename { self.state = State::Comment(<_>::default()); continue; } if let Some(len) = memchr::memchr(0, input.unwritten()) { data.extend_from_slice(&input.unwritten()[..len]); input.advance(len + 1); self.state = State::Comment(<_>::default()); } else { data.extend_from_slice(input.unwritten()); input.advance(input.unwritten().len()); return Ok(None); } } 
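// Like the FNAME field handled above, the gzip FCOMMENT field is a
// zero-terminated string: scan for the NUL with `memchr`, buffering any
// partial data until the terminator shows up in a later input chunk.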
State::Comment(data) => { if !self.header.flags.comment { self.state = State::Crc(<_>::default()); continue; } if let Some(len) = memchr::memchr(0, input.unwritten()) { data.extend_from_slice(&input.unwritten()[..len]); input.advance(len + 1); self.state = State::Crc(<_>::default()); } else { data.extend_from_slice(input.unwritten()); input.advance(input.unwritten().len()); return Ok(None); } } State::Crc(data) => { if !self.header.flags.crc { self.state = State::Done; return Ok(Some(std::mem::take(&mut self.header))); } data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.state = State::Done; return Ok(Some(std::mem::take(&mut self.header))); } else { return Ok(None); } } State::Done => { return Err(io::Error::new( io::ErrorKind::Other, "parser used after done", )); } }; } } } async-compression-0.4.13/src/codec/gzip/mod.rs000064400000000000000000000001521046102023000173250ustar 00000000000000mod decoder; mod encoder; mod header; pub(crate) use self::{decoder::GzipDecoder, encoder::GzipEncoder}; async-compression-0.4.13/src/codec/lzma/decoder.rs000064400000000000000000000021161046102023000201470ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct LzmaDecoder { inner: crate::codec::Xz2Decoder, } impl LzmaDecoder { pub fn new() -> Self { Self { inner: crate::codec::Xz2Decoder::new(u64::MAX), } } pub fn with_memlimit(memlimit: u64) -> Self { Self { inner: crate::codec::Xz2Decoder::new(memlimit), } } } impl Decode for LzmaDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit() } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/lzma/encoder.rs000064400000000000000000000016711046102023000201660ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct LzmaEncoder { inner: crate::codec::Xz2Encoder, } impl LzmaEncoder { pub fn new(level: u32) -> Self { Self { inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Lzma, level), } } } impl Encode for LzmaEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // Flush on LZMA 1 is not supported Ok(true) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/lzma/mod.rs000064400000000000000000000001361046102023000173210ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::LzmaDecoder, encoder::LzmaEncoder}; async-compression-0.4.13/src/codec/mod.rs000064400000000000000000000051701046102023000163610ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[cfg(feature = "brotli")] mod brotli; #[cfg(feature = "bzip2")] mod bzip2; #[cfg(feature = "deflate")] mod deflate; #[cfg(feature = "deflate64")] mod deflate64; #[cfg(feature = "flate2")] mod flate; #[cfg(feature = "gzip")] mod gzip; #[cfg(feature = "lzma")] mod lzma; #[cfg(feature = "xz")] mod xz; #[cfg(feature = "xz2")] mod xz2; #[cfg(feature = "zlib")] mod zlib; #[cfg(feature = "zstd")] mod zstd; 
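// Re-exports for the per-algorithm encoders/decoders follow. The generic
// adaptors elsewhere in the crate (e.g. the `futures` wrappers) are written
// against the `Encode`/`Decode` traits defined at the bottom of this file,
// not against any particular compression backend.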
#[cfg(feature = "brotli")] pub(crate) use self::brotli::{BrotliDecoder, BrotliEncoder}; #[cfg(feature = "bzip2")] pub(crate) use self::bzip2::{BzDecoder, BzEncoder}; #[cfg(feature = "deflate")] pub(crate) use self::deflate::{DeflateDecoder, DeflateEncoder}; #[cfg(feature = "deflate64")] pub(crate) use self::deflate64::Deflate64Decoder; #[cfg(feature = "flate2")] pub(crate) use self::flate::{FlateDecoder, FlateEncoder}; #[cfg(feature = "gzip")] pub(crate) use self::gzip::{GzipDecoder, GzipEncoder}; #[cfg(feature = "lzma")] pub(crate) use self::lzma::{LzmaDecoder, LzmaEncoder}; #[cfg(feature = "xz")] pub(crate) use self::xz::{XzDecoder, XzEncoder}; #[cfg(feature = "xz2")] pub(crate) use self::xz2::{Xz2Decoder, Xz2Encoder, Xz2FileFormat}; #[cfg(feature = "zlib")] pub(crate) use self::zlib::{ZlibDecoder, ZlibEncoder}; #[cfg(feature = "zstd")] pub(crate) use self::zstd::{ZstdDecoder, ZstdEncoder}; pub trait Encode { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()>; /// Returns whether the internal buffers are flushed fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) -> Result; /// Returns whether the internal buffers are flushed and the end of the stream is written fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; } pub trait Decode { /// Reinitializes this decoder ready to decode a new member/frame of data. fn reinit(&mut self) -> Result<()>; /// Returns whether the end of the stream has been read fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; /// Returns whether the internal buffers are flushed fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) -> Result; /// Returns whether the internal buffers are flushed fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; } async-compression-0.4.13/src/codec/xz/decoder.rs000064400000000000000000000041561046102023000176530ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::{Error, ErrorKind, Result}; #[derive(Debug)] pub struct XzDecoder { inner: crate::codec::Xz2Decoder, skip_padding: Option, } impl XzDecoder { pub fn new() -> Self { Self { inner: crate::codec::Xz2Decoder::new(u64::MAX), skip_padding: None, } } pub fn with_memlimit(memlimit: u64) -> Self { Self { inner: crate::codec::Xz2Decoder::new(memlimit), skip_padding: None, } } } impl Decode for XzDecoder { fn reinit(&mut self) -> Result<()> { self.skip_padding = Some(4); self.inner.reinit() } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if let Some(ref mut count) = self.skip_padding { while input.unwritten().first() == Some(&0) { input.advance(1); *count -= 1; if *count == 0 { *count = 4; } } if input.unwritten().is_empty() { return Ok(true); } // If this is non-padding then it cannot start with null bytes, so it must be invalid // padding if *count != 4 { return Err(Error::new( ErrorKind::InvalidData, "stream padding was not a multiple of 4 bytes", )); } self.skip_padding = None; } self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if self.skip_padding.is_some() { return Ok(true); } self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if self.skip_padding.is_some() { return Ok(true); } self.inner.finish(output) } } 
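// A small, self-contained sanity check sketched in for illustration: after
// `reinit`, the decoder is skipping inter-stream padding, which must consist
// of NUL bytes in multiples of four, so unaligned padding is an error.
#[cfg(test)]
mod tests {
    use super::XzDecoder;
    use crate::{codec::Decode, util::PartialBuffer};

    #[test]
    fn unaligned_stream_padding_is_rejected() {
        let mut decoder = XzDecoder::new();
        // Enter the padding-skipping state that follows the end of a stream.
        decoder.reinit().unwrap();

        // Three NUL bytes followed by a non-NUL byte: not a multiple of 4.
        let mut input = PartialBuffer::new(&[0u8, 0, 0, 1][..]);
        let mut output = PartialBuffer::new(vec![0u8; 32]);

        assert!(decoder.decode(&mut input, &mut output).is_err());
    }
}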
async-compression-0.4.13/src/codec/xz/encoder.rs000064400000000000000000000016241046102023000176620ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct XzEncoder { inner: crate::codec::Xz2Encoder, } impl XzEncoder { pub fn new(level: u32) -> Self { Self { inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Xz, level), } } } impl Encode for XzEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/xz/mod.rs000064400000000000000000000001321046102023000170130ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::XzDecoder, encoder::XzEncoder}; async-compression-0.4.13/src/codec/xz2/decoder.rs000064400000000000000000000047531046102023000177400ustar 00000000000000use std::{fmt, io}; use xz2::stream::{Action, Status, Stream}; use crate::{codec::Decode, util::PartialBuffer}; pub struct Xz2Decoder { stream: Stream, } impl fmt::Debug for Xz2Decoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Xz2Decoder").finish_non_exhaustive() } } impl Xz2Decoder { pub fn new(mem_limit: u64) -> Self { Self { stream: Stream::new_auto_decoder(mem_limit, 0).unwrap(), } } } impl Decode for Xz2Decoder { fn reinit(&mut self) -> io::Result<()> { *self = Self::new(self.stream.memlimit()); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let previous_in = self.stream.total_in() as usize; let previous_out = self.stream.total_out() as usize; let status = self .stream .process(input.unwritten(), output.unwritten_mut(), Action::Run)?; input.advance(self.stream.total_in() as usize - previous_in); output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => Err(io::Error::new( io::ErrorKind::Other, "Unexpected lzma integrity check", )), Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "More memory needed")), } } fn flush( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { // While decoding flush is a noop Ok(true) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::Finish)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => Err(io::Error::new( io::ErrorKind::Other, "Unexpected lzma integrity check", )), Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "More memory needed")), } } } async-compression-0.4.13/src/codec/xz2/encoder.rs000064400000000000000000000062211046102023000177420ustar 00000000000000use std::{fmt, io}; use xz2::stream::{Action, Check, LzmaOptions, Status, Stream}; use crate::{ codec::{Encode, Xz2FileFormat}, util::PartialBuffer, }; pub struct Xz2Encoder { stream: Stream, } impl fmt::Debug for Xz2Encoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Xz2Encoder").finish_non_exhaustive() } } impl Xz2Encoder { pub fn new(format: Xz2FileFormat, 
level: u32) -> Self { let stream = match format { Xz2FileFormat::Xz => Stream::new_easy_encoder(level, Check::Crc64).unwrap(), Xz2FileFormat::Lzma => { Stream::new_lzma_encoder(&LzmaOptions::new_preset(level).unwrap()).unwrap() } }; Self { stream } } } impl Encode for Xz2Encoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result<()> { let previous_in = self.stream.total_in() as usize; let previous_out = self.stream.total_out() as usize; let status = self .stream .process(input.unwritten(), output.unwritten_mut(), Action::Run)?; input.advance(self.stream.total_in() as usize - previous_in); output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok | Status::StreamEnd => Ok(()), Status::GetCheck => Err(io::Error::new( io::ErrorKind::Other, "Unexpected lzma integrity check", )), Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::SyncFlush)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => Err(io::Error::new( io::ErrorKind::Other, "Unexpected lzma integrity check", )), Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> io::Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::Finish)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => Err(io::Error::new( io::ErrorKind::Other, "Unexpected lzma integrity check", )), Status::MemNeeded => Err(io::Error::new(io::ErrorKind::Other, "out of memory")), } } } async-compression-0.4.13/src/codec/xz2/mod.rs000064400000000000000000000002121046102023000170740ustar 00000000000000mod decoder; mod encoder; pub enum Xz2FileFormat { Xz, Lzma, } pub(crate) use self::{decoder::Xz2Decoder, encoder::Xz2Encoder}; async-compression-0.4.13/src/codec/zlib/decoder.rs000064400000000000000000000017241046102023000201500ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[derive(Debug)] pub struct ZlibDecoder { inner: crate::codec::FlateDecoder, } impl ZlibDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(true), } } } impl crate::codec::Decode for ZlibDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/zlib/encoder.rs000064400000000000000000000020101046102023000201470ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; use flate2::Compression; #[derive(Debug)] pub struct ZlibEncoder { inner: crate::codec::FlateEncoder, } impl ZlibEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, true), } } 
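    // Passing `true` above selects the zlib (RFC 1950) framing, i.e. a two-byte
    // header plus an Adler-32 trailer around the raw DEFLATE stream; the
    // `DeflateEncoder` counterpart passes `false` to emit a raw stream instead.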
pub(crate) fn get_ref(&self) -> &crate::codec::FlateEncoder { &self.inner } } impl Encode for ZlibEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.13/src/codec/zlib/mod.rs000064400000000000000000000001361046102023000173160ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::ZlibDecoder, encoder::ZlibEncoder}; async-compression-0.4.13/src/codec/zstd/decoder.rs000064400000000000000000000043441046102023000201750ustar 00000000000000use std::io; use std::io::Result; use crate::{codec::Decode, unshared::Unshared, util::PartialBuffer}; use libzstd::stream::raw::{Decoder, Operation}; #[derive(Debug)] pub struct ZstdDecoder { decoder: Unshared>, } impl ZstdDecoder { pub(crate) fn new() -> Self { Self { decoder: Unshared::new(Decoder::new().unwrap()), } } pub(crate) fn new_with_params(params: &[crate::zstd::DParameter]) -> Self { let mut decoder = Decoder::new().unwrap(); for param in params { decoder.set_parameter(param.as_zstd()).unwrap(); } Self { decoder: Unshared::new(decoder), } } pub(crate) fn new_with_dict(dictionary: &[u8]) -> io::Result { let mut decoder = Decoder::with_dictionary(dictionary)?; Ok(Self { decoder: Unshared::new(decoder), }) } } impl Decode for ZstdDecoder { fn reinit(&mut self) -> Result<()> { self.decoder.get_mut().reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let status = self .decoder .get_mut() .run_on_buffers(input.unwritten(), output.unwritten_mut())?; input.advance(status.bytes_read); output.advance(status.bytes_written); Ok(status.remaining == 0) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.decoder.get_mut().flush(&mut out_buf)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.decoder.get_mut().finish(&mut out_buf, true)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } } async-compression-0.4.13/src/codec/zstd/encoder.rs000064400000000000000000000042441046102023000202060ustar 00000000000000use crate::{codec::Encode, unshared::Unshared, util::PartialBuffer}; use libzstd::stream::raw::{CParameter, Encoder, Operation}; use std::io; use std::io::Result; #[derive(Debug)] pub struct ZstdEncoder { encoder: Unshared>, } impl ZstdEncoder { pub(crate) fn new(level: i32) -> Self { Self { encoder: Unshared::new(Encoder::new(level).unwrap()), } } pub(crate) fn new_with_params(level: i32, params: &[crate::zstd::CParameter]) -> Self { let mut encoder = Encoder::new(level).unwrap(); for param in params { encoder.set_parameter(param.as_zstd()).unwrap(); } Self { encoder: Unshared::new(encoder), } } pub(crate) fn new_with_dict(level: i32, dictionary: &[u8]) -> io::Result { let mut encoder = Encoder::with_dictionary(level, dictionary)?; Ok(Self { encoder: Unshared::new(encoder), }) } } impl Encode for ZstdEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut 
PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { let status = self .encoder .get_mut() .run_on_buffers(input.unwritten(), output.unwritten_mut())?; input.advance(status.bytes_read); output.advance(status.bytes_written); Ok(()) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.encoder.get_mut().flush(&mut out_buf)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.encoder.get_mut().finish(&mut out_buf, true)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } } async-compression-0.4.13/src/codec/zstd/mod.rs000064400000000000000000000001361046102023000173420ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::ZstdDecoder, encoder::ZstdEncoder}; async-compression-0.4.13/src/futures/bufread/generic/decoder.rs000064400000000000000000000105361046102023000226550ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Decode, util::PartialBuffer}; use futures_core::ready; use futures_io::{AsyncBufRead, AsyncRead}; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Decoding, Flushing, Done, Next, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] reader: R, decoder: D, state: State, multiple_members: bool, } } impl Decoder { pub fn new(reader: R, decoder: D) -> Self { Self { reader, decoder, state: State::Decoding, multiple_members: false, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } pub fn multiple_members(&mut self, enabled: bool) { self.multiple_members = enabled; } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); let mut first = true; loop { *this.state = match this.state { State::Decoding => { let input = if first { &[][..] } else { ready!(this.reader.as_mut().poll_fill_buf(cx))? }; if input.is_empty() && !first { // Avoid attempting to reinitialise the decoder if the // reader has returned EOF. *this.multiple_members = false; State::Flushing } else { let mut input = PartialBuffer::new(input); let done = this.decoder.decode(&mut input, output).or_else(|err| { // ignore the first error, occurs when input is empty // but we need to run decode to flush if first { Ok(false) } else { Err(err) } })?; first = false; let len = input.written().len(); this.reader.as_mut().consume(len); if done { State::Flushing } else { State::Decoding } } } State::Flushing => { if this.decoder.finish(output)? 
{ if *this.multiple_members { this.decoder.reinit()?; State::Next } else { State::Done } } else { State::Flushing } } State::Done => State::Done, State::Next => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Done } else { State::Decoding } } }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Decoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut output = PartialBuffer::new(buf); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(output.written().len())), } } } async-compression-0.4.13/src/futures/bufread/generic/encoder.rs000064400000000000000000000055371046102023000226740ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Encode, util::PartialBuffer}; use futures_core::ready; use futures_io::{AsyncBufRead, AsyncRead}; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Encoding, Flushing, Done, } pin_project! { #[derive(Debug)] pub struct Encoder { #[pin] reader: R, encoder: E, state: State, } } impl Encoder { pub fn new(reader: R, encoder: E) -> Self { Self { reader, encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub(crate) fn get_encoder_ref(&self) -> &E { &self.encoder } pub fn into_inner(self) -> R { self.reader } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Encoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Flushing } else { let mut input = PartialBuffer::new(input); this.encoder.encode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); State::Encoding } } State::Flushing => { if this.encoder.finish(output)? { State::Done } else { State::Flushing } } State::Done => State::Done, }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Encoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut output = PartialBuffer::new(buf); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(output.written().len())), } } } async-compression-0.4.13/src/futures/bufread/generic/mod.rs000064400000000000000000000001171046102023000220210ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.13/src/futures/bufread/macros/decoder.rs000064400000000000000000000070421046102023000225230ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will /// read compressed data from an underlying stream and emit a stream of uncompressed data. 
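        // The generated wrapper below simply pairs the shared generic
        // `crate::futures::bufread::Decoder` with this algorithm's
        // `crate::codec` decoder and delegates all reads to it.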
#[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::bufread::Decoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncBufRead> $name<$inner> { /// Creates a new decoder which will read compressed data from the given stream and /// emit a uncompressed stream. pub fn new(read: $inner) -> $name<$inner> { $name { inner: crate::futures::bufread::Decoder::new(read, crate::codec::$name::new()), } } $($($inherent_methods)*)* /// Configure multi-member/frame decoding, if enabled this will reset the decoder state /// when reaching the end of a compressed member/frame and expect either EOF or another /// compressed member/frame to follow it in the stream. pub fn multiple_members(&mut self, enabled: bool) { self.inner.multiple_members(enabled); } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/futures/bufread/macros/encoder.rs000064400000000000000000000057661046102023000225500ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will /// read uncompressed data from an underlying stream and emit a stream of compressed data. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::bufread::Encoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncBufRead> $name<$inner> { $( /// Creates a new encoder which will read uncompressed data from the given stream /// and emit a compressed stream. /// $($inherent_methods)* )* /// Acquires a reference to the underlying reader that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. 
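// A minimal usage sketch of one of the encoders generated by this macro (assuming the
// `futures-io` and `gzip` features): the encoder reads uncompressed bytes from the
// wrapped reader and yields gzip-compressed bytes from `poll_read`.
//
//     use async_compression::futures::bufread::GzipEncoder;
//     use futures::io::AsyncReadExt as _;
//
//     async fn compress(data: &[u8]) -> std::io::Result<Vec<u8>> {
//         let mut encoder = GzipEncoder::new(data);
//         let mut out = Vec::new();
//         encoder.read_to_end(&mut out).await?;
//         Ok(out)
//     }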
pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying reader. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/futures/bufread/macros/mod.rs000064400000000000000000000000641046102023000216720ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.13/src/futures/bufread/mod.rs000064400000000000000000000003671046102023000204140ustar 00000000000000//! Types which operate over [`AsyncBufRead`](futures_io::AsyncBufRead) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; pub(crate) use generic::{Decoder, Encoder}; algos!(futures::bufread); async-compression-0.4.13/src/futures/mod.rs000064400000000000000000000001551046102023000167770ustar 00000000000000//! Implementations for IO traits exported by [`futures-io`](::futures_io). pub mod bufread; pub mod write; async-compression-0.4.13/src/futures/write/buf_write.rs000064400000000000000000000025361046102023000213450ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; pub(crate) trait AsyncBufWrite { /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if /// it is full. /// /// On success, returns `Poll::Ready(Ok(buf))`. /// /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and /// arranges for the current task context (`cx`) to receive a notification when the object /// becomes readable or is closed. fn poll_partial_flush_buf( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>; /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be /// written out to the underlying IO when possible. /// /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to /// function properly. This function does not perform any I/O, it simply informs this object /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't /// called before calling it. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. 
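// Sketch of the call pattern the generic write-side codecs in this crate use with this
// trait: borrow the internal buffer, fill some prefix of it, then acknowledge how much
// was written (`n` below stands for however many bytes the codec produced):
//
//     let out = ready!(writer.as_mut().poll_partial_flush_buf(cx))?;
//     // ... encode or decode into `out` ...
//     writer.as_mut().produce(n);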
fn produce(self: Pin<&mut Self>, amt: usize); } async-compression-0.4.13/src/futures/write/buf_writer.rs000064400000000000000000000153261046102023000215300ustar 00000000000000// Originally sourced from `futures_util::io::buf_writer`, needs to be redefined locally so that // the `AsyncBufWrite` impl can access its internals, and changed a bit to make it more efficient // with those methods. use super::AsyncBufWrite; use futures_core::ready; use futures_io::{AsyncSeek, AsyncWrite, SeekFrom}; use pin_project_lite::pin_project; use std::{ cmp::min, fmt, io, pin::Pin, task::{Context, Poll}, }; const DEFAULT_BUF_SIZE: usize = 8192; pin_project! { pub struct BufWriter { #[pin] inner: W, buf: Box<[u8]>, written: usize, buffered: usize, } } impl BufWriter { /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. pub fn new(inner: W) -> Self { Self::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter` with the specified buffer capacity. pub fn with_capacity(cap: usize, inner: W) -> Self { Self { inner, buf: vec![0; cap].into(), written: 0, buffered: 0, } } fn partial_flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered]) { Poll::Pending => { break; } Poll::Ready(Ok(0)) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Poll::Ready(Ok(n)) => *this.written += n, Poll::Ready(Err(e)) => { ret = Err(e); break; } } } if *this.written > 0 { this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } else if *this.buffered == 0 { Poll::Ready(ret) } else { ret?; Poll::Pending } } fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match ready!(this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered])) { Ok(0) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Ok(n) => *this.written += n, Err(e) => { ret = Err(e); break; } } } this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } /// Gets a reference to the underlying writer. pub fn get_ref(&self) -> &W { &self.inner } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_mut(&mut self) -> &mut W { &mut self.inner } /// Gets a pinned mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().inner } /// Consumes this `BufWriter`, returning the underlying writer. /// /// Note that any leftover data in the internal buffer is lost. 
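// Since writes are buffered here, callers of the public wrappers built on this type
// should flush or close before taking the inner writer back, e.g. (a sketch, with a
// futures `AsyncWriteExt` in scope):
//
//     // encoder.close().await?;            // flush buffered data and finish the stream
//     // let writer = encoder.into_inner(); // safe: nothing is left in the buffer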
pub fn into_inner(self) -> W { self.inner } } impl AsyncWrite for BufWriter { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { let this = self.as_mut().project(); if *this.buffered + buf.len() > this.buf.len() { ready!(self.as_mut().partial_flush_buf(cx))?; } let this = self.as_mut().project(); if buf.len() >= this.buf.len() { if *this.buffered == 0 { this.inner.poll_write(cx, buf) } else { // The only way that `partial_flush_buf` would have returned with // `this.buffered != 0` is if it were Pending, so our waker was already queued Poll::Pending } } else { let len = min(this.buf.len() - *this.buffered, buf.len()); this.buf[*this.buffered..*this.buffered + len].copy_from_slice(&buf[..len]); *this.buffered += len; Poll::Ready(Ok(len)) } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_flush(cx) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_close(cx) } } impl AsyncBufWrite for BufWriter { fn poll_partial_flush_buf( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { ready!(self.as_mut().partial_flush_buf(cx))?; let this = self.project(); Poll::Ready(Ok(&mut this.buf[*this.buffered..])) } fn produce(self: Pin<&mut Self>, amt: usize) { *self.project().buffered += amt; } } impl fmt::Debug for BufWriter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BufWriter") .field("writer", &self.inner) .field( "buffer", &format_args!("{}/{}", self.buffered, self.buf.len()), ) .field("written", &self.written) .finish() } } impl AsyncSeek for BufWriter { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn poll_seek( mut self: Pin<&mut Self>, cx: &mut Context<'_>, pos: SeekFrom, ) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_seek(cx, pos) } } async-compression-0.4.13/src/futures/write/generic/decoder.rs000064400000000000000000000116371046102023000224020ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; use crate::{ codec::Decode, futures::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use futures_io::AsyncWrite; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Decoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] writer: BufWriter, decoder: D, state: State, } } impl Decoder { pub fn new(writer: W, decoder: D) -> Self { Self { writer: BufWriter::new(writer), decoder, state: State::Decoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Decoding => { if this.decoder.decode(input, &mut output)? { State::Finishing } else { State::Decoding } } State::Finishing => { if this.decoder.finish(&mut output)? 
{ State::Done } else { State::Finishing } } State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Write after end of stream", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let (state, done) = match this.state { State::Decoding => { let done = this.decoder.flush(&mut output)?; (State::Decoding, done) } State::Finishing => { if this.decoder.finish(&mut output)? { (State::Done, false) } else { (State::Finishing, false) } } State::Done => (State::Done, true), }; *this.state = state; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Decoder { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let State::Decoding = self.as_mut().project().state { *self.as_mut().project().state = State::Finishing; } ready!(self.as_mut().do_poll_flush(cx))?; if let State::Done = self.as_mut().project().state { ready!(self.as_mut().project().writer.as_mut().poll_close(cx))?; Poll::Ready(Ok(())) } else { Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Attempt to close before finishing input", ))) } } } async-compression-0.4.13/src/futures/write/generic/encoder.rs000064400000000000000000000115041046102023000224050ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; use crate::{ codec::Encode, futures::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use futures_io::AsyncWrite; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Encoding, Finishing, Done, } pin_project! 
{ #[derive(Debug)] pub struct Encoder { #[pin] writer: BufWriter, encoder: E, state: State, } } impl Encoder { pub fn new(writer: W, encoder: E) -> Self { Self { writer: BufWriter::new(writer), encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub(crate) fn get_encoder_ref(&self) -> &E { &self.encoder } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding => { this.encoder.encode(input, &mut output)?; State::Encoding } State::Finishing | State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Write after close", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let done = match this.state { State::Encoding => this.encoder.flush(&mut output)?, State::Finishing | State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Flush after close", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } fn do_poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding | State::Finishing => { if this.encoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => State::Done, }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Encoder { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_close(cx))?; ready!(self.project().writer.as_mut().poll_close(cx))?; Poll::Ready(Ok(())) } } async-compression-0.4.13/src/futures/write/generic/mod.rs000064400000000000000000000001171046102023000215430ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.13/src/futures/write/macros/decoder.rs000064400000000000000000000071421046102023000222460ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! 
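// A minimal usage sketch of one of the write-side decoders generated by this macro
// (assuming the `futures-io` and `gzip` features): compressed bytes are written in,
// decompressed bytes come out on the wrapped writer.
//
//     use async_compression::futures::write::GzipDecoder;
//     use futures::io::AsyncWriteExt as _;
//
//     async fn decompress(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
//         let mut decoder = GzipDecoder::new(Vec::new());
//         decoder.write_all(compressed).await?;
//         decoder.close().await?;
//         Ok(decoder.into_inner())
//     }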
{ $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will /// take in compressed data and write it uncompressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::write::Decoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncWrite> $name<$inner> { /// Creates a new decoder which will take in compressed data and write it uncompressed /// to the given stream. pub fn new(read: $inner) -> $name<$inner> { $name { inner: crate::futures::write::Decoder::new(read, crate::codec::$name::new()), } } $($($inherent_methods)*)* /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_close( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_close(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/futures/write/macros/encoder.rs000064400000000000000000000067111046102023000222610ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will /// take in uncompressed data and write it compressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::write::Encoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncWrite> $name<$inner> { $( /// Creates a new encoder which will take in uncompressed data and write it /// compressed to the given stream. /// $($inherent_methods)* )* /// Acquires a reference to the underlying writer that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. 
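// A minimal usage sketch (assuming the `futures-io` and `gzip` features). The stream is
// only finished by `close`; dropping the encoder without closing it leaves the output
// truncated.
//
//     use async_compression::futures::write::GzipEncoder;
//     use futures::io::AsyncWriteExt as _;
//
//     async fn compress(data: &[u8]) -> std::io::Result<Vec<u8>> {
//         let mut encoder = GzipEncoder::new(Vec::new());
//         encoder.write_all(data).await?;
//         encoder.close().await?;
//         Ok(encoder.into_inner())
//     }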
pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying writer. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_close( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_close(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/futures/write/macros/mod.rs000064400000000000000000000000641046102023000214140ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.13/src/futures/write/mod.rs000064400000000000000000000005161046102023000201320ustar 00000000000000//! Types which operate over [`AsyncWrite`](futures_io::AsyncWrite) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; mod buf_write; mod buf_writer; use self::{ buf_write::AsyncBufWrite, buf_writer::BufWriter, generic::{Decoder, Encoder}, }; algos!(futures::write); async-compression-0.4.13/src/lib.rs000064400000000000000000000171521046102023000152760ustar 00000000000000//! Adaptors between compression crates and Rust's modern asynchronous IO types. //! //! # Feature Organization //! //! This crate is divided up along two axes, which can each be individually selected via Cargo //! features. //! //! All features are disabled by default, you should enable just the ones you need from the lists //! below. //! //! If you want to pull in everything there are three group features defined: //! //! Feature | Does //! ---------|------ //! `all` | Activates all implementations and algorithms. //! `all-implementations` | Activates all implementations, needs to be paired with a selection of algorithms //! `all-algorithms` | Activates all algorithms, needs to be paired with a selection of implementations //! //! ## IO implementation //! //! The first division is which underlying asynchronous IO trait will be wrapped, these are //! available as separate features that have corresponding top-level modules: //! //! Feature | Type //! 
---------|------ // TODO: Kill rustfmt on this section, `#![rustfmt::skip::attributes(cfg_attr)]` should do it, but // that's unstable #![allow(unexpected_cfgs)] #![cfg_attr( feature = "futures-io", doc = "[`futures-io`](crate::futures) | [`futures::io::AsyncBufRead`](futures_io::AsyncBufRead), [`futures::io::AsyncWrite`](futures_io::AsyncWrite)" )] #![cfg_attr( not(feature = "futures-io"), doc = "`futures-io` (*inactive*) | `futures::io::AsyncBufRead`, `futures::io::AsyncWrite`" )] #![cfg_attr( feature = "tokio", doc = "[`tokio`](crate::tokio) | [`tokio::io::AsyncBufRead`](::tokio::io::AsyncBufRead), [`tokio::io::AsyncWrite`](::tokio::io::AsyncWrite)" )] #![cfg_attr( not(feature = "tokio"), doc = "`tokio` (*inactive*) | `tokio::io::AsyncBufRead`, `tokio::io::AsyncWrite`" )] //! //! ## Compression algorithm //! //! The second division is which compression schemes to support, there are currently a few //! available choices, these determine which types will be available inside the above modules: //! //! Feature | Types //! ---------|------ #![cfg_attr( feature = "brotli", doc = "`brotli` | [`BrotliEncoder`](?search=BrotliEncoder), [`BrotliDecoder`](?search=BrotliDecoder)" )] #![cfg_attr( not(feature = "brotli"), doc = "`brotli` (*inactive*) | `BrotliEncoder`, `BrotliDecoder`" )] #![cfg_attr( feature = "bzip2", doc = "`bzip2` | [`BzEncoder`](?search=BzEncoder), [`BzDecoder`](?search=BzDecoder)" )] #![cfg_attr( not(feature = "bzip2"), doc = "`bzip2` (*inactive*) | `BzEncoder`, `BzDecoder`" )] #![cfg_attr( feature = "deflate", doc = "`deflate` | [`DeflateEncoder`](?search=DeflateEncoder), [`DeflateDecoder`](?search=DeflateDecoder)" )] #![cfg_attr( not(feature = "deflate"), doc = "`deflate` (*inactive*) | `DeflateEncoder`, `DeflateDecoder`" )] #![cfg_attr( feature = "gzip", doc = "`gzip` | [`GzipEncoder`](?search=GzipEncoder), [`GzipDecoder`](?search=GzipDecoder)" )] #![cfg_attr( not(feature = "gzip"), doc = "`gzip` (*inactive*) | `GzipEncoder`, `GzipDecoder`" )] #![cfg_attr( feature = "lzma", doc = "`lzma` | [`LzmaEncoder`](?search=LzmaEncoder), [`LzmaDecoder`](?search=LzmaDecoder)" )] #![cfg_attr( not(feature = "lzma"), doc = "`lzma` (*inactive*) | `LzmaEncoder`, `LzmaDecoder`" )] #![cfg_attr( feature = "xz", doc = "`xz` | [`XzEncoder`](?search=XzEncoder), [`XzDecoder`](?search=XzDecoder)" )] #![cfg_attr( not(feature = "xz"), doc = "`xz` (*inactive*) | `XzEncoder`, `XzDecoder`" )] #![cfg_attr( feature = "zlib", doc = "`zlib` | [`ZlibEncoder`](?search=ZlibEncoder), [`ZlibDecoder`](?search=ZlibDecoder)" )] #![cfg_attr( not(feature = "zlib"), doc = "`zlib` (*inactive*) | `ZlibEncoder`, `ZlibDecoder`" )] #![cfg_attr( feature = "zstd", doc = "`zstd` | [`ZstdEncoder`](?search=ZstdEncoder), [`ZstdDecoder`](?search=ZstdDecoder)" )] #![cfg_attr( not(feature = "zstd"), doc = "`zstd` (*inactive*) | `ZstdEncoder`, `ZstdDecoder`" )] #![cfg_attr( feature = "deflate64", doc = "`deflate64` | (encoder not implemented), [`Deflate64Decoder`](?search=Deflate64Decoder)" )] #![cfg_attr( not(feature = "deflate64"), doc = "`deflate64` (*inactive*) | (encoder not implemented), `Deflate64Decoder`" )] //! 
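// For example, a downstream crate wanting the Tokio wrappers for gzip would enable the
// corresponding features in its manifest (illustrative dependency line, adjust the
// version as needed):
//
//     async-compression = { version = "0.4", features = ["tokio", "gzip"] }
//
// which makes types such as `async_compression::tokio::bufread::GzipDecoder` available.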
#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] #![warn( missing_docs, rust_2018_idioms, missing_copy_implementations, missing_debug_implementations )] #![cfg_attr(not(all), allow(unused))] #[cfg(any(feature = "bzip2", feature = "flate2", feature = "xz2"))] use std::convert::TryInto; #[macro_use] mod macros; mod codec; #[cfg(feature = "futures-io")] pub mod futures; #[cfg(feature = "tokio")] pub mod tokio; mod unshared; mod util; #[cfg(feature = "brotli")] pub mod brotli; #[cfg(feature = "zstd")] pub mod zstd; /// Level of compression data should be compressed with. #[non_exhaustive] #[derive(Clone, Copy, Debug)] pub enum Level { /// Fastest quality of compression, usually produces bigger size. Fastest, /// Best quality of compression, usually produces the smallest size. Best, /// Default quality of compression defined by the selected compression algorithm. Default, /// Precise quality based on the underlying compression algorithms' qualities. The /// interpretation of this depends on the algorithm chosen and the specific implementation /// backing it. Qualities are implicitly clamped to the algorithm's maximum. Precise(i32), } impl Level { #[cfg(feature = "brotli")] fn into_brotli( self, mut params: ::brotli::enc::backward_references::BrotliEncoderParams, ) -> ::brotli::enc::backward_references::BrotliEncoderParams { match self { Self::Fastest => params.quality = 0, Self::Best => params.quality = 11, Self::Precise(quality) => params.quality = quality.clamp(0, 11), Self::Default => (), } params } #[cfg(feature = "bzip2")] fn into_bzip2(self) -> bzip2::Compression { let fastest = bzip2::Compression::fast(); let best = bzip2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => bzip2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => bzip2::Compression::default(), } } #[cfg(feature = "flate2")] fn into_flate2(self) -> flate2::Compression { let fastest = flate2::Compression::fast(); let best = flate2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => flate2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => flate2::Compression::default(), } } #[cfg(feature = "zstd")] fn into_zstd(self) -> i32 { let (fastest, best) = libzstd::compression_level_range().into_inner(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => quality.clamp(fastest, best), Self::Default => libzstd::DEFAULT_COMPRESSION_LEVEL, } } #[cfg(feature = "xz2")] fn into_xz2(self) -> u32 { match self { Self::Fastest => 0, Self::Best => 9, Self::Precise(quality) => quality.try_into().unwrap_or(0).min(9), Self::Default => 5, } } } async-compression-0.4.13/src/macros.rs000064400000000000000000000251271046102023000160150ustar 00000000000000macro_rules! algos { (@algo $algo:ident [$algo_s:expr] $decoder:ident $encoder:ident <$inner:ident> { @enc $($encoder_methods:tt)* } { @dec $($decoder_methods:tt)* } ) => { #[cfg(feature = $algo_s)] decoder! { #[doc = concat!("A ", $algo_s, " decoder, or decompressor")] #[cfg(feature = $algo_s)] $decoder<$inner> { $($decoder_methods)* } } #[cfg(feature = $algo_s)] encoder! 
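// The `crate::Level` passed to the generated `with_quality` constructors below selects
// the compression quality; `Level::Precise` values are clamped to the algorithm's
// supported range. An illustrative call:
//
//     // e.g. ZstdEncoder::with_quality(writer, async_compression::Level::Precise(19))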
{ #[doc = concat!("A ", $algo_s, " encoder, or compressor.")] #[cfg(feature = $algo_s)] $encoder<$inner> { pub fn new(inner: $inner) -> Self { Self::with_quality(inner, crate::Level::Default) } } { $($encoder_methods)* } } }; (@algo $algo:ident [$algo_s:expr] $decoder:ident $encoder:ident <$inner:ident> { @dec $($decoder_methods:tt)* } ) => { #[cfg(feature = $algo_s)] decoder! { #[doc = concat!("A ", $algo_s, " decoder, or decompressor")] #[cfg(feature = $algo_s)] $decoder<$inner> { $($decoder_methods)* } } }; ($($mod:ident)::+ <$inner:ident>) => { algos!(@algo brotli ["brotli"] BrotliDecoder BrotliEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { let params = brotli::enc::backward_references::BrotliEncoderParams::default(); Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::BrotliEncoder::new(level.into_brotli(params)), ), } } /// Creates a new encoder, using the specified compression level and parameters, which /// will read uncompressed data from the given stream and emit a compressed stream. pub fn with_quality_and_params( inner: $inner, level: crate::Level, params: crate::brotli::EncoderParams, ) -> Self { let params = level.into_brotli(params.as_brotli()); Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::BrotliEncoder::new(params), ), } } } { @dec } ); algos!(@algo bzip2 ["bzip2"] BzDecoder BzEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::BzEncoder::new(level.into_bzip2(), 0), ), } } } { @dec } ); algos!(@algo deflate ["deflate"] DeflateDecoder DeflateEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::DeflateEncoder::new(level.into_flate2()), ), } } } { @dec } ); algos!(@algo deflate ["deflate64"] Deflate64Decoder Deflate64Encoder <$inner> { @dec } ); algos!(@algo gzip ["gzip"] GzipDecoder GzipEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::GzipEncoder::new(level.into_flate2()), ), } } } { @dec } ); algos!(@algo zlib ["zlib"] ZlibDecoder ZlibEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZlibEncoder::new(level.into_flate2()), ), } } /// Returns the total number of input bytes which have been processed by this compression object. pub fn total_in(&self) -> u64 { self.inner.get_encoder_ref().get_ref().get_ref().total_in() } /// Returns the total number of output bytes which have been produced by this compression object. pub fn total_out(&self) -> u64 { self.inner.get_encoder_ref().get_ref().get_ref().total_out() } } { @dec } ); algos!(@algo zstd ["zstd"] ZstdDecoder ZstdEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZstdEncoder::new(level.into_zstd()), ), } } /// Creates a new encoder, using the specified compression level and parameters, which /// will read uncompressed data from the given stream and emit a compressed stream. /// /// # Panics /// /// Panics if this function is called with a [`CParameter::nb_workers()`] parameter and /// the `zstdmt` crate feature is _not_ enabled. 
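// An illustrative call passing zstd tuning parameters (when using `nb_workers` the
// `zstdmt` feature must be enabled, per the note above; the available parameters live
// in `crate::zstd::CParameter`):
//
//     // e.g. ZstdEncoder::with_quality_and_params(writer, Level::Default,
//     //                                           &[CParameter::nb_workers(4)])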
/// /// [`CParameter::nb_workers()`]: crate::zstd::CParameter // // TODO: remove panic note on next breaking release, along with `CParameter::nb_workers` // change pub fn with_quality_and_params(inner: $inner, level: crate::Level, params: &[crate::zstd::CParameter]) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZstdEncoder::new_with_params(level.into_zstd(), params), ), } } /// Creates a new encoder, using the specified compression level and pre-trained /// dictionary, which will read uncompressed data from the given stream and emit a /// compressed stream. /// /// Dictionaries provide better compression ratios for small files, but are required to /// be present during decompression. /// /// # Errors /// /// Returns error when `dictionary` is not valid. pub fn with_dict(inner: $inner, level: crate::Level, dictionary: &[u8]) -> ::std::io::Result { Ok(Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZstdEncoder::new_with_dict(level.into_zstd(), dictionary)?, ), }) } } { @dec /// Creates a new decoder, using the specified parameters, which will read compressed /// data from the given stream and emit a decompressed stream. pub fn with_params(inner: $inner, params: &[crate::zstd::DParameter]) -> Self { Self { inner: crate::$($mod::)+generic::Decoder::new( inner, crate::codec::ZstdDecoder::new_with_params(params), ), } } /// Creates a new decoder, using the specified compression level and pre-trained /// dictionary, which will read compressed data from the given stream and emit an /// uncompressed stream. /// /// Dictionaries provide better compression ratios for small files, but are required to /// be present during decompression. The dictionary used must be the same as the one /// used for compression. /// /// # Errors /// /// Returns error when `dictionary` is not valid. pub fn with_dict(inner: $inner, dictionary: &[u8]) -> ::std::io::Result { Ok(Self { inner: crate::$($mod::)+generic::Decoder::new( inner, crate::codec::ZstdDecoder::new_with_dict(dictionary)?, ), }) } } ); algos!(@algo xz ["xz"] XzDecoder XzEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::XzEncoder::new(level.into_xz2()), ), } } } { @dec /// Creates a new decoder with the specified limit of memory. /// /// # Errors /// /// An IO error may be returned during decoding if the specified limit is too small. pub fn with_mem_limit(read: $inner, memlimit: u64) -> Self { Self { inner: crate::$($mod::)+generic::Decoder::new( read, crate::codec::XzDecoder::with_memlimit(memlimit), ), } } } ); algos!(@algo lzma ["lzma"] LzmaDecoder LzmaEncoder <$inner> { @enc pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::LzmaEncoder::new(level.into_xz2()), ), } } } { @dec /// Creates a new decoder with the specified limit of memory. /// /// # Errors /// /// An IO error may be returned during decoding if the specified limit is too small. 
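// An illustrative call bounding decoder memory (the xz decoder above has the same
// constructor; 64 MiB is an arbitrary example limit):
//
//     // e.g. LzmaDecoder::with_mem_limit(reader, 64 * 1024 * 1024)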
pub fn with_mem_limit(read: $inner, memlimit: u64) -> Self { Self { inner: crate::$($mod::)+generic::Decoder::new( read, crate::codec::LzmaDecoder::with_memlimit(memlimit), ), } } } ); } } async-compression-0.4.13/src/tokio/bufread/generic/decoder.rs000064400000000000000000000076401046102023000223070ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Decode, util::PartialBuffer}; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; #[derive(Debug)] enum State { Decoding, Flushing, Done, Next, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] reader: R, decoder: D, state: State, multiple_members: bool, } } impl Decoder { pub fn new(reader: R, decoder: D) -> Self { Self { reader, decoder, state: State::Decoding, multiple_members: false, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } pub fn multiple_members(&mut self, enabled: bool) { self.multiple_members = enabled; } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Decoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { // Avoid attempting to reinitialise the decoder if the reader // has returned EOF. *this.multiple_members = false; State::Flushing } else { let mut input = PartialBuffer::new(input); let done = this.decoder.decode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); if done { State::Flushing } else { State::Decoding } } } State::Flushing => { if this.decoder.finish(output)? { if *this.multiple_members { this.decoder.reinit()?; State::Next } else { State::Done } } else { State::Flushing } } State::Done => State::Done, State::Next => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Done } else { State::Decoding } } }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Decoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } let mut output = PartialBuffer::new(buf.initialize_unfilled()); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => { let len = output.written().len(); buf.advance(len); Poll::Ready(Ok(())) } } } } async-compression-0.4.13/src/tokio/bufread/generic/encoder.rs000064400000000000000000000057471046102023000223270ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Encode, util::PartialBuffer}; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; #[derive(Debug)] enum State { Encoding, Flushing, Done, } pin_project! 
{ #[derive(Debug)] pub struct Encoder { #[pin] reader: R, encoder: E, state: State, } } impl Encoder { pub fn new(reader: R, encoder: E) -> Self { Self { reader, encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub(crate) fn get_encoder_ref(&self) -> &E { &self.encoder } pub fn into_inner(self) -> R { self.reader } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Encoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Flushing } else { let mut input = PartialBuffer::new(input); this.encoder.encode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); State::Encoding } } State::Flushing => { if this.encoder.finish(output)? { State::Done } else { State::Flushing } } State::Done => State::Done, }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Encoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } let mut output = PartialBuffer::new(buf.initialize_unfilled()); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => { let len = output.written().len(); buf.advance(len); Poll::Ready(Ok(())) } } } } async-compression-0.4.13/src/tokio/bufread/generic/mod.rs000064400000000000000000000001171046102023000214510ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.13/src/tokio/bufread/macros/decoder.rs000064400000000000000000000070501046102023000221520ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will /// read compressed data from an underlying stream and emit a stream of uncompressed data. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::bufread::Decoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncBufRead> $name<$inner> { /// Creates a new decoder which will read compressed data from the given stream and /// emit a uncompressed stream. pub fn new(read: $inner) -> $name<$inner> { $name { inner: crate::tokio::bufread::Decoder::new(read, crate::codec::$name::new()), } } $($($inherent_methods)*)* /// Configure multi-member/frame decoding, if enabled this will reset the decoder state /// when reaching the end of a compressed member/frame and expect either EOF or another /// compressed member/frame to follow it in the stream. pub fn multiple_members(&mut self, enabled: bool) { self.inner.multiple_members(enabled); } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. 
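// A minimal usage sketch (assuming the `tokio` and `gzip` features), including the
// `multiple_members` toggle documented above for streams of concatenated gzip members:
//
//     use async_compression::tokio::bufread::GzipDecoder;
//     use tokio::io::AsyncReadExt as _;
//
//     async fn decompress(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
//         let mut decoder = GzipDecoder::new(compressed);
//         decoder.multiple_members(true);
//         let mut out = Vec::new();
//         decoder.read_to_end(&mut out).await?;
//         Ok(out)
//     }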
pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/tokio/bufread/macros/encoder.rs000064400000000000000000000057761046102023000222010ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will /// read uncompressed data from an underlying stream and emit a stream of compressed data. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::bufread::Encoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncBufRead> $name<$inner> { $( /// Creates a new encoder which will read uncompressed data from the given stream /// and emit a compressed stream. /// $($inherent_methods)* )* /// Acquires a reference to the underlying reader that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying reader. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. 
pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/tokio/bufread/macros/mod.rs000064400000000000000000000000641046102023000213220ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.13/src/tokio/bufread/mod.rs000064400000000000000000000003661046102023000200430ustar 00000000000000//! Types which operate over [`AsyncBufRead`](::tokio::io::AsyncBufRead) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; pub(crate) use generic::{Decoder, Encoder}; algos!(tokio::bufread); async-compression-0.4.13/src/tokio/mod.rs000064400000000000000000000001501046102023000164220ustar 00000000000000//! Implementations for IO traits exported by [`tokio` v1.x](::tokio). pub mod bufread; pub mod write; async-compression-0.4.13/src/tokio/write/buf_write.rs000064400000000000000000000025361046102023000207750ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; pub(crate) trait AsyncBufWrite { /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if /// it is full. /// /// On success, returns `Poll::Ready(Ok(buf))`. /// /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and /// arranges for the current task context (`cx`) to receive a notification when the object /// becomes readable or is closed. fn poll_partial_flush_buf( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>; /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be /// written out to the underlying IO when possible. /// /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to /// function properly. This function does not perform any I/O, it simply informs this object /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't /// called before calling it. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. fn produce(self: Pin<&mut Self>, amt: usize); } async-compression-0.4.13/src/tokio/write/buf_writer.rs000064400000000000000000000144101046102023000211510ustar 00000000000000// Originally sourced from `futures_util::io::buf_writer`, needs to be redefined locally so that // the `AsyncBufWrite` impl can access its internals, and changed a bit to make it more efficient // with those methods. use super::AsyncBufWrite; use futures_core::ready; use pin_project_lite::pin_project; use std::{ cmp::min, fmt, io, pin::Pin, task::{Context, Poll}, }; use tokio::io::AsyncWrite; const DEFAULT_BUF_SIZE: usize = 8192; pin_project! { pub struct BufWriter { #[pin] inner: W, buf: Box<[u8]>, written: usize, buffered: usize, } } impl BufWriter { /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. 
pub fn new(inner: W) -> Self { Self::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter` with the specified buffer capacity. pub fn with_capacity(cap: usize, inner: W) -> Self { Self { inner, buf: vec![0; cap].into(), written: 0, buffered: 0, } } fn partial_flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered]) { Poll::Pending => { break; } Poll::Ready(Ok(0)) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Poll::Ready(Ok(n)) => *this.written += n, Poll::Ready(Err(e)) => { ret = Err(e); break; } } } if *this.written > 0 { this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } else if *this.buffered == 0 { Poll::Ready(ret) } else { ret?; Poll::Pending } } fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match ready!(this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered])) { Ok(0) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Ok(n) => *this.written += n, Err(e) => { ret = Err(e); break; } } } this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } /// Gets a reference to the underlying writer. pub fn get_ref(&self) -> &W { &self.inner } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_mut(&mut self) -> &mut W { &mut self.inner } /// Gets a pinned mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().inner } /// Consumes this `BufWriter`, returning the underlying writer. /// /// Note that any leftover data in the internal buffer is lost. 
pub fn into_inner(self) -> W { self.inner } } impl AsyncWrite for BufWriter { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { let this = self.as_mut().project(); if *this.buffered + buf.len() > this.buf.len() { ready!(self.as_mut().partial_flush_buf(cx))?; } let this = self.as_mut().project(); if buf.len() >= this.buf.len() { if *this.buffered == 0 { this.inner.poll_write(cx, buf) } else { // The only way that `partial_flush_buf` would have returned with // `this.buffered != 0` is if it were Pending, so our waker was already queued Poll::Pending } } else { let len = min(this.buf.len() - *this.buffered, buf.len()); this.buf[*this.buffered..*this.buffered + len].copy_from_slice(&buf[..len]); *this.buffered += len; Poll::Ready(Ok(len)) } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_shutdown(cx) } } impl AsyncBufWrite for BufWriter { fn poll_partial_flush_buf( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { ready!(self.as_mut().partial_flush_buf(cx))?; let this = self.project(); Poll::Ready(Ok(&mut this.buf[*this.buffered..])) } fn produce(self: Pin<&mut Self>, amt: usize) { *self.project().buffered += amt; } } impl fmt::Debug for BufWriter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BufWriter") .field("writer", &self.inner) .field( "buffer", &format_args!("{}/{}", self.buffered, self.buf.len()), ) .field("written", &self.written) .finish() } } async-compression-0.4.13/src/tokio/write/generic/decoder.rs000064400000000000000000000116451046102023000220310ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; use crate::{ codec::Decode, tokio::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::AsyncWrite; #[derive(Debug)] enum State { Decoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] writer: BufWriter, decoder: D, state: State, } } impl Decoder { pub fn new(writer: W, decoder: D) -> Self { Self { writer: BufWriter::new(writer), decoder, state: State::Decoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Decoding => { if this.decoder.decode(input, &mut output)? { State::Finishing } else { State::Decoding } } State::Finishing => { if this.decoder.finish(&mut output)? 
{ State::Done } else { State::Finishing } } State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Write after end of stream", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let (state, done) = match this.state { State::Decoding => { let done = this.decoder.flush(&mut output)?; (State::Decoding, done) } State::Finishing => { if this.decoder.finish(&mut output)? { (State::Done, false) } else { (State::Finishing, false) } } State::Done => (State::Done, true), }; *this.state = state; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Decoder { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let State::Decoding = self.as_mut().project().state { *self.as_mut().project().state = State::Finishing; } ready!(self.as_mut().do_poll_flush(cx))?; if let State::Done = self.as_mut().project().state { ready!(self.as_mut().project().writer.as_mut().poll_shutdown(cx))?; Poll::Ready(Ok(())) } else { Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Attempt to shutdown before finishing input", ))) } } } async-compression-0.4.13/src/tokio/write/generic/encoder.rs000064400000000000000000000115231046102023000220360ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; use crate::{ codec::Encode, tokio::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::AsyncWrite; #[derive(Debug)] enum State { Encoding, Finishing, Done, } pin_project! 
{ #[derive(Debug)] pub struct Encoder { #[pin] writer: BufWriter, encoder: E, state: State, } } impl Encoder { pub fn new(writer: W, encoder: E) -> Self { Self { writer: BufWriter::new(writer), encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub(crate) fn get_encoder_ref(&self) -> &E { &self.encoder } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding => { this.encoder.encode(input, &mut output)?; State::Encoding } State::Finishing | State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Write after shutdown", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let done = match this.state { State::Encoding => this.encoder.flush(&mut output)?, State::Finishing | State::Done => { return Poll::Ready(Err(io::Error::new( io::ErrorKind::Other, "Flush after shutdown", ))) } }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } fn do_poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding | State::Finishing => { if this.encoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => State::Done, }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Encoder { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_shutdown(cx))?; ready!(self.project().writer.as_mut().poll_shutdown(cx))?; Poll::Ready(Ok(())) } } async-compression-0.4.13/src/tokio/write/generic/mod.rs000064400000000000000000000001171046102023000211730ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.13/src/tokio/write/macros/decoder.rs000064400000000000000000000071371046102023000217020ustar 00000000000000macro_rules! 
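// Note: the `decoder` macro below expands, once per algorithm, to a public write-side
// wrapper (e.g. `GzipDecoder<W>`) around `crate::tokio::write::Decoder`, forwarding the
// `AsyncWrite` impl and the `get_ref`/`get_mut`/`get_pin_mut`/`into_inner` accessors.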
decoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will /// take in compressed data and write it uncompressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::write::Decoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncWrite> $name<$inner> { /// Creates a new decoder which will take in compressed data and write it uncompressed /// to the given stream. pub fn new(read: $inner) -> $name<$inner> { $name { inner: crate::tokio::write::Decoder::new(read, crate::codec::$name::new()), } } $($($inherent_methods)*)* /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_shutdown( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_shutdown(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/tokio/write/macros/encoder.rs000064400000000000000000000067101046102023000217100ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($inherent_methods:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will /// take in uncompressed data and write it compressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::write::Encoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncWrite> $name<$inner> { $( /// Creates a new encoder which will take in uncompressed data and write it /// compressed to the given stream. /// $($inherent_methods)* )* /// Acquires a reference to the underlying writer that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying writer that this encoder is /// wrapping. 
/// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying writer. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_shutdown( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_shutdown(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.13/src/tokio/write/macros/mod.rs000064400000000000000000000000641046102023000210440ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.13/src/tokio/write/mod.rs000064400000000000000000000005131046102023000175570ustar 00000000000000//! Types which operate over [`AsyncWrite`](tokio::io::AsyncWrite) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; mod buf_write; mod buf_writer; use self::{ buf_write::AsyncBufWrite, buf_writer::BufWriter, generic::{Decoder, Encoder}, }; algos!(tokio::write); async-compression-0.4.13/src/unshared.rs000064400000000000000000000022711046102023000163350ustar 00000000000000#![allow(dead_code)] // unused without any features use core::fmt::{self, Debug}; /// Wraps a type and only allows unique borrowing, the main use case is to wrap a `!Sync` type and /// implement `Sync` for it as this type blocks having multiple shared references to the inner /// value. /// /// # Safety /// /// We must be careful when accessing `inner`, there must be no way to create a shared reference to /// it from a shared reference to an `Unshared`, as that would allow creating shared references on /// multiple threads. /// /// As an example deriving or implementing `Clone` is impossible, two threads could attempt to /// clone a shared `Unshared` reference which would result in accessing the same inner value /// concurrently. 
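///
/// A minimal sketch of the intended usage (marked `ignore`: this is a crate-internal
/// helper, and the `Send`-but-not-`Sync` inner type is an assumed stand-in):
///
/// ```ignore
/// use std::cell::Cell;
///
/// fn assert_sync<T: Sync>(_: &T) {}
///
/// // `Cell<u32>` is `Send` but not `Sync`; the wrapper is `Sync` because the only
/// // access it offers goes through `&mut self`.
/// let mut value = Unshared::new(Cell::new(0));
/// value.get_mut().set(1);
/// assert_eq!(value.get_mut().get(), 1);
/// assert_sync(&value);
/// ```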
pub struct Unshared { inner: T, } impl Unshared { pub fn new(inner: T) -> Self { Unshared { inner } } pub fn get_mut(&mut self) -> &mut T { &mut self.inner } } /// Safety: See comments on main docs for `Unshared` unsafe impl Sync for Unshared {} impl Debug for Unshared { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct(core::any::type_name::()).finish() } } async-compression-0.4.13/src/util.rs000064400000000000000000000027561046102023000155110ustar 00000000000000pub fn _assert_send() {} pub fn _assert_sync() {} #[derive(Debug, Default)] pub struct PartialBuffer> { buffer: B, index: usize, } impl> PartialBuffer { pub(crate) fn new(buffer: B) -> Self { Self { buffer, index: 0 } } pub(crate) fn written(&self) -> &[u8] { &self.buffer.as_ref()[..self.index] } pub(crate) fn unwritten(&self) -> &[u8] { &self.buffer.as_ref()[self.index..] } pub(crate) fn advance(&mut self, amount: usize) { self.index += amount; } pub(crate) fn get_mut(&mut self) -> &mut B { &mut self.buffer } pub(crate) fn into_inner(self) -> B { self.buffer } } impl + AsMut<[u8]>> PartialBuffer { pub(crate) fn unwritten_mut(&mut self) -> &mut [u8] { &mut self.buffer.as_mut()[self.index..] } pub(crate) fn copy_unwritten_from>(&mut self, other: &mut PartialBuffer) { let len = std::cmp::min(self.unwritten().len(), other.unwritten().len()); self.unwritten_mut()[..len].copy_from_slice(&other.unwritten()[..len]); self.advance(len); other.advance(len); } } impl + Default> PartialBuffer { pub(crate) fn take(&mut self) -> Self { std::mem::replace(self, Self::new(B::default())) } } impl + AsMut<[u8]>> From for PartialBuffer { fn from(buffer: B) -> Self { Self::new(buffer) } } async-compression-0.4.13/src/zstd.rs000064400000000000000000000113721046102023000155120ustar 00000000000000//! This module contains zstd-specific types for async-compression. /// A compression parameter for zstd. This is a stable wrapper around zstd's own `CParameter` /// type, to abstract over different versions of the zstd library. /// /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more /// information on these parameters. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CParameter(libzstd::stream::raw::CParameter); impl CParameter { /// Window size in bytes (as a power of two) pub fn window_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::WindowLog(value)) } /// Size of the initial probe table in 4-byte entries (as a power of two) pub fn hash_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::HashLog(value)) } /// Size of the multi-probe table in 4-byte entries (as a power of two) pub fn chain_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::ChainLog(value)) } /// Number of search attempts (as a power of two) pub fn search_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::SearchLog(value)) } /// Minimum size of matches searched for pub fn min_match(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::MinMatch(value)) } /// Strategy-dependent length modifier pub fn target_length(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::TargetLength(value)) } /// Enable long-distance matching mode to look for and emit long-distance references. /// /// This increases the default window size. 
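    ///
    /// A hedged usage sketch (marked `ignore`: it needs the `tokio` and `zstd` crate
    /// features, and the in-memory writer is an assumed stand-in). Parameters are passed
    /// as a slice to the encoder's `with_quality_and_params` constructor:
    ///
    /// ```ignore
    /// use async_compression::{tokio::write::ZstdEncoder, zstd::CParameter, Level};
    ///
    /// let params = [
    ///     CParameter::enable_long_distance_matching(true),
    ///     CParameter::window_log(24),
    /// ];
    /// let encoder = ZstdEncoder::with_quality_and_params(Vec::new(), Level::Default, &params);
    /// ```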
pub fn enable_long_distance_matching(value: bool) -> Self { Self(libzstd::stream::raw::CParameter::EnableLongDistanceMatching(value)) } /// Size of the long-distance matching table (as a power of two) pub fn ldm_hash_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::LdmHashLog(value)) } /// Minimum size of long-distance matches searched for pub fn ldm_min_match(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::LdmMinMatch(value)) } /// Size of each bucket in the LDM hash table for collision resolution (as a power of two) pub fn ldm_bucket_size_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::LdmBucketSizeLog(value)) } /// Frequency of using the LDM hash table (as a power of two) pub fn ldm_hash_rate_log(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::LdmHashRateLog(value)) } /// Emit the size of the content (default: true). pub fn content_size_flag(value: bool) -> Self { Self(libzstd::stream::raw::CParameter::ContentSizeFlag(value)) } /// Emit a checksum (default: false). pub fn checksum_flag(value: bool) -> Self { Self(libzstd::stream::raw::CParameter::ChecksumFlag(value)) } /// Emit a dictionary ID when using a custom dictionary (default: true). pub fn dict_id_flag(value: bool) -> Self { Self(libzstd::stream::raw::CParameter::DictIdFlag(value)) } /// Number of threads to spawn. /// /// If set to 0, compression functions will block; if set to 1 or more, compression will /// run in background threads and `flush` pushes bytes through the compressor. /// /// # Panics /// /// This parameter requires feature `zstdmt` to be enabled, otherwise it will cause a panic /// when used in `ZstdEncoder::with_quality_and_params()` calls. // // TODO: make this a normal feature guarded fn on next breaking release #[cfg_attr(docsrs, doc(cfg(feature = "zstdmt")))] pub fn nb_workers(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::NbWorkers(value)) } /// Number of bytes given to each worker. /// /// If set to 0, zstd selects a job size based on compression parameters. pub fn job_size(value: u32) -> Self { Self(libzstd::stream::raw::CParameter::JobSize(value)) } pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::CParameter { self.0 } } /// A decompression parameter for zstd. This is a stable wrapper around zstd's own `DParameter` /// type, to abstract over different versions of the zstd library. /// /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more /// information on these parameters. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct DParameter(libzstd::stream::raw::DParameter); impl DParameter { /// Maximum window size in bytes (as a power of two) pub fn window_log_max(value: u32) -> Self { Self(libzstd::stream::raw::DParameter::WindowLogMax(value)) } pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::DParameter { self.0 } } async-compression-0.4.13/tests/artifacts/dictionary-rust000064400000000000000000001475111046102023000216030ustar 000000000000007¤0ì~{œ?°§ 0 Ã0 Ã0 ÃÀí·D­)­\µ½¼‘2;Þ[·ÆâÆiã‘n5„¤ªûh´¸ÙŽš¶pwHÄ¢™T6¢UÀÀ„#*,&ˆÃÁH(‡‚‘A„Q Çq@Œ„†JT¼*d! 
let input = InputStream::new(vec![]); } mod write { mod com00)] fn multiple_members() { assert_eq!(trailer, &[7, 8, 9, 10][..meout(1000)] fn trailer() { let decod result = read::poll_read(encoder, &mut output); est::timeout(1000)] fn empty() mod $impl { mod bufread { { limited::Limited::new(self, limit) _cases; pub mod algos; pub mod impls; pub use seure = "tokio")] mod tokio_ext; mod track_closed; m ready!(self.as_mut().do_poll_flush(cx))?; ?; decoder.shutdown().await?; Ok(decoder.ihutdown` #[tokio::main(flavor = "current_thread")th the following command in a terminal: //! //! `` &mut self.buffer.as_mut()[self.index..] }, index: usize, } impl> Partiaync>() {} #[derive(Debug, Default)] pub struct Pacompress.total_out(); let status = , } impl FlateDecoder { pub(crate) fn new(zli_args!("{}/{}", self.buffered, self.buf.len()), s.buffered += len; Poll::Ready(Ok(len) } } if *this.written > 0d]) { Poll::Pending =>ufWriter` with the specified buffersyncWrite; const DEFAULTe] mod utils; test_cases!(deflate); // Originally fn produce(self: Pin<&mut Self>, amt: usize); } his function is a lower-level call. It needs nderlying IO when possible. /// /// This ffull. /// /// On success, returns `Poll::R output.advance(len); Ok(bytes_left == 0) tten_mut())?; input.advance(status.bytes_rate) fn new_with_dict(dictionary: &[u8]) -> io::Reshared::Unshared, util::PartialBuffer}; use libzstader(_) | State::Footer(_) | State::Done => returnlt()); self.header = Header::default(); ter(footer) => { footer.copy_u.state { State::Header(parser) => } } fn process, O: AsRef let crc_sum = crc.sum().to_le_bytes(); le2::Crc; #[derive(Debug)] enum State { Header(, level), } } } impl Encode for LzmaElBuffer::new(&[][..]), output, Action::Finish)? { othing much to report. Status::Ok => u Ok(status) } } impl Encode for BzEnc /// equivalent to using the default value of 30. r over a wide range of /// circumstances. /// a factor of three, but always behaves reader { /// Creates a new stream prepared for coerwise we'll just keep writing // out sync AsRef<[u8]> + AsMut<[u8]>>, flush: FlushCw(level, zlib_header), flushed: true, d::io::Result; #[derive(Debug)] pub struct LzmaDebytes, so it must be invalid // paddin*count == 0 { *count = 4; Kind, Result}; #[derive(Debug)] pub struct XzDeco { pub(crate) fn new(level: Compression) -> Seut Self>, amt: usize) { self.project().0.c } Poll::Ready(Ok(0)) er, eof } = Pin::into_inner(self); (Pin::n inner: R, eof: bool, } impl TrackEa from the given stream /// and emState::Encoding => { if self.i true } State::Encoding => self.iitten().is_empty() { return Ok(()).state { State::Header(header) => ty(8); output.extend(&self.crc.sum().to_lx08, 0, 0, 0, 0, 0, level_byte, 0xff] } impl Gzip crc: Crc, state: State, } fn header(level://! Implementations for IO traits exported by [`fute /// when reaching the end of a comp// This structure implements an [`AsyncRead`](tokiate) use self::{decoder::XzDecoder, encoder::XzEnc match status { Status::Ok | Status pub fn new(format: Xz2FileFormat, level: u32) fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult } State::Crc(data) e(len) = memchr::memchr(0, input.unwritten()) { self.state = State::Comment(<_>::d if !self.header.flags.extra { t self.state { State::Fixed(data) ErrorKind::InvalidData, "Invalid gzip header")); { if input[0..3] != [0x1f, 0x8b, 0x08] { { ascii: bool, crc: bool, extra: bool impl AsyncRead for En #[derive(Debug)] pub struct Encoder Err? { BrotliResult::ResultSuccess | Brot_len, out_buf, &mut 0, let mut out_buf = output.unwritten_mut(); . 
state: Box Self { Self(LdmHashRateLog(valSelf { Self(EnableLongDistanceMatchinghing mode to look for and emit long-distance refer } /// Number of search attempts (as antation](https://facebook.github.io/zstd/zstd_manuty) => quality.try_into().unwrap_or(0).min(9), bzstd::compression_level_range().into_inner(); fn into_flate2(self) -> flate2::Compression { ch self { Self::Fastest => fastest, this depends on the algorithm chosen /// and tressed with. #[non_exhaustive] #[derive(Clone, Copkio")] pub mod tokio; mod unshared; mod util; #[ doc = "`zlib` | [`ZlibEncoder`](?search=ZlibEncarch=LzmaEncoder), [`LzmaDecoder`](?search=LzmaDec | [`GzipEncoder`](?search=GzipEncoder), [`GzipDec(?search=BzEncoder), [`BzDecoder`](?search=BzDecodcWrite`" )] //! //! ## Compression algorithm //! ion, `#![rustfmt::skip::attributes(cfg_attlection of algorithms //! `all-algorithms` | Acti which can each be individually selected via Cargo } } } //! Adaptors between compression c #[derive(Debug)] pub struct Decoder SeParams, encode::{ BrotliEncoderCompres algos!(@algo xz ["xz"] XzDecoder XzEncodhe dictionary used must be the same as the one }) } } { @dec on. /// /// # Errors ::new_with_params(level.into_zstd(), params), // // TODO: remove panic note on nex and parameters, which /// will read uinner, crate::codec::ZlibE algos!(@algo gzip ["gzip"] GzipDener, crate::codec::Deflate algos!(@algo bzip2 ["bzip2"] BzDecoder BzEncoBrotliEncoder::new(level.into_brotli(params)), t!("A ", $algo_s, " encoder, or compressor.")] algos!(futures::bufread); macro_rules! algos {rue) } } //! Types which operate over [`AsyncBushOk => unreachable!(), // THe Run a let prior_in = self.decompress.total_in(); ompress.total_out() ) } } impl BzDeco::Decode, util::PartialBuffer}; use std::fmt; use inner: crate::tokio::write::Decoder Err(std::io::Error::new( std::i) -> Result<()> { *self = Self::new(); der { stream: Stream, } impl Debug for Xz2Decnts an [`AsyncWrite`](tokio::io::AsyncWrite) inter } } #[macro_use] mod utils; test_cases!(bzip2); next = Pin::new(&mut self.inner).poll_flush(cx); tokio::io::AsyncWrite for InterleavePending { self::{decoder::BzDecoder, encoder::BzEncoder}; usmember/frame and expect either EOF or another ssed stream. pub fn new(read: R) -> $nyncRead) interface and will /// read c Pin::new(&mut this.reader).consume(i); let buffer = ready!(Pin::new(&mut this.reader).pwriter: &'a mut W, amt: u64, } impl Futopy_buf(reader: R, writer: &mut W) -> CopyBuin::new(&mut self.inner).poll_write(cx, buf) } match Pin::new(&mut self.inner).poll_close(cx) { Write for TrackClosed { fn poll_write(mut s} } pub fn is_closed(&self) -> bool { tdown before finishing input", ))) ck_on(decoder.read_to_end(&mut output)).is_err());t input = InputStream::from(vec![compressed]); [][..]); let input = InputStream::from(vec![c)] use utils::{algos::xz::sync, InputStream}; #[cten to and should /// be sent. As such, this fs buffer, so they should be /// written out tok(buf))`. 
/// /// If the buffer is full anrs { this.decoder.rein } State::Flushing = *this.multiple_members = fals) { self.multiple_members = enabled; }e> Decoder { pub fn new(reader: R, decod(Debug)] enum State { Decoding, Flushing, whether the end of the stream has been read f buffers are flushed fn flush(&mut self, outpu)] pub(crate) use self::zlib::{ZlibDecoder, ZlibEnpub(crate) use self::flate::{FlateDecoder, FlateEn "lzma")] mod lzma; #[cfg(feature = "xz")] mod xz;f_write::AsyncBufWrite, buf_writer::BufWriter, { /// Seek to the offset, in bytes, in the unt::Result { f.debug_struct("BufWriter") )?; self.project().inner.poll_close(cx) .*this.buffered + len].copy_from_slice(&buf[..len]t this = self.as_mut().project(); if buf.lthe underlying writer. /// /// Note that aReady(ret) } else if *this.buffered == 0 {.written += n, Poll::Ready(Err(e))ed]) { Poll::Pending =pacity. pub fn with_capacity(cap: usize, innertures_io::{AsyncSeek, AsyncWrite, SeekFrom}; use p // the `AsyncBufWrite` impl can access its intern } } mod decompity( input, ).as_ref(), |input| { te::Encoder::with_quality(input, Level::Best)), ress { use crate::utils::{ let output = read::to_vec(decoder); sync::compress(&[1, 2, 3, 4, 5, 6]), let trailer = read::to_vec(reader) let output = bufread::decompress(&mut reader);::timeout(1000)] fn zeros() { let compressed = sync::compress(&[]); ::$variant::{ sync, Level::Precise(0), am()), Level::Best, ::compress(bufread::from(&one_to_six_stream())); let mut output = []; let mut input: &[u8] = &[]; tokio::io::AsyncWrite> AsyncWriteTestExt for T {} ending; mod limited; pub use copy_buf::copy_buf; :{future::Future, io::Result, iter::FromIterator, erent tests use a different subset of functions mlf.as_mut().project().state { *self.asn compress(in_data: &[u8]) -> Result> { tures="tokio,zlib" //! "example" //! ``` use $ cargo run --example zlib_tokio_write --features = std::cmp::min(self.unwritten().len(), other.unwf.index] } pub(crate) fn unwritten(&self) } } } pub fn _assert_send() {} pub fw(&[][..]), output, FlushDsMut<[u8]>>, flush: FlushDecompress, )ress, Status}; #[derive(Debug)] pub struct FlateD .field( "buffer", ush_buf(cx))?; self.project().inner.poll_st len = min(this.buf.len() - *this.buffered, buf.lwere Pending, so our waker was already queued t W> { self.project().inner > *this.written += n, Err(e) => { :Ready(ret) } else { ret?; } Poll::Ready(Ok(n)) => *t: vec![0; cap].into(), written: 0, Self::with_capacity(DEFAULT_BUF_SIZE, inner) } efficient // with those methods. use super::Asyn ready!(self.project().writer.as_mut().poll_cle::Finishing | State::Done => panic!("Flush after ; use crate::{ codec::Encode, futures::wrbuf` method to /// function properly. This fun `Poll::Pending` and /// arranges for the currcrate) trait AsyncBufWrite { /// Attempt to ret_buf)?; let len = out_buf.as_slice().len( .get_mut() .run_on_buffers(inputcoder = Decoder::with_dictionary(dictionary)?; "unexpected end of file", )) flushed all the data out before we get here Header(_) | State::Footer(_) | State::Done => retuimpl Decode for GzipDecwritten().is_empty() { che self.state = State::Footer(vec![0; 8].into()) r, &mut PartialBuffer) -> Result, if bytes_read != input[4..8] { } fn check_footer(crc: &Crc, input: &[u8]) -> Re codec::{ gzip::header::{self, Header}, >, ) -> Result { // Flush on LZM inner: crate::codec::Xz2Encoder, } impl LzmaEn // Decompression went fine, nothing much to repoBuffer::new(&[][..]), output, Action::Flush)? { atch self.encode(input, output, Action::Run)? 
{ unwritten(), output.unwritten_mut(), action) u8]> + AsMut<[u8]>>, action: Action, )rd /// algorithm will expend before resorting presented with worst case, highly repetitive, inpu write!( f, "BzEncoduse] mod decoder; #[macro_use] mod encoder; use crd_len = output.written().len(); self.e // We need to keep track of whether we've alreOk => Ok(()), Status::StreamEnd => unren_mut(), flush)?; input.advance((self.coesult}; use flate2::{Compress, Compression, Flush inner: crate::codec::Xz2Decoder, } impl LzmaDe "stream padding was not a multiple of 4 bytelet Some(ref mut count) = self.skip_padding { padding = Some(4); self.inner.reinit() er: crate::codec::FlateEncoder::new(level, false),cBufRead for TrackEof { fn poll_fill_buf(sePoll::Ready(Ok(())) => { if buf.fi } Poll::Ready(Ok(buf)) text) -> Poll> { let (inner,yncRead + Unpin> futures::io::AsyncRead for TrackE Self { inner, eof: false } } pub fn proj } } impl<$inner: futurAsyncBufRead> $name<$inner> { $( from an underlying stream and emit a stream of comitten().is_empty() { return Ok(fal self.state = State::Footer(self.footer().into( } State::Done => true, output.copy_unwritten_from(&mut *fooState::Done => panic!("encode after complete"), self.state = State::Encoding; level).into()), } } fn footer(&mu 0x02 } else if level.level() <= CompreBuffer>), Encoding, Footer(Partiallt> { let limit = self.limit; o`](::futures_io). pub mod bufread; pub mod write<'_>, buf: &mut tokio::io::ReadBuf /// compressed member/frame to follow it in the sethods)*)* /// Configure multi-member inner: crate::tokio::bufread::Decoder $({ $($inherent_methods:tt)* })*) => { [], output.unwritten_mut(), Action::SyncFlush)?; out() as usize - previous_out); elf.stream.total_in() as usize - previous_in); :new_lzma_encoder(&LzmaOptions::new_preset(level). panic!("parser used after done"); f.header.flags.crc { input.advance(input.unwritten().len()); self.state = State::Crc(<_>::d:default()); continue; } State::Extra(data) self.state = State::Filename(<_>::AsRef<[u8]>>, ) -> Result> { Ok(Header { flags }) } } impl Parser { e, header: Header, } impl Header { fn parol, filename: bool, comment: bool, } #[de let mut output = PartialBuffer::new(buf); ume(len); State::Encoding t); this.encoder.encode(&m, } } impl Encodeelf.project().writer.as_mut().poll_shutdown(cx))?; } fn do_poll_shutdown(self: alBuffer::new(output); let done = matncoding => { this.encoder.encoive(Debug)] pub struct Encoder {of, "reached unexpected EOF", sMoreOutput => , BrotliResult::NeedsMoreOutput | Brot } status => status, let status = match BrotliDecompressStream( liState` is very large (over 2kb) which is why we'stream::raw::CParameter { self.0 :with_quality_and_params()` calls. // // Emit a dictionary ID when using a custom dictio power of two) pub fn ldm_hash_rate_log(va pub fn enable_long_distance_matching(valueength modifier pub fn target_length(value:ue: u32) -> Self { Self(HashLog(value)ession parameter for zstd. This is a stable wrappe Self::Default => libzstd::DEFAULT_COMPRESSION_LEVelf) -> i32 { let (fastest, best) = libzst let best = flate2::Compression::best(); ::Compression::new( quality g it. /// Qualities are implicitly clamped to ession algorithm. 
Default, /// Precise quarn( missing_docs, rust_2018_idioms, mi doc = "`zstd` | [`ZstdEncoder`](?search=ZstdEnce = "xz"), doc = "`xz` (*inactive*) | `XzEncodoc = "`deflate` | [`DeflateEncoder`](?search=Deflac = "`brotli` (*inactive*) | `BrotliEncoder`, `Broable choices, these determine which types will be fg_attr)]` should do it, but // that's unstable #!s that have corresponding top-level modules: //! tes and Rust's modern asynchronous IO types. //! utput = PartialBuffer::new(buf.initialize_unfilled> State::Done, State::Next => { if *this.multiple_members { f the reader // has return self.reader } pub fn mulf>) -> Pin<&mut R> { self.project().readert; use tokio::io::{AsyncBufRead, AsyncRead, ReadBuk(BrotliEncoderIsFinished(&self.state) == 1) } Ok(BrotliEncoderHasMoreOutput(&self.state) == Error::new(ErrorKind::Other, "brotli error")); out_buf, &mut output_len, iEncoderStateStruct, } impl BrotlirKind, Result}, }; use brotli::enc::{ backwar inner, crate::codec::Lzma crate::codec::ZstdDecoder::new_with_dict(diction /// be present during decompression. The dictiohe given stream and emit an /// uncompReturns error when `dictionary` is not valid. ::Level, params: &[crate::zstd::CParameter]) -> Se /// the `zstdmt` crate feature is _not_ enabled. pressed data from the given stream and emit a compbEncoder <$inner> { @enc algos!(@algo zlib ["zlib"] ZlibDecoder ZlibEnc; algos!(@algo deflate ["deflate"] Deflatcodec::BzEncoder::new(level.into_bzip2(), 0), lgos!(@algo brotli ["brotli"] BrotliDecoder Brotlithods)* } } }; ($($mod:ident)::+ (@algo $algo:ident [$algo_s:expr] $decoder:idend`](futures_io::AsyncBufRead) streams, both encodede(&mut PartialBuffer::new(&[][..]), output)?; ut. Status::StreamEnd => Ok(true), .advance((self.decompress.total_out() - prio::Error::new(std::io::ErrorKind::Other, e))?; s: Decompress, } impl fmt::Debug for Bz; } } mod decoder; mod encoder; mod header; p } fn poll_shutdown( x, buf) } fn poll_flush( o_inner() } } impl>, ) -> Result { let prev Status, Stream}; pub struct Xz2Decoder { str_assert_send::<$name, buf: &[u8], /// Consumes this epin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> /// Creates a new encoder which will take iecoder, encoder::ZlibEncoder}; macro_rules! encode: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { inner: T, pended: ssert_send::<$name futures_io::AsyncRead for $ndecoder state /// when reaching the enDecoder::new(read, crate::codec::$name::new()), macro_rules! decod this.amt += i as u64; Output> { let this = &mut *self; Unpin + ?Sized, { CopyBuf { reader, ncWrite}; pub fn copy_buf(reader: R, writerure = "tokio")] impl, cx: &mut Context, buf: uct TrackClosed { inner: W, closed: boo) } else { Poll::Ready(Err(Erresult<()>> { if let State::Decoding = self impl AsyncWrite for Deut)? { (State::Done, falsetput)? { State::Finishing , D> { pub fn new(writer: W, decoder: D) -> Seembers(true); let mut output = Vec::new(); 1][..]); } #[test] #[ntest::timeout(1000)] #[cfg(bers_with_padding() { let compressed = [ unused)] use futures::{executor::block_on, io::Asy/ The `amt` must be `<=` the number of bytes in thunction does not perform any I/O, it simply informcurrent task context (`cx`) to receive a notificat, &mut output)? 
{ Poll::Pending if outbers { this.decoder.re, output)?; let len = inpupty() { // Avoid attemptin>, output: &mut PartialBuffer<&mut [u8]>, Decoding, multiple_members: false, y; use futures_io::{AsyncBufRead, AsyncRead}; use u8]> + AsMut<[u8]>>) -> Result; are flushed and the end of the stream is written ZstdDecoder, ZstdEncoder}; pub trait Encode { pub(crate) use self::brotli::{BrotliDecoder, Brotlate")] mod deflate; #[cfg(feature = "flate2")] mods; mod generic; mod buf_write; mod buf_writer; u ) .field("written", &self.writte&mut [u8]>> { ready!(self.as_mut().partialResult<()>> { ready!(self.as_mut().flush_buld have returned with // `this.buf buf.len() >= this.buf.len() { if *th &mut self.inner } /// Gets a pin/ Gets a reference to the underlying writer. pfered])) { Ok(0) => { *this.buffered { match ready!(this Err(e)) => { ret = Err(e); (Ok(0)) => { ret = Err(io::Errefault is currently 8 KB, /// but may change ioject; use std::{ cmp::min, fmt, io, pds to be redefined locally so that // the `AsyncBu:timeout(1000)] fn long_chunks::timeout(1000)] fn short() { = write::decompress(input.as_ref(), 65_536); one_to_six, InputStream, Box::pin(write::Encoder::with_quality( coder::with_quality(input, Level::Precise(0))) |input| Box::pin(write::Encoder::new(input)), sed = write::to_vec( pressed = write::compress(input.as_ref(), 20); let input = InputStream::new(vec![ e::compress(one_to_six_stream().as_ref(), 2); mpress(one_to_six_stream().as_ref(), 65_536); t(1000)] fn empty_chunk() { mpress { use crate::utils::{ utput, &[1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1][..]);m(compressed.chunks(1024)); = (0..65_536).map(|_| rand::random()).collect(); :timeout(1000)] fn long() { compressed.extend_from_slice(&[7, 8, 9, 10]);(1000)] fn short_chunks() { assert_eq!(output, &[0; 10][..]); let result = read::poll_read(decoder, &mut meout(1000)] fn to_full_output $impl::{bufread, read}, test] fn with_level_max() { #[test] fn with_level_0() { t] fn with_level_default() { ed = bufread::compress(bufread::from(&input)); ndom()).collect(), ]); let input = InputStream::new(vec![vec![]]); assert!(matches!(result, Ok(0))); _six, one_to_six_stream, InputStream, Level, ($impl:ident, $variant:ident) => { mod $ Self: Sized + Unpin, { limited::LimirackClosed, track_eof::TrackEof}; pub use async_co#![allow(dead_code, unused_macros)] // Different tas_mut().project().state = State::Finishing; if input.written().is_empty() => Poll::Pending, State::Done => (State::Done, true), put)? { (State::Done, fals else { State::Decoding pl DecodesyncWrite; use pin_project_lite::pin_project; #[dr::new(Vec::new()); decoder.write_all(in_data) println!("{:?}", String::from_utf8(de_compres std::mem::replace(self, Self::new(B::defauitten_from>(&mut self, other: &mut Buffer { pub(crate) fn new(buffer: B) -> Seruct PartialBuffer> { buffer: B); self.decode( &mut Pdecode(input, output, FlushDecompress::None)? { Ok(status) } } impl Decode for FlateDec self.decompress .decompub enum Xz2FileFormat { Xz, Lzma, } pub(cize) { *self.project().buffered += amt; l::Ready(Ok(&mut this.buf[*this.buffered..])) [*this.buffered..*this.buffered + len].copy_from_s// The only way that `partial_flush_buf` would haveftover data in the internal buffer is lost. 
p&W { &self.inner } /// Gets a mutthis.buffered, 0); *this.buffered -= *this } } fn flush_buf(self: ten > 0 { this.buf.copy_within(*this.w .as_mut() .poll_w self.project(); let mut ret = Ok(()); a new `BufWriter` with a default buffer capacity.nternals, and changed a bit to make it more effici_mut().do_poll_close(cx))?; ready!(self.prpty() { return Poll::Ready(Ok(0)); roduced); if let State::Done = this.s } } } fn do_poll_close(self: } } fn do_poll_flush(self: | State::Done => panic!("Write after close"), ject(); loop { let output = r, E> { pub fn new(writer: W, encoder: E) -> Sell_flush_buf`. fn produce(self:ush_buf` isn't /// called before calling it. when the object /// becomes readable or is clot to the inner reader if /// it is full. / mut out_buf = zstd_safe::OutBuffer::around(output_written); Ok(status.remaining == 0) }ecoder: Unshared::new(Decoder::new().unwrap()), ErrorKind::UnexpectedEof, "unexp> { // Because of the footer we have to haDecoding => { let prior = outprocess(input, output, |this, input, output| { if input.unwritten().is_empty() || output self.crc.update(&output.written()[prior..]); r: impl Fn(&mut Self, &mut PartialBuffer, &mut "amount of bytes read does not match", t bytes_read = crc.amount().to_le_bytes(); if Decode, }, util::PartialBuffer, }; us fn flush( &mut self, _output:r::new(crate::codec::Xz2FileFormat::Lzma, level), Error::new(ErrorKind::Other, "out of memory")), => unreachable!(), // The Run actionest, but otherwise everything went normally. atus::FlushOk => Ok(false), // The Rus been met, meaning that no more data can be inputompression went ok. Status::RunOk => O let prior_out = self.compress.total_out(); to 250 inclusive. 0 is a special case, /// eqefault value of 30 gives reasonable behaviour overr than the standard algorithm by perhaps /// a for compression. /// /// The `work_factor FlushCompress::Finish, )? { self.flushed = true; Ok(!output.sync blocks continuously and probably never comple>, ) -> Result<()> { self.flushed = faelf.compress.total_in() - prior_in) as usize); ate) fn new(level: Compression, zlib_header: bool)coder::new(), } } } impl Decode for Lnon-padding then it cannot start with null bytes, while input.unwritten().first() == Some(&0) { on, } impl XzDecoder { pub fn new() -> Se>, ) -> Result { self.inner.fini inner: crate::codec::FlateEncoder, } impl Deflatll_buf(cx) { Poll::Ready(Ok(buf)) => {.filled().len() == len && buf.remaining() > 0 { f buf.is_empty() { *eof = trueContext) -> Poll> { let (inneof); match inner.poll_read(cx, buf) { g_attr(not(feature = "all-implementations"), allow use futures_io::AsyncBufRead; (self) -> $inner { self.inner.into /// may otherwise confuse this encoder. . pub fn get_ref(&self) -> &$inner $($inherent_methods)* )* l /// read uncompressed data from an u } State::Done => {} &mut *header); if header.unwr } else { false ut *footer); if footer.unwritFooter(_) | State::Done => panic!("encode after coelf.crc.update(&input.written()[prior_written..]);c::with_capacity(8); output.extend(&self.ression) -> Vec { let level_byte = if leve crate::codec::FlateEncoder, crc: Crc, stacx, &buf[..std::cmp::min(limit, buf.len())]) } fn new(io: Io, limit: usize) -> Limited { + Send>>>>(); _assert_sync::<$nam } impl tokios this decoder returning the underlying reader. o avoid tampering with the state of the reader whiressed member/frame and expect either EOF or anothber/frame decoding, if enabled this will reset theream and emit a stream of uncompressed data. 
d encoder; pub(crate) use self::{decoder::XzDecod&[], output.unwritten_mut(), Action::Finish)?; => panic!("Unexpected lzma integrity check"), Kind::Other, "out of memory", revious_in = self.stream.total_in() as usize; ew_easy_encoder(level, Check::Crc64).unwrap(), esult; use xz2::stream::{Action, Check, LzmaOptionen_from(input); if data.unwri input.advance(len + 1); } State::Comment(data) => { Some(len) = memchr::memchr(0, input.unwritten()) ra(vec![0; usize::from(len)].into()); let len = u16::from_le_bytes(data.ta self.header = Header::parse(&data.take().into_i flag = input[3]; let flags = Flags { Buffer>), Filename(Vec), Commeefault)] pub(super) struct Header { flags: Flax: &mut Context<'_>, buf: &mut [u8], ) }; if let State::Done = *this. let mut input = PartialBuffer::new(input) pub fn into_inner(self) -> R { self.r[pin] reader: R, encoder: E, ::Ready(Ok(())) } fn poll_shutdown(mut seontext<'_>, buf: &[u8]) -> PocWrite for Encoder { fn poll_write(self: State::Encoding | State::Finishing => e::Encoding => this.encoder.flush(&mut output)?, State::Done => panic!("Write after shutdown"), xt<'_>, input: &mut PartialBuffer<&[u8]>, ate { Encoding, Finishing, Done, } pi"decompress", &"") .finish()? { BrotliResult::ResultSuccess => Ok(self.decode(&mut PartialBuffer::new(&[][..]), outpself) -> Result<()> { self.state = Box::neadvance(input_len); output.advance(output_ -> Result { let in_buf = inBox::new(BrotliState::new( Standar// If set to 0, zstd selects a job size based on c feature guarded fn on next breaking release } /// Number of threads to spawn. /// Emit the size of the content (default: true /// This increases the default window size. /// Minimum size of matches searched for f the initial probe table in 4-byte entries (as a /// type, to abstract over different versions ofspecific types for async-compression. pub mod zstd .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ature = "bzip2")] fn into_bzip2(self) -> bzip2ise(quality) => params.quality = quality.clamp(0, lity of compression, usually produces bigger size. "bzip2", feature = "flate2", feature = "xz2"))] ur`" )] //! #![cfg_attr(docsrs, feature(doc_auto_c feature = "lzma", doc = "`lzma` | [`LzmaEncure = "gzip"), doc = "`gzip` (*inactive*) | `G=BrotliEncoder), [`BrotliDecoder`](?search=BrotliDsupport, there are currently a few //! available c doc = "[`futures-io`](crate::futures) | [`futurntation //! //! The first division is which underl: //! //! Feature | Does //! ---------|------ //nfilled()); match self.do_poll_read(cx, &m&mut Context<'_>, buf: &mut ReadBuf<'_>, State::Next this.reader.as_mut().consume(len);returned EOF. 
*this.multipol) { self.multiple_members = enabled; et_mut(&mut self) -> &mut R { &mut self.re decoder: D, state: State, multipesult { f.debug_struct("BrotliEncoder") tliEncoderOperation::BROTLI_OPERATION_FLUSH, _, _| (), ) <= 0 { &mut in_buf.len(), in_buf, StandardAlloc::default()); state.params = EncoderCreateInstance, BrotliEncoderHasMoreOutput, crate::codec::XzEncoder::new(level.into_xz2()), ?, ), }) /// Dictionaries provide better compression ratw_with_dict(level.into_zstd(), dictionary)?, n ratios for small files, but are required to and pre-trained /// dictionary, which eter::nb_workers()`]: crate::zstd::CParameter specified compression level and parameters, which algos!(@algo zstd ["zstd"] ZstdDecoder ZstdEncDecoder GzipEncoder <$inner> { @enc h_quality(inner: $inner, level: crate::Level) -> S{ @dec } ); algos!(@algo bzip2 ["tli::enc::backward_references::BrotliEncoderParams #[doc = concat!("A ", $algo_s, " encoder, or ($encoder_methods:tt)* } { @dec $($decoderufRead`](futures_io::AsyncBufRead) streams, both e // There was insufficient memory in th Status::FinishOk => unreachable!(), self.decompress = Decompress::new(false); .map_err(|e| std::io::Error::new(std::i "BzDecoder {{total_in: {}, total_out: {}}}", ; use bzip2::{Decompress, Status}; pub struct Bz use crate::util::{_assert_send, _asserncWrite> tokio::io::AsyncWrite for $name &mut W { self.inner.get_mut() ) } /// Acquires a mutabl W) -> $name<$inner> { $name { te::pin_project! { $(#[$attr])* .advance(self.stream.total_out() as usize - previo "More memory needed", )), tatus::GetCheck => panic!("Unexpected lzma integri stream: Stream::new_auto_decoder(u64::max_ve std::fmt::{Debug, Formatter, Result as FmtResultin; use tokio::io::AsyncWrite; &mut Self>, cx: &mut std::task::Cote of this encoder, so care should be taken avoid tampering with the state of the writer whic the underlying writer that this encoder is wrappi write it /// compressed to the gi /// take in uncompressed data and write it comprl_shutdown(cx); if next.is_ready() { cx.waker().wake_by_ref(); ded = false; } next fRead + Sync>>>>(); } }; }rt_sync}; use core::pin::Pin; ote that this may discard internal state of this dpin::Pin<&mut Self>) -> std::pin::Pin<&mut R> { /// may otherwise confuse this decoder. $($($inherent_methods)*)* eam and /// emit a uncompressed streamecoder { ($(#[$attr:meta])* $name:ident<$inner>, ) -> Result<()> { self.inner.encodee std::io::Result; use flate2::Compression; #[deeady!(Pin::new(&mut this.writer).poll_flush(cx))?; W> Future for CopyBuf<'_, R, W> where R: Asyn())) => { self.closed = true; cWrite + Unpin> tokio::io::AsyncWrite for TrackClout self.inner).poll_write_vectored(cx, bufs) }Result> { assert!(!self.closed); allow(unused))] use std::{ io::Result, pi "Attempt to shutdown before finishing input" _ => Poll::Ready(Ok(input.written().len())), match self.do_poll_write(cx, &mut input)? }; *this.state = state; new(output); let (state, done) = matc State::Done => panic!("Write after e if this.decoder.decode(input, &mut ouf>) -> Pin<&mut W> { self.project().writer(&self) -> &W { self.writer.get_ref() sult}; use crate::{ codec::Decode, tokio:es-io")] fn bufread_multiple_members_with_invalid_, vec![0, 0, 0, 0], ] .join(&[][..riter, generic::{Decoder, Encoder}, }; algos!/! 
Types which operate over [`AsyncWrite`](tokio::uffer that `amt` bytes have been written to its bull and cannot be flushed, the method returns `PollBufRead, D: Decode> AsyncRead for Decoder { ill_buf(cx))?; if input.is_empone => State::Done, State::Next = let done = this.decoder.decode(&mut input,tempting to reinitialise the decoder if the reader fn multiple_members(&mut self, enabled: bool) { _members: bool, } } impl fmt::Debug for BufWriter { fn poll_partial_flush_buf( ffered != 0` is if it were Pending, so our waker w ) -> Poll> { let this that any leftover data in the internal buffer is /// It is inadvisable to directly write to th Ok(n) => *this.written += n, this.written; *this.written = 0; io::ErrorKind::WriteZero, ady(Ok(0)) => { ret = Err(io::ange in the future. pub fn new(inner: W) -> SeFAULT_BUF_SIZE: usize = 8192; pin_project! { ginally sourced from `futures_util::io::buf_writer assert_eq!(output, bytes); let compressed = sync::compress(&bytes); put = InputStream::from(compressed.chunks(2)); let output = write::decom assert_eq!(output, &[][..]); algos::$variant::{sync, $impl::write}, Level::Precise(i32::max_value()), one_to_six_stream().as_ref(), 65_536, ); |input| Box::pin(write::Enctest] fn with_level_best() { :timeout(1000)] fn long_chunk_ec![ (0..32_768).map(|)] fn short_chunk_output() { assert_eq!(output, input.bytes()); ssed = write::compress(input.as_ref(), 65_536); let mut decoder = bufread::Decoder::new(bufrea sync::compress(&[6, 5, 4, 3, 2, 1]), { let bytes: Vec = (0.> = (0..65_536).map(|_| rand::random()).collect();&input); let output = bufr= bufread::decompress(bufread::from(&input)); pressed = sync::compress(&[1, 2, 3, 4, 5, 6]); let compressed = sync::compress(&[0; 10]); et input = InputStream::new(vec![compressed]); }, one_to_six, one_t bufread::from(&one_to_six_stream()), let encoder = bufread::Encoder::with_quality( let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); let compressed = bufread::compress(bufread::fromimeout(1000)] fn empty_chunk() #[test] #[ntest::timeout(er, encoder::FlateEncoder}; macro_rules! io_test_c interleave_pending::InterleavePending w; pub fn one_to_six_stream() -> InputStream { d input_stream; #[cfg(feature = "tokio")] mod tokif let State::Done = self.as_mut().project().state Poll::Pending if input.written().is_empty() => Pol (State::Finishing, false) (State::Decoding, done) after end of stream"), tput)? 
{ State::Done self.writer.into_inner() } fn do_poller { pub fn new(writer: W, decoder: D) -Debug)] pub struct Decoder { ?; Ok(encoder.into_inner()) } async fn decompmpress(&compressed_data).await?; assert_eq!(delen]); self.advance(len); other.a-> B { self.buffer } } impl &[u8] { &self.buffer.as_ref()[self.indexssert_sync() {} #[derive(Debug, Default)press::None, )?; if output> Ok(true), Status::BufError => Err(Er self.decompress.reset(self.zlib_header); decompress: Decompress::new(zlib_header), d, Result}; use flate2::{Decompress, FlushDecomprer") .field("writer", &self.inner) AsyncBufWrite for BufWriter { Ok(len)) } } fn poll_flush(mut seuffered + buf.len() > this.buf.len() { er } /// Consumes this `BufWriter`, returet_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { "failed to write the buffered data", this .inner .as_mu:Ready(ret) } else if *this.buffered == 0 e(cx, &this.buf[*this.written..*this.buffered]) ; while *this.written < *this.buffered { <[u8]>, written: usize, buffered: core::ready; use pin_project_lite::pin_project; usll::Ready(Ok(())) } fn poll_close(mut selalBuffer::new(buf); match self.do_poll_wr { if this.encoder.finish(&mut }; let produced = outp) -> Poll> { let mut this = sel this.writer.as_mut().produce(produced); let mut output = PartialBuffer::new(output); ter: BufWriter::new(writer), encoder, res::write::{AsyncBufWrite, BufWriter}, util:: some amount of its buffer, returned from `poll_fl ) -> Poll>; /// Tellsternal buffer to write to, flushing data out to thbytes_left = self.decoder.get_mut().flush(&mut out output.advance(status.bytes_written); lf.decoder.get_mut().reinit()?; Ok(()) truct ZstdDecoder { decoder: Unshared Ok(()), () - prior_out) as usize); Ok(status) esult { let prior_in = self.compree fallback. You should set /// this parameter o matter how bad the /// input. /// //, f: &mut fmt::Formatter<'_>) -> fmt::Result { new(ErrorKind::Other, "unexpected BufError")), ().len() == old_len { break; FlushCompress::Sync, )?; h self.encode(input, output, FlushCompress::None)?ess .compress(input.unwritten(), outpu{ compress: Compress, flushed: bool, } imec::Encode, util::PartialBuffer}; use std::io::{Ere for LzmaDecoder { fn reinit(&mut self) -> Reool> { if self.skip_padding.is_some() { ErrorKind::InvalidData, *count -= 1; if *count == 0 { codec::Xz2Decoder, skip_padding: Option, }sult { self.inner.flush(output) t().0.consume(amt) } } use crate::{codec::Enco other => other, } } fn const!(!*eof); let len = buf.filled().len(); fn consume(self: Pin<&mut Self>, amt: usize) { -io")] impl inner, eof) = self.project(); assert!(!*eol}, }; pub struct TrackEof { inner: R, const _: () = { fn _assert() { internal state of this encoder, so care should be /// Acquires a pinned mutable refereder that this encoder is /// wrapping. 
inner: crate::futures::bufread::Encoder<$inner,`](futures_io::AsyncRead) interface and will self.state = State::Done; output.copy_unwritten_from(&mut *header); if done { return Ok(true); ut)?, State::Footer(footer) => { ; } } } fn flush( tate::Encoding => { let prior_sult<()> { loop { match &mut sGzipEncoder { pub(crate) fn new(level: Compres.poll_shutdown(cx) } } mod decoder; mod encode Pin::new(&mut self.io).poll_write(cx, &buf[.or Limited { fn poll_write( mut sel _assert_send::<$name $({ $($inherent_methods:tt)* })*) => let status = self .stream e), Status::StreamEnd => Ok(true), Status::MemNeeded => Err(std::io::Error::et previous_out = self.stream.total_out() as usize} } impl Encode for Xz2Encoder { fn encode( ncoder { fn fmt(&self, f: &mut Formatter<'_>) urn Ok(Some(std::mem::take(&mut self.header))); data.extend_from_slice(input.unwritten()); if !self.header.flags.comment { extend_from_slice(&input.unwritten()[..len]); se { return Ok(None); data.copy_unwritten_from(input); self.state = State::ExtraLen(<_>::default()); ascii: (flag & 0b0000_0001) != 0, Buffer<[u8; 2]>), Done, } impl Default for St _ => Poll::Ready(Ok(output.written().len())ll> { if buf.is_empty() { e::Done, }; if let State:op { *this.state = match this.state { (&self) -> &R { &self.reader } puEncoder { pub fn new(reader: R, encoder::Encode, util::PartialBuffer}; use futures_core::rult<()>> { ready!(self.as_mut().do_poll_fl} impl AsyncWrite for Eoject(); loop { let output = tate::Finishing | State::Done => panic!("Flush aft Pin<&mut Self>, cx: &mut Context<'_>) -> Poll &W { self.writer.get_ #[pin] writer: BufWriter, task::{Context, Poll}, }; use std::io::Result; BrotliResult::ResultFailure => unreachable!(), | BrotliResult::NeedsMoreInput => Ok(false), t { match self.decode(input, output) &mut self.state, ) { Brotliuf = input.unwritten(); let mut out_buf = tandardAlloc, BrotliDecompressStream, BrotliResult} pub(crate) fn as_zstd(&self) -> libzstd otherwise it will cause a panic /// when ). pub fn checksum_flag(value: bool) -> Se /// Size of each bucket in the LDM hash table fohe long-distance matching table (as a power of two Self(MinMatch(value)) } /// St of two) pub fn hash_log(value: u32) -> Se)] pub struct CParameter(libzstd::stream::raw: } } #[cfg(feature = "zstd")] /// This module co Self::Best => best, Self::Precise(qualf::Default => bzip2::Compression::default(), let fastest = bzip2::Compression::fast(); match self { Self::Fastest => params.qebug)] pub enum Level { /// Fastest quality ofnto; #[macro_use] mod macros; mod codec; #[cfg(f"`zlib` (*inactive*) | `ZlibEncoder`, `ZlibDecoderXzEncoder), [`XzDecoder`](?search=XzDecoder)" )] #er`, `DeflateDecoder`" )] #![cfg_attr( featurep2", doc = "`bzip2` | [`BzEncoder`](?search=Bztokio` (*inactive*) | `tokio::io::AsyncBufRead`, `)" )] #![cfg_attr( not(feature = "futures-io")tations, needs to be paired with a selection of al` | Activates all implementations and algorithmsoll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); poll_fill_buf(cx))?; if input. if this.decoder.finish(output)? 
{ State::Flushing } else { let input = ready!(this.reader.as_mut().poll_fidecoder, state: State::Decoding, #[derive(Debug)] enum State { Decoding, inish() } } use core::{ pin::Pin, taskialBuffer::new(&[][..]), output, return Err(Error::new(ErrorKind::Other, "br let mut input_len = 0; let mut output_len &mut self, input: &mut PartialBuffer { Ok(Self { codec::ZstdEncoder::new_with_dict(level.into_zstd( dictionary: &[u8]) -> ::std::io::Result { /// Creates a new encoder, using the specified if this function is called with a [`CParameter::n uncompressed data from the given stream and emit pub fn with_quality(inner: $inner, level: crate(level.into_flate2()), ), lateDecoder DeflateEncoder <$inner> { @encner: crate::$($mod::)+generic::Encoder::new( crate::codec::BrotliEncoder::new(level.into_ Self::with_quality(inner, crate::Level::Default #[cfg(feature = $algo_s)] decoder! {nwritten().is_empty()) } fn finish( p { let old_len = output.written().len // The Finish action on compression went ok. ) } } impl Decode for BzDecoder { fn rein input.advance((self.decompress.total_in() - p } } fn decode( &mut self, ; pub(crate) use self::{decoder::GzipDecoder, enc self.project().inner.poll_flush(cx) self.project().inner.poll_write(cx, buf) d. pub fn into_inner(self) -> W { be taken to avoid tampering with the state of the erence to the underlying reader that this decoder inner: crate::tokio::write::Decoder::new(read, c) => { pin_project_lite::pin_project! { .process(&[], output.unwritten_mut(), Actiof<[u8]> + AsMut<[u8]>>, ) -> Result { ut); match status { Status::Oer>, output: &mut Partial::{codec::Decode, util::PartialBuffer}; use std::rt() { use crate::util::{_assert_sask::Context<'_>, buf: &[u8], his encoder returning the underlying writer. self.project().inner.get_pin_mut() pub fn get_mut(&mut self) -> &mut $inner { impl<$inner: tokio::io::AsyncWrite> $name< compressed to an underlying stream. # Poll::Pending } } } #[macro_uset<()>> { if self.pended { let mut self: Pin<&mut Self>, cx: &mut Context) -> Self { Self { inner, ) -> std::task::Poll fn poll_read( self: s pub fn get_pin_mut(self: std::pin::Pin<&mut Selapping. /// /// Note that self.inner.multiple_members(enabled); :codec::$name>, } } iug)] pub struct $name<$inner> { async-compression-0.4.13/tests/artifacts/dictionary-rust-other000064400000000000000000003340001046102023000227110ustar 000000000000007¤0ì° r*B°70 Ã0 Ã0 ÃÀÙ·m˜ÝRm‘Ýv„,%’ü¸ªšE£E_oÄ1Z|ôÓþ}´¶^Zܪ{5ÚI€#x ³ ‡Äb¡hL*ÐËà Šž”“ J$Ò`0ˆqEA Å@Ä0†bJ1BDyô4é1)); assert!(ContentLength(0) < 123); assert!(0 < ContentLength(123)); } } use std::sync::Arc; use actix_utils:/// The "Content-Length" header field indicates the associated representation's data length as a /// decimal non-negative integer number of octets. /// /// # ABNF /// ```plain /// Content-Length = 1*DIGIT /// ``` /// /// [RFC 9110 §8.6]: https://www.rfc-editor.org/rfc/rfc9110#name-content-length #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct ContentLength(usize); impl ContentLength { /// Returns Content-Length value. pub fn into_inner(&self) -> usize { self.0 } } impl str::FromStr for ContentLength { type Err = ::Err; #[inline] fn from_str(val: &str) -> Result { let val = val.trim(); // decoder prevents this case debug_assert!(!val.starts_with('+')); val.parse().map(Self) } } impl TryIntoHeaderValue for ContentLength { type Error = Infallible; fn try_into_value(self) -> Result { Ok(HeaderValue::from(self.0)) } } impl Header for C/ /// Also see the [Forwarded header's MDN docs][mdn] for field semantics. 
/// /// [RFC 7239]: https://datatracker.ietf.org/doc/html/rfc7239 /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(test, derive(Default))] pub struct Forwarded { /// The interface where the request came in to the proxy server. The identifier can be: /// /// - an obfuscated identifier (such as "hidden" or "secret"). This should be treated as the /// default. /// - an IP address (v4 or v6, optionally with a port. IPv6 address are quoted and enclosed in /// square brackets) /// - "unknown" when the preceding entity is not known (and you still want to indicate that /// forwarding of the request was made) by: Option, /// The client that initiated the request and subsequent proxies in a chain of proxies. The /// identifier has the same possible values as the by directive. r#for: Vec { let inner = ready!(inner_fut.poll(cx))?; Poll::Ready(Ok(BodyHash { inner, hash: mem::take(hash), })) } BodyHashFutProj::Inner { inner_fut, hasher, mut forked_payload, } => { // poll original extractor match inner_fut.poll(cx)? { Poll::Ready(inner) => { trace!("inner extractor complete"); let next = BodyHashFut::InnerDone { inner: Some(inner), hasher: mem::replace(hasher, D::new()), forked_payload: mem::replace(forked_payload, dev::Payload::None), }; self.set(next); // re-enter poll in done state Create a relative or absolute redirect. /// /// _This feature has [graduated to Actix Web][graduated]. Further development will occur there._ /// /// See [`Redirect`] docs for usage details. /// /// # Examples /// ``` /// use actix_web::App; /// use actix_web_lab::web as web_lab; /// /// let app = App::new() /// .service(web_lab::redirect("/one", "/two")); /// ``` /// /// [graduated]: https://docs.rs/actix-web/4/actix_web/web/struct.Redirect.html #[allow(deprecated)] #[deprecated(since = "0.19.0", note = "Type has graduated to Actix Web.")] pub fn redirect(from: impl Into>, to: impl Into>) -> Redirect { Redirect::new(from, to) } /// Constructs a new Single-page Application (SPA) builder. /// /// See [`Spa`] docs for more details. /// /// # Examples /// ``` /// # use actix_web::App; /// # use actix_web_lab::web::spa; /// let app = App::new() /// // ...api routes... /// .service( /// spa() /// .index_file("./examples/assets/spa.html") /// fut: Cell::new(Some(Box::pin(init()))), }), } } /// Returns reference to result of lazy `T` value, initializing if necessary. pub async fn get(&self) -> &T { self.inner .cell .get_or_init(|| async move { match self.inner.fut.take() { Some(fut) => fut.await, None => panic!("LazyData instance has previously been poisoned"), } }) .await } } impl FromRequest for LazyData { type Error = Error; type Future = Ready>; #[inline] fn from_request(req: &HttpRequest, _: &mut dev::Payload) -> Self::Future { if let Some(lazy) = req.app_data::>() { ready(Ok(lazy.clone())) } else { debug!( "Failed to extract `LazyData<{}>` for `{}` handler. 
For the Data extractor to work \ correctly, wrap the data with `LazyData::nreq.headers().get("Last-Event-ID")); common_countdown(n.try_into().unwrap()) } fn common_countdown(n: i32) -> impl Responder { let countdown_stream = stream::unfold((false, n), |(started, n)| async move { // allow first countdown value to yield immediately if started { sleep(Duration::from_secs(1)).await; } if n > 0 { let data = sse::Data::new(n.to_string()) .event("countdown") .id(n.to_string()); Some((Ok::<_, Infallible>(sse::Event::Data(data)), (true, n - 1))) } else { None } }); sse::Sse::from_stream(countdown_stream).with_retry_duration(Duration::from_secs(5)) } #[get("/time")] async fn timestamp() -> impl Responder { let (sender, sse) = sse::channel(2); actix_web::rt::spawn(async move { loop { let time = time::OffsetDateTime::now_utc(); let msg = sse::Data::new(time.format(&Rfc3339).unwrap()).event("timestamp"); der(( header::CONTENT_LENGTH, header::HeaderValue::from_static("9"), )) .set_payload(Bytes::from_static(b"name=test")) .to_http_parts(); let s = UrlEncodedForm::::from_request(&req, &mut pl).await; let err = format!("{}", s.unwrap_err()); assert!( err.contains("payload is larger") && err.contains("than allowed"), "unexpected error string: {err:?}" ); } #[actix_web::test] async fn test_form_body() { let (req, mut pl) = TestRequest::default().to_http_parts(); let form = UrlEncodedFormBody::::new(&req, &mut pl) .await; assert!(err_eq(form.unwrap_err(), UrlencodedError::ContentType)); let (req, mut pl) = TestRequest::default() .insert_header(( header::CONTENT_TYPE, header::HeaderValue::from_static("application/text"), ix_web::test] async fn compat_compat() { let _ = App::new().wrap(Compat::new(from_fn(noop))); let _ = App::new().wrap(Compat::new(from_fn(mutate_body_type))); } #[actix_web::test] async fn feels_good() { let app = test::init_service( App::new() .wrap(from_fn(mutate_body_type)) .wrap(from_fn(add_res_header)) .wrap(Logger::default()) .wrap(from_fn(noop)) .default_service(web::to(HttpResponse::NotFound)), ) .await; let req = test::TestRequest::default().to_request(); let res = test::call_service(&app, req).await; assert!(res.headers().contains_key(header::WARNING)); } #[actix_web::test] async fn closure_capture_and_return_from_fn() { let app = test::init_service( App::new() .wrap(Logger::default()) .wrap(MyMw(true).into_middleware()) .wrap(Logger::default()), fut: Box::pin(T::from_request(req, payload)), counter_pl: counter, size: 0, }, } } } pub struct BodyLimitFut where T: FromRequest + 'static, T::Error: fmt::Debug + fmt::Display, { inner: Inner, } impl BodyLimitFut where T: FromRequest + 'static, T::Error: fmt::Debug + fmt::Display, { fn new_error(err: BodyLimitError) -> Self { Self { inner: Inner::Error { err: Some(err) }, } } } enum Inner where T: FromRequest + 'static, T::Error: fmt::Debug + fmt::Display, { Error { err: Option>, }, Body { /// Wrapped extractor future. fut: Pin>, /// Forked request payload. counter_pl: dev::Payload, /// Running payload size count. size: usize, }, } impl Unpin for Inner< use crate::util::{InfallibleStream, MutWriter}; pin_project! { /// A buffered CSV serializing body stream. /// /// This has significant memory efficiency advantages over returning an array of CSV rows when /// the data set is very large because it avoids buffering the entire response. 
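// A runnable sketch of the `stream::unfold` countdown pattern used by the handler
// above, with plain `String` items instead of `sse::Data` so it does not depend on
// Actix. Assumes the `tokio` (rt, macros, time) and `futures-util` crates.
use std::time::Duration;

use futures_util::{stream, Stream, StreamExt as _};
use tokio::time::sleep;

fn countdown(n: i32) -> impl Stream<Item = String> {
    stream::unfold((false, n), |(started, n)| async move {
        // Yield the first value immediately, then wait a second between values.
        if started {
            sleep(Duration::from_secs(1)).await;
        }
        if n > 0 {
            Some((format!("countdown: {n}"), (true, n - 1)))
        } else {
            None
        }
    })
}

#[tokio::main]
async fn main() {
    let items = countdown(3).collect::<Vec<_>>().await;
    assert_eq!(items, ["countdown: 3", "countdown: 2", "countdown: 1"]);
}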
/// /// # Examples /// ``` /// # use actix_web::Responder; /// # use actix_web_lab::respond::Csv; /// # use futures_core::Stream; /// fn streaming_data_source() -> impl Stream { /// // get item stream from source /// # futures_util::stream::empty() /// } /// /// async fn handler() -> impl Responder { /// let data_stream = streaming_data_source(); /// /// Csv::new_infallible(data_stream) /// .into_responder() /// } /// ``` pub struct Csv { // The wrapped item stream. #[pin] stream: S, } } impl Csv { /// Constructs a new `Csv` from a stream of rows. string. Example: `/users?n=100`. //! //! Also includes a low-efficiency route to demonstrate the difference. use std::io::{self, Write as _}; use actix_web::{ get, web::{self, BufMut as _, BytesMut}, App, HttpResponse, HttpServer, Responder, }; use actix_web_lab::respond::NdJson; use futures_core::Stream; use futures_util::{stream, StreamExt as _}; use rand::{distributions::Alphanumeric, Rng as _}; use serde::Deserialize; use serde_json::json; use tracing::info; fn streaming_data_source(n: u32) -> impl Stream> { stream::repeat_with(|| { Ok(json!({ "email": random_email(), "address": random_address(), })) }) .take(n as usize) } #[derive(Debug, Deserialize)] struct Opts { n: Option, } /// This handler streams data as NDJSON to the client in a fast and memory efficient way. /// /// A real data source might be a downstream server, database query, or other external resource. #[get("/users")] as body, hex!( "cf83e135 7eefb8bd f1542850 d66d8007 d620e405 0b5715dc 83f4a921 d36ce9ce 47d0d13c 5d85f2b0 ff8318d2 877eec2f 63b931bd 47417a81 a538327a f927da3e" ) .as_ref() ); let (req, _) = test::TestRequest::default() .to_request() .replace_payload(dev::Payload::Stream { payload: Box::pin( stream::iter([b"a", b"b", b"c"].map(|b| Bytes::from_static(b))).map(Ok), ) as BoxedPayloadStream, }); let body = test::call_and_read_body(&app, req).await; assert_eq!( body, hex!("ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad").as_ref() ); } #[actix_web::test] async fn type_alias_equivalence() { let app = test::init_service( App::new() .route( "/alias", web::get().to(|body: BodySha256| async move { Bytes::copy_from_slice(body.hash()unt = self.static_resources_mount.into_owned(); let files = { let index_file = index_file.clone(); Files::new(&static_resources_mount, static_resources_location) // HACK: FilesService will try to read a directory listing unless index_file is provided // FilesService will fail to load the index_file and will then call our default_handler .index_file("extremely-unlikely-to-exist-!@$%^&*.txt") .default_handler(move |req| serve_index(req, index_file.clone())) }; SpaService { index_file, files } } } #[derive(Debug)] struct SpaService { index_file: String, files: Files, } impl HttpServiceFactory for SpaService { fn register(self, config: &mut actix_web::dev::AppService) { // let Files register its mount path as-is self.files.register(config); // also define a root prefix handler directed towards our SPA index let rdef = ResourceDef::root_prefix(""); t!("{s}, {s:?}"), "test, Query(Id { id: \"test\" })"); s.id = "test1".to_string(); let s = s.into_inner(); assert_eq!(s.id, "test1"); } #[actix_web::test] #[should_panic] async fn test_tuple_panic() { let req = TestRequest::with_uri("/?one=1&two=2").to_srv_request(); let (req, mut pl) = req.into_parts(); Query::<(u32, u32)>::from_request(&req, &mut pl) .await .unwrap(); } } //! Hashing utilities for Actix Web. //! //! # Crate Features //! All features are enabled by default. //! 
- `blake2`: Blake2 types //! - `blake3`: Blake3 types //! - `md5`: MD5 types 🚩 //! - `md4`: MD4 types 🚩 //! - `sha1`: SHA-1 types 🚩 //! - `sha2`: SHA-2 types //! - `sha3`: SHA-3 types //! //! # Security Warning 🚩 //! The `md4`, `md5`, and `sha1` types are included for completeness and interoperability but they //! are considered cryptographically broken by modern standards. For security critical use cases, //! you should move to Some(code) if path_altered => { let mut res = HttpResponse::with_body(code, ()); res.headers_mut().insert( header::LOCATION, req.head_mut().uri.to_string().parse().unwrap(), ); NormalizePathFuture::redirect(req.into_response(res)) } _ => NormalizePathFuture::service(self.service.call(req)), } } } pin_project! { pub struct NormalizePathFuture, B> { #[pin] inner: Inner, } } impl, B> NormalizePathFuture { fn service(fut: S::Future) -> Self { Self { inner: Inner::Service { fut, _body: PhantomData, }, } } fn redirect(res: ServiceResponse<()>) -> Self { Self { inner: Inner::Redirect { res: Some(res) }, } } } pin_project! { #[project = InnerProj] enum Inner App< impl ServiceFactory< ServiceRequest, Response = ServiceResponse, Config = (), InitError = (), Error = Error, >, > { App::new().wrap(from_fn(redirect_to_www)).route( "/", web::get().to(|| async { HttpResponse::Ok().body("content") }), ) } #[actix_web::test] async fn redirect_non_www() { es; use tokio::{ io::AsyncWrite, sync::mpsc::{UnboundedReceiver, UnboundedSender}, }; /// Returns an `AsyncWrite` response body writer and its associated body type. /// /// # Examples /// ``` /// # use actix_web::{HttpResponse, web}; /// use tokio::io::AsyncWriteExt as _; /// use actix_web_lab::body; /// /// # async fn index() { /// let (mut wrt, body) = body::writer(); /// /// let _ = tokio::spawn(async move { /// wrt.write_all(b"body from another thread").await /// }); /// /// HttpResponse::Ok().body(body) /// # ;} /// ``` pub fn writer() -> (Writer, impl MessageBody) { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); (Writer { tx }, BodyStream { rx }) } /// An `AsyncWrite` response body writer. 
#[derive(Debug, Clone)] pub struct Writer { tx: UnboundedSender, } impl AsyncWrite for Writer { fn poll_write( self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { self.tx .send(Bytes(), "data: foo\n\n"); let st = stream::repeat(Ok::<_, Infallible>(Event::Data(Data::new("foo")))).take(2); let sse = Sse::from_stream(st); assert_eq!( body::to_bytes(sse).await.unwrap(), "data: foo\n\ndata: foo\n\n", ); } #[actix_web::test] async fn appropriate_headers_are_set_on_responder() { let st = stream::empty::>(); let sse = Sse::from_stream(st); let res = sse.respond_to(&TestRequest::default().to_http_request()); assert_response_matches!(res, OK; "content-type" => "text/event-stream" "content-encoding" => "identity" "cache-control" => "no-cache" ); } #[actix_web::test] async fn messages_are_received_from_sender() { let (sender, mut sse) = channel(9); assert!(poll_fn(|cx| Pin::new(&mut sse).poll_next(cx)) .now_or_never() .is_none()); sender.send(Data::new("bar").event("foo"amples /// ``` /// #[actix_web::main] async fn test() { /// use actix_web_lab::sse; /// /// let (sender, sse_stream) = sse::channel(5); /// sender.try_send(sse::Data::new("my data").event("my event name")).unwrap(); /// sender.try_send(sse::Event::Comment("my comment".into())).unwrap(); /// # } test(); /// ``` pub fn try_send(&self, msg: impl Into) -> Result<(), TrySendError> { self.tx.try_send(msg.into()).map_err(|err| match err { mpsc::error::TrySendError::Full(ev) => TrySendError::Full(ev), mpsc::error::TrySendError::Closed(ev) => TrySendError::Closed(ev), }) } } pin_project! { /// Server-sent events (`text/event-stream`) responder. /// /// Constructed with an [SSE channel](channel) or [using your own stream](Self::from_stream). #[must_use] #[derive(Debug)] pub struct Sse { #[pin] stream: S, keep_alive: Option, retry_interval: Option, p_data(Data::new(AbcSigningKey([0; 32]))) .route( "/", web::post().to(|body: RequestSignature| async move { let (body, sig) = body.into_parts(); let sig = sig.into_bytes().to_vec(); format!("{body:?}\n\n{sig:x?}") }), ) }) .workers(1) .bind(("127.0.0.1", 8080))? .run() .await } //! Expiremental testing utilities. #[doc(inline)] #[cfg(test)] pub(crate) use crate::test_header_macros::{header_round_trip_test, header_test_module}; #[doc(inline)] pub use crate::test_request_macros::test_request; #[doc(inline)] pub use crate::test_response_macros::assert_response_matches; pub use crate::test_services::echo_path_service; //! Semantic server-sent events (SSE) responder with a channel-like interface. //! //! # Examples //! ```no_run //! use std::{convert::Infallible, time::Duration}; //! use actix_web::{Responder, get}; //! use actix_web_lab::sse; //! /ranges: unsafe { IpCidrCombiner::from_cidr_vec_unchecked(ipv4_cidr_vec, ipv6_cidr_vec) }, } } } /// Fetched trusted Cloudflare IP addresses from their API. #[cfg(feature = "fetch-ips")] pub async fn fetch_trusted_cf_ips() -> Result { let client = awc::Client::new(); tracing::debug!("fetching cloudflare ips"); let mut res = client.get(CF_URL_IPS).send().await.map_err(|err| { tracing::error!("{err}"); Err::Fetch })?; tracing::debug!("parsing response"); let res = res.json::().await.map_err(|err| { tracing::error!("{err}"); Err::Fetch })?; TrustedIps::try_from_response(res) } #[cfg(test)] mod tests { use super::*; #[test] fn cf_ips_from_response() { let res = CfIpsResponse::Failure { success: false }; assert!(Trustednse::Success { result } => result, CfIpsResponse::Failure { .. 
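// A stand-alone sketch of the writer/body pair described above: an `AsyncWrite`
// that forwards every write as a `Bytes` chunk over an unbounded channel. The real
// `body::writer()` pairs this with a `MessageBody` receiver; here a bare
// `UnboundedReceiver` stands in for it. Assumes the `tokio` and `bytes` crates.
use std::{
    io,
    pin::Pin,
    task::{Context, Poll},
};

use bytes::Bytes;
use tokio::{
    io::{AsyncWrite, AsyncWriteExt as _},
    sync::mpsc,
};

struct Writer {
    tx: mpsc::UnboundedSender<Bytes>,
}

impl AsyncWrite for Writer {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Each write becomes one body chunk; a dropped receiver surfaces as an error.
        Poll::Ready(
            self.tx
                .send(Bytes::copy_from_slice(buf))
                .map(|_| buf.len())
                .map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "receiver dropped")),
        )
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<Bytes>();
    let mut wrt = Writer { tx };

    // One task writes through the `AsyncWrite` interface...
    tokio::spawn(async move {
        wrt.write_all(b"body from another task").await.unwrap();
    });

    // ...while the receiver half sees the same bytes as body chunks.
    let chunk = rx.recv().await.unwrap();
    assert_eq!(&chunk[..], b"body from another task");
}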
} => { tracing::error!("parsing response returned success: false"); return Err(Err::Fetch); } }; let mut cidr_ranges = IpCidrCombiner::new(); for cidr in ips.ipv4_cidrs { cidr_ranges.push(IpCidr::V4(cidr)); } for cidr in ips.ipv6_cidrs { cidr_ranges.push(IpCidr::V6(cidr)); } Ok(Self { cidr_ranges }) } /// Add trusted IP range to list. pub fn with_ip_range(mut self, cidr: IpCidr) -> Self { self.cidr_ranges.push(cidr); self } /// Returns true if `ip` is controlled by Cloudflare. pub fn contains(&self, ip: IpAddr) -> bool { self.cidr_ranges.contains(ip) } } impl Clone for TrustedIps { fn clone(&self) -> Self { let ipv4_cidr_vec = self.cidr_ranges.get_ipv4_cidrs().to_vec(); let ipv6_cidr_vec = self.cidr_ranges.get_ipv6_cidrs().to_vec(); //! Utilities for working with Actix Web types. // stuff in here comes in and out of usage #![allow(dead_code)] use std::{ convert::Infallible, io, pin::Pin, task::{ready, Context, Poll}, }; use actix_http::{error::PayloadError, BoxedPayloadStream}; use actix_web::{dev, web::BufMut}; use futures_core::Stream; use futures_util::StreamExt as _; use local_channel::mpsc; /// Returns an effectively cloned payload that supports streaming efficiently. /// /// The cloned payload: /// - yields identical chunks; /// - does not poll ahead of the original; /// - does not poll significantly slower than the original; /// - receives an error signal if the original errors, but details are opaque to the copy. /// /// If the payload is forked in one of the extractors used in a handler, then the original _must_ be /// read in another extractor or else the request will hang. pub fn fork_request_payload(orig_payload: &mut dev::Payload) -> dev::Payload { const TARGET: &str = concat!(module_path!(), "::fork_q).unwrap(), expect); } #[track_caller] pub(crate) fn assert_parse_fail< H: Header + fmt::Debug, I: IntoIterator, V: AsRef<[u8]>, >( headers: I, ) { let req = req_from_raw_headers::(headers); H::parse(&req).unwrap_err(); } } #[cfg(test)] pub(crate) use header_test_helpers::{assert_parse_eq, assert_parse_fail}; use actix_client_ip_cloudflare::{fetch_trusted_cf_ips, CfConnectingIp, TrustedClientIp}; use actix_web::{get, web::Header, App, HttpServer, Responder}; #[get("/raw-header")] async fn header(Header(client_ip): Header) -> impl Responder { match client_ip { CfConnectingIp::Trusted(_ip) => unreachable!(), CfConnectingIp::Untrusted(ip) => format!("Possibly fake client IP: {ip}"), } } #[get("/client-ip")] async fn trusted_client_ip(client_ip: TrustedClientIp) -> impl Responder { format!("Trusted client IP: {client_ip}") } #[actix_web::main] async fn main() -> std::io::Res by: Some("203.0.113.43".to_owned()), r#for: vec!["192.0.2.60".to_owned()], host: Some("rust-lang.org".to_owned()), proto: Some("https".to_owned()), }; assert_eq!( fwd.try_into_value().unwrap(), r#"by="203.0.113.43"; for="192.0.2.60"; host="rust-lang.org"; proto="https""# ); } #[test] fn case_sensitivity() { assert_parse_eq::( ["For=192.0.2.60"], Forwarded { r#for: vec!["192.0.2.60".to_owned()], ..Forwarded::default() }, ); } #[test] fn weird_whitespace() { assert_parse_eq::( ["for= 1.2.3.4; proto= https"], Forwarded { r#for: vec!["1.2.3.4".to_owned()], proto: Some("https".to_owned()), ..Forwarded::default() }, ); assert_parse_eq::( [" for = 1.2. 
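// A std-only illustration of the CIDR containment check that `TrustedIps::contains`
// delegates to its combined IPv4/IPv6 ranges, using one of the Cloudflare ranges
// probed in the `dbg!` calls above. IPv6 works the same way with 128-bit masks.
use std::net::Ipv4Addr;

/// Returns true if `ip` falls inside the IPv4 block `network`/`prefix_len`.
fn cidr_contains(network: Ipv4Addr, prefix_len: u32, ip: Ipv4Addr) -> bool {
    // A /0 prefix matches everything; also avoids the undefined `u32 << 32` shift.
    if prefix_len == 0 {
        return true;
    }
    let mask = u32::MAX << (32 - prefix_len);
    (u32::from(network) & mask) == (u32::from(ip) & mask)
}

fn main() {
    // 103.21.244.0/22 covers 103.21.244.0 through 103.21.247.255.
    let network = Ipv4Addr::new(103, 21, 244, 0);
    assert!(cidr_contains(network, 22, Ipv4Addr::new(103, 21, 247, 0)));
    assert!(!cidr_contains(network, 22, Ipv4Addr::new(103, 21, 248, 0)));
}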
let mut proto = None; let mut r#for = vec![]; // "for=1.2.3.4, for=5.6.7.8; scheme=https" for (name, val) in val .split(';') // ["for=1.2.3.4, for=5.6.7.8", " proto=https"] .flat_map(|vals| vals.split(',')) // ["for=1.2.3.4", " for=5.6.7.8", " proto=https"] .flat_map(|pair| { let mut items = pair.trim().splitn(2, '='); Some((items.next()?, items.next()?)) }) { // [(name , val ), ... ] // [("for", "1.2.3.4"), ("for", "5.6.7.8"), ("scheme", "https")] match name.trim().to_lowercase().as_str() { "by" => { // multiple values on other properties have no defined semantics by.get_or_insert_with(|| unquote(val)); } "for" => { // parameter order is defined to be client first and last proxy last tractor that /// _takes_ the payload. In this case, the resulting hash will be as if an empty input was given to /// the hasher. /// /// # Example /// ``` /// use actix_web::{Responder, web}; /// use actix_hash::BodyHash; /// use sha2::Sha256; /// /// # type T = u64; /// async fn hash_payload(form: BodyHash, Sha256>) -> impl Responder { /// if !form.verify_slice(b"correct-signature") { /// // return unauthorized error /// } /// /// "Ok" /// } /// ``` #[derive(Debug, Clone)] pub struct BodyHash { inner: T, hash: GenericArray, } impl BodyHash { /// Returns hash slice. pub fn hash(&self) -> &[u8] { self.hash.as_slice() } /// Returns hash output size. pub fn hash_size(&self) -> usize { self.hash.len() } /// Verifies HMAC hash against provided `tag` using constant-time equality. pub fn verify_slice(&self, tag: &[u8]) -> bool { use subtle::ConstantTimeEq as _; use async_trait::async_trait; use bytes::{BufMut as _, BytesMut}; use ed25519_dalek::{PublicKey, Signature, Verifier as _}; use hex_literal::hex; use once_cell::sync::Lazy; use rustls::{Certificate, PrivateKey, ServerConfig}; use rustls_pemfile::{certs, pkcs8_private_keys}; use tracing::info; const APP_PUBLIC_KEY_BYTES: &[u8] = &hex!("d7d9a14753b591be99a0c5721be8083b1e486c3fcdc6ac08bfb63a6e5c204569"); static SIG_HDR_NAME: HeaderName = HeaderName::from_static("x-signature-ed25519"); static TS_HDR_NAME: HeaderName = HeaderName::from_static("x-signature-timestamp"); static APP_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from_bytes(APP_PUBLIC_KEY_BYTES).unwrap()); #[derive(Debug)] struct DiscordWebhook { /// Signature taken from webhook request header. candidate_signature: Signature, /// Cloned payload state. chunks: Vec, } impl DiscordWebhook { fn get_timestamp(req: &HttpRequest) -> Result<&[u8], Error> { req.headers() .get(&TS_HDR_NAME) s_core::ready; use pin_project_lite::pin_project; /// Creates a middleware from an async function that is used as a mapping function for a /// [`ServiceResponse`]. 
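// A std-only sketch of the `Forwarded` parsing loop shown above: directive pairs are
// separated by `;` or `,`, split on the first `=`, names are case-insensitive,
// values may be quoted, and only `for` accumulates multiple values.
#[derive(Debug, Default, PartialEq)]
struct Forwarded {
    by: Option<String>,
    r#for: Vec<String>,
    host: Option<String>,
    proto: Option<String>,
}

/// Trim whitespace, then any surrounding quote marks.
fn unquote(val: &str) -> &str {
    val.trim().trim_start_matches('"').trim_end_matches('"')
}

fn parse_forwarded(val: &str) -> Forwarded {
    let mut fwd = Forwarded::default();

    for (name, value) in val
        .split(';')
        .flat_map(|vals| vals.split(','))
        .filter_map(|pair| {
            let mut items = pair.trim().splitn(2, '=');
            Some((items.next()?, items.next()?))
        })
    {
        match name.trim().to_lowercase().as_str() {
            // Multiple values for these directives have no defined semantics;
            // keep the first one seen.
            "by" => {
                fwd.by.get_or_insert_with(|| unquote(value).to_owned());
            }
            "host" => {
                fwd.host.get_or_insert_with(|| unquote(value).to_owned());
            }
            "proto" => {
                fwd.proto.get_or_insert_with(|| unquote(value).to_owned());
            }
            // "for" is a chain: client first, last proxy last.
            "for" => fwd.r#for.push(unquote(value).to_owned()),
            _ => {}
        }
    }

    fwd
}

fn main() {
    let fwd = parse_forwarded(r#"for=192.0.2.60, for="[2001:db8::1]"; proto=https"#);
    assert_eq!(fwd.r#for, ["192.0.2.60", "[2001:db8::1]"]);
    assert_eq!(fwd.proto.as_deref(), Some("https"));
    assert!(fwd.by.is_none());
}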
/// /// # Examples /// Adds header: /// ``` /// # use actix_web_lab::middleware::map_response; /// use actix_web::{body::MessageBody, dev::ServiceResponse, http::header}; /// /// async fn add_header( /// mut res: ServiceResponse, /// ) -> actix_web::Result> { /// res.headers_mut() /// .insert(header::WARNING, header::HeaderValue::from_static("42")); /// /// Ok(res) /// } /// # actix_web::App::new().wrap(map_response(add_header)); /// ``` /// /// Maps body: /// ``` /// # use actix_web_lab::middleware::map_response; /// use actix_web::{body::MessageBody, dev::ServiceResponse}; /// /// async fn mutate_body_type( /// res: ServiceResponse, /// ) -> actix_web::Result> { /// Ok(res.map_into_left_body::<()>()) /// }b); // catch panics in service call AssertUnwindSafe(self.service.call(req)) .catch_unwind() .map(move |maybe_res| match maybe_res { Ok(res) => res, Err(panic_err) => { // invoke callback with panic arg (cb)(&panic_err); // continue unwinding panic::resume_unwind(panic_err) } }) .boxed_local() } } #[cfg(test)] mod tests { use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; use actix_web::{ dev::Service as _, test, web::{self, ServiceConfig}, App, }; use super::*; fn configure_test_app(cfg: &mut ServiceConfig) { cfg.route("/", web::get().to(|| async { "content" })).route( "/disco", #[allow(unreachable_code)] web::get().to(|| async { panic!("the disco"); "" // /// /// Deserialize payload with a higher 32MiB limit. /// #[post("/big-payload")] /// async fn big_payload(info: UrlEncodedForm) -> String { /// format!("Welcome {}!", info.username) /// } /// ``` #[doc(alias = "html_form", alias = "html form", alias = "form")] #[derive(Debug, Deref, DerefMut, Display)] pub struct UrlEncodedForm(pub T); impl UrlEncodedForm { /// Unwraps into inner `T` value. pub fn into_inner(self) -> T { self.0 } } /// See [here](#extractor) for example of usage as an extractor. impl FromRequest for UrlEncodedForm { type Error = Error; type Future = UrlEncodedFormExtractFut; #[inline] fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { UrlEncodedFormExtractFut { req: Some(req.clone()), fut: UrlEncodedFormBody::new(req, payloice { service: boxed::rc_service(service), mw_fn: Rc::clone(&self.mw_fn), _phantom: PhantomData, })) } } /// Middleware service for [`from_fn`]. pub struct MiddlewareFnService { service: RcService, Error>, mw_fn: Rc, _phantom: PhantomData<(B, Es)>, } impl Service for MiddlewareFnService where F: Fn(ServiceRequest, Next) -> Fut, Fut: Future, Error>>, B2: MessageBody, { type Response = ServiceResponse; type Error = Error; type Future = Fut; forward_ready!(service); fn call(&self, req: ServiceRequest) -> Self::Future { (self.mw_fn)( req, Next:: { service: Rc::clone(&self.service), }, ) } } macro_rules! 
impl_middleware_fn_service { ($($ext_type:ident),*) => { impl Trans assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[actix_web::test] async fn test_override_data() { let srv = init_service( App::new().app_data(LocalData::new(1usize)).service( web::resource("/") .app_data(LocalData::new(10usize)) .route(web::get().to(|data: LocalData| { assert_eq!(*data, 10); HttpResponse::Ok() })), ), ) .await; let req = TestRequest::default().to_request(); let resp = srv.call(req).await.unwrap(); assert_eq!(resp.status(), StatusCode::OK); } #[actix_web::test] async fn test_data_from_rc() { let data_new = LocalData::new(String::from("test-123")); let data_from_rc = LocalData::from(Rc::new(String::from("test-123"))); assert_eq!(data_new.0, data_from_rc.0); } #[actix_web::test] async fn test_data_from_dyn_rc() nsume_chunk(&mut self, _req: &HttpRequest, chunk: Bytes) -> Result<(), Self::Error> { Digest::update(&mut self.hasher, &chunk); Ok(()) } async fn finalize(self, _req: &HttpRequest) -> Result { println!("using key: {:X?}", &self.key); let mut hmac = >::new_from_slice(&self.key).unwrap(); let payload_hash = self.hasher.finalize(); println!("payload hash: {payload_hash:X?}"); Mac::update(&mut hmac, &payload_hash); Ok(hmac.finalize()) } fn verify( signature: Self::Signature, req: &HttpRequest, ) -> Result { let user_sig = get_user_signature(req)?; let user_sig = CtOutput::new(GenericArray::from_slice(&user_sig).to_owned()); if signature == user_sig { Ok(signature) } else { Err(error::ErrorUnauthorized( "given signature does not match calculated signature", form: web::Json>, } fn main() {} use std::io; use actix_web::{ error, http::header::HeaderValue, middleware::Logger, web::{self, Bytes}, App, Error, HttpRequest, HttpServer, }; use actix_web_lab::extract::{RequestSignature, RequestSignatureScheme}; use async_trait::async_trait; use digest::{CtOutput, Digest, Mac}; use generic_array::GenericArray; use hmac::SimpleHmac; use sha2::{Sha256, Sha512}; use tracing::info; #[allow(non_upper_case_globals)] const db: () = (); async fn lookup_public_key_in_db(_db: &(), val: T) -> T { val } /// Extracts user's public key from request and pretends to look up secret key in the DB. async fn get_base64_api_key(req: &HttpRequest) -> actix_web::Result> { // public key, not encryption key let pub_key = req .headers() .get("Api-Key") .map(HeaderValue::as_bytes) .map(base64::decode) .transpose() .map_err(|_| error::ErrorInternalServerError("invalid api key"))?app, req).await; assert_eq!(res.status(), StatusCode::OK); let body = test::read_body(res).await; assert_eq!(body, "content"); } #[actix_web::test] async fn catch_panic_return_internal_server_error_response() { let app = test::init_service(test_app()).await; let req = test::TestRequest::with_uri("/disco").to_request(); let err = match app.call(req).await { Ok(_) => panic!("unexpected Ok response"), Err(err) => err, }; let res = err.error_response(); assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); let body = to_bytes(res.into_body()).await.unwrap(); assert!(body.is_empty()); } } use std::{ pin::Pin, task::{Context, Poll}, }; use actix_web::body::{BodySize, MessageBody}; use bytes::Bytes; use tokio::sync::mpsc::{error::SendError, UnboundedReceiver, UnboundedSender}; use crate::BoxError; /// Returns a sender half and a receiver half that can be used as a body ton: Cow<'static, str>, } impl Spa { /// Location of the SPA index file. 
/// /// This file will be served if: /// - the Actix Web router has reached this service, indicating that none of the API routes /// matched the URL path; /// - and none of the static resources handled matched. /// /// The default is "./index.html". I.e., the `index.html` file located in the directory that /// the server is running from. pub fn index_file(mut self, index_file: impl Into>) -> Self { self.index_file = index_file.into(); self } /// The URL path prefix that static files should be served from. /// /// The default is "/". I.e., static files are served from the root URL path. pub fn static_resources_mount( mut self, static_resources_mount: impl Into>, ) -> Self { self.static_resources_mount = static_resources_mount.into(); self } /// The location in the filesystem to ser #[doc = concat!("# type Hasher = ", stringify!($digest), ";")] #[doc = concat!("# const OutSize: usize = ", $out_size, ";")] /// # assert_eq!( /// # digest::generic_array::GenericArray::::OutputSize /// # >::default().len(), /// # OutSize /// # ); /// ``` #[cfg(feature = $feature)] pub type $name = BodyHash; }; } // Obsolete body_hash_alias!(BodyMd4, md4::Md4, "md4", "MD4", 16); body_hash_alias!(BodyMd5, md5::Md5, "md5", "MD5", 16); body_hash_alias!(BodySha1, sha1::Sha1, "sha1", "SHA-1", 20); // SHA-2 body_hash_alias!(BodySha224, sha2::Sha224, "sha2", "SHA-224", 28); body_hash_alias!(BodySha256, sha2::Sha256, "sha2", "SHA-256", 32); body_hash_alias!(BodySha384, sha2::Sha384, "sha2", "SHA-384", 48); body_hash_alias!(BodySha512, sha2::Sha512, "sha2", "SHA-512", 64); // SHA-3 body_hash_alias!(BodySha3_224, sha3::Sha3_224, "sha3", "SHA-3-224", e_slash.replace_all(&path, "/"); // Ensure root paths are still resolvable. If resulting path is blank after previous // step it means the path was one or more slashes. Reduce to single slash. let path = if path.is_empty() { "/" } else { path.as_ref() }; // Check whether the path has been changed // // This check was previously implemented as string length comparison // // That approach fails when a trailing slash is added, // and a duplicate slash is removed, // since the length of the strings remains the same // // For example, the path "/v1//s" will be normalized to "/v1/s/" // Both of the paths have the same length, // so the change can not be deduced from the length comparison if path != original_path { let mut parts = head.uri.clone().into_parts(); let query = parts.path_and_query.as_ref().and3, 21, 247, 0]))); dbg!(ips.contains(IpAddr::from([103, 21, 248, 0]))); } use actix_web::{ body::MessageBody, dev::{ServiceRequest, ServiceResponse}, web::Redirect, Error, Responder, }; use crate::middleware_from_fn::Next; /// A function middleware to redirect traffic to `www.` if not already there. 
/// /// # Examples /// ``` /// # use actix_web::App; /// use actix_web_lab::middleware::{from_fn, redirect_to_www}; /// /// App::new() /// .wrap(from_fn(redirect_to_www)) /// # ; /// ``` pub async fn redirect_to_www( req: ServiceRequest, next: Next, ) -> Result, Error> { #![allow(clippy::await_holding_refcell_ref)] // RefCell is dropped before await let (req, pl) = req.into_parts(); let conn_info = req.connection_info(); if !conn_info.host().starts_with("www.") { let scheme = conn_info.scheme(); let host = conn_info.host(); let path = req.uri().path(); let uri = form$hdr_name:expr => $hdr_val:expr)+; @raw $payload:expr) => {{ assert_response_matches!($res, $status; $($hdr_name => $hdr_val)+); assert_eq!(::actix_web::test::read_body($res).await, $payload); }}; ($res:ident, $status:ident; @json $payload:tt) => {{ assert_response_matches!($res, $status); assert_eq!( ::actix_web::test::read_body_json::<$crate::__reexports::serde_json::Value, _>($res).await, $crate::__reexports::serde_json::json!($payload), ); }}; } pub use assert_response_matches; #[cfg(test)] mod tests { use actix_web::{ dev::ServiceResponse, http::header::ContentType, test::TestRequest, HttpResponse, }; use super::*; #[actix_web::test] async fn response_matching() { let res = ServiceResponse::new( TestRequest::default().to_http_request(), HttpResponse::Created() .insert_header(("date", "today")) .insert_header(("set-cookie", "a=b")) (Some(Ok(Event::retry_to_bytes(retry)))); } if let Poll::Ready(msg) = this.stream.poll_next(cx) { return match msg { Some(Ok(msg)) => Poll::Ready(Some(Ok(msg.into_bytes()))), Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), None => Poll::Ready(None), }; } if let Some(ref mut keep_alive) = this.keep_alive { if keep_alive.poll_tick(cx).is_ready() { return Poll::Ready(Some(Ok(Event::keep_alive_bytes()))); } } Poll::Pending } } /// Create server-sent events (SSE) channel pair. /// /// The `buffer` argument controls how many unsent messages can be stored without waiting. /// /// The first item in the tuple is the sender half. Much like a regular channel, it can be cloned, /// sent to another thread/task, and send event messages to the response stream. It provides several /// methods that represent the event-stream format. /// /// The second ite.into()); self } /// Sets `id` field. pub fn set_id(&mut self, id: impl Into) { self.id = Some(id.into()); } /// Sets `event` name field, returning a new data message. pub fn event(mut self, event: impl Into) -> Self { self.event = Some(event.into()); self } /// Sets `event` name field. pub fn set_event(&mut self, event: impl Into) { self.event = Some(event.into()); } } impl From for Event { fn from(data: Data) -> Self { Self::Data(data) } } /// Server-sent events message containing one or more fields. #[must_use] #[derive(Debug, Clone)] pub enum Event { /// A `data` message with optional ID and event name. /// /// Data messages looks like this in the response stream. 
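// A std-only sketch of the rewrite performed by `redirect_to_www`; the exact format
// string is cut off above, so the URL construction below is an assumption about how
// the scheme, host, and path are recombined.
fn www_redirect_target(scheme: &str, host: &str, path: &str) -> Option<String> {
    if host.starts_with("www.") {
        // Already on the www subdomain; no redirect needed.
        None
    } else {
        Some(format!("{scheme}://www.{host}{path}"))
    }
}

fn main() {
    assert_eq!(
        www_redirect_target("https", "example.com", "/login"),
        Some("https://www.example.com/login".to_owned()),
    );
    assert_eq!(www_redirect_target("https", "www.example.com", "/login"), None);
}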
/// ```plain /// event: foo /// id: 42 /// data: my data /// /// data: { /// data: "multiline": "data" /// data: } /// ``` Data(Data), /// A comm(noop))); let _ = App::new().wrap(Compat::new(map_response_body(mutate_body_type))); } #[actix_web::test] async fn feels_good() { let app = test::init_service( App::new() .default_service(web::to(HttpResponse::Ok)) .wrap(map_response_body(|_req, body| async move { Ok(body) })) .wrap(map_response_body(noop)) .wrap(Logger::default()) .wrap(map_response_body(mutate_body_type)), ) .await; let req = test::TestRequest::default().to_request(); let body = test::call_and_read_body(&app, req).await; assert_eq!(body, "foo"); } } use actix_web::{ http::{Method, StatusCode}, web, App, HttpResponse, Responder, }; use actix_web_lab_derive::FromRequest; #[derive(Debug, FromRequest)] struct RequestParts { method: Method, pool: web::Data, body: String, body2: String, #[from_request(copy_from_app_data)] copied_data: u64, } asstedIps::try_from_response(res).is_err()); } } //! For path segment extractor documentation, see [`Path`]. use actix_router::PathDeserializer; use actix_utils::future::{ready, Ready}; use actix_web::{ dev::Payload, error::{Error, ErrorNotFound}, FromRequest, HttpRequest, }; use derive_more::{AsRef, Display, From}; use serde::de; use tracing::debug; /// Extract typed data from request path segments. /// /// Alternative to `web::Path` extractor from Actix Web that allows deconstruction, but omits the /// implementation of `Deref`. /// /// Unlike, [`HttpRequest::match_info`], this extractor will fully percent-decode dynamic segments, /// including `/`, `%`, and `+`. /// /// # Examples /// ``` /// use actix_web::get; /// use actix_web_lab::extract::Path; /// /// // extract path info from "/{name}/{count}/index.html" into tuple /// // {name} - deserialize a String /// // {count} - deserialize a u32 /// #[get("/{name}/{count}/index.html")] /// async fn index(Path((name, count)): Path<(String, u32 else { buf.extend_from_slice(&chunk); } } None => { let json = serde_json::from_slice::(buf) .map_err(JsonPayloadError::Deserialize)?; return Poll::Ready(Ok(json)); } } }, JsonBody::Error(e) => Poll::Ready(Err(e.take().unwrap())), } } } #[cfg(test)] mod tests { use actix_web::{http::header, test::TestRequest, web::Bytes}; use serde::{Deserialize, Serialize}; use super::*; #[derive(Serialize, Deserialize, PartialEq, Debug)] struct MyObject { name: String, } fn json_eq(err: JsonPayloadError, other: JsonPayloadError) -> bool { match err { JsonPayloadError::Overflow { .. } => { matches!(other, JsonPayloadError::Overflow { .. }) } JsonPayloadError::OverflowKnownLength { .. }format!("error from original stream: {err}"), )))) .unwrap(), } })); tracing::trace!(target: TARGET, "creating proxy payload"); *orig_payload = dev::Payload::from(proxy_stream); dev::Payload::Stream { payload: Box::pin(rx), } } /// An `io::Write`r that only requires mutable reference and assumes that there is space available /// in the buffer for every write operation or that it can be extended implicitly (like /// `bytes::BytesMut`, for example). /// /// This is slightly faster (~10%) than `bytes::buf::Writer` in such cases because it does not /// perform a remaining length check before writing. 
pub(crate) struct MutWriter<'a, B>(pub(crate) &'a mut B); impl<'a, B> MutWriter<'a, B> { pub fn get_ref(&self) -> &B { self.0 } } impl<'a, B: BufMut> io::Write for MutWriter<'a, B> { fn write(&mut self, buf: &[u8]) -> io::Result { self.0.put_slice(buf); Ok(buf.len()) } fn flush(&mut self) -> iof.into_chunk_stream()) } /// Creates a `Responder` type with a line-by-line serializing stream and `text/plain` /// content-type header. pub fn into_responder(self) -> impl Responder where S: 'static, T: 'static, E: 'static, { HttpResponse::Ok() .content_type(mime::TEXT_PLAIN_UTF_8) .message_body(self.into_body_stream()) .unwrap() } /// Creates a stream of serialized chunks. pub fn into_chunk_stream(self) -> impl Stream> { self.stream.map_ok(write_display) } } fn write_display(item: impl fmt::Display) -> Bytes { let mut buf = BytesMut::new(); let mut wrt = MutWriter(&mut buf); writeln!(wrt, "{item}").unwrap(); buf.freeze() } #[cfg(test)] mod tests { use std::error::Error as StdError; use actix_web::body; use futures_util::stream; use super::*; #[actix_web::test] async fn serializes_into_body() { let ndjson_body = Dio.map(|proto| format!("proto=\"{proto}\""))) .join("; ") .try_into_value() } } impl Header for Forwarded { fn name() -> HeaderName { header::FORWARDED } fn parse(msg: &M) -> Result { let combined = msg .headers() .get_all(Self::name()) .filter_map(|hdr| hdr.to_str().ok()) .filter_map(|hdr_str| match hdr_str.trim() { "" => None, val => Some(val), }) .collect::>(); if combined.is_empty() { return Err(ParseError::Header); } // pass to FromStr impl as if it were one concatenated header with semicolon joiners // https://datatracker.ietf.org/doc/html/rfc7239#section-7.1 combined.join(";").parse().map_err(|_| ParseError::Header) } } /// Trim whitespace then any quote marks. fn unquote(val: &str) -> &str { val.trim().trim_start_matches('"').trim_end_matche/// Should equal the `Host` request header field as received by the proxy. pub fn host(&self) -> Option<&str> { self.host.as_deref() } /// Returns the "proto" identifier, if set. /// /// Indicates which protocol was used to make the request (typically "http" or "https"). pub fn proto(&self) -> Option<&str> { self.proto.as_deref() } /// Adds an identifier to the "for" chain. /// /// Useful when re-forwarding a request and needing to update the request headers with previous /// proxy's address. pub fn push_for(&mut self, identifier: impl Into) { self.r#for.push(identifier.into()) } /// Returns true if all of the fields are empty. fn has_no_info(&self) -> bool { self.by.is_none() && self.r#for.is_empty() && self.host.is_none() && self.proto.is_none() } // TODO: parse with trusted IP ranges fn } impl str::FromStr for Forwarded { type Err = Infallible; #[inline] fn from_str(val: &str) -> Re // Ignore Pending because its possible the inner extractor never // polls the payload stream and ignore errors because they will be // propagated by original payload polls. 
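// A self-contained version of the `MutWriter` adapter described above: an `io::Write`
// over a growable `BufMut` that skips the remaining-capacity check, plus the
// `write_display` helper that uses it to serialize one line-delimited item.
// Assumes the `bytes` crate.
use std::{
    fmt,
    io::{self, Write as _},
};

use bytes::{BufMut, Bytes, BytesMut};

struct MutWriter<'a, B>(&'a mut B);

impl<'a, B: BufMut> io::Write for MutWriter<'a, B> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // No remaining-length check: the buffer is assumed to extend itself on demand.
        self.0.put_slice(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

/// Serializes one item followed by a newline, mirroring `write_display` above.
fn write_display(item: impl fmt::Display) -> Bytes {
    let mut buf = BytesMut::new();
    let mut wrt = MutWriter(&mut buf);
    writeln!(wrt, "{item}").unwrap();
    buf.freeze()
}

fn main() {
    assert_eq!(write_display(42), Bytes::from_static(b"42\n"));
}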
Poll::Ready(Some(Err(_))) | Poll::Pending => break, } } Poll::Pending } } } BodyHashFutProj::InnerDone { inner, hasher, forked_payload, } => { let mut pl = Pin::new(forked_payload); // drain forked payload loop { match pl.as_mut().poll_next(cx) { // update hasher with chunks Poll::Ready(Some(Ok(chunk))) => hasher.update(&chunk), // when drain is complete, finalize hash and return parts keys: Vec = pkcs8_private_keys(key_file) .unwrap() .into_iter() .map(PrivateKey) .collect(); // exit if no keys could be parsed if keys.is_empty() { eprintln!("Could not locate PKCS 8 private keys."); std::process::exit(1); } config.with_single_cert(cert_chain, keys.remove(0)).unwrap() } use std::{io, time::Duration}; use actix_web::{ get, http::{ self, header::{ContentEncoding, ContentType}, }, App, HttpResponse, HttpServer, Responder, }; use actix_web_lab::body; use async_zip::{write::ZipFileWriter, ZipEntryBuilder}; use tokio::{ fs, io::{AsyncWrite, AsyncWriteExt as _}, }; fn zip_to_io_err(err: async_zip::error::ZipError) -> io::Error { io::Error::new(io::ErrorKind::Other, err) } async fn read_dir(zipper: &mut ZipFileWriter) -> io::Result<()> where W: AsyncWrite + Unpin, { let mut path = fs::canonicalize(env!("CARGO_MANIFEST_DIR")).await?; path.push("examples"); sync::OnceCell; use tracing::debug; /// A lazy extractor for thread-local data. /// /// Using `LazyData` as an extractor will not initialize the data; [`get`](Self::get) must be used. pub struct LazyData { inner: Rc>, } struct LazyDataInner { cell: OnceCell, fut: Cell>>, } impl Clone for LazyData { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } impl fmt::Debug for LazyData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Lazy") .field("cell", &self.inner.cell) .field("fut", &"..") .finish() } } impl LazyData { /// Constructs a new `LazyData` extractor with the given initialization function. /// /// Initialization functions must return a future that resolves to `T`. pub fn new(init: F) -> LazyData where F: FnOnce() -> Fut, Fut assert!(s.is_err()); let err_str = s.unwrap_err().to_string(); assert_eq!( err_str, "URL encoded payload is larger (9 bytes) than allowed (limit: 8 bytes).", ); } } //! Panic reporter middleware. //! //! See [`PanicReporter`] for docs. use std::{ any::Any, future::{ready, Ready}, panic::{self, AssertUnwindSafe}, rc::Rc, }; use actix_web::dev::{forward_ready, Service, Transform}; use futures_core::future::LocalBoxFuture; use futures_util::FutureExt as _; type PanicCallback = Rc; /// A middleware that triggers a callback when the worker is panicking. /// /// Mostly useful for logging or metrics publishing. The callback received the object with which /// panic was originally invoked to allow down-casting. /// /// # Examples /// ```ignore /// # use actix_web::App; /// use actix_web_lab::middleware::PanicReporter; /// use metrics::increment_counter; /// /// App::new() /// .wrap(PanicReporter::new(|_| increme::Bytes}; use serde::{Deserialize, Serialize}; use super::*; #[derive(Serialize, Deserialize, PartialEq, Debug)] struct MyObject { name: String, } fn err_eq(err: UrlencodedError, other: UrlencodedError) -> bool { match err { UrlencodedError::Overflow { .. } => { matches!(other, UrlencodedError::Overflow { .. 
}) } UrlencodedError::ContentType => matches!(other, UrlencodedError::ContentType), _ => false, } } #[actix_web::test] async fn test_extract() { let (req, mut pl) = TestRequest::default() .insert_header(header::ContentType::form_url_encoded()) .insert_header(( header::CONTENT_LENGTH, header::HeaderValue::from_static("9"), )) .set_payload(Bytes::from_static(b"name=test")) .to_http_parts(); let s = UrlEncodedForm::::fro let mw_fn = Rc::clone(&self.mw_fn); let service = Rc::clone(&self.service); Box::pin(async move { let ($($ext_type,)*) = req.extract::<($($ext_type,)*)>().await?; (mw_fn)($($ext_type),*, req, Next:: { service }).await }) } } }; } impl_middleware_fn_service!(E1); impl_middleware_fn_service!(E1, E2); impl_middleware_fn_service!(E1, E2, E3); impl_middleware_fn_service!(E1, E2, E3, E4); impl_middleware_fn_service!(E1, E2, E3, E4, E5); impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6); impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7); impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7, E8); impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7, E8, E9); /// Wraps the "next" service in the middleware chain. pub struct Next { service: RcService, Error>, } impl Next { /// Equivalent to `Service::call(self, req)`. pub fn cal/ This works when Sized is required let dyn_rc_box: Rc> = Rc::new(Box::new(A {})); let data_arc_box = LocalData::from(dyn_rc_box); // This works when Data Sized Bound is removed let dyn_rc: Rc = Rc::new(A {}); let data_arc = LocalData::from(dyn_rc); assert_eq!(data_arc_box.get_num(), data_arc.get_num()) } #[actix_web::test] async fn test_get_ref_from_dyn_data() { let dyn_rc: Rc = Rc::new(A {}); let data_arc = LocalData::from(dyn_rc); let ref_data: &dyn TestTrait = &*data_arc; assert_eq!(data_arc.get_num(), ref_data.get_num()) } } use std::io; use actix_web::{get, App, HttpServer, Responder}; use actix_web_lab::respond::Cbor; use serde::Serialize; use tracing::info; #[derive(Debug, Serialize)] struct Test { one: u32, two: String, } #[get("/")] async fn index() -> impl Responder { Cbor(Test { one: 42, two: "two".to_owned(), }) } #[ [456, 789], ])) .into_body_stream(); let body_bytes = body::to_bytes(ndjson_body) .await .map_err(Into::>::into) .unwrap(); const EXP_BYTES: &str = "123,456\n\ 789,12\n\ 345,678\n\ 901,234\n\ 456,789\n"; assert_eq!(body_bytes, EXP_BYTES); } } use std::{any::type_name, ops::Deref, rc::Rc}; use actix_utils::future::{err, ok, Ready}; use actix_web::{dev::Payload, error, Error, FromRequest, HttpRequest}; use tracing::debug; /// A thread-local equivalent to [`SharedData`](crate::extract::SharedData). #[doc(alias = "state")] #[derive(Debug)] pub struct LocalData(Rc); impl LocalData { /// Constructs a new `LocalData` instance. pub fn new(item: T) -> LocalData { LocalData(Rc::new(item)) } } impl Deref for LocalData { type Target = T; fn deref(&self) -> &T { &self.0 } } impl Clone for Lo-results", n_items)) // alternative if you need more control of the HttpResponse: // // HttpResponse::Ok() // .insert_header(("content-type", NdJson::mime())) // .insert_header(("num-results", n_items)) // .body(NdJson::new(data_stream).into_body_stream()) } /// A comparison route that loads all the data into memory before sending it to the client. /// /// If you provide a high number in the query string like `?n=300000` you should be able to observe /// increasing memory usage of the process in your process monitor. 
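// A synchronous, std-only sketch of the panic-reporting pattern described above: run
// the work under `catch_unwind`, hand the panic payload to a callback (which may
// downcast it), then resume unwinding. The middleware does the equivalent around the
// wrapped service call using `FutureExt::catch_unwind`.
use std::any::Any;
use std::panic::{self, AssertUnwindSafe};

fn with_panic_reporter<T>(cb: impl Fn(&(dyn Any + Send)), work: impl FnOnce() -> T) -> T {
    match panic::catch_unwind(AssertUnwindSafe(work)) {
        Ok(val) => val,
        Err(panic_err) => {
            // Invoke the callback with the panic payload...
            cb(&*panic_err);
            // ...then continue unwinding, as if the panic had not been caught.
            panic::resume_unwind(panic_err)
        }
    }
}

fn main() {
    let reporter = |payload: &(dyn Any + Send)| {
        // Panics invoked with a string literal can be downcast to `&str`.
        if let Some(msg) = payload.downcast_ref::<&str>() {
            eprintln!("worker panicked: {msg}");
        }
    };

    // The return value passes straight through when nothing panics.
    assert_eq!(with_panic_reporter(reporter, || 1 + 1), 2);
}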
#[get("/users-high-mem")] async fn get_high_mem_user_list(opts: web::Query) -> impl Responder { let n_items = opts.n.unwrap_or(10); let mut stream = streaming_data_source(n_items); // buffer all data from the source into a Bytes container let mut buf = BytesMut::new().writer(); while let Some(Ok(item)) = stream.next().await { serde_json::to_writer(&mut buf, &item).unwrap(); buf.write_all(b"\n").unwrap(); } Hrn self.tx.send(Err(err)).map_err(|SendError(err)| match err { Ok(_) => unreachable!(), Err(err) => err, }); } Ok(()) } } #[derive(Debug)] struct Receiver { rx: UnboundedReceiver>, } impl Receiver { fn new(rx: UnboundedReceiver>) -> Self { Self { rx } } } impl MessageBody for Receiver where E: Into, { type Error = E; fn size(&self) -> BodySize { BodySize::Stream } fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>> { self.rx.poll_recv(cx) } } #[cfg(test)] mod tests { use std::io; use super::*; static_assertions::assert_impl_all!(Sender: Send, Sync, Unpin); static_assertions::assert_impl_all!(Receiver: Send, Sync, Unpin, MessageBody); } use actix_hash::{BodyHash, BodySha256}; use actix_http::BoxedPayloadStreamregister_service( rdef, None, fn_service(move |req| serve_index(req, self.index_file.clone())), None, ); } } async fn serve_index( req: ServiceRequest, index_file: String, ) -> Result { trace!("serving default SPA page"); let (req, _) = req.into_parts(); let file = NamedFile::open_async(&index_file).await?; let res = file.into_response(&req); Ok(ServiceResponse::new(req, res)) } impl Default for Spa { fn default() -> Self { Self { index_file: Cow::Borrowed("./index.html"), static_resources_mount: Cow::Borrowed("/"), static_resources_location: Cow::Borrowed("./"), } } } // Code mostly copied from `tower`: // https://github.com/tower-rs/tower/tree/5064987f/tower/src/load_shed //! Load-shedding middleware. use std::{ cell::Cell, error::Error as StdError, fmt, future::Future, pin::Pin, task::{ready, Contexrde(rename = "user")] /// users: Vec, /// } /// /// // Deserialize `LogsParams` struct from query string. /// // This handler gets called only if the request's query parameters contain both fields. /// // A valid request path for this handler would be `/logs?type=reports&user=foo&user=bar"`. /// #[get("/logs")] /// async fn index(info: Query) -> impl Responder { /// let LogsParams { log_type, users } = info.into_inner(); /// format!("Logs request for type={log_type} and user list={users:?}!") /// } /// /// // Or use destructuring, which is equivalent to `.into_inner()`. /// #[get("/debug2")] /// async fn debug2(Query(info): Query) -> impl Responder { /// dbg!("Authorization object = {info:?}"); /// "OK" /// } /// ``` #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct Query(pub T); impl_more::impl_deref_and_mut!( in Query => T); impl_more::forward_display!( in Query); impl Query { /// Unwrap into inner `T` value. redirects, })) } } pub struct NormalizePathService { service: S, merge_slash: Regex, trailing_slash_behavior: TrailingSlash, use_redirects: Option, } impl Service for NormalizePathService where S: Service, Error = Error>, S::Future: 'static, { type Response = ServiceResponse>; type Error = Error; type Future = NormalizePathFuture; actix_service::forward_ready!(service); fn call(&self, mut req: ServiceRequest) -> Self::Future { let head = req.head_mut(); let mut path_altered = false; let original_path = head.uri.path(); // An empty path here means that the URI has no valid path. 
We skip normalization in this // case, because adding a path can make the URI invalid if !original_path.is_empty() { // Either adds a string to the end (duplicates will be removed anyways) or trims all ly. /// /// # Normalization Steps /// - Merges consecutive slashes into one. (For example, `/path//one` always becomes `/path/one`.) /// - Appends a trailing slash if one is not present, removes one if present, or keeps trailing /// slashes as-is, depending on which [`TrailingSlash`] variant is supplied /// to [`new`](NormalizePath::new()). /// /// # Default Behavior /// The default constructor chooses to strip trailing slashes from the end of paths with them /// ([`TrailingSlash::Trim`]). The implication is that route definitions should be defined without /// trailing slashes or else they will be inaccessible (or vice versa when using the /// `TrailingSlash::Always` behavior), as shown in the example tests below. /// /// # Examples /// ``` /// use actix_web::{web, middleware, App}; /// /// # actix_web::rt::System::new().block_on(async { /// let app = App::new() /// .wrap(middleware::NormalizePath::trim()) /// .route("/test", web::get().to(|| async { "test" })) /// .route("/unmatchable/", web: = match self.0 { true => req.into_response("short-circuited").map_into_right_body(), false => next.call(req).await?.map_into_left_body(), }; res.headers_mut() .insert(header::WARNING, HeaderValue::from_static("42")); Ok(res) } pub fn into_middleware( self, ) -> impl Transform< S, ServiceRequest, Response = ServiceResponse, Error = Error, InitError = (), > where S: Service, Error = Error> + 'static, B: MessageBody + 'static, { let this = Rc::new(self); from_fn(move |req, next| { let this = Rc::clone(&this); async move { Self::mw_cb(&this, req, next).await } }) } } #[actix_web::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); let bind = ("127.0.0.1", 8080); inond item is the responder and can, therefore, be used as a handler return type directly. /// The stream will be closed after all [senders](SseSender) are dropped. /// /// Read more about server-sent events in [this MDN article][mdn-sse]. /// /// See [module docs](self) for usage example. /// /// [mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events pub fn channel(buffer: usize) -> (Sender, Sse) { let (tx, rx) = mpsc::channel(buffer); ( Sender { tx }, Sse { stream: ChannelStream(rx), keep_alive: None, retry_interval: None, }, ) } /// Stream implementation for channel-based SSE [`Sender`]. #[derive(Debug)] pub struct ChannelStream(mpsc::Receiver); impl Stream for ChannelStream { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.0.poll_recv(cx).map(|ev| ev.map(Ok)) A comment message. /// /// Comments look like this in the response stream. /// ```plain /// : my comment /// /// : another comment /// ``` Comment(ByteString), } #[doc(hidden)] #[deprecated(since = "0.17.0", note = "Renamed to `Event`. Prefer `sse::Event`.")] pub type SseMessage = Event; impl Event { /// Splits data into lines and prepend each line with `prefix`. 
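// A std-only sketch of the normalization steps listed above for the trim behaviour
// (the middleware itself merges slashes with a compiled regex): collapse runs of
// slashes into one and drop a trailing slash, while keeping the root path resolvable.
fn normalize_path_trim(path: &str) -> String {
    // An empty path means the URI has no path component; skip normalization.
    if path.is_empty() {
        return String::new();
    }

    // Merge consecutive slashes into one.
    let mut out = String::with_capacity(path.len());
    let mut prev_slash = false;
    for ch in path.chars() {
        if ch == '/' {
            if !prev_slash {
                out.push('/');
            }
            prev_slash = true;
        } else {
            out.push(ch);
            prev_slash = false;
        }
    }

    // Trim a trailing slash, but keep a lone "/" intact so root stays resolvable.
    if out.len() > 1 && out.ends_with('/') {
        out.pop();
    }

    out
}

fn main() {
    assert_eq!(normalize_path_trim("/path//one"), "/path/one");
    assert_eq!(normalize_path_trim("/test/"), "/test");
    assert_eq!(normalize_path_trim("///"), "/");
}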
fn line_split_with_prefix(buf: &mut BytesMut, prefix: &'static str, data: ByteString) { // initial buffer size guess is len(data) + 10 lines of prefix + EOLs + EOF buf.reserve(data.len() + (10 * (prefix.len() + 1)) + 1); // append prefix + space + line to buffer for line in data.split('\n') { buf.put_slice(prefix.as_bytes()); buf.put_slice(line.as_bytes()); buf.put_u8(b'\n'); } } /// Serializes message into event-stream format. fn into_bytes(self) -> Bytes { let mut buf = BytesMut::new(); match self { e_stream) = sse::channel(10); //! //! // note: sender will typically be spawned or handed off somewhere else //! let _ = sender.send(sse::Event::Comment("my comment".into())).await; //! let _ = sender.send(sse::Data::new("my data").event("chat_msg")).await; //! //! sse_stream.with_retry_duration(Duration::from_secs(10)) //! } //! //! #[get("/from-stream")] //! async fn from_stream() -> impl Responder { //! let event_stream = futures_util::stream::iter([ //! Ok::<_, Infallible>(sse::Event::Data(sse::Data::new("foo"))), //! ]); //! //! sse::Sse::from_stream(event_stream) //! .with_keep_alive(Duration::from_secs(5)) //! } //! ``` //! //! Complete usage examples can be found in the examples directory of the source code repo. #![doc( alias = "server sent", alias = "server-sent", alias = "server sent events", alias = "server-sent events", alias = "event-stream" )] use std::{ convert::Infallible, pin::Pin, task::{Context, Poll}, time::p(); } #[actix_web::test] async fn test_request_extract() { let mut req = TestRequest::with_uri("/name/user1/?id=test").to_srv_request(); let resource = ResourceDef::new("/{key}/{value}/"); resource.capture_match_info(req.match_info_mut()); let (req, mut pl) = req.into_parts(); let s = Path::::from_request(&req, &mut pl).await.unwrap(); assert_eq!(format!("{s}"), "MyStruct(name, user1)"); assert_eq!( format!("{s:?}"), "Path(MyStruct { key: \"name\", value: \"user1\" })" ); let mut s = s.into_inner(); assert_eq!(s.key, "name"); assert_eq!(s.value, "user1"); s.value = "user2".to_string(); assert_eq!(s.value, "user2"); let Path(s) = Path::<(String, String)>::from_request(&req, &mut pl) .await .unwrap(); assert_eq!(s.0, "name"); assert_eq!(s.1, "user1"); let mut req = TestRequest::with_uri("/name/32/").to_b enum Err { Fetch, } impl_more::impl_display_enum!(Err, Fetch => "failed to fetch"); impl std::error::Error for Err {} #[derive(Debug, Deserialize)] pub struct CfIpsResult { ipv4_cidrs: Vec, ipv6_cidrs: Vec, } #[derive(Debug, Deserialize)] #[serde(untagged)] pub enum CfIpsResponse { Success { result: CfIpsResult }, Failure { success: bool }, } /// Trusted IP ranges. #[derive(Debug)] pub struct TrustedIps { cidr_ranges: IpCidrCombiner, } impl TrustedIps { pub fn try_from_response(res: CfIpsResponse) -> Result { let ips = match res { CfIpsResponse::Success { result } => result, CfIpsResponse::Failure { .. } => { tracing::error!("parsing response returned success: false"); return Err(Err::Fetch); } }; let mut cidr_ranges = IpCidrCombiner::new(); for cidr in ips.ipv4_cidrs { cidr_ranges.push(IpCidr::Vet req = this.req.take().unwrap(); debug!( "Failed to deserialize Json<{}> from payload in handler: {}", core::any::type_name::(), req.match_name().unwrap_or_else(|| req.path()) ); Err(err.into()) } Ok(data) => Ok(Json(data)), }; Poll::Ready(res) } } /// Future that resolves to some `T` when parsed from a JSON payload. /// /// Can deserialize any type `T` that implements [`Deserialize`][serde::Deserialize]. 
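// A std-only sketch of the event-stream serialization described above: optional
// `event` and `id` fields, every data line carrying the `data: ` prefix, and a blank
// line terminating the message. The field order here follows the doc example and is
// not load-bearing.
fn serialize_data_event(event: Option<&str>, id: Option<&str>, data: &str) -> String {
    let mut buf = String::new();

    if let Some(event) = event {
        buf.push_str("event: ");
        buf.push_str(event);
        buf.push('\n');
    }

    if let Some(id) = id {
        buf.push_str("id: ");
        buf.push_str(id);
        buf.push('\n');
    }

    // Multiline data is split so every line carries the prefix.
    for line in data.split('\n') {
        buf.push_str("data: ");
        buf.push_str(line);
        buf.push('\n');
    }

    // An empty line marks the end of the message.
    buf.push('\n');
    buf
}

fn main() {
    assert_eq!(
        serialize_data_event(Some("countdown"), Some("3"), "3"),
        "event: countdown\nid: 3\ndata: 3\n\n",
    );
    assert_eq!(
        serialize_data_event(None, None, "{\n\"multiline\": \"data\"\n}"),
        "data: {\ndata: \"multiline\": \"data\"\ndata: }\n\n",
    );
}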
/// /// Returns error if: /// - `Content-Type` is not `application/json`. /// - `Content-Length` is greater than `LIMIT`. /// - The payload, when consumed, is not valid JSON. pub enum JsonBody { Error(Option), Body { /// Length as reported by `Content-Length` header, if present. length: Option, // #[cfg(feature = "__compress")] // payload: Decompress, // next load in handler loads new value let extracted_data = SwapData::::extract(&req).await.unwrap(); assert_eq!(**extracted_data.load(), NonCopy(80)); // initial extracted data stays the same assert_eq!(*initial_data, NonCopy(42)); } } //! Experimental body types. //! //! Analogous to the `body` module in Actix Web. pub use crate::{ body_async_write::{writer, Writer}, body_channel::{channel, Sender}, infallible_body_stream::{new_infallible_body_stream, new_infallible_sized_stream}, }; //! Experimental typed headers. pub use crate::{ cache_control::{CacheControl, CacheDirective}, content_length::ContentLength, forwarded::Forwarded, strict_transport_security::StrictTransportSecurity, }; #[cfg(test)] mod header_test_helpers { use std::fmt; use actix_http::header::Header; use actix_web::{test, HttpRequest}; fn req_from_raw_headers, V: AsRef<[u8]>>( header_lines: I, )e { fn eq(&self, other: &ContentLength) -> bool { *self == other.0 } } impl PartialOrd for ContentLength { fn partial_cmp(&self, other: &usize) -> Option { self.0.partial_cmp(other) } } impl PartialOrd for usize { fn partial_cmp(&self, other: &ContentLength) -> Option { self.partial_cmp(&other.0) } } #[cfg(test)] mod tests { use super::*; use crate::header::{assert_parse_eq, assert_parse_fail}; #[test] fn missing_header() { assert_parse_fail::([""; 0]); assert_parse_fail::([""]); } #[test] fn bad_header() { assert_parse_fail::(["-123"]); assert_parse_fail::(["123_456"]); assert_parse_fail::(["123.456"]); // too large for u64 (2^64, 2^64 + 1) assert_parse_fail::(["18446744073709551616"]); //! Experimental route guards. //! //! Analogous to the `guard` module in Actix Web. #[allow(deprecated)] pub use crate::acceptable::Acceptable; //! Extractor for client IP addresses when proxied through Cloudflare. // #![forbid(unsafe_code)] // urgh why cidr-utils #![deny(rust_2018_idioms, nonstandard_style)] #![warn(future_incompatible)] // #![warn(missing_docs)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] mod extract; mod fetch_cf_ips; mod header_v4; // mod header_v6; pub use self::extract::TrustedClientIp; #[cfg(feature = "fetch-ips")] pub use self::fetch_cf_ips::fetch_trusted_cf_ips; pub use self::fetch_cf_ips::{TrustedIps, CF_URL_IPS}; pub use self::header_v4::CfConnectingIp; //! Forwarded typed header. //! //! See [`Forwarded`] docs. use std::{convert::Infallible, str}; use actix_web::{ error::ParseError, http::header::{self, Header, HeaderName, HeaderValue, TryIntoHeaderValue}, HttpMessage, }; use itertools::Itertools as _; // TODO: implement typed parsing of Node identifiers as pergest::{generic_array::GenericArray, Digest}; use futures_core::Stream as _; use pin_project_lite::pin_project; use tracing::trace; /// Parts of the resulting body hash extractor. pub struct BodyHashParts { /// Extracted item. pub inner: T, /// Bytes of the calculated hash. pub hash_bytes: Vec, } /// Wraps an extractor and calculates a body checksum hash alongside. /// /// If your extractor would usually be `T` and you want to create a hash of type `D` then you need /// to use `BodyHash`. E.g., `BodyHash`. /// /// Any hasher that implements [`Digest`] can be used. 
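// A minimal sketch of the three checks listed above, applied to an already-buffered
// body rather than a streaming payload. Assumes the `serde` (with derive) and
// `serde_json` crates; `LIMIT` stands in for the extractor's const-generic limit.
use serde::de::DeserializeOwned;

#[derive(Debug)]
enum JsonBodyError {
    ContentType,
    Overflow { limit: usize },
    Deserialize(serde_json::Error),
}

fn parse_json_body<T: DeserializeOwned, const LIMIT: usize>(
    content_type: &str,
    body: &[u8],
) -> Result<T, JsonBodyError> {
    // 1. The Content-Type must be JSON.
    if content_type != "application/json" {
        return Err(JsonBodyError::ContentType);
    }

    // 2. The payload must not exceed the limit (the real extractor also checks the
    //    Content-Length header up front, before reading the body).
    if body.len() > LIMIT {
        return Err(JsonBodyError::Overflow { limit: LIMIT });
    }

    // 3. The payload must be valid JSON for the target type.
    serde_json::from_slice(body).map_err(JsonBodyError::Deserialize)
}

fn main() {
    #[derive(Debug, serde::Deserialize, PartialEq)]
    struct MyObject {
        name: String,
    }

    let ok: MyObject =
        parse_json_body::<_, 1024>("application/json", br#"{"name":"test"}"#).unwrap();
    assert_eq!(ok, MyObject { name: "test".into() });

    assert!(matches!(
        parse_json_body::<MyObject, 4>("application/json", br#"{"name":"test"}"#),
        Err(JsonBodyError::Overflow { limit: 4 }),
    ));
}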
Type aliases for common hashing algorithms /// are available at the crate root. /// /// # Errors /// This extractor produces no errors of its own and all errors from the underlying extractor are /// propagated correctly; for example, if the payload limits are exceeded. /// /// # When Used On The Wrong Extractor /// Use on a non-body extractor is tolerated unless it is used after a different extractor thignature"))? .ok_or_else(|| error::ErrorUnauthorized("signature not provided"))? .try_into() .map_err(|_| error::ErrorInternalServerError("invalid signature"))?; Ok(Signature::from(sig)) } } #[async_trait(?Send)] impl RequestSignatureScheme for DiscordWebhook { type Signature = (BytesMut, Signature); type Error = Error; async fn init(req: &HttpRequest) -> Result { let ts = Self::get_timestamp(req)?.to_owned(); let candidate_signature = Self::get_signature(req)?; Ok(Self { candidate_signature, chunks: vec![Bytes::from(ts)], }) } async fn consume_chunk(&mut self, _req: &HttpRequest, chunk: Bytes) -> Result<(), Self::Error> { self.chunks.push(chunk); Ok(()) } async fn finalize(self, _req: &HttpRequest) -> Result { let buf_len = self.chunks.iter().map(|chunk| chunk.len()).sum(); let mut buf = ByteRc, #[pin] state: MapResFutState, } } pin_project! { #[project = MapResFutStateProj] enum MapResFutState { Svc { #[pin] fut: SvcFut }, Fn { #[pin] fut: FnFut }, } } impl Future for MapResFut where SvcFut: Future, Error>>, F: Fn(ServiceResponse) -> FnFut, FnFut: Future, Error>>, { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.as_mut().project(); match this.state.as_mut().project() { MapResFutStateProj::Svc { fut } => { let res = ready!(fut.poll(cx))?; let fut = (this.mw_fn)(res); this.state.set(MapResFutState::Fn { fut }); self.poll(cx) } MapResFutStateProj::Fn { fut } => fu chunk stream. /// /// This could be stabilized into Actix Web as `SizedStream::from_infallible()`. pub fn new_infallible_sized_stream>( size: u64, stream: S, ) -> SizedStream> { SizedStream::new(size, InfallibleStream::new(stream)) } use std::{convert::Infallible, io, time::Duration}; use actix_web::{get, middleware::Logger, App, HttpRequest, HttpServer, Responder}; use actix_web_lab::{extract::Path, respond::Html, sse}; use futures_util::stream; use time::format_description::well_known::Rfc3339; use tokio::time::sleep; #[get("/")] async fn index() -> impl Responder { Html(include_str!("./assets/sse.html").to_string()) } /// Countdown event stream starting from 8. #[get("/countdown")] async fn countdown(req: HttpRequest) -> impl Responder { // note: a more production-ready implementation might want to use the lastEventId header // sent by the reconnecting browser after the _retry_ period tracing::debug!("lastEventId: {:?}", req.headere> UrlEncodedFormBody { /// Create a new future to decode a URL-encoded request payload. pub fn new(req: &HttpRequest, payload: &mut Payload) -> Self { // check content-type let can_parse_form = if let Ok(Some(mime)) = req.mime_type() { mime == mime::APPLICATION_WWW_FORM_URLENCODED } else { false }; if !can_parse_form { return UrlEncodedFormBody::Error(Some(UrlencodedError::ContentType)); } let length = req .headers() .get(&header::CONTENT_LENGTH) .and_then(|l| l.to_str().ok()) .and_then(|s| s.parse::().ok()); // Notice the content-length is not checked against config limit here. // As the internal usage always call UrlEncodedBody::limit after UrlEncodedBody::new. 
// And limit check to return an error variant of UrlEncodedBody happens there. let payload = payload.take(); if let Some(len) = length { new() /// .wrap(from_fn(my_mw)) /// # ; /// ``` /// /// It is also possible to write a middleware that automatically uses extractors, similar to request /// handlers, by declaring them as the first parameters: /// ``` /// # use std::collections::HashMap; /// # use actix_web::{ /// # App, Error, /// # body::MessageBody, /// # dev::{ServiceRequest, ServiceResponse, Service as _}, /// # web, /// # }; /// use actix_web_lab::middleware::Next; /// /// async fn my_extracting_mw( /// string_body: String, /// query: web::Query>, /// req: ServiceRequest, /// next: Next, /// ) -> Result, Error> { /// // pre-processing /// next.call(req).await /// // post-processing /// } /// # actix_web::App::new().wrap(actix_web_lab::middleware::from_fn(my_extracting_mw)); pub fn from_fn(mw_fn: F) -> MiddlewareFn { MiddlewareFn { mw_fn: Rc::new(mw_fn), _phantom: PhantomData, }g, PartialEq, Eq, AsRef, Display, From)] pub struct BodyLimit { inner: T, } impl BodyLimit { /// Returns inner extracted type. pub fn into_inner(self) -> T { self.inner } } impl FromRequest for BodyLimit where T: FromRequest + 'static, T::Error: fmt::Debug + fmt::Display, { type Error = BodyLimitError; type Future = BodyLimitFut; fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { // fast check of Content-Length header match req.get_header::() { // CL header indicated that payload would be too large Some(len) if len > LIMIT => return BodyLimitFut::new_error(BodyLimitError::Overflow), _ => {} } let counter = crate::util::fork_request_payload(payload); BodyLimitFut { inner: Inner::Body { fut: Box::pin( if let Some(st) = req.app_data::>() { ok(st.clone()) } else { debug!( "Failed to extract `LocalData<{}>` for `{}` handler. For the LocalData extractor \ to work correctly, wrap the data with `LocalData::new()` and pass it to \ `App::app_data()`. Ensure that types align in both the set and retrieve calls.", type_name::(), req.match_name().unwrap_or_else(|| req.path()) ); err(error::ErrorInternalServerError( "Requested application data is not configured correctly. \ View/enable debug logs for more details.", )) } } } #[cfg(test)] mod tests { use actix_web::{ dev::Service, http::StatusCode, test::{init_service, TestRequest}, web, App, HttpResponse, }; use super::*; trait TestTrait { fn get_num(&self) -> i32; } struct A {} impl TestTrhttp://{}:{}", &bind.0, &bind.1); HttpServer::new(|| { App::new() .service(get_user_list) .service(get_high_mem_user_list) }) .workers(1) .bind(bind)? .run() .await } fn random_email() -> String { let rng = rand::thread_rng(); let id: String = rng .sample_iter(Alphanumeric) .take(10) .map(char::from) .collect(); format!("user_{id}@example.com") } fn random_address() -> String { let mut rng = rand::thread_rng(); let street_no: u16 = rng.gen_range(10..99); format!("{street_no} Random Street") } /// Create a `TestRequest` using a DSL that looks kinda like on-the-wire HTTP/1.x requests. /// /// # Examples /// ``` /// use actix_web::test::TestRequest; /// use actix_web_lab::test_request; /// /// let _req: TestRequest = test_request! { /// POST "/"; /// "Origin" => "example.com" /// "Access-Control-Request-Method" => "POST" /// "Access-Control-Request-Headers" => "Content-Type, X-CSRFxFuture; use futures_util::FutureExt as _; /// A middleware to catch panics in wrapped handlers and middleware, returning empty 500 responses. 
/// /// **This middleware should never be used as replacement for proper error handling.** See [this /// thread](https://github.com/actix/actix-web/issues/1501#issuecomment-627517783) for historical /// discussion on why Actix Web does not do this by default. /// /// It is recommended that this middleware be registered last. That is, `wrap`ed after everything /// else except `Logger`. /// /// # Examples /// ``` /// # use actix_web::App; /// use actix_web_lab::middleware::CatchPanic; /// /// App::new() /// .wrap(CatchPanic::default()) /// # ; /// ``` /// /// ```ignore /// // recommended wrap order /// App::new() /// .wrap(NormalizePath::default()) /// .wrap(CatchPanic::default()) // <- after everything except logger /// .wrap(Logger::default()) /// # ; /// ``` #[derive(Debug, Clone, Default)] #[non_exhaustive] pub struct CatchPanic; impl rr(err)) => return Poll::Ready(Err(Overloaded::Service(err))), res => res.is_ready(), }; self.is_ready.set(is_ready); // But we always report Ready, so that layers above don't wait until // the inner service is ready (the entire point of this layer!) Poll::Ready(Ok(())) } fn call(&self, req: Req) -> Self::Future { if self.is_ready.get() { // readiness only counts once, you need to check again! self.is_ready.set(false); LoadShedFuture::called(self.inner.call(req)) } else { LoadShedFuture::overloaded() } } } pin_project! { /// Future for [`LoadShedService`]. pub struct LoadShedFuture { #[pin] state: LoadShedFutureState, } } pin_project! { #[project = LoadShedFutureStateProj] enum LoadShedFutureState { Called { #[pin] fut: F }, Overloaded, } } impl LoadShedFuture { pub(crate) fn called(fut: F) -> Se:Blake2s256, "blake2", "Blake2s", 32); // Blake3 body_hash_alias!(BodyBlake3, blake3::Hasher, "blake3", "Blake3", 32); //! Demonstrates forking a request payload so that multiple extractors can derive data from a body. //! //! ```sh //! curl -X POST localhost:8080/ -d 'foo' //! //! # or using HTTPie //! http POST :8080/ --raw foo //! ``` use std::io; use actix_web::{dev, middleware, web, App, FromRequest, HttpRequest, HttpServer}; use actix_web_lab::util::fork_request_payload; use futures_util::{future::LocalBoxFuture, TryFutureExt as _}; use tokio::try_join; use tracing::info; struct TwoBodies(T, U); impl TwoBodies { fn into_parts(self) -> (T, U) { (self.0, self.1) } } impl FromRequest for TwoBodies where T: FromRequest, T::Future: 'static, U: FromRequest, U::Future: 'static, { type Error = actix_web::Error; type Future = LocalBoxFuture<'static, Result>; fn from_request(req: &HttpRequest, pl: &mut dev::Payload////something?query=test", ]; for uri in test_uris { let req = TestRequest::with_uri(uri).to_request(); let res = call_service(&app, req).await; assert!(res.status().is_success(), "Failed uri: {uri}"); } } #[actix_web::test] async fn always_trailing_slashes() { let app = init_service( App::new() .wrap(NormalizePath::new(TrailingSlash::Always)) .service(web::resource("/").to(HttpResponse::Ok)) .service(web::resource("/v1/something/").to(HttpResponse::Ok)) .service( web::resource("/v2/something/") .guard(fn_guard(|ctx| ctx.head().uri.query() == Some("query=test"))) .to(HttpResponse::Ok), ), ) .await; let test_uris = vec![ "/", "///", "/v1/something", "/v1/something/", "/v1/something////", h` middleware with the specified trailing slash style. pub fn new(behavior: TrailingSlash) -> Self { Self { trailing_slash_behavior: behavior, use_redirects: None, } } /// Constructs a new `NormalizePath` middleware with [trim](TrailingSlash::Trim) semantics. 
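///
/// A wiring sketch (not from the original docs):
///
/// ```ignore
/// use actix_web::App;
///
/// // Trim trailing slashes and redirect requests whose paths were not already normalized.
/// App::new().wrap(NormalizePath::trim().use_redirects());
/// ```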
/// /// Use this instead of `NormalizePath::default()` to avoid deprecation warning. pub fn trim() -> Self { Self::new(TrailingSlash::Trim) } /// Configures middleware to respond to requests with non-normalized paths with a 307 redirect. /// /// If configured /// /// For example, a request with the path `/api//v1/foo/` would receive a response with a /// `Location: /api/v1/foo` header (assuming `Trim` trailing slash behavior.) /// /// To customize the status code, use [`use_redirects_with`](Self::use_redirects_with). pub fn use_redirects(mut self) -> Self { self.use_redirects = Some(StatusCode::TEMPORARY_REDIRECT); self } .body("Hello World!"), /// ); /// /// assert_response_matches!(res, CREATED; /// "date" => "today" /// "set-cookie" => "a=b"; /// @raw "Hello World!" /// ); /// /// let res = ServiceResponse::new( /// TestRequest::default().to_http_request(), /// HttpResponse::Created() /// .content_type(ContentType::json()) /// .insert_header(("date", "today")) /// .insert_header(("set-cookie", "a=b")) /// .body(r#"{"abc":"123"}"#), /// ); /// /// assert_response_matches!(res, CREATED; @json { "abc": "123" }); /// # }); /// ``` #[macro_export] macro_rules! assert_response_matches { ($res:ident, $status:ident) => {{ assert_eq!($res.status(), ::actix_web::http::StatusCode::$status) }}; ($res:ident, $status:ident; $($hdr_name:expr => $hdr_val:expr)+) => {{ assert_response_matches!($res, $status); $( assert_eq!( $res.headers().get(::actix_web::http::header::HeaderName::from_static($hdr_name)).unwrap(), assert_eq!( Event::Data(Data { id: Some("42".into()), event: Some("bar".into()), data: "foo".into() }) .into_bytes(), "id: 42\nevent: bar\ndata: foo\n\n" ); } #[test] fn retry_is_first_msg() { let waker = noop_waker(); let mut cx = Context::from_waker(&waker); { let (_sender, mut sse) = channel(9); assert!(Pin::new(&mut sse).poll_next(&mut cx).is_pending()); } { let (_sender, sse) = channel(9); let mut sse = sse.with_retry_duration(Duration::from_millis(42)); match Pin::new(&mut sse).poll_next(&mut cx) { Poll::Ready(Some(Ok(bytes))) => assert_eq!(bytes, "retry: 42\n\n"), res => panic!("poll should return retry message, got {res:?}"), } } } #[actix_web::test] async fn dropping_responder_causes_send_fails() { let (sender,(source))] Event), /// The receiving ([`Sse`]) has been dropped, likely because the client disconnected. #[display(fmt = "channel closed")] Closed(#[error(not(source))] Event), } #[doc(hidden)] #[deprecated( since = "0.17.0", note = "Renamed to `TrySendError`. Prefer `sse::TrySendError`." )] pub type SseTrySendError = TrySendError; /// Server-sent events data message containing a `data` field and optional `id` and `event` fields. /// /// Since it implements `Into`, this can be passed directly to [`send`](SseSender::send) /// or [`try_send`](SseSender::try_send). /// /// # Examples /// ``` /// # #[actix_web::main] async fn test() { /// use std::convert::Infallible; /// use actix_web::body; /// use serde::Serialize; /// use futures_util::stream; /// use actix_web_lab::sse; /// /// #[derive(serde::Serialize)] /// struct Foo { /// bar: u32, /// } /// /// let sse = sse::Sse::from_stream(stream::iter([ /// Ok::<_, Infallible>(sse::Event::Data(sse::Data::new("foo"))), /// .app_data(web::Data::new(42u32)) .default_service(web::to(handler)) }); let res = srv.post("/").send_body("foo").await.unwrap(); assert_eq!(res.status(), StatusCode::OK); } //! Alternative approach to using `BodyHmac` type using more flexible `RequestSignature` type. 
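//!
//! Rough shape of the handler this example builds towards (not part of the original file; the
//! generic parameter order on `RequestSignature` is an assumption):
//!
//! ```ignore
//! use actix_web::web::Bytes;
//! use actix_web_lab::extract::RequestSignature;
//!
//! async fn handler(_body: RequestSignature<Bytes, AbcApi>) -> &'static str {
//!     // If extraction succeeded, `AbcApi` was driven over the timestamp and full payload.
//!     "ok"
//! }
//! ```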
use std::io; use actix_web::{ middleware::Logger, web::{self, Bytes, Data}, App, Error, FromRequest, HttpRequest, HttpServer, }; use actix_web_lab::extract::{RequestSignature, RequestSignatureScheme}; use async_trait::async_trait; use digest::{CtOutput, Mac}; use hmac::SimpleHmac; use sha2::Sha256; use tracing::info; struct AbcSigningKey([u8; 32]); /// Grabs variable signing key from app data. async fn get_signing_key(req: &HttpRequest) -> actix_web::Result<[u8; 32]> { let key = Data::::extract(req).into_inner()?.0; Ok(key) } #[derive(Debug)] struct AbcApi { /// Payload hash state. hmac: SimpleHmac, } #[async_trait(?Send)] impl RequestSignatureScheme for Aody}, /// web::{BufMut as _, BytesMut}, /// HttpRequest, /// }; /// /// async fn append_bytes( /// _req: HttpRequest, /// body: impl MessageBody /// ) -> actix_web::Result { /// let buf = body::to_bytes(body).await.ok().unwrap(); /// /// let mut body = BytesMut::from(&buf[..]); /// body.put_slice(b" - hope you like things ruining your payload format"); /// /// Ok(body) /// } /// # actix_web::App::new().wrap(map_response_body(append_bytes)); /// ``` pub fn map_response_body(mapper_fn: F) -> MapResBodyMiddleware { MapResBodyMiddleware { mw_fn: Rc::new(mapper_fn), } } /// Middleware transform for [`map_response_body`]. pub struct MapResBodyMiddleware { mw_fn: Rc, } impl Transform for MapResBodyMiddleware where S: Service, Error = Error>, F: Fn(HttpRequest, B) -> Fut, Fut: Future>, B2: MessageBody, { tains("JSON payload (16 bytes) is larger than allowed (limit: 10 bytes)."), "unexpected error string: {err:?}" ); let (req, mut pl) = TestRequest::default() .insert_header(header::ContentType::json()) .insert_header(( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), )) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .to_http_parts(); let s = Json::::from_request(&req, &mut pl).await; let err = format!("{}", s.unwrap_err()); assert!( err.contains("larger than allowed"), "unexpected error string: {err:?}" ); } #[actix_web::test] async fn test_json_body() { let (req, mut pl) = TestRequest::default().to_http_parts(); let json = JsonBody::::new(&req, &mut pl).await; assert!(json_eq(json.unwrap_err(), JsonPayloadError::ContentType)); zeOwned; use tracing::debug; /// Default JSON payload size limit of 2MiB. pub const DEFAULT_JSON_LIMIT: usize = 2_097_152; /// JSON extractor with const-generic payload size limit. /// /// `Json` is used to extract typed data from JSON request payloads. /// /// # Extractor /// To extract typed data from a request body, the inner type `T` must implement the /// [`serde::Deserialize`] trait. /// /// Use the `LIMIT` const generic parameter to control the payload size limit. The default limit /// that is exported (`DEFAULT_LIMIT`) is 2MiB. /// /// ``` /// use actix_web::{post, App}; /// use actix_web_lab::extract::{DEFAULT_JSON_LIMIT, Json}; /// use serde::Deserialize; /// /// #[derive(Deserialize)] /// struct Info { /// username: String, /// } /// /// /// Deserialize `Info` from request's body. /// #[post("/")] /// async fn index(info: Json) -> String { /// format!("Welcome {}!", info.username) /// } /// /// const LIMIT_32_MB: usize = 33_554_432; /// /// /// Deserialize payload with a higher 32Mi /// async fn handler() -> impl Responder { /// let data_stream = streaming_data_source(); /// /// DisplayStream::new_infallible(data_stream) /// .into_responder() /// } /// ``` pub struct DisplayStream { // The wrapped item stream. 
#[pin] stream: S, } } impl DisplayStream { /// Constructs a new `DisplayStream` from a stream of lines. pub fn new(stream: S) -> Self { Self { stream } } } impl DisplayStream { /// Constructs a new `DisplayStream` from an infallible stream of lines. pub fn new_infallible(stream: S) -> DisplayStream> { DisplayStream::new(InfallibleStream::new(stream)) } } impl DisplayStream where S: Stream>, T: fmt::Display, E: Into> + 'static, { /// Creates a chunked body stream that serializes as CSV on-the-fly. pub fn into_body_stream(self) -> impl MessageBody { BodyStr #[test] fn for_multiple() { let fwd = Forwarded { r#for: vec!["192.0.2.60".to_owned(), "198.51.100.17".to_owned()], ..Forwarded::default() }; assert_eq!(fwd.for_client().unwrap(), "192.0.2.60"); assert_parse_eq::(["for=192.0.2.60, for=198.51.100.17"], fwd); } } //! Expiremental responders and response helpers. pub use crate::{csv::Csv, display_stream::DisplayStream, html::Html, ndjson::NdJson}; #[cfg(feature = "cbor")] pub use crate::cbor::Cbor; #[cfg(feature = "msgpack")] pub use crate::msgpack::{MessagePack, MessagePackNamed}; //! Content-Length typed header. //! //! See [`ContentLength`] docs. use std::{convert::Infallible, str}; use actix_web::{ error::ParseError, http::header::{ from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue, CONTENT_LENGTH, }, HttpMessage, }; /// `Content-Length` header, defined in [RFC 9110 §8.6]. /// /// The "Content-Length" header field indicatw `Forwarded` header from a single "for" identifier. pub fn new_for(r#for: impl Into) -> Self { Self { by: None, r#for: vec![r#for.into()], host: None, proto: None, } } /// Returns first "for" parameter which is typically the client's identifier. pub fn for_client(&self) -> Option<&str> { // Taking the first value for each property is correct because spec states that first "for" // value is client and rest are proxies. We collect them in the order they are read. // // ```plain // > In a chain of proxy servers where this is fully utilized, the first // > "for" parameter will disclose the client where the request was first // > made, followed by any subsequent proxy identifiers. // - https://datatracker.ietf.org/doc/html/rfc7239#section-5.2 // ``` self.r#for.first().map(String::as_str) } /// Returns iterator over the "for" chain. /nue, // we can't read the file }; let filename = match entry.file_name().into_string() { Ok(filename) => filename, Err(_) => continue, // the file has a non UTF-8 name }; let mut entry = zipper .write_entry_stream(ZipEntryBuilder::new( filename, async_zip::Compression::Deflate, )) .await .map_err(zip_to_io_err)?; tokio::io::copy(&mut file, &mut entry).await?; entry.close().await.map_err(zip_to_io_err)?; } Ok(()) } #[get("/")] async fn index() -> impl Responder { let (wrt, body) = body::writer(); // allow response to be started while this is processing #[allow(clippy::let_underscore_future)] let _ = tokio::spawn(async move { let mut zipper = async_zip::write::ZipFileWriter::new(wrt); if let Err(err) = read_dir(&mut zipper).await { tracing::warn!("Failed to write files from directory to zip: {err}") tatic") /// .static_resources_location("./examples/assets") /// .finish() /// ); /// ``` #[cfg(feature = "spa")] pub fn spa() -> Spa { Spa::default() } //! MessagePack responder. 
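//!
//! Usage sketch (not part of the original file; assumes the `msgpack` crate feature is enabled and
//! the type is re-exported as `actix_web_lab::respond::MessagePack`):
//!
//! ```ignore
//! use actix_web::{get, Responder};
//! use actix_web_lab::respond::MessagePack;
//! use serde::Serialize;
//!
//! #[derive(Serialize)]
//! struct Reading {
//!     celsius: f32,
//! }
//!
//! #[get("/reading")]
//! async fn reading() -> impl Responder {
//!     // Serialized with `rmp_serde` and sent with an `application/msgpack` content type.
//!     MessagePack(Reading { celsius: 21.5 })
//! }
//! ```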
use actix_web::{HttpRequest, HttpResponse, Responder}; use bytes::Bytes; use derive_more::{Deref, DerefMut, Display}; use mime::Mime; use once_cell::sync::Lazy; use serde::Serialize; static MSGPACK_MIME: Lazy = Lazy::new(|| "application/msgpack".parse().unwrap()); /// MessagePack responder. /// /// If you require the fields to be named, use [`MessagePackNamed`]. #[derive(Debug, Deref, DerefMut, Display)] pub struct MessagePack(pub T); impl Responder for MessagePack { type Body = Bytes; fn respond_to(self, _req: &HttpRequest) -> HttpResponse { let body = Bytes::from(rmp_serde::to_vec(&self.0).unwrap()); HttpResponse::Ok() .content_type(MSGPACK_MIME.clone()) .message_body(body) .unwrap() } } /// Macing::warn!("client disconnected; could not send SSE message"); break; } sleep(Duration::from_secs(10)).await; } }); sse.with_keep_alive(Duration::from_secs(3)) } #[actix_web::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); tracing::info!("starting HTTP server at http://localhost:8080"); HttpServer::new(|| { App::new() .service(index) .service(countdown) .service(countdown_from) .service(timestamp) .wrap(Logger::default()) }) .workers(2) .bind(("127.0.0.1", 8080))? .run() .await } use std::{ future::{ready, Future, Ready}, marker::PhantomData, pin::Pin, rc::Rc, task::{Context, Poll}, }; use actix_service::{forward_ready, Service, Transform}; use actix_web::{ body::MessageBody, dev::{ServiceRequest, ServiceResponse}, Error, }; use futures_core::read /// Constructs new panic reporter middleware with `callback`. pub fn new(callback: impl Fn(&(dyn Any + Send)) + 'static) -> Self { Self { cb: Rc::new(callback), } } } impl std::fmt::Debug for PanicReporter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("PanicReporter") .field("cb", &"") .finish() } } impl Transform for PanicReporter where S: Service, S::Future: 'static, { type Response = S::Response; type Error = S::Error; type Transform = PanicReporterMiddleware; type InitError = (); type Future = Ready>; fn new_transform(&self, service: S) -> Self::Future { ready(Ok(PanicReporterMiddleware { service: Rc::new(service), cb: Rc::clone(&self.cb), })) } } pub struct PanicReporterMiddleware { service: Rc, cb: PanicCallback, } immBody::Error(Some(UrlencodedError::Overflow { size: len, limit: LIMIT, })); } } UrlEncodedFormBody::Body { length, payload, buf: web::BytesMut::with_capacity(8192), _res: PhantomData, } } } impl Future for UrlEncodedFormBody { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); match this { UrlEncodedFormBody::Body { buf, payload, .. 
} => loop { let res = ready!(Pin::new(&mut *payload).poll_next(cx)); match res { Some(chunk) => { let chunk = chunk?; let buf_len = buf.len() + chunk.len(); if buf_len > LIMIT { return Poll::Ready(Err(Urlenclt>; forward_ready!(service); fn call(&self, req: ServiceRequest) -> Self::Future { self.service.call(req) } } #[cfg(test)] mod tests { use actix_web::{ http::header::{self, HeaderValue}, middleware::{Compat, Logger}, test, web, App, HttpResponse, }; use super::*; async fn noop(req: ServiceRequest, next: Next) -> Result, Error> { next.call(req).await } async fn add_res_header( req: ServiceRequest, next: Next, ) -> Result, Error> { let mut res = next.call(req).await?; res.headers_mut() .insert(header::WARNING, HeaderValue::from_static("42")); Ok(res) } async fn mutate_body_type( req: ServiceRequest, next: Next, ) -> Result, Error> { let res = next.call(req).await?; Ok(res.map_into_left_body:::Overflow"), } } } impl ResponseError for BodyLimitError where T: FromRequest + 'static, T::Error: fmt::Debug + fmt::Display, { } #[cfg(test)] mod tests { use actix_web::{http::header, test::TestRequest}; use bytes::Bytes; use super::*; static_assertions::assert_impl_all!(BodyLimitFut<(), 100>: Unpin); static_assertions::assert_impl_all!(BodyLimitFut: Unpin); #[actix_web::test] async fn within_limit() { let (req, mut pl) = TestRequest::default() .insert_header(header::ContentType::plaintext()) .insert_header(( header::CONTENT_LENGTH, header::HeaderValue::from_static("9"), )) .set_payload(Bytes::from_static(b"123456789")) .to_http_parts(); let body = BodyLimit::::from_request(&req, &mut pl).await; assert_eq!( body.ok().unwrap().into_inner(), Bytes::from_static(b"123456789") ); } actix_utils::future::ok; use actix_web::{ body::BoxBody, dev::{fn_service, Service, ServiceRequest, ServiceResponse}, http::StatusCode, Error, HttpResponseBuilder, }; /// Creates service that always responds with given status code and echoes request path as response /// body. pub fn echo_path_service( status_code: StatusCode, ) -> impl Service, Error = Error> { fn_service(move |req: ServiceRequest| { let path = req.path().to_owned(); ok(req.into_response(HttpResponseBuilder::new(status_code).body(path))) }) } use std::{convert::Infallible, error::Error as StdError}; use actix_web::{ body::{BodyStream, MessageBody}, HttpResponse, Responder, }; use bytes::{Bytes, BytesMut}; use futures_core::Stream; use futures_util::TryStreamExt as _; use mime::Mime; use pin_project_lite::pin_project; use serde::Serialize; use crate::util::{InfallibleStream, MutWriter}; pin_project! { /// A buffered CSV serializing hex!("03ac6742 16f3e15c 761ee1a5 e255f067 953623c8 b388b445 9e13f978 d7c846f4").as_ref() ); } #[actix_web::test] async fn use_on_wrong_extractor_in_wrong_order() { let app = test::init_service(App::new().route( "/", web::get().to( |_body: Json, null: BodyHash<(), Sha256>| async move { Bytes::copy_from_slice(null.hash()) }, ), )) .await; let req = test::TestRequest::default().set_json(1234).to_request(); let res = test::call_service(&app, req).await; assert_eq!(res.status(), StatusCode::OK); let body = test::read_body(res).await; // if the hash wrapper is on a non-body extractor _and_ a body extractor has already taken the // payload, this should return the empty input hash assert_eq!( body, hex!("e3b0c442 98fc1c14 9afbf4c8 996fb924 27ae41e4 649b934c a495991b 7852b855").as_ref() ); } //! How to use `NdJson` as an efficient streaming response type. //! //! The same techniquedy type. 
/// /// # Examples /// ``` /// # use actix_web::{HttpResponse, web}; /// use std::convert::Infallible; /// use actix_web_lab::body; /// /// # async fn index() { /// let (mut body_tx, body) = body::channel::(); /// /// let _ = web::block(move || { /// body_tx.send(web::Bytes::from_static(b"body from another thread")).unwrap(); /// }); /// /// HttpResponse::Ok().body(body) /// # ;} /// ``` pub fn channel>() -> (Sender, impl MessageBody) { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); (Sender::new(tx), Receiver::new(rx)) } /// A channel-like sender for body chunks. #[derive(Debug, Clone)] pub struct Sender { tx: UnboundedSender>, } impl Sender { fn new(tx: UnboundedSender>) -> Self { Self { tx } } /// Submits a chunk of bytes to the response body stream. /// /// # Errors /// Errors if other side of channel body was dropped, returning `chunk`. pub fn send(&mut self, _env(env_logger::Env::new().default_filter_or("info")); info!("staring server at http://localhost:8080"); HttpServer::new(|| { App::new() .wrap(middleware::Logger::default().log_target("@")) .route( "/", web::post().to(|body: TwoBodies| async move { let (string, bytes) = body.into_parts(); // proves that body was extracted twice since the bytes extracted are byte-equal to // the string, without forking the request payload, the bytes parts would be empty assert_eq!(string.as_bytes(), &bytes); // echo string string }), ) }) .workers(1) .bind(("127.0.0.1", 8080))? .run() .await } use std::borrow::Cow; use actix_files::{Files, NamedFile}; use actix_service::fn_service; use actix_web::dev::{HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponseequest for Query { type Error = Error; type Future = Ready>; #[inline] fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { serde_html_form::from_str::(req.query_string()) .map(|val| ready(Ok(Query(val)))) .unwrap_or_else(move |e| { let err = QueryPayloadError::Deserialize(e); debug!( "Failed during Query extractor deserialization. \ Request path: {:?}", req.path() ); ready(Err(err.into())) }) } } #[cfg(test)] mod tests { use actix_web::test::TestRequest; use derive_more::Display; use serde::Deserialize; use super::*; #[derive(Deserialize, Debug, Display)] struct Id { id: String, } async-compression-0.4.13/tests/artifacts/lib.rs000064400000000000000000000263171046102023000176340ustar 00000000000000//! Adaptors between compression crates and Rust's modern asynchronous IO types. //! //! # Feature Organization //! //! This crate is divided up along two axes, which can each be individually selected via Cargo //! features. //! //! All features are disabled by default, you should enable just the ones you need from the lists //! below. //! //! If you want to pull in everything there are three group features defined: //! //! Feature | Does //! ---------|------ //! `all` | Activates all implementations and algorithms. //! `all-implementations` | Activates all implementations, needs to be paired with a selection of algorithms //! `all-algorithms` | Activates all algorithms, needs to be paired with a selection of implementations //! //! ## IO implementation //! //! The first division is which underlying asynchronous IO trait will be wrapped, these are //! available as separate features that have corresponding top-level modules: //! //! Feature | Type //! 
---------|------ // TODO: Kill rustfmt on this section, `#![rustfmt::skip::attributes(cfg_attr)]` should do it, but // that's unstable #![cfg_attr( feature = "futures-io", doc = "[`futures-io`](crate::futures) | [`futures::io::AsyncBufRead`](futures_io::AsyncBufRead), [`futures::io::AsyncWrite`](futures_io::AsyncWrite)" )] #![cfg_attr( not(feature = "futures-io"), doc = "`futures-io` (*inactive*) | `futures::io::AsyncBufRead`, `futures::io::AsyncWrite`" )] #![cfg_attr( feature = "tokio", doc = "[`tokio`](crate::tokio) | [`tokio::io::AsyncBufRead`](::tokio::io::AsyncBufRead), [`tokio::io::AsyncWrite`](::tokio::io::AsyncWrite)" )] #![cfg_attr( not(feature = "tokio"), doc = "`tokio` (*inactive*) | `tokio::io::AsyncBufRead`, `tokio::io::AsyncWrite`" )] //! //! ## Compression algorithm //! //! The second division is which compression schemes to support, there are currently a few //! available choices, these determine which types will be available inside the above modules: //! //! Feature | Types //! ---------|------ #![cfg_attr( feature = "brotli", doc = "`brotli` | [`BrotliEncoder`](?search=BrotliEncoder), [`BrotliDecoder`](?search=BrotliDecoder)" )] #![cfg_attr( not(feature = "brotli"), doc = "`brotli` (*inactive*) | `BrotliEncoder`, `BrotliDecoder`" )] #![cfg_attr( feature = "bzip2", doc = "`bzip2` | [`BzEncoder`](?search=BzEncoder), [`BzDecoder`](?search=BzDecoder)" )] #![cfg_attr( not(feature = "bzip2"), doc = "`bzip2` (*inactive*) | `BzEncoder`, `BzDecoder`" )] #![cfg_attr( feature = "deflate", doc = "`deflate` | [`DeflateEncoder`](?search=DeflateEncoder), [`DeflateDecoder`](?search=DeflateDecoder)" )] #![cfg_attr( not(feature = "deflate"), doc = "`deflate` (*inactive*) | `DeflateEncoder`, `DeflateDecoder`" )] #![cfg_attr( feature = "gzip", doc = "`gzip` | [`GzipEncoder`](?search=GzipEncoder), [`GzipDecoder`](?search=GzipDecoder)" )] #![cfg_attr( not(feature = "gzip"), doc = "`gzip` (*inactive*) | `GzipEncoder`, `GzipDecoder`" )] #![cfg_attr( feature = "lzma", doc = "`lzma` | [`LzmaEncoder`](?search=LzmaEncoder), [`LzmaDecoder`](?search=LzmaDecoder)" )] #![cfg_attr( not(feature = "lzma"), doc = "`lzma` (*inactive*) | `LzmaEncoder`, `LzmaDecoder`" )] #![cfg_attr( feature = "xz", doc = "`xz` | [`XzEncoder`](?search=XzEncoder), [`XzDecoder`](?search=XzDecoder)" )] #![cfg_attr( not(feature = "xz"), doc = "`xz` (*inactive*) | `XzEncoder`, `XzDecoder`" )] #![cfg_attr( feature = "zlib", doc = "`zlib` | [`ZlibEncoder`](?search=ZlibEncoder), [`ZlibDecoder`](?search=ZlibDecoder)" )] #![cfg_attr( not(feature = "zlib"), doc = "`zlib` (*inactive*) | `ZlibEncoder`, `ZlibDecoder`" )] #![cfg_attr( feature = "zstd", doc = "`zstd` | [`ZstdEncoder`](?search=ZstdEncoder), [`ZstdDecoder`](?search=ZstdDecoder)" )] #![cfg_attr( not(feature = "zstd"), doc = "`zstd` (*inactive*) | `ZstdEncoder`, `ZstdDecoder`" )] //! #![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] #![warn( missing_docs, rust_2018_idioms, missing_copy_implementations, missing_debug_implementations )] #![cfg_attr(not(all), allow(unused))] #[cfg(any(feature = "bzip2", feature = "flate2", feature = "xz2"))] use std::convert::TryInto; #[macro_use] mod macros; mod codec; #[cfg(feature = "futures-io")] pub mod futures; #[cfg(feature = "tokio")] pub mod tokio; mod unshared; mod util; #[cfg(feature = "brotli")] use brotli::enc::backward_references::BrotliEncoderParams; /// Level of compression data should be compressed with. 
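///
/// # Example sketch
///
/// Not part of the original docs; assumes the `tokio` and `gzip` features are enabled:
///
/// ```ignore
/// use async_compression::{tokio::bufread::GzipEncoder, Level};
///
/// // Any `AsyncBufRead` source works; a byte slice is used here for brevity.
/// let input: &[u8] = b"example data";
/// let _encoder = GzipEncoder::with_quality(input, Level::Precise(6));
/// ```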
#[non_exhaustive] #[derive(Clone, Copy, Debug)] pub enum Level { /// Fastest quality of compression, usually produces bigger size. Fastest, /// Best quality of compression, usually produces the smallest size. Best, /// Default quality of compression defined by the selected compression algorithm. Default, /// Precise quality based on the underlying compression algorithms' /// qualities. The interpretation of this depends on the algorithm chosen /// and the specific implementation backing it. /// Qualities are implicitly clamped to the algorithm's maximum. Precise(i32), } impl Level { #[cfg(feature = "brotli")] fn into_brotli(self, mut params: BrotliEncoderParams) -> BrotliEncoderParams { match self { Self::Fastest => params.quality = 0, Self::Best => params.quality = 11, Self::Precise(quality) => params.quality = quality.clamp(0, 11), Self::Default => (), } params } #[cfg(feature = "bzip2")] fn into_bzip2(self) -> bzip2::Compression { let fastest = bzip2::Compression::fast(); let best = bzip2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => bzip2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => bzip2::Compression::default(), } } #[cfg(feature = "flate2")] fn into_flate2(self) -> flate2::Compression { let fastest = flate2::Compression::fast(); let best = flate2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => flate2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => flate2::Compression::default(), } } #[cfg(feature = "zstd")] fn into_zstd(self) -> i32 { let (fastest, best) = libzstd::compression_level_range().into_inner(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => quality.clamp(fastest, best), Self::Default => libzstd::DEFAULT_COMPRESSION_LEVEL, } } #[cfg(feature = "xz2")] fn into_xz2(self) -> u32 { match self { Self::Fastest => 0, Self::Best => 9, Self::Precise(quality) => quality.try_into().unwrap_or(0).min(9), Self::Default => 5, } } } #[cfg(feature = "zstd")] /// This module contains zstd-specific types for async-compression. pub mod zstd { use libzstd::stream::raw::CParameter::*; /// A compression parameter for zstd. This is a stable wrapper around zstd's own `CParameter` /// type, to abstract over different versions of the zstd library. /// /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more /// information on these parameters. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CParameter(libzstd::stream::raw::CParameter); impl CParameter { /// Window size in bytes (as a power of two) pub fn window_log(value: u32) -> Self { Self(WindowLog(value)) } /// Size of the initial probe table in 4-byte entries (as a power of two) pub fn hash_log(value: u32) -> Self { Self(HashLog(value)) } /// Size of the multi-probe table in 4-byte entries (as a power of two) pub fn chain_log(value: u32) -> Self { Self(ChainLog(value)) } /// Number of search attempts (as a power of two) pub fn search_log(value: u32) -> Self { Self(SearchLog(value)) } /// Minimum size of matches searched for pub fn min_match(value: u32) -> Self { Self(MinMatch(value)) } /// Strategy-dependent length modifier pub fn target_length(value: u32) -> Self { Self(TargetLength(value)) } /// Enable long-distance matching mode to look for and emit long-distance references. 
/// /// This increases the default window size. pub fn enable_long_distance_matching(value: bool) -> Self { Self(EnableLongDistanceMatching(value)) } /// Size of the long-distance matching table (as a power of two) pub fn ldm_hash_log(value: u32) -> Self { Self(LdmHashLog(value)) } /// Minimum size of long-distance matches searched for pub fn ldm_min_match(value: u32) -> Self { Self(LdmMinMatch(value)) } /// Size of each bucket in the LDM hash table for collision resolution (as a power of two) pub fn ldm_bucket_size_log(value: u32) -> Self { Self(LdmBucketSizeLog(value)) } /// Frequency of using the LDM hash table (as a power of two) pub fn ldm_hash_rate_log(value: u32) -> Self { Self(LdmHashRateLog(value)) } /// Emit the size of the content (default: true). pub fn content_size_flag(value: bool) -> Self { Self(ContentSizeFlag(value)) } /// Emit a checksum (default: false). pub fn checksum_flag(value: bool) -> Self { Self(ChecksumFlag(value)) } /// Emit a dictionary ID when using a custom dictionary (default: true). pub fn dict_id_flag(value: bool) -> Self { Self(DictIdFlag(value)) } /// Number of threads to spawn. /// /// If set to 0, compression functions will block; if set to 1 or more, compression will /// run in background threads and `flush` pushes bytes through the compressor. /// /// # Panics /// /// This parameter requires feature `zstdmt` to be enabled, otherwise it will cause a panic /// when used in `ZstdEncoder::with_quality_and_params()` calls. // // TODO: make this a normal feature guarded fn on next breaking release #[cfg_attr(docsrs, doc(cfg(feature = "zstdmt")))] pub fn nb_workers(value: u32) -> Self { Self(NbWorkers(value)) } /// Number of bytes given to each worker. /// /// If set to 0, zstd selects a job size based on compression parameters. pub fn job_size(value: u32) -> Self { Self(JobSize(value)) } pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::CParameter { self.0 } } }
async-compression-0.4.13/tests/artifacts/lib.rs.zst000064400000000000000000000042051046102023000204430ustar 00000000000000 [binary zstd test artifact; contents omitted]
async-compression-0.4.13/tests/artifacts/long-window-size-lib.rs.zst000064400000000000000000000064511046102023000236620ustar 00000000000000 [binary zstd test artifact; contents omitted]
async-compression-0.4.13/tests/brotli.rs000064400000000000000000000000561046102023000163710ustar 00000000000000#[macro_use] mod utils; test_cases!(brotli);
async-compression-0.4.13/tests/bzip2.rs000064400000000000000000000000551046102023000161230ustar 00000000000000#[macro_use] mod utils; test_cases!(bzip2);
async-compression-0.4.13/tests/deflate.rs000064400000000000000000000000571046102023000165030ustar 00000000000000#[macro_use] mod utils; test_cases!(deflate);
async-compression-0.4.13/tests/gzip.rs000064400000000000000000000024341046102023000160510ustar 00000000000000#[macro_use] mod utils; test_cases!(gzip); #[allow(unused)] use utils::{algos::gzip::sync,
InputStream}; #[cfg(feature = "futures-io")] use utils::algos::gzip::futures::bufread; #[allow(unused)] fn compress_with_header(data: &[u8]) -> Vec { use flate2::{Compression, GzBuilder}; use std::io::Write; let mut bytes = Vec::new(); { let mut gz = GzBuilder::new() .filename("hello_world.txt") .comment("test file, please delete") .extra(vec![1, 2, 3, 4]) .write(&mut bytes, Compression::fast()); gz.write_all(data).unwrap(); } bytes } #[test] #[ntest::timeout(1000)] #[cfg(feature = "futures-io")] fn gzip_bufread_decompress_with_extra_header() { let bytes = compress_with_header(&[1, 2, 3, 4, 5, 6]); let input = InputStream::from(vec![bytes]); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, &[1, 2, 3, 4, 5, 6][..]); } #[test] #[ntest::timeout(1000)] #[cfg(feature = "futures-io")] fn gzip_bufread_chunks_decompress_with_extra_header() { let bytes = compress_with_header(&[1, 2, 3, 4, 5, 6]); let input = InputStream::from(bytes.chunks(2)); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, &[1, 2, 3, 4, 5, 6][..]); } async-compression-0.4.13/tests/lzma.rs000064400000000000000000000000541046102023000160370ustar 00000000000000#[macro_use] mod utils; test_cases!(lzma); async-compression-0.4.13/tests/proptest.proptest-regressions000064400000000000000000000010541046102023000225520ustar 00000000000000# Seeds for failure cases proptest has generated in the past. It is # automatically read and these particular cases re-run before any # novel cases are generated. # # It is recommended to check this file in to source control so that # everyone who runs the test benefits from these saved cases. cc 56136f76bf926382b1bf5a24304af3826b03e674763b5c99d41496e40f56c1c3 # shrinks to ref input = [0, 0, 0, 0, 0, 0, 0], chunk_size = 1 cc e6c96e0924384950a67cc69e8ce9d7da5a11873cac62705241d7176ba9b20875 # shrinks to ref input = InputStream([]), level = Precise(11) async-compression-0.4.13/tests/proptest.rs000064400000000000000000000105001046102023000167510ustar 00000000000000use async_compression::Level; use ::proptest::{ arbitrary::any, prop_oneof, strategy::{Just, Strategy}, }; mod utils; #[allow(dead_code)] fn any_level() -> impl Strategy { prop_oneof![ Just(Level::Fastest), Just(Level::Best), Just(Level::Default), any::().prop_map(Level::Precise), ] } #[allow(unused_macros)] macro_rules! io_tests { ($impl:ident, $variant:ident) => { mod $impl { mod bufread { use crate::utils::{algos::$variant::{$impl::{read, bufread}, sync}, InputStream}; use proptest::{prelude::{any, ProptestConfig}, proptest}; use std::iter::FromIterator; proptest! { #[test] fn compress(ref input in any::()) { let compressed = bufread::compress(bufread::from(input)); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] fn decompress( ref bytes in any::>(), chunk_size in 1..20usize, ) { let compressed = sync::compress(bytes); let input = InputStream::from(Vec::from_iter(compressed.chunks(chunk_size).map(Vec::from))); let output = bufread::decompress(bufread::from(&input)); assert_eq!(&output, bytes); } } proptest! 
{ #![proptest_config(ProptestConfig::with_cases(32))] #[test] fn compress_with_level( ref input in any::(), level in crate::any_level(), ) { let encoder = bufread::Encoder::with_quality(bufread::from(input), level); let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } } } mod write { use crate::utils::{algos::$variant::{$impl::write, sync}, InputStream}; use proptest::{prelude::{any, ProptestConfig}, proptest}; proptest! { #[test] fn compress( ref input in any::(), limit in 1..20usize, ) { let compressed = write::compress(input.as_ref(), limit); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } } proptest! { #![proptest_config(ProptestConfig::with_cases(32))] #[test] fn compress_with_level( ref input in any::(), limit in 1..20usize, level in crate::any_level(), ) { let compressed = write::to_vec( input.as_ref(), |input| Box::pin(write::Encoder::with_quality(input, level)), limit, ); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } } } } } } #[allow(unused_macros)] macro_rules! tests { ($variant:ident) => { mod $variant { #[cfg(feature = "futures-io")] io_tests!(futures, $variant); #[cfg(feature = "tokio")] io_tests!(tokio, $variant); } }; } mod proptest { #[cfg(feature = "brotli")] tests!(brotli); #[cfg(feature = "bzip2")] tests!(bzip2); #[cfg(feature = "deflate")] tests!(deflate); #[cfg(feature = "gzip")] tests!(gzip); #[cfg(feature = "lzma")] tests!(lzma); #[cfg(feature = "xz")] tests!(xz); #[cfg(feature = "zlib")] tests!(zlib); #[cfg(feature = "zstd")] tests!(zstd); } async-compression-0.4.13/tests/utils/algos.rs000064400000000000000000000153251046102023000173500ustar 00000000000000macro_rules! io_algo { ($impl:ident, $algo:ident($encoder:ident, $decoder:ident)) => { pub mod $impl { pub mod read { pub use crate::utils::impls::$impl::read::{poll_read, to_vec}; } pub mod bufread { pub use crate::utils::impls::$impl::bufread::{from, AsyncBufRead}; pub use async_compression::$impl::bufread::{ $decoder as Decoder, $encoder as Encoder, }; use crate::utils::{pin_mut, Level}; pub fn compress(input: impl AsyncBufRead) -> Vec { pin_mut!(input); super::read::to_vec(Encoder::with_quality(input, Level::Fastest)) } pub fn decompress(input: impl AsyncBufRead) -> Vec { pin_mut!(input); super::read::to_vec(Decoder::new(input)) } } pub mod write { pub use crate::utils::impls::$impl::write::to_vec; pub use async_compression::$impl::write::{ $decoder as Decoder, $encoder as Encoder, }; use crate::utils::Level; pub fn compress(input: &[Vec], limit: usize) -> Vec { to_vec( input, |input| Box::pin(Encoder::with_quality(input, Level::Fastest)), limit, ) } pub fn decompress(input: &[Vec], limit: usize) -> Vec { to_vec(input, |input| Box::pin(Decoder::new(input)), limit) } } } }; } macro_rules! algos { ($(pub mod $name:ident($feat:literal, $encoder:ident, $decoder:ident) { pub mod sync { $($tt:tt)* } })*) => { $( #[cfg(feature = $feat)] pub mod $name { pub mod sync { $($tt)* } #[cfg(feature = "futures-io")] io_algo!(futures, $name($encoder, $decoder)); #[cfg(feature = "tokio")] io_algo!(tokio, $name($encoder, $decoder)); } )* } } algos! 
{ pub mod brotli("brotli", BrotliEncoder, BrotliDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use brotli::{enc::backward_references::BrotliEncoderParams, CompressorReader}; let params = BrotliEncoderParams { quality: 1, ..Default::default() }; to_vec(CompressorReader::with_params(bytes, 0, ¶ms)) } pub fn decompress(bytes: &[u8]) -> Vec { use brotli::Decompressor; to_vec(Decompressor::new(bytes, 0)) } } } pub mod bzip2("bzip2", BzEncoder, BzDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use bzip2::{bufread::BzEncoder, Compression}; to_vec(BzEncoder::new(bytes, Compression::fast())) } pub fn decompress(bytes: &[u8]) -> Vec { use bzip2::bufread::BzDecoder; to_vec(BzDecoder::new(bytes)) } } } pub mod deflate("deflate", DeflateEncoder, DeflateDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use flate2::{bufread::DeflateEncoder, Compression}; to_vec(DeflateEncoder::new(bytes, Compression::fast())) } pub fn decompress(bytes: &[u8]) -> Vec { use flate2::bufread::DeflateDecoder; to_vec(DeflateDecoder::new(bytes)) } } } pub mod zlib("zlib", ZlibEncoder, ZlibDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use flate2::{bufread::ZlibEncoder, Compression}; to_vec(ZlibEncoder::new(bytes, Compression::fast())) } pub fn decompress(bytes: &[u8]) -> Vec { use flate2::bufread::ZlibDecoder; to_vec(ZlibDecoder::new(bytes)) } } } pub mod gzip("gzip", GzipEncoder, GzipDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use flate2::{bufread::GzEncoder, Compression}; to_vec(GzEncoder::new(bytes, Compression::fast())) } pub fn decompress(bytes: &[u8]) -> Vec { use flate2::bufread::GzDecoder; to_vec(GzDecoder::new(bytes)) } } } pub mod zstd("zstd", ZstdEncoder, ZstdDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use libzstd::stream::read::Encoder; use libzstd::DEFAULT_COMPRESSION_LEVEL; to_vec(Encoder::new(bytes, DEFAULT_COMPRESSION_LEVEL).unwrap()) } pub fn decompress(bytes: &[u8]) -> Vec { use libzstd::stream::read::Decoder; to_vec(Decoder::new(bytes).unwrap()) } } } pub mod xz("xz", XzEncoder, XzDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use xz2::bufread::XzEncoder; to_vec(XzEncoder::new(bytes, 0)) } pub fn decompress(bytes: &[u8]) -> Vec { use xz2::bufread::XzDecoder; to_vec(XzDecoder::new(bytes)) } } } pub mod lzma("lzma", LzmaEncoder, LzmaDecoder) { pub mod sync { pub use crate::utils::impls::sync::to_vec; pub fn compress(bytes: &[u8]) -> Vec { use xz2::bufread::XzEncoder; use xz2::stream::{LzmaOptions, Stream}; to_vec(XzEncoder::new_stream( bytes, Stream::new_lzma_encoder(&LzmaOptions::new_preset(0).unwrap()).unwrap(), )) } pub fn decompress(bytes: &[u8]) -> Vec { use xz2::bufread::XzDecoder; use xz2::stream::Stream; to_vec(XzDecoder::new_stream( bytes, Stream::new_lzma_decoder(u64::MAX).unwrap(), )) } } } } async-compression-0.4.13/tests/utils/impls.rs000064400000000000000000000123561046102023000173700ustar 00000000000000pub mod sync { use std::io::Read; pub fn to_vec(mut read: impl Read) -> Vec { let mut output = vec![]; read.read_to_end(&mut output).unwrap(); output } } #[cfg(feature = "futures-io")] pub mod futures { pub mod bufread { pub use futures::io::AsyncBufRead; use 
crate::utils::{InputStream, TrackEof}; use futures::stream::{StreamExt as _, TryStreamExt as _}; pub fn from(input: &InputStream) -> impl AsyncBufRead { // By using the stream here we ensure that each chunk will require a separate // read/poll_fill_buf call to process to help test reading multiple chunks. TrackEof::new(input.stream().map(Ok).into_async_read()) } } pub mod read { use crate::utils::{block_on, pin_mut}; use futures::io::{copy_buf, AsyncRead, AsyncReadExt, BufReader, Cursor}; pub fn to_vec(read: impl AsyncRead) -> Vec { // TODO: https://github.com/rust-lang-nursery/futures-rs/issues/1510 // All current test cases are < 100kB let mut output = Cursor::new(vec![0; 102_400]); pin_mut!(read); let len = block_on(copy_buf(BufReader::with_capacity(2, read), &mut output)).unwrap(); let mut output = output.into_inner(); output.truncate(len as usize); output } pub fn poll_read(reader: impl AsyncRead, output: &mut [u8]) -> std::io::Result { pin_mut!(reader); block_on(reader.read(output)) } } pub mod write { use crate::utils::{block_on, Pin, TrackClosed}; use futures::io::{AsyncWrite, AsyncWriteExt as _}; use futures_test::io::AsyncWriteTestExt as _; pub fn to_vec( input: &[Vec], create_writer: impl for<'a> FnOnce( &'a mut (dyn AsyncWrite + Unpin), ) -> Pin>, limit: usize, ) -> Vec { let mut output = Vec::new(); { let mut test_writer = TrackClosed::new( (&mut output) .limited_write(limit) .interleave_pending_write(), ); { let mut writer = create_writer(&mut test_writer); for chunk in input { block_on(writer.write_all(chunk)).unwrap(); block_on(writer.flush()).unwrap(); } block_on(writer.close()).unwrap(); } assert!(test_writer.is_closed()); } output } } } #[cfg(feature = "tokio")] pub mod tokio { pub mod bufread { use crate::utils::{InputStream, TrackEof}; use bytes::Bytes; use futures::stream::StreamExt; pub use tokio::io::AsyncBufRead; use tokio_util::io::StreamReader; pub fn from(input: &InputStream) -> impl AsyncBufRead { // By using the stream here we ensure that each chunk will require a separate // read/poll_fill_buf call to process to help test reading multiple chunks. 
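// `StreamReader` (from `tokio_util::io`) adapts a `Stream` of `io::Result<Bytes>` into an
// `AsyncBufRead`, which the `TrackEof` test helper then wraps.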
TrackEof::new(StreamReader::new( input.stream().map(Bytes::from).map(std::io::Result::Ok), )) } } pub mod read { use crate::utils::{block_on, pin_mut, tokio_ext::copy_buf}; use std::io::Cursor; use tokio::io::{AsyncRead, AsyncReadExt, BufReader}; pub fn to_vec(read: impl AsyncRead) -> Vec { let mut output = Cursor::new(vec![0; 102_400]); pin_mut!(read); let len = block_on(copy_buf(BufReader::with_capacity(2, read), &mut output)).unwrap(); let mut output = output.into_inner(); output.truncate(len as usize); output } pub fn poll_read(reader: impl AsyncRead, output: &mut [u8]) -> std::io::Result { pin_mut!(reader); block_on(reader.read(output)) } } pub mod write { use crate::utils::{ block_on, tokio_ext::AsyncWriteTestExt as _, track_closed::TrackClosed, Pin, }; use std::io::Cursor; use tokio::io::{AsyncWrite, AsyncWriteExt as _}; pub fn to_vec( input: &[Vec], create_writer: impl for<'a> FnOnce( &'a mut (dyn AsyncWrite + Unpin), ) -> Pin>, limit: usize, ) -> Vec { let mut output = Cursor::new(Vec::new()); { let mut test_writer = TrackClosed::new( (&mut output) .limited_write(limit) .interleave_pending_write(), ); { let mut writer = create_writer(&mut test_writer); for chunk in input { block_on(writer.write_all(chunk)).unwrap(); block_on(writer.flush()).unwrap(); } block_on(writer.shutdown()).unwrap(); } assert!(test_writer.is_closed()); } output.into_inner() } } } async-compression-0.4.13/tests/utils/input_stream.rs000064400000000000000000000024251046102023000207520ustar 00000000000000use futures::stream::Stream; use futures_test::stream::StreamTestExt as _; use proptest_derive::Arbitrary; #[derive(Arbitrary, Debug, Clone)] pub struct InputStream(Vec>); impl InputStream { pub fn new(input: Vec>) -> Self { InputStream(input) } pub fn as_ref(&self) -> &[Vec] { &self.0 } pub fn stream(&self) -> impl Stream> { // The resulting stream here will interleave empty chunks before and after each chunk, and // then interleave a `Poll::Pending` between each yielded chunk, that way we test the // handling of these two conditions in every point of the tested stream. futures::stream::iter( self.0 .clone() .into_iter() .flat_map(|bytes| vec![vec![], bytes]) .chain(Some(vec![])), ) .interleave_pending() } pub fn bytes(&self) -> Vec { self.0.iter().flatten().cloned().collect() } pub fn len(&self) -> usize { self.0.iter().map(Vec::len).sum() } } impl From for InputStream where I: IntoIterator, I::Item: Into>, { fn from(input: I) -> InputStream { Self::new(input.into_iter().map(|b| b.into()).collect()) } } async-compression-0.4.13/tests/utils/mod.rs000064400000000000000000000012651046102023000170200ustar 00000000000000#![allow(dead_code, unused_imports, unused_macros)] // Different tests use a different subset of functions mod input_stream; #[cfg(feature = "tokio")] mod tokio_ext; mod track_closed; mod track_eof; #[macro_use] mod test_cases; pub mod algos; pub mod impls; pub use self::{input_stream::InputStream, track_closed::TrackClosed, track_eof::TrackEof}; pub use async_compression::Level; pub use futures::{executor::block_on, pin_mut, stream::Stream}; pub use std::{future::Future, io::Result, iter::FromIterator, pin::Pin}; pub fn one_to_six_stream() -> InputStream { InputStream::new(vec![vec![1, 2, 3], vec![4, 5, 6]]) } pub fn one_to_six() -> &'static [u8] { &[1, 2, 3, 4, 5, 6] } async-compression-0.4.13/tests/utils/test_cases.rs000064400000000000000000000423301046102023000203740ustar 00000000000000macro_rules! 
io_test_cases { ($impl:ident, $variant:ident) => { mod $impl { mod bufread { mod compress { use crate::utils::{ algos::$variant::{ sync, $impl::{bufread, read}, }, one_to_six, one_to_six_stream, InputStream, Level, }; #[test] #[ntest::timeout(1000)] fn empty() { let mut input: &[u8] = &[]; let compressed = bufread::compress(&mut input); let output = sync::decompress(&compressed); assert_eq!(output, &[][..]); } #[test] #[ntest::timeout(1000)] fn to_full_output() { let mut output = []; let encoder = bufread::Encoder::new(bufread::from(&one_to_six_stream())); let result = read::poll_read(encoder, &mut output); assert!(matches!(result, Ok(0))); } #[test] #[ntest::timeout(1000)] fn empty_chunk() { let input = InputStream::new(vec![vec![]]); let compressed = bufread::compress(bufread::from(&input)); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] #[ntest::timeout(1000)] fn short() { let compressed = bufread::compress(bufread::from(&one_to_six_stream())); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn long() { let input = InputStream::new(vec![ (0..32_768).map(|_| rand::random()).collect(), (0..32_768).map(|_| rand::random()).collect(), ]); let compressed = bufread::compress(bufread::from(&input)); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] fn with_level_best() { let encoder = bufread::Encoder::with_quality( bufread::from(&one_to_six_stream()), Level::Best, ); let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_default() { let encoder = bufread::Encoder::new(bufread::from(&one_to_six_stream())); let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_0() { let encoder = bufread::Encoder::with_quality( bufread::from(&one_to_six_stream()), Level::Precise(0), ); let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_max() { let encoder = bufread::Encoder::with_quality( bufread::from(&one_to_six_stream()), Level::Precise(i32::MAX), ); let compressed = read::to_vec(encoder); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } } mod decompress { use crate::utils::{ algos::$variant::{ sync, $impl::{bufread, read}, }, one_to_six, one_to_six_stream, InputStream, }; #[test] #[ntest::timeout(1000)] fn empty() { let compressed = sync::compress(&[]); let input = InputStream::new(vec![compressed]); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, &[][..]); } #[test] #[ntest::timeout(1000)] fn to_full_output() { let mut output = []; let decoder = bufread::Decoder::new(bufread::from(&one_to_six_stream())); let result = read::poll_read(decoder, &mut output); assert!(matches!(result, Ok(0))); } #[test] #[ntest::timeout(1000)] fn zeros() { let compressed = sync::compress(&[0; 10]); let input = InputStream::new(vec![compressed]); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, &[0; 10][..]); } #[test] #[ntest::timeout(1000)] fn short() { let compressed = sync::compress(&[1, 2, 3, 4, 5, 6]); let input = InputStream::new(vec![compressed]); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn short_chunks() { let compressed = sync::compress(&[1, 2, 3, 4, 
5, 6]); let input = InputStream::from(compressed.chunks(2)); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn trailer() { let mut compressed = sync::compress(&[1, 2, 3, 4, 5, 6]); compressed.extend_from_slice(&[7, 8, 9, 10]); let input = InputStream::new(vec![compressed]); let mut reader = bufread::from(&input); let output = bufread::decompress(&mut reader); let trailer = read::to_vec(reader); assert_eq!(output, one_to_six()); assert_eq!(trailer, &[7, 8, 9, 10][..]); } #[test] #[ntest::timeout(1000)] fn long() { let bytes: Vec = (0..65_536).map(|_| rand::random()).collect(); let compressed = sync::compress(&bytes); let input = InputStream::new(vec![compressed]); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, bytes); } #[test] #[ntest::timeout(1000)] fn long_chunks() { let bytes: Vec = (0..65_536).map(|_| rand::random()).collect(); let compressed = sync::compress(&bytes); let input = InputStream::from(compressed.chunks(1024)); let output = bufread::decompress(bufread::from(&input)); assert_eq!(output, bytes); } #[test] #[ntest::timeout(1000)] fn multiple_members() { let compressed = [ sync::compress(&[1, 2, 3, 4, 5, 6]), sync::compress(&[6, 5, 4, 3, 2, 1]), ] .join(&[][..]); let input = InputStream::new(vec![compressed]); let mut decoder = bufread::Decoder::new(bufread::from(&input)); decoder.multiple_members(true); let output = read::to_vec(decoder); assert_eq!(output, &[1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1][..]); } } } mod write { mod compress { use crate::utils::{ algos::$variant::{sync, $impl::write}, one_to_six, one_to_six_stream, InputStream, Level, }; #[test] #[ntest::timeout(1000)] fn empty() { let input = InputStream::new(vec![]); let compressed = write::compress(input.as_ref(), 65_536); let output = sync::decompress(&compressed); assert_eq!(output, &[][..]); } #[test] #[ntest::timeout(1000)] fn empty_chunk() { let input = InputStream::new(vec![vec![]]); let compressed = write::compress(input.as_ref(), 65_536); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] #[ntest::timeout(1000)] fn short() { let compressed = write::compress(one_to_six_stream().as_ref(), 65_536); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn short_chunk_output() { let compressed = write::compress(one_to_six_stream().as_ref(), 2); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn long() { let input = InputStream::new(vec![ (0..32_768).map(|_| rand::random()).collect(), (0..32_768).map(|_| rand::random()).collect(), ]); let compressed = write::compress(input.as_ref(), 65_536); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] #[ntest::timeout(1000)] fn long_chunk_output() { let input = InputStream::new(vec![ (0..32_768).map(|_| rand::random()).collect(), (0..32_768).map(|_| rand::random()).collect(), ]); let compressed = write::compress(input.as_ref(), 20); let output = sync::decompress(&compressed); assert_eq!(output, input.bytes()); } #[test] fn with_level_best() { let compressed = write::to_vec( one_to_six_stream().as_ref(), |input| Box::pin(write::Encoder::with_quality(input, Level::Best)), 65_536, ); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_default() { let compressed = write::to_vec( one_to_six_stream().as_ref(), |input| 
Box::pin(write::Encoder::new(input)), 65_536, ); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_0() { let compressed = write::to_vec( one_to_six_stream().as_ref(), |input| { Box::pin(write::Encoder::with_quality(input, Level::Precise(0))) }, 65_536, ); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } #[test] fn with_level_max() { let compressed = write::to_vec( one_to_six_stream().as_ref(), |input| { Box::pin(write::Encoder::with_quality( input, Level::Precise(i32::MAX), )) }, 65_536, ); let output = sync::decompress(&compressed); assert_eq!(output, one_to_six()); } } mod decompress { use crate::utils::{ algos::$variant::{sync, $impl::write}, one_to_six, InputStream, }; #[test] #[ntest::timeout(1000)] fn empty() { let compressed = sync::compress(&[]); let input = InputStream::new(vec![compressed]); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, &[][..]); } #[test] #[ntest::timeout(1000)] fn zeros() { let compressed = sync::compress(&[0; 10]); let input = InputStream::new(vec![compressed]); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, &[0; 10][..]); } #[test] #[ntest::timeout(1000)] fn short() { let compressed = sync::compress(&[1, 2, 3, 4, 5, 6]); let input = InputStream::new(vec![compressed]); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn short_chunks() { let compressed = sync::compress(&[1, 2, 3, 4, 5, 6]); let input = InputStream::from(compressed.chunks(2)); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, one_to_six()); } #[test] #[ntest::timeout(1000)] fn long() { let bytes: Vec = (0..65_536).map(|_| rand::random()).collect(); let compressed = sync::compress(&bytes); let input = InputStream::new(vec![compressed]); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, bytes); } #[test] #[ntest::timeout(1000)] fn long_chunks() { let bytes: Vec = (0..65_536).map(|_| rand::random()).collect(); let compressed = sync::compress(&bytes); let input = InputStream::from(compressed.chunks(1024)); let output = write::decompress(input.as_ref(), 65_536); assert_eq!(output, bytes); } } } } }; } macro_rules! 
test_cases {
    ($variant:ident) => {
        mod $variant {
            #[cfg(feature = "futures-io")]
            io_test_cases!(futures, $variant);

            #[cfg(feature = "tokio")]
            io_test_cases!(tokio, $variant);
        }
    };
}
async-compression-0.4.13/tests/utils/tokio_ext/copy_buf.rs
use core::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use futures::ready;
use tokio::io::{AsyncBufRead, AsyncWrite};

pub fn copy_buf<R, W>(reader: R, writer: &mut W) -> CopyBuf<'_, R, W>
where
    R: AsyncBufRead + Unpin,
    W: AsyncWrite + Unpin + ?Sized,
{
    CopyBuf {
        reader,
        writer,
        amt: 0,
    }
}

#[derive(Debug)]
pub struct CopyBuf<'a, R, W: ?Sized> {
    reader: R,
    writer: &'a mut W,
    amt: u64,
}

impl<R, W> Future for CopyBuf<'_, R, W>
where
    R: AsyncBufRead + Unpin,
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = std::io::Result<u64>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;
        loop {
            let buffer = ready!(Pin::new(&mut this.reader).poll_fill_buf(cx))?;
            if buffer.is_empty() {
                ready!(Pin::new(&mut this.writer).poll_flush(cx))?;
                return Poll::Ready(Ok(this.amt));
            }

            let i = ready!(Pin::new(&mut this.writer).poll_write(cx, buffer))?;
            if i == 0 {
                return Poll::Ready(Err(std::io::ErrorKind::WriteZero.into()));
            }
            this.amt += i as u64;
            Pin::new(&mut this.reader).consume(i);
        }
    }
}
async-compression-0.4.13/tests/utils/tokio_ext/interleave_pending.rs
use std::{
    pin::Pin,
    task::{Context, Poll},
};

pub struct InterleavePending<T> {
    inner: T,
    pended: bool,
}

impl<T> InterleavePending<T> {
    pub(crate) fn new(inner: T) -> Self {
        Self {
            inner,
            pended: false,
        }
    }
}

impl<W: tokio::io::AsyncWrite + Unpin> tokio::io::AsyncWrite for InterleavePending<W> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_write(cx, buf);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_flush(cx);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        if self.pended {
            let next = Pin::new(&mut self.inner).poll_shutdown(cx);
            if next.is_ready() {
                self.pended = false;
            }
            next
        } else {
            cx.waker().wake_by_ref();
            self.pended = true;
            Poll::Pending
        }
    }
}
async-compression-0.4.13/tests/utils/tokio_ext/limited.rs
use std::{
    pin::Pin,
    task::{Context, Poll},
};

#[derive(Debug)]
pub struct Limited<Io> {
    io: Io,
    limit: usize,
}

impl<Io> Limited<Io> {
    pub(crate) fn new(io: Io, limit: usize) -> Limited<Io> {
        Limited { io, limit }
    }
}

impl<Io: tokio::io::AsyncWrite + Unpin> tokio::io::AsyncWrite for Limited<Io> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        let limit = self.limit;
        Pin::new(&mut self.io).poll_write(cx, &buf[..std::cmp::min(limit, buf.len())])
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.io).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.io).poll_shutdown(cx)
    }
}
async-compression-0.4.13/tests/utils/tokio_ext/mod.rs
mod copy_buf;
mod interleave_pending;
mod limited;

pub use copy_buf::copy_buf;

pub trait
AsyncWriteTestExt: tokio::io::AsyncWrite {
    fn interleave_pending_write(self) -> interleave_pending::InterleavePending<Self>
    where
        Self: Sized + Unpin,
    {
        interleave_pending::InterleavePending::new(self)
    }

    fn limited_write(self, limit: usize) -> limited::Limited<Self>
    where
        Self: Sized + Unpin,
    {
        limited::Limited::new(self, limit)
    }
}

impl<T: tokio::io::AsyncWrite> AsyncWriteTestExt for T {}
async-compression-0.4.13/tests/utils/track_closed.rs
#[cfg_attr(not(feature = "all-implementations"), allow(unused))]
use std::{
    io::Result,
    pin::Pin,
    task::{Context, Poll},
};

pub struct TrackClosed<W> {
    inner: W,
    closed: bool,
}

impl<W> TrackClosed<W> {
    pub fn new(inner: W) -> Self {
        Self {
            inner,
            closed: false,
        }
    }

    pub fn is_closed(&self) -> bool {
        self.closed
    }
}

#[cfg(feature = "futures-io")]
impl<W: futures::io::AsyncWrite + Unpin> futures::io::AsyncWrite for TrackClosed<W> {
    fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<Result<usize>> {
        assert!(!self.closed);
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<()>> {
        assert!(!self.closed);
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<()>> {
        assert!(!self.closed);
        match Pin::new(&mut self.inner).poll_close(cx) {
            Poll::Ready(Ok(())) => {
                self.closed = true;
                Poll::Ready(Ok(()))
            }
            other => other,
        }
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
        bufs: &[std::io::IoSlice],
    ) -> Poll<Result<usize>> {
        assert!(!self.closed);
        Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
    }
}

#[cfg(feature = "tokio")]
impl<W: tokio::io::AsyncWrite + Unpin> tokio::io::AsyncWrite for TrackClosed<W> {
    fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<Result<usize>> {
        assert!(!self.closed);
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<()>> {
        assert!(!self.closed);
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<()>> {
        assert!(!self.closed);
        match Pin::new(&mut self.inner).poll_shutdown(cx) {
            Poll::Ready(Ok(())) => {
                self.closed = true;
                Poll::Ready(Ok(()))
            }
            other => other,
        }
    }
}
async-compression-0.4.13/tests/utils/track_eof.rs
#[cfg_attr(not(feature = "all-implementations"), allow(unused))]
use std::{
    io::Result,
    pin::Pin,
    task::{Context, Poll},
};

pub struct TrackEof<R> {
    inner: R,
    eof: bool,
}

impl<R: Unpin> TrackEof<R> {
    pub fn new(inner: R) -> Self {
        Self { inner, eof: false }
    }

    pub fn project(self: Pin<&mut Self>) -> (Pin<&mut R>, &mut bool) {
        let Self { inner, eof } = Pin::into_inner(self);
        (Pin::new(inner), eof)
    }
}

#[cfg(feature = "futures-io")]
impl<R: futures::io::AsyncRead + Unpin> futures::io::AsyncRead for TrackEof<R> {
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll<Result<usize>> {
        let (inner, eof) = self.project();
        assert!(!*eof);
        match inner.poll_read(cx, buf) {
            Poll::Ready(Ok(0)) => {
                if !buf.is_empty() {
                    *eof = true;
                }
                Poll::Ready(Ok(0))
            }
            other => other,
        }
    }
}

#[cfg(feature = "futures-io")]
impl<R: futures::io::AsyncBufRead + Unpin> futures::io::AsyncBufRead for TrackEof<R> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<&[u8]>> {
        let (inner, eof) = self.project();
        assert!(!*eof);
        match inner.poll_fill_buf(cx) {
            Poll::Ready(Ok(buf)) => {
                if buf.is_empty() {
                    *eof = true;
                }
                Poll::Ready(Ok(buf))
            }
            other => other,
        }
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.project().0.consume(amt)
    }
}

#[cfg(feature = "tokio")]
impl<R: tokio::io::AsyncRead + Unpin> tokio::io::AsyncRead for TrackEof<R> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf:
&mut tokio::io::ReadBuf<'_>,
    ) -> Poll<Result<()>> {
        let (inner, eof) = self.project();
        assert!(!*eof);
        let len = buf.filled().len();
        match inner.poll_read(cx, buf) {
            Poll::Ready(Ok(())) => {
                if buf.filled().len() == len && buf.remaining() > 0 {
                    *eof = true;
                }
                Poll::Ready(Ok(()))
            }
            other => other,
        }
    }
}

#[cfg(feature = "tokio")]
impl<R: tokio::io::AsyncBufRead + Unpin> tokio::io::AsyncBufRead for TrackEof<R> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<&[u8]>> {
        let (inner, eof) = self.project();
        assert!(!*eof);
        match inner.poll_fill_buf(cx) {
            Poll::Ready(Ok(buf)) => {
                if buf.is_empty() {
                    *eof = true;
                }
                Poll::Ready(Ok(buf))
            }
            other => other,
        }
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.project().0.consume(amt)
    }
}
async-compression-0.4.13/tests/xz.rs
#[allow(unused)]
use futures::{executor::block_on, io::AsyncReadExt};

#[macro_use]
mod utils;

test_cases!(xz);

#[allow(unused)]
use utils::{algos::xz::sync, InputStream};

#[cfg(feature = "futures-io")]
use utils::algos::xz::futures::{bufread, read};

#[test]
#[ntest::timeout(1000)]
#[cfg(feature = "futures-io")]
fn bufread_multiple_members_with_padding() {
    let compressed = [
        sync::compress(&[1, 2, 3, 4, 5, 6]),
        vec![0, 0, 0, 0],
        sync::compress(&[6, 5, 4, 3, 2, 1]),
        vec![0, 0, 0, 0],
    ]
    .join(&[][..]);

    let input = InputStream::from(vec![compressed]);
    let mut decoder = bufread::Decoder::new(bufread::from(&input));
    decoder.multiple_members(true);
    let output = read::to_vec(decoder);

    assert_eq!(output, &[1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1][..]);
}

#[test]
#[ntest::timeout(1000)]
#[cfg(feature = "futures-io")]
fn bufread_multiple_members_with_invalid_padding() {
    let compressed = [
        sync::compress(&[1, 2, 3, 4, 5, 6]),
        vec![0, 0, 0],
        sync::compress(&[6, 5, 4, 3, 2, 1]),
        vec![0, 0, 0, 0],
    ]
    .join(&[][..]);

    let input = InputStream::from(vec![compressed]);
    let mut decoder = bufread::Decoder::new(bufread::from(&input));
    decoder.multiple_members(true);
    let mut output = Vec::new();
    assert!(block_on(decoder.read_to_end(&mut output)).is_err());
}
async-compression-0.4.13/tests/zlib.rs
#[macro_use]
mod utils;
test_cases!(zlib);
async-compression-0.4.13/tests/zstd-dict.rs
#![cfg(not(windows))]

use tokio::io::AsyncWriteExt as _;

#[tokio::test]
async fn trained_zstd_decode_no_dict() {
    let compressed = include_bytes!("./artifacts/lib.rs.zst");

    let mut decoder = async_compression::tokio::write::ZstdDecoder::new(Vec::new());
    decoder.write_all(compressed).await.unwrap_err();
}

#[tokio::test]
async fn trained_zstd_decode_with_dict() {
    let source = include_bytes!("./artifacts/lib.rs");
    let dict = include_bytes!("./artifacts/dictionary-rust");
    let compressed = include_bytes!("./artifacts/lib.rs.zst");

    let mut decoder =
        async_compression::tokio::write::ZstdDecoder::with_dict(Vec::new(), dict).unwrap();
    decoder.write_all(compressed).await.unwrap();
    decoder.shutdown().await.unwrap();

    assert_eq!(decoder.into_inner(), source);
}

#[tokio::test]
async fn trained_zstd_decode_with_wrong_dict() {
    let dict = include_bytes!("./artifacts/dictionary-rust-other");
    let compressed = include_bytes!("./artifacts/lib.rs.zst");

    let mut decoder =
        async_compression::tokio::write::ZstdDecoder::with_dict(Vec::new(), dict).unwrap();
    decoder.write_all(compressed).await.unwrap_err();
}
async-compression-0.4.13/tests/zstd-window-size.rs
00000000000000#![cfg(not(windows))] use async_compression::zstd::DParameter; use tokio::io::AsyncWriteExt as _; #[tokio::test] async fn zstd_decode_large_window_size_default() { let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); // Default decoder should throw with an error, window size maximum is too low. let mut decoder = async_compression::tokio::write::ZstdDecoder::new(Vec::new()); decoder.write_all(compressed).await.unwrap_err(); } #[tokio::test] async fn zstd_decode_large_window_size_explicit_small_window_size() { let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); // Short window decoder should throw with an error, window size maximum is too low. let mut decoder = async_compression::tokio::write::ZstdDecoder::with_params( Vec::new(), &[DParameter::window_log_max(16)], ); decoder.write_all(compressed).await.unwrap_err(); } #[tokio::test] async fn zstd_decode_large_window_size_explicit_large_window_size() { let compressed = include_bytes!("./artifacts/long-window-size-lib.rs.zst"); let source = include_bytes!("./artifacts/lib.rs"); // Long window decoder should succeed as the window size is large enough to decompress the given input. let mut long_window_size_decoder = async_compression::tokio::write::ZstdDecoder::with_params( Vec::new(), &[DParameter::window_log_max(31)], ); // Long window size decoder should successfully decode the given input data. long_window_size_decoder .write_all(compressed) .await .unwrap(); long_window_size_decoder.shutdown().await.unwrap(); assert_eq!(long_window_size_decoder.into_inner(), source); } async-compression-0.4.13/tests/zstd.rs000064400000000000000000000000541046102023000160600ustar 00000000000000#[macro_use] mod utils; test_cases!(zstd);
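The test files above drive the crate's encoders and decoders through the shared `test_cases!` macros and internal helpers. As a standalone illustration of the `tokio::write` surface that the write-path tests exercise (a minimal sketch, not part of the crate's source; assumes the `tokio` and `zstd` features are enabled and that the hypothetical test name is illustrative), a round trip through `ZstdEncoder` and `ZstdDecoder` looks roughly like this:

use async_compression::tokio::write::{ZstdDecoder, ZstdEncoder};
use tokio::io::AsyncWriteExt as _;

#[tokio::test]
async fn zstd_write_round_trip_sketch() {
    let source = b"example payload".to_vec();

    // Compress by writing the plain bytes into an encoder that wraps a Vec<u8>;
    // shutdown() finalizes the zstd frame before the buffer is taken back out.
    let mut encoder = ZstdEncoder::new(Vec::new());
    encoder.write_all(&source).await.unwrap();
    encoder.shutdown().await.unwrap();
    let compressed = encoder.into_inner();

    // Decompress by writing the compressed bytes into a decoder that wraps a Vec<u8>.
    let mut decoder = ZstdDecoder::new(Vec::new());
    decoder.write_all(&compressed).await.unwrap();
    decoder.shutdown().await.unwrap();

    assert_eq!(decoder.into_inner(), source);
}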
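Likewise, the multiple-member xz tests above reach `multiple_members(true)` through the internal `bufread` aliases; against the public API the same behaviour would be reached roughly as follows (a sketch under the assumption that the `futures-io` and `xz` features are enabled; the function name is illustrative):

use async_compression::futures::bufread::XzDecoder;
use futures::io::AsyncReadExt as _;

fn decode_concatenated_members(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
    // `&[u8]` implements `futures::io::AsyncBufRead`, so it can feed the decoder directly.
    let mut decoder = XzDecoder::new(compressed);
    // Keep decoding after the first xz stream ends instead of treating it as EOF.
    decoder.multiple_members(true);
    let mut output = Vec::new();
    futures::executor::block_on(decoder.read_to_end(&mut output))?;
    Ok(output)
}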