async-compression-0.4.0/.github/dependabot.yml000064400000000000000000000006611046102023000174740ustar 00000000000000# Dependabot dependency version checks / updates version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "daily" rebase-strategy: "disabled" - package-ecosystem: "cargo" directory: "/" versioning-strategy: "widen" schedule: interval: "daily" rebase-strategy: "disabled" async-compression-0.4.0/.github/workflows/base.yml000064400000000000000000000005261046102023000203360ustar 00000000000000name: base env: RUST_BACKTRACE: 1 jobs: test: name: cargo test runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 - run: cargo --locked test --workspace --all-features on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.0/.github/workflows/coverage.yml000064400000000000000000000007721046102023000212220ustar 00000000000000name: coverage env: RUST_BACKTRACE: 1 jobs: codecov: name: codecov runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/cache-cargo-install-action@v1 with: { tool: cargo-tarpaulin } - run: cargo --locked tarpaulin --all-features -- --skip 'proptest::' - uses: codecov/codecov-action@v3 on: push: branches: [main] pull_request: branches: [main] schedule: - cron: '0 0 * * 5' async-compression-0.4.0/.github/workflows/deny.yml000064400000000000000000000011541046102023000203610ustar 00000000000000name: deny env: RUST_BACKTRACE: 1 jobs: cargo-deny-advisories: name: cargo deny advisories runs-on: ubuntu-latest continue-on-error: true steps: - uses: actions/checkout@v3 - uses: EmbarkStudios/cargo-deny-action@v1 with: command: check advisories cargo-deny-licenses: name: cargo deny bans licenses sources runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: EmbarkStudios/cargo-deny-action@v1 with: command: check bans licenses sources on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.0/.github/workflows/docs.yml000064400000000000000000000006661046102023000203610ustar 00000000000000name: docs env: RUST_BACKTRACE: 1 jobs: docsrs: name: cargo doc --cfg docsrs runs-on: ubuntu-latest env: RUSTDOCFLAGS: '--cfg=docsrs -Dwarnings' steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly - run: cargo doc --all-features --no-deps on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.0/.github/workflows/exhaustive.yml000064400000000000000000000052771046102023000216210ustar 00000000000000name: exhaustive env: RUST_BACKTRACE: 1 jobs: test: name: cargo test strategy: matrix: platform: - { toolchain: stable, target: i686-pc-windows-msvc, os: windows-latest } - { toolchain: stable, target: i686-unknown-linux-gnu, os: ubuntu-latest } - { toolchain: stable, target: x86_64-apple-darwin, os: macos-latest } - { toolchain: stable, target: x86_64-pc-windows-msvc, os: windows-latest } - { toolchain: stable, target: x86_64-unknown-linux-gnu, os: ubuntu-latest } runs-on: ${{ matrix.platform.os }} steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: ${{ matrix.platform.toolchain }} target: ${{ matrix.platform.target }} - run: cargo --locked test --all --all-features min-versions: name: cargo test --shallow-minimal-versions runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: nightly } - name: Update to shallow minimal versions run: cargo update $( cargo metadata --all-features --format-version 1 | jq -r ' . as $root | .resolve.nodes[] | select(.id == $root.resolve.root) | .deps[].pkg | . as $dep | $root.packages[] | select(.id == $dep) | "-p", "\(.name):\(.version)" ' ) -Z minimal-versions - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { toolchain: stable } - run: cargo --locked test --workspace --all-features check-features: name: cargo hack check --feature-powerset runs-on: ubuntu-latest env: RUSTFLAGS: -Dwarnings steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/cache-cargo-install-action@v1 with: { tool: cargo-hack } - run: cargo hack check --workspace --feature-powerset --no-dev-deps --skip 'all,all-algorithms,all-implementations' check-test-features: name: cargo hack check --all-targets --feature-powerset runs-on: ubuntu-latest env: RUSTFLAGS: -Dwarnings steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/cache-cargo-install-action@v1 with: { tool: cargo-hack } - run: cargo hack check --workspace --feature-powerset --all-targets --skip 'all,all-algorithms,all-implementations' on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.0/.github/workflows/lint.yml000064400000000000000000000012001046102023000203600ustar 00000000000000name: lint env: RUST_BACKTRACE: 1 jobs: fmt: name: cargo fmt --check runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { components: rustfmt } - run: cargo fmt --all -- --check clippy: name: cargo clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: { components: clippy } - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings on: merge_group: types: [checks_requested] pull_request: branches: [main] async-compression-0.4.0/.github/workflows/nightly.yml000064400000000000000000000016411046102023000211010ustar 00000000000000name: nightly env: RUST_BACKTRACE: 1 jobs: test: name: cargo +nightly test runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly - run: cargo --locked test --all --all-features fmt: name: cargo +nightly fmt --check runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly components: rustfmt - run: cargo fmt --all -- --check clippy: name: cargo +nightly clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: nightly components: clippy - run: cargo --locked clippy --all --all-targets --all-features -- -D warnings on: schedule: - cron: '0 2 * * *' async-compression-0.4.0/.gitignore000064400000000000000000000000231046102023000152640ustar 00000000000000/target **/*.rs.bk async-compression-0.4.0/CHANGELOG.md000064400000000000000000000015301046102023000151110ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## Unreleased ## 0.4.0 - 2023-05-10 - `Level::Precise` variant now takes a `i32` instead of `u32`. - Add top level `zstd` module containing stable `zstd` crate wrapper types. - Add `ZstdEncoder::with_quality_and_params()` constructors. - Update `zstd` dependency to `0.12`. - Remove deprecated `stream`, `futures-bufread` and `futures-write` crate features. - Remove Tokio 0.2.x and 0.3.x support (`tokio-02` and `tokio-03` crate features). ## 0.3.15 - 2022-10-08 - `Level::Default::into_zstd()` now returns zstd's default value `3`. - Fix endianness when reading the `extra` field of a gzip header. async-compression-0.4.0/Cargo.lock0000644000000173650000000000100125000ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "alloc-no-stdlib" version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" [[package]] name = "alloc-stdlib" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" dependencies = [ "alloc-no-stdlib", ] [[package]] name = "async-compression" version = "0.4.0" dependencies = [ "brotli", "bzip2", "flate2", "futures-core", "futures-io", "memchr", "pin-project-lite", "tokio", "xz2", "zstd", "zstd-safe", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "brotli" version = "3.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", "brotli-decompressor", ] [[package]] name = "brotli-decompressor" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] [[package]] name = "bzip2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crc32fast" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if", ] [[package]] name = "flate2" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" dependencies = [ "cfg-if", "crc32fast", "libc", "miniz_oxide", ] [[package]] name = "futures-core" version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-io" version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "jobserver" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ "libc", ] [[package]] name = "libc" version = "0.2.142" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" [[package]] name = "lzma-sys" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb4b7c3eddad11d3af9e86c487607d2d2442d185d848575365c4856ba96d619" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "memchr" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "miniz_oxide" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", ] [[package]] name = "pin-project-lite" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pkg-config" version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "tokio" version = "1.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" dependencies = [ "autocfg", "pin-project-lite", "windows-sys", ] [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "xz2" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c179869f34fc7c01830d3ce7ea2086bc3a07e0d35289b667d0a8bf910258926c" dependencies = [ "lzma-sys", ] [[package]] name = "zstd" version = "0.12.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "6.0.4+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.7+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" dependencies = [ "cc", "libc", "pkg-config", ] async-compression-0.4.0/Cargo.toml0000644000000054460000000000100125200ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "async-compression" version = "0.4.0" authors = [ "Wim Looman ", "Allen Bui ", ] description = """ Adaptors between compression crates and Rust's modern asynchronous IO types. """ readme = "README.md" keywords = [ "compression", "gzip", "zstd", "brotli", "async", ] categories = [ "compression", "asynchronous", ] license = "MIT OR Apache-2.0" repository = "https://github.com/Nullus157/async-compression" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [[example]] name = "zlib_tokio_write" required-features = [ "zlib", "tokio", ] [[example]] name = "zstd_gzip" required-features = [ "zstd", "gzip", "tokio", ] [[test]] name = "brotli" required-features = ["brotli"] [[test]] name = "bzip2" required-features = ["bzip2"] [[test]] name = "deflate" required-features = ["deflate"] [[test]] name = "gzip" required-features = ["gzip"] [[test]] name = "lzma" required-features = ["lzma"] [[test]] name = "xz" required-features = ["xz"] [[test]] name = "zlib" required-features = ["zlib"] [[test]] name = "zstd" required-features = ["zstd"] [dependencies.brotli] version = "3.3" features = ["std"] optional = true default-features = false [dependencies.bzip2] version = "0.4.4" optional = true [dependencies.flate2] version = "1.0.11" optional = true [dependencies.futures-core] version = "0.3" default-features = false [dependencies.futures-io] version = "0.3" features = ["std"] optional = true default-features = false [dependencies.libzstd] version = "0.12" optional = true default-features = false package = "zstd" [dependencies.memchr] version = "2" [dependencies.pin-project-lite] version = "0.2" [dependencies.tokio] version = "1.24.2" optional = true default-features = false [dependencies.xz2] version = "0.1.6" optional = true [dependencies.zstd-safe] version = "6" optional = true default-features = false [features] all = [ "all-implementations", "all-algorithms", ] all-algorithms = [ "brotli", "bzip2", "deflate", "gzip", "lzma", "xz", "zlib", "zstd", ] all-implementations = [ "futures-io", "tokio", ] deflate = ["flate2"] gzip = ["flate2"] lzma = ["xz2"] xz = ["xz2"] zlib = ["flate2"] zstd = [ "libzstd", "zstd-safe", ] async-compression-0.4.0/Cargo.toml.orig0000644000000041260000000000100134510ustar [package] name = "async-compression" version = "0.4.0" authors = ["Wim Looman ", "Allen Bui "] edition = "2018" license = "MIT OR Apache-2.0" keywords = ["compression", "gzip", "zstd", "brotli", "async"] categories = ["compression", "asynchronous"] repository = "https://github.com/Nullus157/async-compression" description = """ Adaptors between compression crates and Rust's modern asynchronous IO types. """ [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [features] # groups all = ["all-implementations", "all-algorithms"] all-implementations = ["futures-io", "tokio"] all-algorithms = ["brotli", "bzip2", "deflate", "gzip", "lzma", "xz", "zlib", "zstd"] # algorithms deflate = ["flate2"] gzip = ["flate2"] lzma = ["xz2"] xz = ["xz2"] zlib = ["flate2"] zstd = ["libzstd", "zstd-safe"] [dependencies] brotli = { version = "3.3", optional = true, default-features = false, features = ["std"] } bzip2 = { version = "0.4.4", optional = true } flate2 = { version = "1.0.11", optional = true } futures-core = { version = "0.3", default-features = false } futures-io = { version = "0.3", default-features = false, features = ["std"], optional = true } libzstd = { package = "zstd", version = "0.12", optional = true, default-features = false } memchr = "2" pin-project-lite = "0.2" tokio = { version = "1.24.2", optional = true, default-features = false } xz2 = { version = "0.1.6", optional = true } zstd-safe = { version = "6", optional = true, default-features = false } [[test]] name = "brotli" required-features = ["brotli"] [[test]] name = "bzip2" required-features = ["bzip2"] [[test]] name = "deflate" required-features = ["deflate"] [[test]] name = "gzip" required-features = ["gzip"] [[test]] name = "lzma" required-features = ["lzma"] [[test]] name = "xz" required-features = ["xz"] [[test]] name = "zlib" required-features = ["zlib"] [[test]] name = "zstd" required-features = ["zstd"] [[example]] name = "zlib_tokio_write" required-features = ["zlib", "tokio"] [[example]] name = "zstd_gzip" required-features = ["zstd", "gzip", "tokio"] async-compression-0.4.0/Cargo.toml.orig000064400000000000000000000041261046102023000161730ustar 00000000000000[package] name = "async-compression" version = "0.4.0" authors = ["Wim Looman ", "Allen Bui "] edition = "2018" license = "MIT OR Apache-2.0" keywords = ["compression", "gzip", "zstd", "brotli", "async"] categories = ["compression", "asynchronous"] repository = "https://github.com/Nullus157/async-compression" description = """ Adaptors between compression crates and Rust's modern asynchronous IO types. """ [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [features] # groups all = ["all-implementations", "all-algorithms"] all-implementations = ["futures-io", "tokio"] all-algorithms = ["brotli", "bzip2", "deflate", "gzip", "lzma", "xz", "zlib", "zstd"] # algorithms deflate = ["flate2"] gzip = ["flate2"] lzma = ["xz2"] xz = ["xz2"] zlib = ["flate2"] zstd = ["libzstd", "zstd-safe"] [dependencies] brotli = { version = "3.3", optional = true, default-features = false, features = ["std"] } bzip2 = { version = "0.4.4", optional = true } flate2 = { version = "1.0.11", optional = true } futures-core = { version = "0.3", default-features = false } futures-io = { version = "0.3", default-features = false, features = ["std"], optional = true } libzstd = { package = "zstd", version = "0.12", optional = true, default-features = false } memchr = "2" pin-project-lite = "0.2" tokio = { version = "1.24.2", optional = true, default-features = false } xz2 = { version = "0.1.6", optional = true } zstd-safe = { version = "6", optional = true, default-features = false } [[test]] name = "brotli" required-features = ["brotli"] [[test]] name = "bzip2" required-features = ["bzip2"] [[test]] name = "deflate" required-features = ["deflate"] [[test]] name = "gzip" required-features = ["gzip"] [[test]] name = "lzma" required-features = ["lzma"] [[test]] name = "xz" required-features = ["xz"] [[test]] name = "zlib" required-features = ["zlib"] [[test]] name = "zstd" required-features = ["zstd"] [[example]] name = "zlib_tokio_write" required-features = ["zlib", "tokio"] [[example]] name = "zstd_gzip" required-features = ["zstd", "gzip", "tokio"] async-compression-0.4.0/LICENSE-APACHE000064400000000000000000000251371046102023000152350ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. async-compression-0.4.0/LICENSE-MIT000064400000000000000000000021031046102023000147310ustar 00000000000000The MIT License (MIT) Copyright (c) 2018 the rustasync developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. async-compression-0.4.0/README.md000064400000000000000000000033471046102023000145670ustar 00000000000000# async-compression [![crates.io version][1]][2] ![build status][3] [![downloads][5]][6] [![docs.rs docs][7]][8] ![MIT or Apache 2.0 licensed][9] [![dependency status][10]][11] This crate provides adaptors between compression crates and Rust's modern asynchronous IO types. - [Documentation][8] - [Crates.io][2] - [Releases][releases] ## Development When developing you will need to enable appropriate features for the different test cases to run, the simplest is `cargo test --all-features`, but you can enable different subsets of features as appropriate for the code you are testing to avoid compiling all dependencies, e.g. `cargo test --features tokio,gzip`. ## License Licensed under either of - [Apache License, Version 2.0](LICENSE-APACHE) - [MIT license](LICENSE-MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you shall be dual licensed as above, without any additional terms or conditions. [1]: https://img.shields.io/crates/v/async-compression.svg?style=flat-square [2]: https://crates.io/crates/async-compression [3]: https://img.shields.io/github/actions/workflow/status/Nullus157/async-compression/base.yml?style=flat-square [5]: https://img.shields.io/crates/d/async-compression.svg?style=flat-square [6]: https://crates.io/crates/async-compression [7]: https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square [8]: https://docs.rs/async-compression [9]: https://img.shields.io/crates/l/async-compression.svg?style=flat-square [10]: https://deps.rs/crate/async-compression/0.4.0/status.svg?style=flat-square [11]: https://deps.rs/crate/async-compression/0.4.0/ [releases]: https://github.com/Nullus157/async-compression/releases async-compression-0.4.0/deny.toml000064400000000000000000000005071046102023000151370ustar 00000000000000[advisories] ignore = [ ] [licenses] unlicensed = "deny" allow = [ "MIT", "Apache-2.0", "BSD-3-Clause", ] default = "deny" [bans] multiple-versions = "warn" skip = [ { name = "tokio-util", version = "0.6" }, ] skip-tree = [ { name = "proptest", version = "1.0" }, { name = "proptest-derive", version = "0.3" }, ] async-compression-0.4.0/src/codec/brotli/decoder.rs000064400000000000000000000065331046102023000204220ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::{ fmt, io::{Error, ErrorKind, Result}, }; use brotli::{enc::StandardAlloc, BrotliDecompressStream, BrotliResult, BrotliState}; pub struct BrotliDecoder { // `BrotliState` is very large (over 2kb) which is why we're boxing it. state: Box>, } impl BrotliDecoder { pub(crate) fn new() -> Self { Self { state: Box::new(BrotliState::new( StandardAlloc::default(), StandardAlloc::default(), StandardAlloc::default(), )), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let in_buf = input.unwritten(); let mut out_buf = output.unwritten_mut(); let mut input_len = 0; let mut output_len = 0; let status = match BrotliDecompressStream( &mut in_buf.len(), &mut input_len, in_buf, &mut out_buf.len(), &mut output_len, out_buf, &mut 0, &mut self.state, ) { BrotliResult::ResultFailure => { return Err(Error::new(ErrorKind::Other, "brotli error")) } status => status, }; input.advance(input_len); output.advance(output_len); Ok(status) } } impl Decode for BrotliDecoder { fn reinit(&mut self) -> Result<()> { self.state = Box::new(BrotliState::new( StandardAlloc::default(), StandardAlloc::default(), StandardAlloc::default(), )); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode(input, output)? { BrotliResult::ResultSuccess => Ok(true), BrotliResult::NeedsMoreOutput | BrotliResult::NeedsMoreInput => Ok(false), BrotliResult::ResultFailure => unreachable!(), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode(&mut PartialBuffer::new(&[][..]), output)? { BrotliResult::ResultSuccess | BrotliResult::NeedsMoreInput => Ok(true), BrotliResult::NeedsMoreOutput => Ok(false), BrotliResult::ResultFailure => unreachable!(), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode(&mut PartialBuffer::new(&[][..]), output)? { BrotliResult::ResultSuccess => Ok(true), BrotliResult::NeedsMoreOutput => Ok(false), BrotliResult::NeedsMoreInput => Err(Error::new( ErrorKind::UnexpectedEof, "reached unexpected EOF", )), BrotliResult::ResultFailure => unreachable!(), } } } impl fmt::Debug for BrotliDecoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BrotliDecoder") .field("decompress", &"") .finish() } } async-compression-0.4.0/src/codec/brotli/encoder.rs000064400000000000000000000054211046102023000204270ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::{ fmt, io::{Error, ErrorKind, Result}, }; use brotli::enc::{ backward_references::BrotliEncoderParams, encode::{ BrotliEncoderCompressStream, BrotliEncoderCreateInstance, BrotliEncoderHasMoreOutput, BrotliEncoderIsFinished, BrotliEncoderOperation, BrotliEncoderStateStruct, }, StandardAlloc, }; pub struct BrotliEncoder { state: BrotliEncoderStateStruct, } impl BrotliEncoder { pub(crate) fn new(params: BrotliEncoderParams) -> Self { let mut state = BrotliEncoderCreateInstance(StandardAlloc::default()); state.params = params; Self { state } } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, op: BrotliEncoderOperation, ) -> Result<()> { let in_buf = input.unwritten(); let mut out_buf = output.unwritten_mut(); let mut input_len = 0; let mut output_len = 0; if BrotliEncoderCompressStream( &mut self.state, op, &mut in_buf.len(), in_buf, &mut input_len, &mut out_buf.len(), out_buf, &mut output_len, &mut None, &mut |_, _, _, _| (), ) <= 0 { return Err(Error::new(ErrorKind::Other, "brotli error")); } input.advance(input_len); output.advance(output_len); Ok(()) } } impl Encode for BrotliEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.encode( input, output, BrotliEncoderOperation::BROTLI_OPERATION_PROCESS, ) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.encode( &mut PartialBuffer::new(&[][..]), output, BrotliEncoderOperation::BROTLI_OPERATION_FLUSH, )?; Ok(BrotliEncoderHasMoreOutput(&self.state) == 0) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.encode( &mut PartialBuffer::new(&[][..]), output, BrotliEncoderOperation::BROTLI_OPERATION_FINISH, )?; Ok(BrotliEncoderIsFinished(&self.state) == 1) } } impl fmt::Debug for BrotliEncoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BrotliEncoder") .field("compress", &"") .finish() } } async-compression-0.4.0/src/codec/brotli/mod.rs000064400000000000000000000001421046102023000175620ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::BrotliDecoder, encoder::BrotliEncoder}; async-compression-0.4.0/src/codec/bzip2/decoder.rs000064400000000000000000000057571046102023000201640ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::fmt; use std::io::{Error, ErrorKind, Result}; use bzip2::{Decompress, Status}; pub struct BzDecoder { decompress: Decompress, } impl fmt::Debug for BzDecoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "BzDecoder {{total_in: {}, total_out: {}}}", self.decompress.total_in(), self.decompress.total_out() ) } } impl BzDecoder { pub(crate) fn new() -> Self { Self { decompress: Decompress::new(false), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let prior_in = self.decompress.total_in(); let prior_out = self.decompress.total_out(); let status = self .decompress .decompress(input.unwritten(), output.unwritten_mut()) .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; input.advance((self.decompress.total_in() - prior_in) as usize); output.advance((self.decompress.total_out() - prior_out) as usize); Ok(status) } } impl Decode for BzDecoder { fn reinit(&mut self) -> Result<()> { self.decompress = Decompress::new(false); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode(input, output)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(false), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // THe Run action on compression went ok. Status::RunOk => unreachable!(), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => Ok(true), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(Error::new(ErrorKind::Other, "out of memory")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.decode(&mut PartialBuffer::new(&[][..]), output)?; loop { let old_len = output.written().len(); self.decode(&mut PartialBuffer::new(&[][..]), output)?; if output.written().len() == old_len { break; } } Ok(!output.unwritten().is_empty()) } fn finish( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { Ok(true) } } async-compression-0.4.0/src/codec/bzip2/encoder.rs000064400000000000000000000127401046102023000201640ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::fmt; use std::io::{Error, ErrorKind, Result}; use bzip2::{Action, Compress, Compression, Status}; pub struct BzEncoder { compress: Compress, } impl fmt::Debug for BzEncoder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "BzEncoder {{total_in: {}, total_out: {}}}", self.compress.total_in(), self.compress.total_out() ) } } impl BzEncoder { /// Creates a new stream prepared for compression. /// /// The `work_factor` parameter controls how the compression phase behaves /// when presented with worst case, highly repetitive, input data. If /// compression runs into difficulties caused by repetitive data, the /// library switches from the standard sorting algorithm to a fallback /// algorithm. The fallback is slower than the standard algorithm by perhaps /// a factor of three, but always behaves reasonably, no matter how bad the /// input. /// /// Lower values of `work_factor` reduce the amount of effort the standard /// algorithm will expend before resorting to the fallback. You should set /// this parameter carefully; too low, and many inputs will be handled by /// the fallback algorithm and so compress rather slowly, too high, and your /// average-to-worst case compression times can become very large. The /// default value of 30 gives reasonable behaviour over a wide range of /// circumstances. /// /// Allowable values range from 0 to 250 inclusive. 0 is a special case, /// equivalent to using the default value of 30. pub(crate) fn new(level: Compression, work_factor: u32) -> Self { Self { compress: Compress::new(level, work_factor), } } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, action: Action, ) -> Result { let prior_in = self.compress.total_in(); let prior_out = self.compress.total_out(); let status = self .compress .compress(input.unwritten(), output.unwritten_mut(), action) .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; input.advance((self.compress.total_in() - prior_in) as usize); output.advance((self.compress.total_out() - prior_out) as usize); Ok(status) } } impl Encode for BzEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { match self.encode(input, output, Action::Run)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(()), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // The Run action on compression went ok. Status::RunOk => Ok(()), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => unreachable!(), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(Error::new(ErrorKind::Other, "out of memory")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Flush)? { // Decompression went fine, nothing much to report. Status::Ok => unreachable!(), // The Flush action on a compression went ok. Status::FlushOk => Ok(false), // The Run action on compression went ok. Status::RunOk => Ok(true), // The Finish action on compression went ok. Status::FinishOk => unreachable!(), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => unreachable!(), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(Error::new(ErrorKind::Other, "out of memory")), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.encode(&mut PartialBuffer::new(&[][..]), output, Action::Finish)? { // Decompression went fine, nothing much to report. Status::Ok => Ok(false), // The Flush action on a compression went ok. Status::FlushOk => unreachable!(), // The Run action on compression went ok. Status::RunOk => unreachable!(), // The Finish action on compression went ok. Status::FinishOk => Ok(false), // The stream's end has been met, meaning that no more data can be input. Status::StreamEnd => Ok(true), // There was insufficient memory in the input or output buffer to complete // the request, but otherwise everything went normally. Status::MemNeeded => Err(Error::new(ErrorKind::Other, "out of memory")), } } } async-compression-0.4.0/src/codec/bzip2/mod.rs000064400000000000000000000001321046102023000173140ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::BzDecoder, encoder::BzEncoder}; async-compression-0.4.0/src/codec/deflate/decoder.rs000064400000000000000000000017361046102023000205330ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[derive(Debug)] pub struct DeflateDecoder { inner: crate::codec::FlateDecoder, } impl DeflateDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(false), } } } impl crate::codec::Decode for DeflateDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/deflate/encoder.rs000064400000000000000000000016651046102023000205460ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; use flate2::Compression; #[derive(Debug)] pub struct DeflateEncoder { inner: crate::codec::FlateEncoder, } impl DeflateEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, false), } } } impl Encode for DeflateEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/deflate/mod.rs000064400000000000000000000001441046102023000176750ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::DeflateDecoder, encoder::DeflateEncoder}; async-compression-0.4.0/src/codec/flate/decoder.rs000064400000000000000000000052251046102023000202170ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::{Error, ErrorKind, Result}; use flate2::{Decompress, FlushDecompress, Status}; #[derive(Debug)] pub struct FlateDecoder { zlib_header: bool, decompress: Decompress, } impl FlateDecoder { pub(crate) fn new(zlib_header: bool) -> Self { Self { zlib_header, decompress: Decompress::new(zlib_header), } } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, flush: FlushDecompress, ) -> Result { let prior_in = self.decompress.total_in(); let prior_out = self.decompress.total_out(); let status = self.decompress .decompress(input.unwritten(), output.unwritten_mut(), flush)?; input.advance((self.decompress.total_in() - prior_in) as usize); output.advance((self.decompress.total_out() - prior_out) as usize); Ok(status) } } impl Decode for FlateDecoder { fn reinit(&mut self) -> Result<()> { self.decompress.reset(self.zlib_header); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode(input, output, FlushDecompress::None)? { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::Sync, )?; loop { let old_len = output.written().len(); self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::None, )?; if output.written().len() == old_len { break; } } Ok(!output.unwritten().is_empty()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { match self.decode( &mut PartialBuffer::new(&[][..]), output, FlushDecompress::Finish, )? { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), } } } async-compression-0.4.0/src/codec/flate/encoder.rs000064400000000000000000000055641046102023000202370ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::{Error, ErrorKind, Result}; use flate2::{Compress, Compression, FlushCompress, Status}; #[derive(Debug)] pub struct FlateEncoder { compress: Compress, flushed: bool, } impl FlateEncoder { pub(crate) fn new(level: Compression, zlib_header: bool) -> Self { Self { compress: Compress::new(level, zlib_header), flushed: true, } } fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, flush: FlushCompress, ) -> Result { let prior_in = self.compress.total_in(); let prior_out = self.compress.total_out(); let status = self .compress .compress(input.unwritten(), output.unwritten_mut(), flush)?; input.advance((self.compress.total_in() - prior_in) as usize); output.advance((self.compress.total_out() - prior_out) as usize); Ok(status) } } impl Encode for FlateEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.flushed = false; match self.encode(input, output, FlushCompress::None)? { Status::Ok => Ok(()), Status::StreamEnd => unreachable!(), Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // We need to keep track of whether we've already flushed otherwise we'll just keep writing // out sync blocks continuously and probably never complete flushing. if self.flushed { return Ok(true); } self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::Sync, )?; loop { let old_len = output.written().len(); self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::None, )?; if output.written().len() == old_len { break; } } self.flushed = true; Ok(!output.unwritten().is_empty()) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.flushed = false; match self.encode( &mut PartialBuffer::new(&[][..]), output, FlushCompress::Finish, )? { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::BufError => Err(Error::new(ErrorKind::Other, "unexpected BufError")), } } } async-compression-0.4.0/src/codec/flate/mod.rs000064400000000000000000000001401046102023000173600ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::FlateDecoder, encoder::FlateEncoder}; async-compression-0.4.0/src/codec/gzip/decoder.rs000064400000000000000000000110151046102023000200670ustar 00000000000000use crate::{ codec::{ gzip::header::{self, Header}, Decode, }, util::PartialBuffer, }; use std::io::{Error, ErrorKind, Result}; use flate2::Crc; #[derive(Debug)] enum State { Header(header::Parser), Decoding, Footer(PartialBuffer>), Done, } #[derive(Debug)] pub struct GzipDecoder { inner: crate::codec::FlateDecoder, crc: Crc, state: State, header: Header, } fn check_footer(crc: &Crc, input: &[u8]) -> Result<()> { if input.len() < 8 { return Err(Error::new( ErrorKind::InvalidData, "Invalid gzip footer length", )); } let crc_sum = crc.sum().to_le_bytes(); let bytes_read = crc.amount().to_le_bytes(); if crc_sum != input[0..4] { return Err(Error::new( ErrorKind::InvalidData, "CRC computed does not match", )); } if bytes_read != input[4..8] { return Err(Error::new( ErrorKind::InvalidData, "amount of bytes read does not match", )); } Ok(()) } impl GzipDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(false), crc: Crc::new(), state: State::Header(header::Parser::default()), header: Header::default(), } } fn process, O: AsRef<[u8]> + AsMut<[u8]>>( &mut self, input: &mut PartialBuffer, output: &mut PartialBuffer, inner: impl Fn(&mut Self, &mut PartialBuffer, &mut PartialBuffer) -> Result, ) -> Result { loop { match &mut self.state { State::Header(parser) => { if let Some(header) = parser.input(input)? { self.header = header; self.state = State::Decoding; } } State::Decoding => { let prior = output.written().len(); let done = inner(self, input, output)?; self.crc.update(&output.written()[prior..]); if done { self.state = State::Footer(vec![0; 8].into()) } } State::Footer(footer) => { footer.copy_unwritten_from(input); if footer.unwritten().is_empty() { check_footer(&self.crc, footer.written())?; self.state = State::Done } } State::Done => {} }; if let State::Done = self.state { return Ok(true); } if input.unwritten().is_empty() || output.unwritten().is_empty() { return Ok(false); } } } } impl Decode for GzipDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; self.crc = Crc::new(); self.state = State::Header(header::Parser::default()); self.header = Header::default(); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.process(input, output, |this, input, output| { this.inner.decode(input, output) }) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { loop { match self.state { State::Header(_) | State::Footer(_) | State::Done => return Ok(true), State::Decoding => { let prior = output.written().len(); let done = self.inner.flush(output)?; self.crc.update(&output.written()[prior..]); if done { return Ok(true); } } }; if output.unwritten().is_empty() { return Ok(false); } } } fn finish( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // Because of the footer we have to have already flushed all the data out before we get here if let State::Done = self.state { Ok(true) } else { Err(Error::new( ErrorKind::UnexpectedEof, "unexpected end of file", )) } } } async-compression-0.4.0/src/codec/gzip/encoder.rs000064400000000000000000000104311046102023000201020ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; use flate2::{Compression, Crc}; #[derive(Debug)] enum State { Header(PartialBuffer>), Encoding, Footer(PartialBuffer>), Done, } #[derive(Debug)] pub struct GzipEncoder { inner: crate::codec::FlateEncoder, crc: Crc, state: State, } fn header(level: Compression) -> Vec { let level_byte = if level.level() >= Compression::best().level() { 0x02 } else if level.level() <= Compression::fast().level() { 0x04 } else { 0x00 }; vec![0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, level_byte, 0xff] } impl GzipEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, false), crc: Crc::new(), state: State::Header(header(level).into()), } } fn footer(&mut self) -> Vec { let mut output = Vec::with_capacity(8); output.extend(&self.crc.sum().to_le_bytes()); output.extend(&self.crc.amount().to_le_bytes()); output } } impl Encode for GzipEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { loop { match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } } State::Encoding => { let prior_written = input.written().len(); self.inner.encode(input, output)?; self.crc.update(&input.written()[prior_written..]); } State::Footer(_) | State::Done => panic!("encode after complete"), }; if input.unwritten().is_empty() || output.unwritten().is_empty() { return Ok(()); } } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { loop { let done = match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } false } State::Encoding => self.inner.flush(output)?, State::Footer(footer) => { output.copy_unwritten_from(&mut *footer); if footer.unwritten().is_empty() { self.state = State::Done; true } else { false } } State::Done => true, }; if done { return Ok(true); } if output.unwritten().is_empty() { return Ok(false); } } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { loop { match &mut self.state { State::Header(header) => { output.copy_unwritten_from(&mut *header); if header.unwritten().is_empty() { self.state = State::Encoding; } } State::Encoding => { if self.inner.finish(output)? { self.state = State::Footer(self.footer().into()); } } State::Footer(footer) => { output.copy_unwritten_from(&mut *footer); if footer.unwritten().is_empty() { self.state = State::Done; } } State::Done => {} }; if let State::Done = self.state { return Ok(true); } if output.unwritten().is_empty() { return Ok(false); } } } } async-compression-0.4.0/src/codec/gzip/header.rs000064400000000000000000000115341046102023000177200ustar 00000000000000use crate::util::PartialBuffer; use std::io::{Error, ErrorKind, Result}; #[derive(Debug, Default)] struct Flags { ascii: bool, crc: bool, extra: bool, filename: bool, comment: bool, } #[derive(Debug, Default)] pub(super) struct Header { flags: Flags, } #[derive(Debug)] enum State { Fixed(PartialBuffer<[u8; 10]>), ExtraLen(PartialBuffer<[u8; 2]>), Extra(PartialBuffer>), Filename(Vec), Comment(Vec), Crc(PartialBuffer<[u8; 2]>), Done, } impl Default for State { fn default() -> Self { State::Fixed(<_>::default()) } } #[derive(Debug, Default)] pub(super) struct Parser { state: State, header: Header, } impl Header { fn parse(input: &[u8; 10]) -> Result { if input[0..3] != [0x1f, 0x8b, 0x08] { return Err(Error::new(ErrorKind::InvalidData, "Invalid gzip header")); } let flag = input[3]; let flags = Flags { ascii: (flag & 0b0000_0001) != 0, crc: (flag & 0b0000_0010) != 0, extra: (flag & 0b0000_0100) != 0, filename: (flag & 0b0000_1000) != 0, comment: (flag & 0b0001_0000) != 0, }; Ok(Header { flags }) } } impl Parser { pub(super) fn input( &mut self, input: &mut PartialBuffer>, ) -> Result> { loop { match &mut self.state { State::Fixed(data) => { data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.header = Header::parse(&data.take().into_inner())?; self.state = State::ExtraLen(<_>::default()); } else { return Ok(None); } } State::ExtraLen(data) => { if !self.header.flags.extra { self.state = State::Filename(<_>::default()); continue; } data.copy_unwritten_from(input); if data.unwritten().is_empty() { let len = u16::from_le_bytes(data.take().into_inner()); self.state = State::Extra(vec![0; usize::from(len)].into()); } else { return Ok(None); } } State::Extra(data) => { data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.state = State::Filename(<_>::default()); } else { return Ok(None); } } State::Filename(data) => { if !self.header.flags.filename { self.state = State::Comment(<_>::default()); continue; } if let Some(len) = memchr::memchr(0, input.unwritten()) { data.extend_from_slice(&input.unwritten()[..len]); input.advance(len + 1); self.state = State::Comment(<_>::default()); } else { data.extend_from_slice(input.unwritten()); input.advance(input.unwritten().len()); return Ok(None); } } State::Comment(data) => { if !self.header.flags.comment { self.state = State::Crc(<_>::default()); continue; } if let Some(len) = memchr::memchr(0, input.unwritten()) { data.extend_from_slice(&input.unwritten()[..len]); input.advance(len + 1); self.state = State::Crc(<_>::default()); } else { data.extend_from_slice(input.unwritten()); input.advance(input.unwritten().len()); return Ok(None); } } State::Crc(data) => { if !self.header.flags.crc { self.state = State::Done; return Ok(Some(std::mem::take(&mut self.header))); } data.copy_unwritten_from(input); if data.unwritten().is_empty() { self.state = State::Done; return Ok(Some(std::mem::take(&mut self.header))); } else { return Ok(None); } } State::Done => { panic!("parser used after done"); } }; } } } async-compression-0.4.0/src/codec/gzip/mod.rs000064400000000000000000000001521046102023000172410ustar 00000000000000mod decoder; mod encoder; mod header; pub(crate) use self::{decoder::GzipDecoder, encoder::GzipEncoder}; async-compression-0.4.0/src/codec/lzma/decoder.rs000064400000000000000000000016701046102023000200670ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct LzmaDecoder { inner: crate::codec::Xz2Decoder, } impl LzmaDecoder { pub fn new() -> Self { Self { inner: crate::codec::Xz2Decoder::new(), } } } impl Decode for LzmaDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit() } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/lzma/encoder.rs000064400000000000000000000016711046102023000201020ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct LzmaEncoder { inner: crate::codec::Xz2Encoder, } impl LzmaEncoder { pub fn new(level: u32) -> Self { Self { inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Lzma, level), } } } impl Encode for LzmaEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // Flush on LZMA 1 is not supported Ok(true) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/lzma/mod.rs000064400000000000000000000001361046102023000172350ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::LzmaDecoder, encoder::LzmaEncoder}; async-compression-0.4.0/src/codec/mod.rs000064400000000000000000000047731046102023000163050ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[cfg(feature = "brotli")] mod brotli; #[cfg(feature = "bzip2")] mod bzip2; #[cfg(feature = "deflate")] mod deflate; #[cfg(feature = "flate2")] mod flate; #[cfg(feature = "gzip")] mod gzip; #[cfg(feature = "lzma")] mod lzma; #[cfg(feature = "xz")] mod xz; #[cfg(feature = "xz2")] mod xz2; #[cfg(feature = "zlib")] mod zlib; #[cfg(feature = "zstd")] mod zstd; #[cfg(feature = "brotli")] pub(crate) use self::brotli::{BrotliDecoder, BrotliEncoder}; #[cfg(feature = "bzip2")] pub(crate) use self::bzip2::{BzDecoder, BzEncoder}; #[cfg(feature = "deflate")] pub(crate) use self::deflate::{DeflateDecoder, DeflateEncoder}; #[cfg(feature = "flate2")] pub(crate) use self::flate::{FlateDecoder, FlateEncoder}; #[cfg(feature = "gzip")] pub(crate) use self::gzip::{GzipDecoder, GzipEncoder}; #[cfg(feature = "lzma")] pub(crate) use self::lzma::{LzmaDecoder, LzmaEncoder}; #[cfg(feature = "xz")] pub(crate) use self::xz::{XzDecoder, XzEncoder}; #[cfg(feature = "xz2")] pub(crate) use self::xz2::{Xz2Decoder, Xz2Encoder, Xz2FileFormat}; #[cfg(feature = "zlib")] pub(crate) use self::zlib::{ZlibDecoder, ZlibEncoder}; #[cfg(feature = "zstd")] pub(crate) use self::zstd::{ZstdDecoder, ZstdEncoder}; pub trait Encode { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()>; /// Returns whether the internal buffers are flushed fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) -> Result; /// Returns whether the internal buffers are flushed and the end of the stream is written fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; } pub trait Decode { /// Reinitializes this decoder ready to decode a new member/frame of data. fn reinit(&mut self) -> Result<()>; /// Returns whether the end of the stream has been read fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; /// Returns whether the internal buffers are flushed fn flush(&mut self, output: &mut PartialBuffer + AsMut<[u8]>>) -> Result; /// Returns whether the internal buffers are flushed fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result; } async-compression-0.4.0/src/codec/xz/decoder.rs000064400000000000000000000036701046102023000175670ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::io::{Error, ErrorKind, Result}; #[derive(Debug)] pub struct XzDecoder { inner: crate::codec::Xz2Decoder, skip_padding: Option, } impl XzDecoder { pub fn new() -> Self { Self { inner: crate::codec::Xz2Decoder::new(), skip_padding: None, } } } impl Decode for XzDecoder { fn reinit(&mut self) -> Result<()> { self.skip_padding = Some(4); self.inner.reinit() } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if let Some(ref mut count) = self.skip_padding { while input.unwritten().first() == Some(&0) { input.advance(1); *count -= 1; if *count == 0 { *count = 4; } } if input.unwritten().is_empty() { return Ok(true); } // If this is non-padding then it cannot start with null bytes, so it must be invalid // padding if *count != 4 { return Err(Error::new( ErrorKind::InvalidData, "stream padding was not a multiple of 4 bytes", )); } self.skip_padding = None; } self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if self.skip_padding.is_some() { return Ok(true); } self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { if self.skip_padding.is_some() { return Ok(true); } self.inner.finish(output) } } async-compression-0.4.0/src/codec/xz/encoder.rs000064400000000000000000000016241046102023000175760ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; #[derive(Debug)] pub struct XzEncoder { inner: crate::codec::Xz2Encoder, } impl XzEncoder { pub fn new(level: u32) -> Self { Self { inner: crate::codec::Xz2Encoder::new(crate::codec::Xz2FileFormat::Xz, level), } } } impl Encode for XzEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/xz/mod.rs000064400000000000000000000001321046102023000167270ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::XzDecoder, encoder::XzEncoder}; async-compression-0.4.0/src/codec/xz2/decoder.rs000064400000000000000000000046331046102023000176510ustar 00000000000000use crate::{codec::Decode, util::PartialBuffer}; use std::fmt::{Debug, Formatter, Result as FmtResult}; use std::io::Result; use xz2::stream::{Action, Status, Stream}; pub struct Xz2Decoder { stream: Stream, } impl Debug for Xz2Decoder { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { write!(f, "LzmaDecoder") } } impl Xz2Decoder { pub fn new() -> Self { Self { stream: Stream::new_auto_decoder(u64::max_value(), 0).unwrap(), } } } impl Decode for Xz2Decoder { fn reinit(&mut self) -> Result<()> { *self = Self::new(); Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let previous_in = self.stream.total_in() as usize; let previous_out = self.stream.total_out() as usize; let status = self .stream .process(input.unwritten(), output.unwritten_mut(), Action::Run)?; input.advance(self.stream.total_in() as usize - previous_in); output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => panic!("Unexpected lzma integrity check"), Status::MemNeeded => Err(std::io::Error::new( std::io::ErrorKind::Other, "More memory needed", )), } } fn flush( &mut self, _output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { // While decoding flush is a noop Ok(true) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::Finish)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => panic!("Unexpected lzma integrity check"), Status::MemNeeded => Err(std::io::Error::new( std::io::ErrorKind::Other, "More memory needed", )), } } } async-compression-0.4.0/src/codec/xz2/encoder.rs000064400000000000000000000061151046102023000176600ustar 00000000000000use crate::codec::Xz2FileFormat; use crate::{codec::Encode, util::PartialBuffer}; use std::fmt::{Debug, Formatter, Result as FmtResult}; use std::io::Result; use xz2::stream::{Action, Check, LzmaOptions, Status, Stream}; pub struct Xz2Encoder { stream: Stream, } impl Debug for Xz2Encoder { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { write!(f, "Xz2Encoder") } } impl Xz2Encoder { pub fn new(format: Xz2FileFormat, level: u32) -> Self { let stream = match format { Xz2FileFormat::Xz => Stream::new_easy_encoder(level, Check::Crc64).unwrap(), Xz2FileFormat::Lzma => { Stream::new_lzma_encoder(&LzmaOptions::new_preset(level).unwrap()).unwrap() } }; Self { stream } } } impl Encode for Xz2Encoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { let previous_in = self.stream.total_in() as usize; let previous_out = self.stream.total_out() as usize; let status = self .stream .process(input.unwritten(), output.unwritten_mut(), Action::Run)?; input.advance(self.stream.total_in() as usize - previous_in); output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok | Status::StreamEnd => Ok(()), Status::GetCheck => panic!("Unexpected lzma integrity check"), Status::MemNeeded => Err(std::io::Error::new( std::io::ErrorKind::Other, "out of memory", )), } } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::SyncFlush)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => panic!("Unexpected lzma integrity check"), Status::MemNeeded => Err(std::io::Error::new( std::io::ErrorKind::Other, "out of memory", )), } } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let previous_out = self.stream.total_out() as usize; let status = self .stream .process(&[], output.unwritten_mut(), Action::Finish)?; output.advance(self.stream.total_out() as usize - previous_out); match status { Status::Ok => Ok(false), Status::StreamEnd => Ok(true), Status::GetCheck => panic!("Unexpected lzma integrity check"), Status::MemNeeded => Err(std::io::Error::new( std::io::ErrorKind::Other, "out of memory", )), } } } async-compression-0.4.0/src/codec/xz2/mod.rs000064400000000000000000000002121046102023000170100ustar 00000000000000mod decoder; mod encoder; pub enum Xz2FileFormat { Xz, Lzma, } pub(crate) use self::{decoder::Xz2Decoder, encoder::Xz2Encoder}; async-compression-0.4.0/src/codec/zlib/decoder.rs000064400000000000000000000017241046102023000200640ustar 00000000000000use crate::util::PartialBuffer; use std::io::Result; #[derive(Debug)] pub struct ZlibDecoder { inner: crate::codec::FlateDecoder, } impl ZlibDecoder { pub(crate) fn new() -> Self { Self { inner: crate::codec::FlateDecoder::new(true), } } } impl crate::codec::Decode for ZlibDecoder { fn reinit(&mut self) -> Result<()> { self.inner.reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.decode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/zlib/encoder.rs000064400000000000000000000016531046102023000200770ustar 00000000000000use crate::{codec::Encode, util::PartialBuffer}; use std::io::Result; use flate2::Compression; #[derive(Debug)] pub struct ZlibEncoder { inner: crate::codec::FlateEncoder, } impl ZlibEncoder { pub(crate) fn new(level: Compression) -> Self { Self { inner: crate::codec::FlateEncoder::new(level, true), } } } impl Encode for ZlibEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { self.inner.encode(input, output) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.flush(output) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { self.inner.finish(output) } } async-compression-0.4.0/src/codec/zlib/mod.rs000064400000000000000000000001361046102023000172320ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::ZlibDecoder, encoder::ZlibEncoder}; async-compression-0.4.0/src/codec/zstd/decoder.rs000064400000000000000000000033071046102023000201070ustar 00000000000000use std::io::Result; use crate::{codec::Decode, unshared::Unshared, util::PartialBuffer}; use libzstd::stream::raw::{Decoder, Operation}; #[derive(Debug)] pub struct ZstdDecoder { decoder: Unshared>, } impl ZstdDecoder { pub(crate) fn new() -> Self { Self { decoder: Unshared::new(Decoder::new().unwrap()), } } } impl Decode for ZstdDecoder { fn reinit(&mut self) -> Result<()> { self.decoder.get_mut().reinit()?; Ok(()) } fn decode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let status = self .decoder .get_mut() .run_on_buffers(input.unwritten(), output.unwritten_mut())?; input.advance(status.bytes_read); output.advance(status.bytes_written); Ok(status.remaining == 0) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.decoder.get_mut().flush(&mut out_buf)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.decoder.get_mut().finish(&mut out_buf, true)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } } async-compression-0.4.0/src/codec/zstd/encoder.rs000064400000000000000000000036511046102023000201230ustar 00000000000000use crate::{codec::Encode, unshared::Unshared, util::PartialBuffer}; use libzstd::stream::raw::{CParameter, Encoder, Operation}; use std::io::Result; #[derive(Debug)] pub struct ZstdEncoder { encoder: Unshared>, } impl ZstdEncoder { pub(crate) fn new(level: i32) -> Self { Self { encoder: Unshared::new(Encoder::new(level).unwrap()), } } pub(crate) fn new_with_params(level: i32, params: &[crate::zstd::CParameter]) -> Self { let mut encoder = Encoder::new(level).unwrap(); for param in params { encoder.set_parameter(param.as_zstd()).unwrap(); } Self { encoder: Unshared::new(encoder), } } } impl Encode for ZstdEncoder { fn encode( &mut self, input: &mut PartialBuffer>, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result<()> { let status = self .encoder .get_mut() .run_on_buffers(input.unwritten(), output.unwritten_mut())?; input.advance(status.bytes_read); output.advance(status.bytes_written); Ok(()) } fn flush( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.encoder.get_mut().flush(&mut out_buf)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } fn finish( &mut self, output: &mut PartialBuffer + AsMut<[u8]>>, ) -> Result { let mut out_buf = zstd_safe::OutBuffer::around(output.unwritten_mut()); let bytes_left = self.encoder.get_mut().finish(&mut out_buf, true)?; let len = out_buf.as_slice().len(); output.advance(len); Ok(bytes_left == 0) } } async-compression-0.4.0/src/codec/zstd/mod.rs000064400000000000000000000001361046102023000172560ustar 00000000000000mod decoder; mod encoder; pub(crate) use self::{decoder::ZstdDecoder, encoder::ZstdEncoder}; async-compression-0.4.0/src/futures/bufread/generic/decoder.rs000064400000000000000000000074301046102023000225700ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Decode, util::PartialBuffer}; use futures_core::ready; use futures_io::{AsyncBufRead, AsyncRead}; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Decoding, Flushing, Done, Next, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] reader: R, decoder: D, state: State, multiple_members: bool, } } impl Decoder { pub fn new(reader: R, decoder: D) -> Self { Self { reader, decoder, state: State::Decoding, multiple_members: false, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } pub fn multiple_members(&mut self, enabled: bool) { self.multiple_members = enabled; } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Decoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { // Avoid attempting to reinitialise the decoder if the reader // has returned EOF. *this.multiple_members = false; State::Flushing } else { let mut input = PartialBuffer::new(input); let done = this.decoder.decode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); if done { State::Flushing } else { State::Decoding } } } State::Flushing => { if this.decoder.finish(output)? { if *this.multiple_members { this.decoder.reinit()?; State::Next } else { State::Done } } else { State::Flushing } } State::Done => State::Done, State::Next => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Done } else { State::Decoding } } }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Decoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut output = PartialBuffer::new(buf); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(output.written().len())), } } } async-compression-0.4.0/src/futures/bufread/generic/encoder.rs000064400000000000000000000054211046102023000226000ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Encode, util::PartialBuffer}; use futures_core::ready; use futures_io::{AsyncBufRead, AsyncRead}; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Encoding, Flushing, Done, } pin_project! { #[derive(Debug)] pub struct Encoder { #[pin] reader: R, encoder: E, state: State, } } impl Encoder { pub fn new(reader: R, encoder: E) -> Self { Self { reader, encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Encoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Flushing } else { let mut input = PartialBuffer::new(input); this.encoder.encode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); State::Encoding } } State::Flushing => { if this.encoder.finish(output)? { State::Done } else { State::Flushing } } State::Done => State::Done, }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Encoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut output = PartialBuffer::new(buf); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(output.written().len())), } } } async-compression-0.4.0/src/futures/bufread/generic/mod.rs000064400000000000000000000001171046102023000217350ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.0/src/futures/bufread/macros/decoder.rs000064400000000000000000000066201046102023000224400ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will /// read compressed data from an underlying stream and emit a stream of uncompressed data. #[derive(Debug)] pub struct $name { #[pin] inner: crate::futures::bufread::Decoder, } } impl $name { /// Creates a new decoder which will read compressed data from the given stream and /// emit a uncompressed stream. pub fn new(read: R) -> $name { $name { inner: crate::futures::bufread::Decoder::new(read, crate::codec::$name::new()), } } /// Configure multi-member/frame decoding, if enabled this will reset the decoder state /// when reaching the end of a compressed member/frame and expect either EOF or another /// compressed member/frame to follow it in the stream. pub fn multiple_members(&mut self, enabled: bool) { self.inner.multiple_members(enabled); } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &R { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut R { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut R> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> R { self.inner.into_inner() } } impl futures_io::AsyncRead for $name { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/futures/bufread/macros/encoder.rs000064400000000000000000000057541046102023000224610ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($constructor:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](futures_io::AsyncRead) interface and will /// read uncompressed data from an underlying stream and emit a stream of compressed data. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::bufread::Encoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncBufRead> $name<$inner> { $( /// Creates a new encoder which will read uncompressed data from the given stream /// and emit a compressed stream. /// $($constructor)* )* /// Acquires a reference to the underlying reader that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying reader. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncBufRead> futures_io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/futures/bufread/macros/mod.rs000064400000000000000000000000641046102023000216060ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.0/src/futures/bufread/mod.rs000064400000000000000000000003671046102023000203300ustar 00000000000000//! Types which operate over [`AsyncBufRead`](futures_io::AsyncBufRead) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; pub(crate) use generic::{Decoder, Encoder}; algos!(futures::bufread); async-compression-0.4.0/src/futures/mod.rs000064400000000000000000000001551046102023000167130ustar 00000000000000//! Implementations for IO traits exported by [`futures-io`](::futures_io). pub mod bufread; pub mod write; async-compression-0.4.0/src/futures/write/buf_write.rs000064400000000000000000000025361046102023000212610ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; pub(crate) trait AsyncBufWrite { /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if /// it is full. /// /// On success, returns `Poll::Ready(Ok(buf))`. /// /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and /// arranges for the current task context (`cx`) to receive a notification when the object /// becomes readable or is closed. fn poll_partial_flush_buf( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>; /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be /// written out to the underlying IO when possible. /// /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to /// function properly. This function does not perform any I/O, it simply informs this object /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't /// called before calling it. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. fn produce(self: Pin<&mut Self>, amt: usize); } async-compression-0.4.0/src/futures/write/buf_writer.rs000064400000000000000000000153261046102023000214440ustar 00000000000000// Originally sourced from `futures_util::io::buf_writer`, needs to be redefined locally so that // the `AsyncBufWrite` impl can access its internals, and changed a bit to make it more efficient // with those methods. use super::AsyncBufWrite; use futures_core::ready; use futures_io::{AsyncSeek, AsyncWrite, SeekFrom}; use pin_project_lite::pin_project; use std::{ cmp::min, fmt, io, pin::Pin, task::{Context, Poll}, }; const DEFAULT_BUF_SIZE: usize = 8192; pin_project! { pub struct BufWriter { #[pin] inner: W, buf: Box<[u8]>, written: usize, buffered: usize, } } impl BufWriter { /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. pub fn new(inner: W) -> Self { Self::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter` with the specified buffer capacity. pub fn with_capacity(cap: usize, inner: W) -> Self { Self { inner, buf: vec![0; cap].into(), written: 0, buffered: 0, } } fn partial_flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered]) { Poll::Pending => { break; } Poll::Ready(Ok(0)) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Poll::Ready(Ok(n)) => *this.written += n, Poll::Ready(Err(e)) => { ret = Err(e); break; } } } if *this.written > 0 { this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } else if *this.buffered == 0 { Poll::Ready(ret) } else { ret?; Poll::Pending } } fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match ready!(this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered])) { Ok(0) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Ok(n) => *this.written += n, Err(e) => { ret = Err(e); break; } } } this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } /// Gets a reference to the underlying writer. pub fn get_ref(&self) -> &W { &self.inner } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_mut(&mut self) -> &mut W { &mut self.inner } /// Gets a pinned mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().inner } /// Consumes this `BufWriter`, returning the underlying writer. /// /// Note that any leftover data in the internal buffer is lost. pub fn into_inner(self) -> W { self.inner } } impl AsyncWrite for BufWriter { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { let this = self.as_mut().project(); if *this.buffered + buf.len() > this.buf.len() { ready!(self.as_mut().partial_flush_buf(cx))?; } let this = self.as_mut().project(); if buf.len() >= this.buf.len() { if *this.buffered == 0 { this.inner.poll_write(cx, buf) } else { // The only way that `partial_flush_buf` would have returned with // `this.buffered != 0` is if it were Pending, so our waker was already queued Poll::Pending } } else { let len = min(this.buf.len() - *this.buffered, buf.len()); this.buf[*this.buffered..*this.buffered + len].copy_from_slice(&buf[..len]); *this.buffered += len; Poll::Ready(Ok(len)) } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_flush(cx) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_close(cx) } } impl AsyncBufWrite for BufWriter { fn poll_partial_flush_buf( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { ready!(self.as_mut().partial_flush_buf(cx))?; let this = self.project(); Poll::Ready(Ok(&mut this.buf[*this.buffered..])) } fn produce(self: Pin<&mut Self>, amt: usize) { *self.project().buffered += amt; } } impl fmt::Debug for BufWriter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BufWriter") .field("writer", &self.inner) .field( "buffer", &format_args!("{}/{}", self.buffered, self.buf.len()), ) .field("written", &self.written) .finish() } } impl AsyncSeek for BufWriter { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn poll_seek( mut self: Pin<&mut Self>, cx: &mut Context<'_>, pos: SeekFrom, ) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_seek(cx, pos) } } async-compression-0.4.0/src/futures/write/generic/decoder.rs000064400000000000000000000113411046102023000223060ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::{Error, ErrorKind, Result}; use crate::{ codec::Decode, futures::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use futures_io::AsyncWrite; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Decoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] writer: BufWriter, decoder: D, state: State, } } impl Decoder { pub fn new(writer: W, decoder: D) -> Self { Self { writer: BufWriter::new(writer), decoder, state: State::Decoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Decoding => { if this.decoder.decode(input, &mut output)? { State::Finishing } else { State::Decoding } } State::Finishing => { if this.decoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => panic!("Write after end of stream"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let (state, done) = match this.state { State::Decoding => { let done = this.decoder.flush(&mut output)?; (State::Decoding, done) } State::Finishing => { if this.decoder.finish(&mut output)? { (State::Done, false) } else { (State::Finishing, false) } } State::Done => (State::Done, true), }; *this.state = state; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Decoder { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let State::Decoding = self.as_mut().project().state { *self.as_mut().project().state = State::Finishing; } ready!(self.as_mut().do_poll_flush(cx))?; if let State::Done = self.as_mut().project().state { ready!(self.as_mut().project().writer.as_mut().poll_close(cx))?; Poll::Ready(Ok(())) } else { Poll::Ready(Err(Error::new( ErrorKind::Other, "Attempt to close before finishing input", ))) } } } async-compression-0.4.0/src/futures/write/generic/encoder.rs000064400000000000000000000106031046102023000223200ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{ codec::Encode, futures::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use futures_io::AsyncWrite; use pin_project_lite::pin_project; #[derive(Debug)] enum State { Encoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Encoder { #[pin] writer: BufWriter, encoder: E, state: State, } } impl Encoder { pub fn new(writer: W, encoder: E) -> Self { Self { writer: BufWriter::new(writer), encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding => { this.encoder.encode(input, &mut output)?; State::Encoding } State::Finishing | State::Done => panic!("Write after close"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let done = match this.state { State::Encoding => this.encoder.flush(&mut output)?, State::Finishing | State::Done => panic!("Flush after close"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } fn do_poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding | State::Finishing => { if this.encoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => State::Done, }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Encoder { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_close(cx))?; ready!(self.project().writer.as_mut().poll_close(cx))?; Poll::Ready(Ok(())) } } async-compression-0.4.0/src/futures/write/generic/mod.rs000064400000000000000000000001171046102023000214570ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.0/src/futures/write/macros/decoder.rs000064400000000000000000000067201046102023000221630ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will /// take in compressed data and write it uncompressed to an underlying stream. #[derive(Debug)] pub struct $name { #[pin] inner: crate::futures::write::Decoder, } } impl $name { /// Creates a new decoder which will take in compressed data and write it uncompressed /// to the given stream. pub fn new(read: W) -> $name { $name { inner: crate::futures::write::Decoder::new(read, crate::codec::$name::new()), } } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &W { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut W> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> W { self.inner.into_inner() } } impl futures_io::AsyncWrite for $name { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_close( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_close(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/futures/write/macros/encoder.rs000064400000000000000000000066771046102023000222100ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($constructor:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](futures_io::AsyncWrite) interface and will /// take in uncompressed data and write it compressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::futures::write::Encoder<$inner, crate::codec::$name>, } } impl<$inner: futures_io::AsyncWrite> $name<$inner> { $( /// Creates a new encoder which will take in uncompressed data and write it /// compressed to the given stream. /// $($constructor)* )* /// Acquires a reference to the underlying writer that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying writer. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: futures_io::AsyncWrite> futures_io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_close( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_close(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use futures_io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/futures/write/macros/mod.rs000064400000000000000000000000641046102023000213300ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.0/src/futures/write/mod.rs000064400000000000000000000005161046102023000200460ustar 00000000000000//! Types which operate over [`AsyncWrite`](futures_io::AsyncWrite) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; mod buf_write; mod buf_writer; use self::{ buf_write::AsyncBufWrite, buf_writer::BufWriter, generic::{Decoder, Encoder}, }; algos!(futures::write); async-compression-0.4.0/src/lib.rs000064400000000000000000000255271046102023000152170ustar 00000000000000//! Adaptors between compression crates and Rust's modern asynchronous IO types. //! //! # Feature Organization //! //! This crate is divided up along two axes, which can each be individually selected via Cargo //! features. //! //! All features are disabled by default, you should enable just the ones you need from the lists //! below. //! //! If you want to pull in everything there are three group features defined: //! //! Feature | Does //! ---------|------ //! `all` | Activates all implementations and algorithms. //! `all-implementations` | Activates all implementations, needs to be paired with a selection of algorithms //! `all-algorithms` | Activates all algorithms, needs to be paired with a selection of implementations //! //! ## IO implementation //! //! The first division is which underlying asynchronous IO trait will be wrapped, these are //! available as separate features that have corresponding top-level modules: //! //! Feature | Type //! ---------|------ // TODO: Kill rustfmt on this section, `#![rustfmt::skip::attributes(cfg_attr)]` should do it, but // that's unstable #![cfg_attr( feature = "futures-io", doc = "[`futures-io`](crate::futures) | [`futures::io::AsyncBufRead`](futures_io::AsyncBufRead), [`futures::io::AsyncWrite`](futures_io::AsyncWrite)" )] #![cfg_attr( not(feature = "futures-io"), doc = "`futures-io` (*inactive*) | `futures::io::AsyncBufRead`, `futures::io::AsyncWrite`" )] #![cfg_attr( feature = "tokio", doc = "[`tokio`](crate::tokio) | [`tokio::io::AsyncBufRead`](::tokio::io::AsyncBufRead), [`tokio::io::AsyncWrite`](::tokio::io::AsyncWrite)" )] #![cfg_attr( not(feature = "tokio"), doc = "`tokio` (*inactive*) | `tokio::io::AsyncBufRead`, `tokio::io::AsyncWrite`" )] //! //! ## Compression algorithm //! //! The second division is which compression schemes to support, there are currently a few //! available choices, these determine which types will be available inside the above modules: //! //! Feature | Types //! ---------|------ #![cfg_attr( feature = "brotli", doc = "`brotli` | [`BrotliEncoder`](?search=BrotliEncoder), [`BrotliDecoder`](?search=BrotliDecoder)" )] #![cfg_attr( not(feature = "brotli"), doc = "`brotli` (*inactive*) | `BrotliEncoder`, `BrotliDecoder`" )] #![cfg_attr( feature = "bzip2", doc = "`bzip2` | [`BzEncoder`](?search=BzEncoder), [`BzDecoder`](?search=BzDecoder)" )] #![cfg_attr( not(feature = "bzip2"), doc = "`bzip2` (*inactive*) | `BzEncoder`, `BzDecoder`" )] #![cfg_attr( feature = "deflate", doc = "`deflate` | [`DeflateEncoder`](?search=DeflateEncoder), [`DeflateDecoder`](?search=DeflateDecoder)" )] #![cfg_attr( not(feature = "deflate"), doc = "`deflate` (*inactive*) | `DeflateEncoder`, `DeflateDecoder`" )] #![cfg_attr( feature = "gzip", doc = "`gzip` | [`GzipEncoder`](?search=GzipEncoder), [`GzipDecoder`](?search=GzipDecoder)" )] #![cfg_attr( not(feature = "gzip"), doc = "`gzip` (*inactive*) | `GzipEncoder`, `GzipDecoder`" )] #![cfg_attr( feature = "lzma", doc = "`lzma` | [`LzmaEncoder`](?search=LzmaEncoder), [`LzmaDecoder`](?search=LzmaDecoder)" )] #![cfg_attr( not(feature = "lzma"), doc = "`lzma` (*inactive*) | `LzmaEncoder`, `LzmaDecoder`" )] #![cfg_attr( feature = "xz", doc = "`xz` | [`XzEncoder`](?search=XzEncoder), [`XzDecoder`](?search=XzDecoder)" )] #![cfg_attr( not(feature = "xz"), doc = "`xz` (*inactive*) | `XzEncoder`, `XzDecoder`" )] #![cfg_attr( feature = "zlib", doc = "`zlib` | [`ZlibEncoder`](?search=ZlibEncoder), [`ZlibDecoder`](?search=ZlibDecoder)" )] #![cfg_attr( not(feature = "zlib"), doc = "`zlib` (*inactive*) | `ZlibEncoder`, `ZlibDecoder`" )] #![cfg_attr( feature = "zstd", doc = "`zstd` | [`ZstdEncoder`](?search=ZstdEncoder), [`ZstdDecoder`](?search=ZstdDecoder)" )] #![cfg_attr( not(feature = "zstd"), doc = "`zstd` (*inactive*) | `ZstdEncoder`, `ZstdDecoder`" )] //! #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![warn( missing_docs, rust_2018_idioms, missing_copy_implementations, missing_debug_implementations )] #![cfg_attr(not(all), allow(unused))] #[cfg(any(feature = "bzip2", feature = "flate2", feature = "xz2"))] use std::convert::TryInto; #[macro_use] mod macros; mod codec; #[cfg(feature = "futures-io")] pub mod futures; #[cfg(feature = "tokio")] pub mod tokio; mod unshared; mod util; #[cfg(feature = "brotli")] use brotli::enc::backward_references::BrotliEncoderParams; /// Level of compression data should be compressed with. #[non_exhaustive] #[derive(Clone, Copy, Debug)] pub enum Level { /// Fastest quality of compression, usually produces bigger size. Fastest, /// Best quality of compression, usually produces the smallest size. Best, /// Default quality of compression defined by the selected compression algorithm. Default, /// Precise quality based on the underlying compression algorithms' /// qualities. The interpretation of this depends on the algorithm chosen /// and the specific implementation backing it. /// Qualities are implicitly clamped to the algorithm's maximum. Precise(i32), } impl Level { #[cfg(feature = "brotli")] fn into_brotli(self, mut params: BrotliEncoderParams) -> BrotliEncoderParams { match self { Self::Fastest => params.quality = 0, Self::Best => params.quality = 11, Self::Precise(quality) => params.quality = quality.clamp(0, 11), Self::Default => (), } params } #[cfg(feature = "bzip2")] fn into_bzip2(self) -> bzip2::Compression { let fastest = bzip2::Compression::fast(); let best = bzip2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => bzip2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => bzip2::Compression::default(), } } #[cfg(feature = "flate2")] fn into_flate2(self) -> flate2::Compression { let fastest = flate2::Compression::fast(); let best = flate2::Compression::best(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => flate2::Compression::new( quality .try_into() .unwrap_or(0) .clamp(fastest.level(), best.level()), ), Self::Default => flate2::Compression::default(), } } #[cfg(feature = "zstd")] fn into_zstd(self) -> i32 { let (fastest, best) = libzstd::compression_level_range().into_inner(); match self { Self::Fastest => fastest, Self::Best => best, Self::Precise(quality) => quality.clamp(fastest, best), Self::Default => libzstd::DEFAULT_COMPRESSION_LEVEL, } } #[cfg(feature = "xz2")] fn into_xz2(self) -> u32 { match self { Self::Fastest => 0, Self::Best => 9, Self::Precise(quality) => quality.try_into().unwrap_or(0).min(9), Self::Default => 5, } } } #[cfg(feature = "zstd")] /// This module contains zstd-specific types for async-compression. pub mod zstd { use libzstd::stream::raw::CParameter::*; /// A compression parameter for zstd. This is a stable wrapper around zstd's own `CParameter` /// type, to abstract over different versions of the zstd library. /// /// See the [zstd documentation](https://facebook.github.io/zstd/zstd_manual.html) for more /// information on these parameters. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CParameter(libzstd::stream::raw::CParameter); impl CParameter { /// Window size in bytes (as a power of two) pub fn window_log(value: u32) -> Self { Self(WindowLog(value)) } /// Size of the initial probe table in 4-byte entries (as a power of two) pub fn hash_log(value: u32) -> Self { Self(HashLog(value)) } /// Size of the multi-probe table in 4-byte entries (as a power of two) pub fn chain_log(value: u32) -> Self { Self(ChainLog(value)) } /// Number of search attempts (as a power of two) pub fn search_log(value: u32) -> Self { Self(SearchLog(value)) } /// Minimum size of matches searched for pub fn min_match(value: u32) -> Self { Self(MinMatch(value)) } /// Strategy-dependent length modifier pub fn target_length(value: u32) -> Self { Self(TargetLength(value)) } /// Enable long-distance matching mode to look for and emit long-distance references. /// /// This increases the default window size. pub fn enable_long_distance_matching(value: bool) -> Self { Self(EnableLongDistanceMatching(value)) } /// Size of the long-distance matching table (as a power of two) pub fn ldm_hash_log(value: u32) -> Self { Self(LdmHashLog(value)) } /// Minimum size of long-distance matches searched for pub fn ldm_min_match(value: u32) -> Self { Self(LdmMinMatch(value)) } /// Size of each bucket in the LDM hash table for collision resolution (as a power of two) pub fn ldm_bucket_size_log(value: u32) -> Self { Self(LdmBucketSizeLog(value)) } /// Frequency of using the LDM hash table (as a power of two) pub fn ldm_hash_rate_log(value: u32) -> Self { Self(LdmHashRateLog(value)) } /// Emit the size of the content (default: true). pub fn content_size_flag(value: bool) -> Self { Self(ContentSizeFlag(value)) } /// Emit a checksum (default: false). pub fn checksum_flag(value: bool) -> Self { Self(ChecksumFlag(value)) } /// Emit a dictionary ID when using a custom dictionary (default: true). pub fn dict_id_flag(value: bool) -> Self { Self(DictIdFlag(value)) } /// Number of threads to spawn. /// /// If set to 0, compression functions will block; if set to 1 or more, compression will /// run in background threads and `flush` pushes bytes through the compressor. pub fn nb_workers(value: u32) -> Self { Self(NbWorkers(value)) } /// Number of bytes given to each worker. /// /// If set to 0, zstd selects a job size based on compression parameters. pub fn job_size(value: u32) -> Self { Self(JobSize(value)) } pub(crate) fn as_zstd(&self) -> libzstd::stream::raw::CParameter { self.0 } } } async-compression-0.4.0/src/macros.rs000064400000000000000000000112461046102023000157260ustar 00000000000000macro_rules! algos { (@algo $algo:ident [$algo_s:expr] $decoder:ident $encoder:ident<$inner:ident> $({ $($constructor:tt)* })*) => { #[cfg(feature = $algo_s)] decoder! { #[doc = concat!("A ", $algo_s, " decoder, or decompressor")] #[cfg(feature = $algo_s)] $decoder } #[cfg(feature = $algo_s)] encoder! { #[doc = concat!("A ", $algo_s, " encoder, or compressor.")] #[cfg(feature = $algo_s)] $encoder<$inner> { pub fn new(inner: $inner) -> Self { Self::with_quality(inner, crate::Level::Default) } } $({ $($constructor)* })* } }; ($($mod:ident)::+<$inner:ident>) => { algos!(@algo brotli ["brotli"] BrotliDecoder BrotliEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { let params = brotli::enc::backward_references::BrotliEncoderParams::default(); Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::BrotliEncoder::new(level.into_brotli(params)), ), } } }); algos!(@algo bzip2 ["bzip2"] BzDecoder BzEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::BzEncoder::new(level.into_bzip2(), 0), ), } } }); algos!(@algo deflate ["deflate"] DeflateDecoder DeflateEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::DeflateEncoder::new(level.into_flate2()), ), } } }); algos!(@algo gzip ["gzip"] GzipDecoder GzipEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::GzipEncoder::new(level.into_flate2()), ), } } }); algos!(@algo zlib ["zlib"] ZlibDecoder ZlibEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZlibEncoder::new(level.into_flate2()), ), } } }); algos!(@algo zstd ["zstd"] ZstdDecoder ZstdEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZstdEncoder::new(level.into_zstd()), ), } } /// Creates a new encoder, using the specified compression level and parameters, which /// will read uncompressed data from the given stream and emit a compressed stream. pub fn with_quality_and_params(inner: $inner, level: crate::Level, params: &[crate::zstd::CParameter]) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::ZstdEncoder::new_with_params(level.into_zstd(), params), ), } } }); algos!(@algo xz ["xz"] XzDecoder XzEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::XzEncoder::new(level.into_xz2()), ), } } }); algos!(@algo lzma ["lzma"] LzmaDecoder LzmaEncoder<$inner> { pub fn with_quality(inner: $inner, level: crate::Level) -> Self { Self { inner: crate::$($mod::)+generic::Encoder::new( inner, crate::codec::LzmaEncoder::new(level.into_xz2()), ), } } }); } } async-compression-0.4.0/src/tokio/bufread/generic/decoder.rs000064400000000000000000000076401046102023000222230ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Decode, util::PartialBuffer}; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; #[derive(Debug)] enum State { Decoding, Flushing, Done, Next, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] reader: R, decoder: D, state: State, multiple_members: bool, } } impl Decoder { pub fn new(reader: R, decoder: D) -> Self { Self { reader, decoder, state: State::Decoding, multiple_members: false, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } pub fn multiple_members(&mut self, enabled: bool) { self.multiple_members = enabled; } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Decoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { // Avoid attempting to reinitialise the decoder if the reader // has returned EOF. *this.multiple_members = false; State::Flushing } else { let mut input = PartialBuffer::new(input); let done = this.decoder.decode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); if done { State::Flushing } else { State::Decoding } } } State::Flushing => { if this.decoder.finish(output)? { if *this.multiple_members { this.decoder.reinit()?; State::Next } else { State::Done } } else { State::Flushing } } State::Done => State::Done, State::Next => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Done } else { State::Decoding } } }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Decoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } let mut output = PartialBuffer::new(buf.initialize_unfilled()); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => { let len = output.written().len(); buf.advance(len); Poll::Ready(Ok(())) } } } } async-compression-0.4.0/src/tokio/bufread/generic/encoder.rs000064400000000000000000000056311046102023000222330ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{codec::Encode, util::PartialBuffer}; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; #[derive(Debug)] enum State { Encoding, Flushing, Done, } pin_project! { #[derive(Debug)] pub struct Encoder { #[pin] reader: R, encoder: E, state: State, } } impl Encoder { pub fn new(reader: R, encoder: E) -> Self { Self { reader, encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &R { &self.reader } pub fn get_mut(&mut self) -> &mut R { &mut self.reader } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { self.project().reader } pub fn into_inner(self) -> R { self.reader } fn do_poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, output: &mut PartialBuffer<&mut [u8]>, ) -> Poll> { let mut this = self.project(); loop { *this.state = match this.state { State::Encoding => { let input = ready!(this.reader.as_mut().poll_fill_buf(cx))?; if input.is_empty() { State::Flushing } else { let mut input = PartialBuffer::new(input); this.encoder.encode(&mut input, output)?; let len = input.written().len(); this.reader.as_mut().consume(len); State::Encoding } } State::Flushing => { if this.encoder.finish(output)? { State::Done } else { State::Flushing } } State::Done => State::Done, }; if let State::Done = *this.state { return Poll::Ready(Ok(())); } if output.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } } impl AsyncRead for Encoder { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if buf.remaining() == 0 { return Poll::Ready(Ok(())); } let mut output = PartialBuffer::new(buf.initialize_unfilled()); match self.do_poll_read(cx, &mut output)? { Poll::Pending if output.written().is_empty() => Poll::Pending, _ => { let len = output.written().len(); buf.advance(len); Poll::Ready(Ok(())) } } } } async-compression-0.4.0/src/tokio/bufread/generic/mod.rs000064400000000000000000000001171046102023000213650ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.0/src/tokio/bufread/macros/decoder.rs000064400000000000000000000066261046102023000220760ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will /// read compressed data from an underlying stream and emit a stream of uncompressed data. #[derive(Debug)] pub struct $name { #[pin] inner: crate::tokio::bufread::Decoder, } } impl $name { /// Creates a new decoder which will read compressed data from the given stream and /// emit a uncompressed stream. pub fn new(read: R) -> $name { $name { inner: crate::tokio::bufread::Decoder::new(read, crate::codec::$name::new()), } } /// Configure multi-member/frame decoding, if enabled this will reset the decoder state /// when reaching the end of a compressed member/frame and expect either EOF or another /// compressed member/frame to follow it in the stream. pub fn multiple_members(&mut self, enabled: bool) { self.inner.multiple_members(enabled); } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &R { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut R { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut R> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> R { self.inner.into_inner() } } impl tokio::io::AsyncRead for $name { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/tokio/bufread/macros/encoder.rs000064400000000000000000000057641046102023000221120ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($constructor:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncRead`](tokio::io::AsyncRead) interface and will /// read uncompressed data from an underlying stream and emit a stream of compressed data. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::bufread::Encoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncBufRead> $name<$inner> { $( /// Creates a new encoder which will read uncompressed data from the given stream /// and emit a compressed stream. /// $($constructor)* )* /// Acquires a reference to the underlying reader that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying reader. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncBufRead> tokio::io::AsyncRead for $name<$inner> { fn poll_read( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { self.project().inner.poll_read(cx, buf) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncBufRead; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/tokio/bufread/macros/mod.rs000064400000000000000000000000641046102023000212360ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.0/src/tokio/bufread/mod.rs000064400000000000000000000003661046102023000177570ustar 00000000000000//! Types which operate over [`AsyncBufRead`](::tokio::io::AsyncBufRead) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; pub(crate) use generic::{Decoder, Encoder}; algos!(tokio::bufread); async-compression-0.4.0/src/tokio/mod.rs000064400000000000000000000001501046102023000163360ustar 00000000000000//! Implementations for IO traits exported by [`tokio` v1.x](::tokio). pub mod bufread; pub mod write; async-compression-0.4.0/src/tokio/write/buf_write.rs000064400000000000000000000025361046102023000207110ustar 00000000000000use std::{ io, pin::Pin, task::{Context, Poll}, }; pub(crate) trait AsyncBufWrite { /// Attempt to return an internal buffer to write to, flushing data out to the inner reader if /// it is full. /// /// On success, returns `Poll::Ready(Ok(buf))`. /// /// If the buffer is full and cannot be flushed, the method returns `Poll::Pending` and /// arranges for the current task context (`cx`) to receive a notification when the object /// becomes readable or is closed. fn poll_partial_flush_buf( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>; /// Tells this buffer that `amt` bytes have been written to its buffer, so they should be /// written out to the underlying IO when possible. /// /// This function is a lower-level call. It needs to be paired with the `poll_flush_buf` method to /// function properly. This function does not perform any I/O, it simply informs this object /// that some amount of its buffer, returned from `poll_flush_buf`, has been written to and should /// be sent. As such, this function may do odd things if `poll_flush_buf` isn't /// called before calling it. /// /// The `amt` must be `<=` the number of bytes in the buffer returned by `poll_flush_buf`. fn produce(self: Pin<&mut Self>, amt: usize); } async-compression-0.4.0/src/tokio/write/buf_writer.rs000064400000000000000000000144101046102023000210650ustar 00000000000000// Originally sourced from `futures_util::io::buf_writer`, needs to be redefined locally so that // the `AsyncBufWrite` impl can access its internals, and changed a bit to make it more efficient // with those methods. use super::AsyncBufWrite; use futures_core::ready; use pin_project_lite::pin_project; use std::{ cmp::min, fmt, io, pin::Pin, task::{Context, Poll}, }; use tokio::io::AsyncWrite; const DEFAULT_BUF_SIZE: usize = 8192; pin_project! { pub struct BufWriter { #[pin] inner: W, buf: Box<[u8]>, written: usize, buffered: usize, } } impl BufWriter { /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. pub fn new(inner: W) -> Self { Self::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter` with the specified buffer capacity. pub fn with_capacity(cap: usize, inner: W) -> Self { Self { inner, buf: vec![0; cap].into(), written: 0, buffered: 0, } } fn partial_flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered]) { Poll::Pending => { break; } Poll::Ready(Ok(0)) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Poll::Ready(Ok(n)) => *this.written += n, Poll::Ready(Err(e)) => { ret = Err(e); break; } } } if *this.written > 0 { this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } else if *this.buffered == 0 { Poll::Ready(ret) } else { ret?; Poll::Pending } } fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); let mut ret = Ok(()); while *this.written < *this.buffered { match ready!(this .inner .as_mut() .poll_write(cx, &this.buf[*this.written..*this.buffered])) { Ok(0) => { ret = Err(io::Error::new( io::ErrorKind::WriteZero, "failed to write the buffered data", )); break; } Ok(n) => *this.written += n, Err(e) => { ret = Err(e); break; } } } this.buf.copy_within(*this.written..*this.buffered, 0); *this.buffered -= *this.written; *this.written = 0; Poll::Ready(ret) } /// Gets a reference to the underlying writer. pub fn get_ref(&self) -> &W { &self.inner } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_mut(&mut self) -> &mut W { &mut self.inner } /// Gets a pinned mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().inner } /// Consumes this `BufWriter`, returning the underlying writer. /// /// Note that any leftover data in the internal buffer is lost. pub fn into_inner(self) -> W { self.inner } } impl AsyncWrite for BufWriter { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { let this = self.as_mut().project(); if *this.buffered + buf.len() > this.buf.len() { ready!(self.as_mut().partial_flush_buf(cx))?; } let this = self.as_mut().project(); if buf.len() >= this.buf.len() { if *this.buffered == 0 { this.inner.poll_write(cx, buf) } else { // The only way that `partial_flush_buf` would have returned with // `this.buffered != 0` is if it were Pending, so our waker was already queued Poll::Pending } } else { let len = min(this.buf.len() - *this.buffered, buf.len()); this.buf[*this.buffered..*this.buffered + len].copy_from_slice(&buf[..len]); *this.buffered += len; Poll::Ready(Ok(len)) } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().flush_buf(cx))?; self.project().inner.poll_shutdown(cx) } } impl AsyncBufWrite for BufWriter { fn poll_partial_flush_buf( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { ready!(self.as_mut().partial_flush_buf(cx))?; let this = self.project(); Poll::Ready(Ok(&mut this.buf[*this.buffered..])) } fn produce(self: Pin<&mut Self>, amt: usize) { *self.project().buffered += amt; } } impl fmt::Debug for BufWriter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BufWriter") .field("writer", &self.inner) .field( "buffer", &format_args!("{}/{}", self.buffered, self.buf.len()), ) .field("written", &self.written) .finish() } } async-compression-0.4.0/src/tokio/write/generic/decoder.rs000064400000000000000000000113471046102023000217440ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::{Error, ErrorKind, Result}; use crate::{ codec::Decode, tokio::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::AsyncWrite; #[derive(Debug)] enum State { Decoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Decoder { #[pin] writer: BufWriter, decoder: D, state: State, } } impl Decoder { pub fn new(writer: W, decoder: D) -> Self { Self { writer: BufWriter::new(writer), decoder, state: State::Decoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Decoding => { if this.decoder.decode(input, &mut output)? { State::Finishing } else { State::Decoding } } State::Finishing => { if this.decoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => panic!("Write after end of stream"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let (state, done) = match this.state { State::Decoding => { let done = this.decoder.flush(&mut output)?; (State::Decoding, done) } State::Finishing => { if this.decoder.finish(&mut output)? { (State::Done, false) } else { (State::Finishing, false) } } State::Done => (State::Done, true), }; *this.state = state; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Decoder { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let State::Decoding = self.as_mut().project().state { *self.as_mut().project().state = State::Finishing; } ready!(self.as_mut().do_poll_flush(cx))?; if let State::Done = self.as_mut().project().state { ready!(self.as_mut().project().writer.as_mut().poll_shutdown(cx))?; Poll::Ready(Ok(())) } else { Poll::Ready(Err(Error::new( ErrorKind::Other, "Attempt to shutdown before finishing input", ))) } } } async-compression-0.4.0/src/tokio/write/generic/encoder.rs000064400000000000000000000106221046102023000217510ustar 00000000000000use core::{ pin::Pin, task::{Context, Poll}, }; use std::io::Result; use crate::{ codec::Encode, tokio::write::{AsyncBufWrite, BufWriter}, util::PartialBuffer, }; use futures_core::ready; use pin_project_lite::pin_project; use tokio::io::AsyncWrite; #[derive(Debug)] enum State { Encoding, Finishing, Done, } pin_project! { #[derive(Debug)] pub struct Encoder { #[pin] writer: BufWriter, encoder: E, state: State, } } impl Encoder { pub fn new(writer: W, encoder: E) -> Self { Self { writer: BufWriter::new(writer), encoder, state: State::Encoding, } } pub fn get_ref(&self) -> &W { self.writer.get_ref() } pub fn get_mut(&mut self) -> &mut W { self.writer.get_mut() } pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { self.project().writer.get_pin_mut() } pub fn into_inner(self) -> W { self.writer.into_inner() } fn do_poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, input: &mut PartialBuffer<&[u8]>, ) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding => { this.encoder.encode(input, &mut output)?; State::Encoding } State::Finishing | State::Done => panic!("Write after shutdown"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if input.unwritten().is_empty() { return Poll::Ready(Ok(())); } } } fn do_poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); let done = match this.state { State::Encoding => this.encoder.flush(&mut output)?, State::Finishing | State::Done => panic!("Flush after shutdown"), }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if done { return Poll::Ready(Ok(())); } } } fn do_poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); loop { let output = ready!(this.writer.as_mut().poll_partial_flush_buf(cx))?; let mut output = PartialBuffer::new(output); *this.state = match this.state { State::Encoding | State::Finishing => { if this.encoder.finish(&mut output)? { State::Done } else { State::Finishing } } State::Done => State::Done, }; let produced = output.written().len(); this.writer.as_mut().produce(produced); if let State::Done = this.state { return Poll::Ready(Ok(())); } } } } impl AsyncWrite for Encoder { fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } let mut input = PartialBuffer::new(buf); match self.do_poll_write(cx, &mut input)? { Poll::Pending if input.written().is_empty() => Poll::Pending, _ => Poll::Ready(Ok(input.written().len())), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_flush(cx))?; ready!(self.project().writer.as_mut().poll_flush(cx))?; Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.as_mut().do_poll_shutdown(cx))?; ready!(self.project().writer.as_mut().poll_shutdown(cx))?; Poll::Ready(Ok(())) } } async-compression-0.4.0/src/tokio/write/generic/mod.rs000064400000000000000000000001171046102023000211070ustar 00000000000000mod decoder; mod encoder; pub use self::{decoder::Decoder, encoder::Encoder}; async-compression-0.4.0/src/tokio/write/macros/decoder.rs000064400000000000000000000067151046102023000216170ustar 00000000000000macro_rules! decoder { ($(#[$attr:meta])* $name:ident) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will /// take in compressed data and write it uncompressed to an underlying stream. #[derive(Debug)] pub struct $name { #[pin] inner: crate::tokio::write::Decoder, } } impl $name { /// Creates a new decoder which will take in compressed data and write it uncompressed /// to the given stream. pub fn new(read: W) -> $name { $name { inner: crate::tokio::write::Decoder::new(read, crate::codec::$name::new()), } } /// Acquires a reference to the underlying reader that this decoder is wrapping. pub fn get_ref(&self) -> &W { self.inner.get_ref() } /// Acquires a mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying reader that this decoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the reader which /// may otherwise confuse this decoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut W> { self.project().inner.get_pin_mut() } /// Consumes this decoder returning the underlying reader. /// /// Note that this may discard internal state of this decoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> W { self.inner.into_inner() } } impl tokio::io::AsyncWrite for $name { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_shutdown( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_shutdown(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/tokio/write/macros/encoder.rs000064400000000000000000000066761046102023000216370ustar 00000000000000macro_rules! encoder { ($(#[$attr:meta])* $name:ident<$inner:ident> $({ $($constructor:tt)* })*) => { pin_project_lite::pin_project! { $(#[$attr])* /// /// This structure implements an [`AsyncWrite`](tokio::io::AsyncWrite) interface and will /// take in uncompressed data and write it compressed to an underlying stream. #[derive(Debug)] pub struct $name<$inner> { #[pin] inner: crate::tokio::write::Encoder<$inner, crate::codec::$name>, } } impl<$inner: tokio::io::AsyncWrite> $name<$inner> { $( /// Creates a new encoder which will take in uncompressed data and write it /// compressed to the given stream. /// $($constructor)* )* /// Acquires a reference to the underlying writer that this encoder is wrapping. pub fn get_ref(&self) -> &$inner { self.inner.get_ref() } /// Acquires a mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_mut(&mut self) -> &mut $inner { self.inner.get_mut() } /// Acquires a pinned mutable reference to the underlying writer that this encoder is /// wrapping. /// /// Note that care must be taken to avoid tampering with the state of the writer which /// may otherwise confuse this encoder. pub fn get_pin_mut(self: std::pin::Pin<&mut Self>) -> std::pin::Pin<&mut $inner> { self.project().inner.get_pin_mut() } /// Consumes this encoder returning the underlying writer. /// /// Note that this may discard internal state of this encoder, so care should be taken /// to avoid losing resources when this is called. pub fn into_inner(self) -> $inner { self.inner.into_inner() } } impl<$inner: tokio::io::AsyncWrite> tokio::io::AsyncWrite for $name<$inner> { fn poll_write( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { self.project().inner.poll_write(cx, buf) } fn poll_flush( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_flush(cx) } fn poll_shutdown( self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.project().inner.poll_shutdown(cx) } } const _: () = { fn _assert() { use crate::util::{_assert_send, _assert_sync}; use core::pin::Pin; use tokio::io::AsyncWrite; _assert_send::<$name>>>(); _assert_sync::<$name>>>(); } }; } } async-compression-0.4.0/src/tokio/write/macros/mod.rs000064400000000000000000000000641046102023000207600ustar 00000000000000#[macro_use] mod decoder; #[macro_use] mod encoder; async-compression-0.4.0/src/tokio/write/mod.rs000064400000000000000000000005131046102023000174730ustar 00000000000000//! Types which operate over [`AsyncWrite`](tokio::io::AsyncWrite) streams, both encoders and //! decoders for various formats. #[macro_use] mod macros; mod generic; mod buf_write; mod buf_writer; use self::{ buf_write::AsyncBufWrite, buf_writer::BufWriter, generic::{Decoder, Encoder}, }; algos!(tokio::write); async-compression-0.4.0/src/unshared.rs000064400000000000000000000022711046102023000162510ustar 00000000000000#![allow(dead_code)] // unused without any features use core::fmt::{self, Debug}; /// Wraps a type and only allows unique borrowing, the main use case is to wrap a `!Sync` type and /// implement `Sync` for it as this type blocks having multiple shared references to the inner /// value. /// /// # Safety /// /// We must be careful when accessing `inner`, there must be no way to create a shared reference to /// it from a shared reference to an `Unshared`, as that would allow creating shared references on /// multiple threads. /// /// As an example deriving or implementing `Clone` is impossible, two threads could attempt to /// clone a shared `Unshared` reference which would result in accessing the same inner value /// concurrently. pub struct Unshared { inner: T, } impl Unshared { pub fn new(inner: T) -> Self { Unshared { inner } } pub fn get_mut(&mut self) -> &mut T { &mut self.inner } } /// Safety: See comments on main docs for `Unshared` unsafe impl Sync for Unshared {} impl Debug for Unshared { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct(core::any::type_name::()).finish() } } async-compression-0.4.0/src/util.rs000064400000000000000000000027561046102023000154250ustar 00000000000000pub fn _assert_send() {} pub fn _assert_sync() {} #[derive(Debug, Default)] pub struct PartialBuffer> { buffer: B, index: usize, } impl> PartialBuffer { pub(crate) fn new(buffer: B) -> Self { Self { buffer, index: 0 } } pub(crate) fn written(&self) -> &[u8] { &self.buffer.as_ref()[..self.index] } pub(crate) fn unwritten(&self) -> &[u8] { &self.buffer.as_ref()[self.index..] } pub(crate) fn advance(&mut self, amount: usize) { self.index += amount; } pub(crate) fn get_mut(&mut self) -> &mut B { &mut self.buffer } pub(crate) fn into_inner(self) -> B { self.buffer } } impl + AsMut<[u8]>> PartialBuffer { pub(crate) fn unwritten_mut(&mut self) -> &mut [u8] { &mut self.buffer.as_mut()[self.index..] } pub(crate) fn copy_unwritten_from>(&mut self, other: &mut PartialBuffer) { let len = std::cmp::min(self.unwritten().len(), other.unwritten().len()); self.unwritten_mut()[..len].copy_from_slice(&other.unwritten()[..len]); self.advance(len); other.advance(len); } } impl + Default> PartialBuffer { pub(crate) fn take(&mut self) -> Self { std::mem::replace(self, Self::new(B::default())) } } impl + AsMut<[u8]>> From for PartialBuffer { fn from(buffer: B) -> Self { Self::new(buffer) } }