zip-0.6.6/.cargo_vcs_info.json0000644000000001360000000000100116470ustar { "git": { "sha1": "21a20584bc9e05dfa4f3c5b0bc420a1389fae2c3" }, "path_in_vcs": "" }zip-0.6.6/.github/dependabot.yml000064400000000000000000000001770072674642500146640ustar 00000000000000version: 2 updates: - package-ecosystem: cargo directory: "/" schedule: interval: daily open-pull-requests-limit: 10 zip-0.6.6/.github/workflows/ci.yaml000064400000000000000000000034250072674642500153470ustar 00000000000000name: CI on: pull_request: push: branches: - master env: RUSTFLAGS: -Dwarnings jobs: build_and_test: name: Build and test runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macOS-latest, windows-latest] rust: [stable, 1.59.0] steps: - uses: actions/checkout@master - name: Install ${{ matrix.rust }} uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.rust }} override: true - name: check uses: actions-rs/cargo@v1 with: command: check args: --all --bins --examples - name: tests uses: actions-rs/cargo@v1 with: command: test args: --all clippy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: nightly override: true components: clippy - name: clippy uses: actions-rs/cargo@v1 with: command: clippy args: --all-targets --all-features -- -D warnings check_fmt_and_docs: name: Checking fmt and docs runs-on: ubuntu-latest steps: - uses: actions/checkout@master - uses: actions-rs/toolchain@v1 with: toolchain: nightly components: rustfmt, clippy override: true - name: fmt run: cargo fmt --all -- --check - name: Docs run: cargo doc fuzz: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: nightly override: true - run: cargo install cargo-fuzz - name: compile fuzz run: | cargo fuzz build fuzz_read zip-0.6.6/.gitignore000064400000000000000000000000330072674642500124530ustar 00000000000000Cargo.lock target \.idea/ zip-0.6.6/CHANGELOG.md000064400000000000000000000012760072674642500123060ustar 00000000000000# Changelog ## [0.6.6] ### Changed - Updated `aes` dependency to `0.8.2` (https://github.com/zip-rs/zip/pull/354) ## [0.6.5] ### Changed - Added experimental [`zip::unstable::write::FileOptions::with_deprecated_encryption`] API to enable encrypting files with PKWARE encryption. ## [0.6.4] ### Changed - [#333](https://github.com/zip-rs/zip/pull/333): disabled the default features of the `time` dependency, and also `formatting` and `macros`, as they were enabled by mistake. - Deprecated [`DateTime::from_time`](https://docs.rs/zip/0.6/zip/struct.DateTime.html#method.from_time) in favor of [`DateTime::try_from`](https://docs.rs/zip/0.6/zip/struct.DateTime.html#impl-TryFrom-for-DateTime) zip-0.6.6/CODE_OF_CONDUCT.md000064400000000000000000000064520072674642500132750ustar 00000000000000# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at ryan.levick@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq zip-0.6.6/Cargo.lock0000644000000266770000000000100076440ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aes" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ "cfg-if", "cipher", "cpufeatures", ] [[package]] name = "base64ct" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bencher" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bzip2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cipher" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", ] [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cpufeatures" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-utils" version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "digest" version = "0.10.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer", "crypto-common", "subtle", ] [[package]] name = "flate2" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", "miniz_oxide", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ "digest", ] [[package]] name = "inout" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ "generic-array", ] [[package]] name = "itoa" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] [[package]] name = "libc" version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libz-sys" version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", "vcpkg", ] [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "password-hash" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", "rand_core", "subtle", ] [[package]] name = "pbkdf2" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest", "hmac", "password-hash", "sha2", ] [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name 
= "serde" version = "1.0.162" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" [[package]] name = "sha1" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "subtle" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "time" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "zip" version = "0.6.6" dependencies = [ "aes", "bencher", "byteorder", "bzip2", 
"constant_time_eq", "crc32fast", "crossbeam-utils", "flate2", "getrandom", "hmac", "pbkdf2", "sha1", "time", "walkdir", "zstd", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", "pkg-config", ] zip-0.6.6/Cargo.toml0000644000000045400000000000100076500ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.59.0" name = "zip" version = "0.6.6" authors = [ "Mathijs van de Nes ", "Marli Frost ", "Ryan Levick ", ] description = """ Library to support the reading and writing of zip files. """ readme = "README.md" keywords = [ "zip", "archive", ] license = "MIT" repository = "https://github.com/zip-rs/zip.git" [[bench]] name = "read_entry" harness = false [[bench]] name = "read_metadata" harness = false [dependencies.aes] version = "0.8.2" optional = true [dependencies.byteorder] version = "1.4.3" [dependencies.bzip2] version = "0.4.3" optional = true [dependencies.constant_time_eq] version = "0.1.5" optional = true [dependencies.crc32fast] version = "1.3.2" [dependencies.flate2] version = "1.0.23" optional = true default-features = false [dependencies.hmac] version = "0.12.1" features = ["reset"] optional = true [dependencies.pbkdf2] version = "0.11.0" optional = true [dependencies.sha1] version = "0.10.1" optional = true [dependencies.time] version = "0.3.7" features = ["std"] optional = true default-features = false [dependencies.zstd] version = "0.11.2" optional = true [dev-dependencies.bencher] version = "0.1.5" [dev-dependencies.getrandom] version = "0.2.5" [dev-dependencies.time] version = "0.3.7" features = [ "formatting", "macros", ] [dev-dependencies.walkdir] version = "2.3.2" [features] aes-crypto = [ "aes", "constant_time_eq", "hmac", "pbkdf2", "sha1", ] default = [ "aes-crypto", "bzip2", "deflate", "time", "zstd", ] deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] unreserved = [] [target."cfg(any(all(target_arch = \"arm\", target_pointer_width = \"32\"), target_arch = \"mips\", target_arch = \"powerpc\"))".dependencies.crossbeam-utils] version = "0.8.8" zip-0.6.6/Cargo.toml.orig0000644000000031330000000000100106040ustar [package] name = "zip" version = "0.6.6" authors = ["Mathijs van de Nes ", "Marli Frost ", "Ryan Levick "] license = "MIT" repository = "https://github.com/zip-rs/zip.git" keywords = ["zip", "archive"] description = """ Library to support the reading and writing of zip files. 
""" edition = "2021" rust-version = "1.59.0" [dependencies] aes = { version = "0.8.2", optional = true } byteorder = "1.4.3" bzip2 = { version = "0.4.3", optional = true } constant_time_eq = { version = "0.1.5", optional = true } crc32fast = "1.3.2" flate2 = { version = "1.0.23", default-features = false, optional = true } hmac = { version = "0.12.1", optional = true, features = ["reset"] } pbkdf2 = {version = "0.11.0", optional = true } sha1 = {version = "0.10.1", optional = true } time = { version = "0.3.7", optional = true, default-features = false, features = ["std"] } zstd = { version = "0.11.2", optional = true } [target.'cfg(any(all(target_arch = "arm", target_pointer_width = "32"), target_arch = "mips", target_arch = "powerpc"))'.dependencies] crossbeam-utils = "0.8.8" [dev-dependencies] bencher = "0.1.5" getrandom = "0.2.5" walkdir = "2.3.2" time = { version = "0.3.7", features = ["formatting", "macros"] } [features] aes-crypto = [ "aes", "constant_time_eq", "hmac", "pbkdf2", "sha1" ] deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] unreserved = [] default = ["aes-crypto", "bzip2", "deflate", "time", "zstd"] [[bench]] name = "read_entry" harness = false [[bench]] name = "read_metadata" harness = false zip-0.6.6/Cargo.toml.orig000064400000000000000000000031330072674642500133560ustar 00000000000000[package] name = "zip" version = "0.6.6" authors = ["Mathijs van de Nes ", "Marli Frost ", "Ryan Levick "] license = "MIT" repository = "https://github.com/zip-rs/zip.git" keywords = ["zip", "archive"] description = """ Library to support the reading and writing of zip files. """ edition = "2021" rust-version = "1.59.0" [dependencies] aes = { version = "0.8.2", optional = true } byteorder = "1.4.3" bzip2 = { version = "0.4.3", optional = true } constant_time_eq = { version = "0.1.5", optional = true } crc32fast = "1.3.2" flate2 = { version = "1.0.23", default-features = false, optional = true } hmac = { version = "0.12.1", optional = true, features = ["reset"] } pbkdf2 = {version = "0.11.0", optional = true } sha1 = {version = "0.10.1", optional = true } time = { version = "0.3.7", optional = true, default-features = false, features = ["std"] } zstd = { version = "0.11.2", optional = true } [target.'cfg(any(all(target_arch = "arm", target_pointer_width = "32"), target_arch = "mips", target_arch = "powerpc"))'.dependencies] crossbeam-utils = "0.8.8" [dev-dependencies] bencher = "0.1.5" getrandom = "0.2.5" walkdir = "2.3.2" time = { version = "0.3.7", features = ["formatting", "macros"] } [features] aes-crypto = [ "aes", "constant_time_eq", "hmac", "pbkdf2", "sha1" ] deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] unreserved = [] default = ["aes-crypto", "bzip2", "deflate", "time", "zstd"] [[bench]] name = "read_entry" harness = false [[bench]] name = "read_metadata" harness = false zip-0.6.6/LICENSE000064400000000000000000000020740072674642500114770ustar 00000000000000The MIT License (MIT) Copyright (c) 2014 Mathijs van de Nes Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this 
permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.zip-0.6.6/README.md000064400000000000000000000044560072674642500117570ustar 00000000000000zip-rs ====== [![Build Status](https://img.shields.io/github/workflow/status/zip-rs/zip/CI)](https://github.com/zip-rs/zip/actions?query=branch%3Amaster+workflow%3ACI) [![Crates.io version](https://img.shields.io/crates/v/zip.svg)](https://crates.io/crates/zip) [![Discord](https://badgen.net/badge/icon/discord?icon=discord&label)](https://discord.gg/rQ7H9cSsF4) [Documentation](https://docs.rs/zip/0.6.3/zip/) Info ---- A zip library for rust which supports reading and writing of simple ZIP files. Supported compression formats: * stored (i.e. none) * deflate * bzip2 * zstd Currently unsupported zip extensions: * Encryption * Multi-disk Usage ----- With all default features: ```toml [dependencies] zip = "0.6" ``` Without the default features: ```toml [dependencies] zip = { version = "0.6.6", default-features = false } ``` The features available are: * `aes-crypto`: Enables decryption of files which were encrypted with AES. Supports AE-1 and AE-2 methods. * `deflate`: Enables the deflate compression algorithm, which is the default for zip files. * `bzip2`: Enables the BZip2 compression algorithm. * `time`: Enables features using the [time](https://github.com/rust-lang-deprecated/time) crate. * `zstd`: Enables the Zstandard compression algorithm. All of these are enabled by default. MSRV ---- Our current Minimum Supported Rust Version is **1.59.0**. When adding features, we will follow these guidelines: - We will always support the latest four minor Rust versions. This gives you a 6 month window to upgrade your compiler. - Any change to the MSRV will be accompanied with a **minor** version bump - While the crate is pre-1.0, this will be a change to the PATCH version. Examples -------- See the [examples directory](examples) for: * How to write a file to a zip. * How to write a directory of files to a zip (using [walkdir](https://github.com/BurntSushi/walkdir)). * How to extract a zip file. * How to extract a single file from a zip. * How to read a zip from the standard input. Fuzzing ------- Fuzzing support is through [cargo fuzz](https://github.com/rust-fuzz/cargo-fuzz). To install cargo fuzz: ```bash cargo install cargo-fuzz ``` To list fuzz targets: ```bash cargo +nightly fuzz list ``` To start fuzzing zip extraction: ```bash cargo +nightly fuzz run fuzz_read ``` zip-0.6.6/src/aes.rs000064400000000000000000000160450072674642500124020ustar 00000000000000//! Implementation of the AES decryption for zip files. //! //! This was implemented according to the [WinZip specification](https://www.winzip.com/win/en/aes_info.html). //! Note that using CRC with AES depends on the used encryption specification, AE-1 or AE-2. //! If the file is marked as encrypted with AE-2 the CRC field is ignored, even if it isn't set to 0. 
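//!
//! On disk, an AES-encrypted entry is laid out as: a salt whose length depends on the AES
//! key size, a 2-byte password verification value, the encrypted file data, and a trailing
//! 10-byte authentication code. The length of the encrypted payload is therefore
//! `compressed_size - (salt_length + 2 + 10)`, which is what `AesReader::new` computes below.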
use crate::aes_ctr; use crate::types::AesMode; use constant_time_eq::constant_time_eq; use hmac::{Hmac, Mac}; use sha1::Sha1; use std::io::{self, Read}; /// The length of the password verifcation value in bytes const PWD_VERIFY_LENGTH: usize = 2; /// The length of the authentication code in bytes const AUTH_CODE_LENGTH: usize = 10; /// The number of iterations used with PBKDF2 const ITERATION_COUNT: u32 = 1000; /// Create a AesCipher depending on the used `AesMode` and the given `key`. /// /// # Panics /// /// This panics if `key` doesn't have the correct size for the chosen aes mode. fn cipher_from_mode(aes_mode: AesMode, key: &[u8]) -> Box { match aes_mode { AesMode::Aes128 => Box::new(aes_ctr::AesCtrZipKeyStream::::new(key)) as Box, AesMode::Aes192 => Box::new(aes_ctr::AesCtrZipKeyStream::::new(key)) as Box, AesMode::Aes256 => Box::new(aes_ctr::AesCtrZipKeyStream::::new(key)) as Box, } } // An aes encrypted file starts with a salt, whose length depends on the used aes mode // followed by a 2 byte password verification value // then the variable length encrypted data // and lastly a 10 byte authentication code pub struct AesReader { reader: R, aes_mode: AesMode, data_length: u64, } impl AesReader { pub fn new(reader: R, aes_mode: AesMode, compressed_size: u64) -> AesReader { let data_length = compressed_size - (PWD_VERIFY_LENGTH + AUTH_CODE_LENGTH + aes_mode.salt_length()) as u64; Self { reader, aes_mode, data_length, } } /// Read the AES header bytes and validate the password. /// /// Even if the validation succeeds, there is still a 1 in 65536 chance that an incorrect /// password was provided. /// It isn't possible to check the authentication code in this step. This will be done after /// reading and decrypting the file. /// /// # Returns /// /// If the password verification failed `Ok(None)` will be returned to match the validate /// method of ZipCryptoReader. pub fn validate(mut self, password: &[u8]) -> io::Result>> { let salt_length = self.aes_mode.salt_length(); let key_length = self.aes_mode.key_length(); let mut salt = vec![0; salt_length]; self.reader.read_exact(&mut salt)?; // next are 2 bytes used for password verification let mut pwd_verification_value = vec![0; PWD_VERIFY_LENGTH]; self.reader.read_exact(&mut pwd_verification_value)?; // derive a key from the password and salt // the length depends on the aes key length let derived_key_len = 2 * key_length + PWD_VERIFY_LENGTH; let mut derived_key: Vec = vec![0; derived_key_len]; // use PBKDF2 with HMAC-Sha1 to derive the key pbkdf2::pbkdf2::>(password, &salt, ITERATION_COUNT, &mut derived_key); let decrypt_key = &derived_key[0..key_length]; let hmac_key = &derived_key[key_length..key_length * 2]; let pwd_verify = &derived_key[derived_key_len - 2..]; // the last 2 bytes should equal the password verification value if pwd_verification_value != pwd_verify { // wrong password return Ok(None); } let cipher = cipher_from_mode(self.aes_mode, decrypt_key); let hmac = Hmac::::new_from_slice(hmac_key).unwrap(); Ok(Some(AesReaderValid { reader: self.reader, data_remaining: self.data_length, cipher, hmac, finalized: false, })) } } /// A reader for aes encrypted files, which has already passed the first password check. /// /// There is a 1 in 65536 chance that an invalid password passes that check. /// After the data has been read and decrypted an HMAC will be checked and provide a final means /// to check if either the password is invalid or if the data has been changed. 
pub struct AesReaderValid { reader: R, data_remaining: u64, cipher: Box, hmac: Hmac, finalized: bool, } impl Read for AesReaderValid { /// This implementation does not fulfill all requirements set in the trait documentation. /// /// ```txt /// "If an error is returned then it must be guaranteed that no bytes were read." /// ``` /// /// Whether this applies to errors that occur while reading the encrypted data depends on the /// underlying reader. If the error occurs while verifying the HMAC, the reader might become /// practically unusable, since its position after the error is not known. fn read(&mut self, buf: &mut [u8]) -> io::Result { if self.data_remaining == 0 { return Ok(0); } // get the number of bytes to read, compare as u64 to make sure we can read more than // 2^32 bytes even on 32 bit systems. let bytes_to_read = self.data_remaining.min(buf.len() as u64) as usize; let read = self.reader.read(&mut buf[0..bytes_to_read])?; self.data_remaining -= read as u64; // Update the hmac with the encrypted data self.hmac.update(&buf[0..read]); // decrypt the data self.cipher.crypt_in_place(&mut buf[0..read]); // if there is no data left to read, check the integrity of the data if self.data_remaining == 0 { assert!( !self.finalized, "Tried to use an already finalized HMAC. This is a bug!" ); self.finalized = true; // Zip uses HMAC-Sha1-80, which only uses the first half of the hash // see https://www.winzip.com/win/en/aes_info.html#auth-faq let mut read_auth_code = [0; AUTH_CODE_LENGTH]; self.reader.read_exact(&mut read_auth_code)?; let computed_auth_code = &self.hmac.finalize_reset().into_bytes()[0..AUTH_CODE_LENGTH]; // use constant time comparison to mitigate timing attacks if !constant_time_eq(computed_auth_code, &read_auth_code) { return Err( io::Error::new( io::ErrorKind::InvalidData, "Invalid authentication code, this could be due to an invalid password or errors in the data" ) ); } } Ok(read) } } impl AesReaderValid { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> R { self.reader } } zip-0.6.6/src/aes_ctr.rs000064400000000000000000000223250072674642500132500ustar 00000000000000//! A counter mode (CTR) for AES to work with the encryption used in zip files. //! //! This was implemented since the zip specification requires the mode to not use a nonce and uses a //! different byte order (little endian) than NIST (big endian). //! See [AesCtrZipKeyStream] for more information. use aes::cipher::generic_array::GenericArray; // use aes::{BlockEncrypt, NewBlockCipher}; use aes::cipher::{BlockEncrypt, KeyInit}; use byteorder::WriteBytesExt; use std::{any, fmt}; /// Internal block size of an AES cipher. const AES_BLOCK_SIZE: usize = 16; /// AES-128. #[derive(Debug)] pub struct Aes128; /// AES-192 #[derive(Debug)] pub struct Aes192; /// AES-256. #[derive(Debug)] pub struct Aes256; /// An AES cipher kind. pub trait AesKind { /// Key type. type Key: AsRef<[u8]>; /// Cipher used to decrypt. type Cipher; } impl AesKind for Aes128 { type Key = [u8; 16]; type Cipher = aes::Aes128; } impl AesKind for Aes192 { type Key = [u8; 24]; type Cipher = aes::Aes192; } impl AesKind for Aes256 { type Key = [u8; 32]; type Cipher = aes::Aes256; } /// An AES-CTR key stream generator. /// /// Implements the slightly non-standard AES-CTR variant used by WinZip AES encryption. /// /// Typical AES-CTR implementations combine a nonce with a 64 bit counter. WinZIP AES instead uses /// no nonce and also uses a different byte order (little endian) than NIST (big endian). 
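///
/// Concretely, key stream block `i` (counting from 1) is the AES encryption of the 16-byte
/// little-endian encoding of `i` under the entry's AES key; see `crypt_in_place` below.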
/// /// The stream implements the `Read` trait; encryption or decryption is performed by XOR-ing the /// bytes from the key stream with the ciphertext/plaintext. pub struct AesCtrZipKeyStream { /// Current AES counter. counter: u128, /// AES cipher instance. cipher: C::Cipher, /// Stores the currently available keystream bytes. buffer: [u8; AES_BLOCK_SIZE], /// Number of bytes already used up from `buffer`. pos: usize, } impl fmt::Debug for AesCtrZipKeyStream where C: AesKind, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "AesCtrZipKeyStream<{}>(counter: {})", any::type_name::(), self.counter ) } } impl AesCtrZipKeyStream where C: AesKind, C::Cipher: KeyInit, { /// Creates a new zip variant AES-CTR key stream. /// /// # Panics /// /// This panics if `key` doesn't have the correct size for cipher `C`. pub fn new(key: &[u8]) -> AesCtrZipKeyStream { AesCtrZipKeyStream { counter: 1, cipher: C::Cipher::new(GenericArray::from_slice(key)), buffer: [0u8; AES_BLOCK_SIZE], pos: AES_BLOCK_SIZE, } } } impl AesCipher for AesCtrZipKeyStream where C: AesKind, C::Cipher: BlockEncrypt, { /// Decrypt or encrypt `target`. #[inline] fn crypt_in_place(&mut self, mut target: &mut [u8]) { while !target.is_empty() { if self.pos == AES_BLOCK_SIZE { // Note: AES block size is always 16 bytes, same as u128. self.buffer .as_mut() .write_u128::(self.counter) .expect("did not expect u128 le conversion to fail"); self.cipher .encrypt_block(GenericArray::from_mut_slice(&mut self.buffer)); self.counter += 1; self.pos = 0; } let target_len = target.len().min(AES_BLOCK_SIZE - self.pos); xor( &mut target[0..target_len], &self.buffer[self.pos..(self.pos + target_len)], ); target = &mut target[target_len..]; self.pos += target_len; } } } /// This trait allows using generic AES ciphers with different key sizes. pub trait AesCipher { fn crypt_in_place(&mut self, target: &mut [u8]); } /// XORs a slice in place with another slice. #[inline] fn xor(dest: &mut [u8], src: &[u8]) { assert_eq!(dest.len(), src.len()); for (lhs, rhs) in dest.iter_mut().zip(src.iter()) { *lhs ^= *rhs; } } #[cfg(test)] mod tests { use super::{Aes128, Aes192, Aes256, AesCipher, AesCtrZipKeyStream, AesKind}; use aes::cipher::{BlockEncrypt, KeyInit}; /// Checks whether `crypt_in_place` produces the correct plaintext after one use and yields the /// cipertext again after applying it again. fn roundtrip(key: &[u8], ciphertext: &mut [u8], expected_plaintext: &[u8]) where Aes: AesKind, Aes::Cipher: KeyInit + BlockEncrypt, { let mut key_stream = AesCtrZipKeyStream::::new(key); let mut plaintext: Vec = ciphertext.to_vec(); key_stream.crypt_in_place(plaintext.as_mut_slice()); assert_eq!(plaintext, expected_plaintext.to_vec()); // Round-tripping should yield the ciphertext again. let mut key_stream = AesCtrZipKeyStream::::new(key); key_stream.crypt_in_place(&mut plaintext); assert_eq!(plaintext, ciphertext.to_vec()); } #[test] #[should_panic] fn new_with_wrong_key_size() { AesCtrZipKeyStream::::new(&[1, 2, 3, 4, 5]); } // The data used in these tests was generated with p7zip without any compression. // It's not possible to recreate the exact same data, since a random salt is used for encryption. 
// `7z a -phelloworld -mem=AES256 -mx=0 aes256_40byte.zip 40byte_data.txt` #[test] fn crypt_aes_256_0_byte() { let mut ciphertext = []; let expected_plaintext = &[]; let key = [ 0x0b, 0xec, 0x2e, 0xf2, 0x46, 0xf0, 0x7e, 0x35, 0x16, 0x54, 0xe0, 0x98, 0x10, 0xb3, 0x18, 0x55, 0x24, 0xa3, 0x9e, 0x0e, 0x40, 0xe7, 0x92, 0xad, 0xb2, 0x8a, 0x48, 0xf4, 0x5c, 0xd0, 0xc0, 0x54, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_128_5_byte() { let mut ciphertext = [0x98, 0xa9, 0x8c, 0x26, 0x0e]; let expected_plaintext = b"asdf\n"; let key = [ 0xe0, 0x25, 0x7b, 0x57, 0x97, 0x6a, 0xa4, 0x23, 0xab, 0x94, 0xaa, 0x44, 0xfd, 0x47, 0x4f, 0xa5, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_192_5_byte() { let mut ciphertext = [0x36, 0x55, 0x5c, 0x61, 0x3c]; let expected_plaintext = b"asdf\n"; let key = [ 0xe4, 0x4a, 0x88, 0x52, 0x8f, 0xf7, 0x0b, 0x81, 0x7b, 0x75, 0xf1, 0x74, 0x21, 0x37, 0x8c, 0x90, 0xad, 0xbe, 0x4a, 0x65, 0xa8, 0x96, 0x0e, 0xcc, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_256_5_byte() { let mut ciphertext = [0xc2, 0x47, 0xc0, 0xdc, 0x56]; let expected_plaintext = b"asdf\n"; let key = [ 0x79, 0x5e, 0x17, 0xf2, 0xc6, 0x3d, 0x28, 0x9b, 0x4b, 0x4b, 0xbb, 0xa9, 0xba, 0xc9, 0xa5, 0xee, 0x3a, 0x4f, 0x0f, 0x4b, 0x29, 0xbd, 0xe9, 0xb8, 0x41, 0x9c, 0x41, 0xa5, 0x15, 0xb2, 0x86, 0xab, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_128_40_byte() { let mut ciphertext = [ 0xcf, 0x72, 0x6b, 0xa1, 0xb2, 0x0f, 0xdf, 0xaa, 0x10, 0xad, 0x9c, 0x7f, 0x6d, 0x1c, 0x8d, 0xb5, 0x16, 0x7e, 0xbb, 0x11, 0x69, 0x52, 0x8c, 0x89, 0x80, 0x32, 0xaa, 0x76, 0xa6, 0x18, 0x31, 0x98, 0xee, 0xdd, 0x22, 0x68, 0xb7, 0xe6, 0x77, 0xd2, ]; let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n"; let key = [ 0x43, 0x2b, 0x6d, 0xbe, 0x05, 0x76, 0x6c, 0x9e, 0xde, 0xca, 0x3b, 0xf8, 0xaf, 0x5d, 0x81, 0xb6, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_192_40_byte() { let mut ciphertext = [ 0xa6, 0xfc, 0x52, 0x79, 0x2c, 0x6c, 0xfe, 0x68, 0xb1, 0xa8, 0xb3, 0x07, 0x52, 0x8b, 0x82, 0xa6, 0x87, 0x9c, 0x72, 0x42, 0x3a, 0xf8, 0xc6, 0xa9, 0xc9, 0xfb, 0x61, 0x19, 0x37, 0xb9, 0x56, 0x62, 0xf4, 0xfc, 0x5e, 0x7a, 0xdd, 0x55, 0x0a, 0x48, ]; let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n"; let key = [ 0xac, 0x92, 0x41, 0xba, 0xde, 0xd9, 0x02, 0xfe, 0x40, 0x92, 0x20, 0xf6, 0x56, 0x03, 0xfe, 0xae, 0x1b, 0xba, 0x01, 0x97, 0x97, 0x79, 0xbb, 0xa6, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } #[test] fn crypt_aes_256_40_byte() { let mut ciphertext = [ 0xa9, 0x99, 0xbd, 0xea, 0x82, 0x9b, 0x8f, 0x2f, 0xb7, 0x52, 0x2f, 0x6b, 0xd8, 0xf6, 0xab, 0x0e, 0x24, 0x51, 0x9e, 0x18, 0x0f, 0xc0, 0x8f, 0x54, 0x15, 0x80, 0xae, 0xbc, 0xa0, 0x5c, 0x8a, 0x11, 0x8d, 0x14, 0x7e, 0xc5, 0xb4, 0xae, 0xd3, 0x37, ]; let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n"; let key = [ 0x64, 0x7c, 0x7a, 0xde, 0xf0, 0xf2, 0x61, 0x49, 0x1c, 0xf1, 0xf1, 0xe3, 0x37, 0xfc, 0xe1, 0x4d, 0x4a, 0x77, 0xd4, 0xeb, 0x9e, 0x3d, 0x75, 0xce, 0x9a, 0x3e, 0x10, 0x50, 0xc2, 0x07, 0x36, 0xb6, ]; roundtrip::(&key, &mut ciphertext, expected_plaintext); } } zip-0.6.6/src/compression.rs000064400000000000000000000154660072674642500142010ustar 00000000000000//! Possible ZIP compression methods. use std::fmt; #[allow(deprecated)] /// Identifies the storage format used to compress a file within a ZIP archive. 
/// /// Each file's compression method is stored alongside it, allowing the /// contents to be read without context. /// /// When creating ZIP files, you may choose the method to use with /// [`crate::write::FileOptions::compression_method`] #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[non_exhaustive] pub enum CompressionMethod { /// Store the file as is Stored, /// Compress the file using Deflate #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflated, /// Compress the file using BZIP2 #[cfg(feature = "bzip2")] Bzip2, /// Encrypted using AES. /// /// The actual compression method has to be taken from the AES extra data field /// or from `ZipFileData`. #[cfg(feature = "aes-crypto")] Aes, /// Compress the file using ZStandard #[cfg(feature = "zstd")] Zstd, /// Unsupported compression method #[deprecated(since = "0.5.7", note = "use the constants instead")] Unsupported(u16), } #[allow(deprecated, missing_docs)] /// All compression methods defined for the ZIP format impl CompressionMethod { pub const STORE: Self = CompressionMethod::Stored; pub const SHRINK: Self = CompressionMethod::Unsupported(1); pub const REDUCE_1: Self = CompressionMethod::Unsupported(2); pub const REDUCE_2: Self = CompressionMethod::Unsupported(3); pub const REDUCE_3: Self = CompressionMethod::Unsupported(4); pub const REDUCE_4: Self = CompressionMethod::Unsupported(5); pub const IMPLODE: Self = CompressionMethod::Unsupported(6); #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] pub const DEFLATE: Self = CompressionMethod::Deflated; #[cfg(not(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" )))] pub const DEFLATE: Self = CompressionMethod::Unsupported(8); pub const DEFLATE64: Self = CompressionMethod::Unsupported(9); pub const PKWARE_IMPLODE: Self = CompressionMethod::Unsupported(10); #[cfg(feature = "bzip2")] pub const BZIP2: Self = CompressionMethod::Bzip2; #[cfg(not(feature = "bzip2"))] pub const BZIP2: Self = CompressionMethod::Unsupported(12); pub const LZMA: Self = CompressionMethod::Unsupported(14); pub const IBM_ZOS_CMPSC: Self = CompressionMethod::Unsupported(16); pub const IBM_TERSE: Self = CompressionMethod::Unsupported(18); pub const ZSTD_DEPRECATED: Self = CompressionMethod::Unsupported(20); #[cfg(feature = "zstd")] pub const ZSTD: Self = CompressionMethod::Zstd; #[cfg(not(feature = "zstd"))] pub const ZSTD: Self = CompressionMethod::Unsupported(93); pub const MP3: Self = CompressionMethod::Unsupported(94); pub const XZ: Self = CompressionMethod::Unsupported(95); pub const JPEG: Self = CompressionMethod::Unsupported(96); pub const WAVPACK: Self = CompressionMethod::Unsupported(97); pub const PPMD: Self = CompressionMethod::Unsupported(98); #[cfg(feature = "aes-crypto")] pub const AES: Self = CompressionMethod::Aes; #[cfg(not(feature = "aes-crypto"))] pub const AES: Self = CompressionMethod::Unsupported(99); } impl CompressionMethod { /// Converts an u16 to its corresponding CompressionMethod #[deprecated( since = "0.5.7", note = "use a constant to construct a compression method" )] pub fn from_u16(val: u16) -> CompressionMethod { #[allow(deprecated)] match val { 0 => CompressionMethod::Stored, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] 8 => CompressionMethod::Deflated, #[cfg(feature = "bzip2")] 12 => CompressionMethod::Bzip2, #[cfg(feature = "zstd")] 93 => CompressionMethod::Zstd, #[cfg(feature = "aes-crypto")] 99 => CompressionMethod::Aes, v => 
CompressionMethod::Unsupported(v), } } /// Converts a CompressionMethod to a u16 #[deprecated( since = "0.5.7", note = "to match on other compression methods, use a constant" )] pub fn to_u16(self) -> u16 { #[allow(deprecated)] match self { CompressionMethod::Stored => 0, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => 8, #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => 12, #[cfg(feature = "aes-crypto")] CompressionMethod::Aes => 99, #[cfg(feature = "zstd")] CompressionMethod::Zstd => 93, CompressionMethod::Unsupported(v) => v, } } } impl fmt::Display for CompressionMethod { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Just duplicate what the Debug format looks like, i.e, the enum key: write!(f, "{self:?}") } } /// The compression methods which have been implemented. pub const SUPPORTED_COMPRESSION_METHODS: &[CompressionMethod] = &[ CompressionMethod::Stored, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated, #[cfg(feature = "bzip2")] CompressionMethod::Bzip2, #[cfg(feature = "zstd")] CompressionMethod::Zstd, ]; #[cfg(test)] mod test { use super::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS}; #[test] fn from_eq_to() { for v in 0..(u16::MAX as u32 + 1) { #[allow(deprecated)] let from = CompressionMethod::from_u16(v as u16); #[allow(deprecated)] let to = from.to_u16() as u32; assert_eq!(v, to); } } #[test] fn to_eq_from() { fn check_match(method: CompressionMethod) { #[allow(deprecated)] let to = method.to_u16(); #[allow(deprecated)] let from = CompressionMethod::from_u16(to); #[allow(deprecated)] let back = from.to_u16(); assert_eq!(to, back); } for &method in SUPPORTED_COMPRESSION_METHODS { check_match(method); } } #[test] fn to_display_fmt() { fn check_match(method: CompressionMethod) { let debug_str = format!("{method:?}"); let display_str = format!("{method}"); assert_eq!(debug_str, display_str); } for &method in SUPPORTED_COMPRESSION_METHODS { check_match(method); } } } zip-0.6.6/src/cp437.rs000064400000000000000000000115370072674642500124730ustar 00000000000000//! Convert a string in IBM codepage 437 to UTF-8 /// Trait to convert IBM codepage 437 to the target type pub trait FromCp437 { /// Target type type Target; /// Function that does the conversion from cp437. /// Generally allocations will be avoided if all data falls into the ASCII range. 
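    ///
    /// A minimal sketch of the conversion (mirroring the tests in this module):
    ///
    /// ```ignore
    /// let data: &[u8] = b"Cura\x87ao";          // 0x87 is "ç" in codepage 437
    /// assert_eq!(data.from_cp437(), "Curaçao"); // non-ASCII input allocates a new string
    /// ```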
#[allow(clippy::wrong_self_convention)] fn from_cp437(self) -> Self::Target; } impl<'a> FromCp437 for &'a [u8] { type Target = ::std::borrow::Cow<'a, str>; fn from_cp437(self) -> Self::Target { if self.iter().all(|c| *c < 0x80) { ::std::str::from_utf8(self).unwrap().into() } else { self.iter().map(|c| to_char(*c)).collect::().into() } } } impl FromCp437 for Vec { type Target = String; fn from_cp437(self) -> Self::Target { if self.iter().all(|c| *c < 0x80) { String::from_utf8(self).unwrap() } else { self.into_iter().map(to_char).collect() } } } fn to_char(input: u8) -> char { let output = match input { 0x00..=0x7f => input as u32, 0x80 => 0x00c7, 0x81 => 0x00fc, 0x82 => 0x00e9, 0x83 => 0x00e2, 0x84 => 0x00e4, 0x85 => 0x00e0, 0x86 => 0x00e5, 0x87 => 0x00e7, 0x88 => 0x00ea, 0x89 => 0x00eb, 0x8a => 0x00e8, 0x8b => 0x00ef, 0x8c => 0x00ee, 0x8d => 0x00ec, 0x8e => 0x00c4, 0x8f => 0x00c5, 0x90 => 0x00c9, 0x91 => 0x00e6, 0x92 => 0x00c6, 0x93 => 0x00f4, 0x94 => 0x00f6, 0x95 => 0x00f2, 0x96 => 0x00fb, 0x97 => 0x00f9, 0x98 => 0x00ff, 0x99 => 0x00d6, 0x9a => 0x00dc, 0x9b => 0x00a2, 0x9c => 0x00a3, 0x9d => 0x00a5, 0x9e => 0x20a7, 0x9f => 0x0192, 0xa0 => 0x00e1, 0xa1 => 0x00ed, 0xa2 => 0x00f3, 0xa3 => 0x00fa, 0xa4 => 0x00f1, 0xa5 => 0x00d1, 0xa6 => 0x00aa, 0xa7 => 0x00ba, 0xa8 => 0x00bf, 0xa9 => 0x2310, 0xaa => 0x00ac, 0xab => 0x00bd, 0xac => 0x00bc, 0xad => 0x00a1, 0xae => 0x00ab, 0xaf => 0x00bb, 0xb0 => 0x2591, 0xb1 => 0x2592, 0xb2 => 0x2593, 0xb3 => 0x2502, 0xb4 => 0x2524, 0xb5 => 0x2561, 0xb6 => 0x2562, 0xb7 => 0x2556, 0xb8 => 0x2555, 0xb9 => 0x2563, 0xba => 0x2551, 0xbb => 0x2557, 0xbc => 0x255d, 0xbd => 0x255c, 0xbe => 0x255b, 0xbf => 0x2510, 0xc0 => 0x2514, 0xc1 => 0x2534, 0xc2 => 0x252c, 0xc3 => 0x251c, 0xc4 => 0x2500, 0xc5 => 0x253c, 0xc6 => 0x255e, 0xc7 => 0x255f, 0xc8 => 0x255a, 0xc9 => 0x2554, 0xca => 0x2569, 0xcb => 0x2566, 0xcc => 0x2560, 0xcd => 0x2550, 0xce => 0x256c, 0xcf => 0x2567, 0xd0 => 0x2568, 0xd1 => 0x2564, 0xd2 => 0x2565, 0xd3 => 0x2559, 0xd4 => 0x2558, 0xd5 => 0x2552, 0xd6 => 0x2553, 0xd7 => 0x256b, 0xd8 => 0x256a, 0xd9 => 0x2518, 0xda => 0x250c, 0xdb => 0x2588, 0xdc => 0x2584, 0xdd => 0x258c, 0xde => 0x2590, 0xdf => 0x2580, 0xe0 => 0x03b1, 0xe1 => 0x00df, 0xe2 => 0x0393, 0xe3 => 0x03c0, 0xe4 => 0x03a3, 0xe5 => 0x03c3, 0xe6 => 0x00b5, 0xe7 => 0x03c4, 0xe8 => 0x03a6, 0xe9 => 0x0398, 0xea => 0x03a9, 0xeb => 0x03b4, 0xec => 0x221e, 0xed => 0x03c6, 0xee => 0x03b5, 0xef => 0x2229, 0xf0 => 0x2261, 0xf1 => 0x00b1, 0xf2 => 0x2265, 0xf3 => 0x2264, 0xf4 => 0x2320, 0xf5 => 0x2321, 0xf6 => 0x00f7, 0xf7 => 0x2248, 0xf8 => 0x00b0, 0xf9 => 0x2219, 0xfa => 0x00b7, 0xfb => 0x221a, 0xfc => 0x207f, 0xfd => 0x00b2, 0xfe => 0x25a0, 0xff => 0x00a0, }; ::std::char::from_u32(output).unwrap() } #[cfg(test)] mod test { #[test] fn to_char_valid() { for i in 0x00_u32..0x100 { super::to_char(i as u8); } } #[test] fn ascii() { for i in 0x00..0x80 { assert_eq!(super::to_char(i), i as char); } } #[test] fn example_slice() { use super::FromCp437; let data = b"Cura\x87ao"; assert!(::std::str::from_utf8(data).is_err()); assert_eq!(data.from_cp437(), "Curaçao"); } #[test] fn example_vec() { use super::FromCp437; let data = vec![0xCC, 0xCD, 0xCD, 0xB9]; assert!(String::from_utf8(data.clone()).is_err()); assert_eq!(&data.from_cp437(), "╠══╣"); } } zip-0.6.6/src/crc32.rs000064400000000000000000000054270072674642500125500ustar 00000000000000//! Helper module to compute a CRC32 checksum use std::io; use std::io::prelude::*; use crc32fast::Hasher; /// Reader that validates the CRC32 when it reaches the EOF. 
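///
/// Every byte read through the wrapper is fed into a running CRC-32 hasher; the read call
/// that observes EOF compares the accumulated value with the expected checksum and fails
/// with an "Invalid checksum" error on mismatch. The check is skipped for AE-2 encrypted
/// data, whose stored CRC is always 0.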
pub struct Crc32Reader { inner: R, hasher: Hasher, check: u32, /// Signals if `inner` stores aes encrypted data. /// AE-2 encrypted data doesn't use crc and sets the value to 0. ae2_encrypted: bool, } impl Crc32Reader { /// Get a new Crc32Reader which checks the inner reader against checksum. /// The check is disabled if `ae2_encrypted == true`. pub(crate) fn new(inner: R, checksum: u32, ae2_encrypted: bool) -> Crc32Reader { Crc32Reader { inner, hasher: Hasher::new(), check: checksum, ae2_encrypted, } } fn check_matches(&self) -> bool { self.check == self.hasher.clone().finalize() } pub fn into_inner(self) -> R { self.inner } } impl Read for Crc32Reader { fn read(&mut self, buf: &mut [u8]) -> io::Result { let invalid_check = !buf.is_empty() && !self.check_matches() && !self.ae2_encrypted; let count = match self.inner.read(buf) { Ok(0) if invalid_check => { return Err(io::Error::new(io::ErrorKind::Other, "Invalid checksum")) } Ok(n) => n, Err(e) => return Err(e), }; self.hasher.update(&buf[0..count]); Ok(count) } } #[cfg(test)] mod test { use super::*; use std::io::Read; #[test] fn test_empty_reader() { let data: &[u8] = b""; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0, false); assert_eq!(reader.read(&mut buf).unwrap(), 0); let mut reader = Crc32Reader::new(data, 1, false); assert!(reader .read(&mut buf) .unwrap_err() .to_string() .contains("Invalid checksum")); } #[test] fn test_byte_by_byte() { let data: &[u8] = b"1234"; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 0); // Can keep reading 0 bytes after the end assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn test_zero_read() { let data: &[u8] = b"1234"; let mut buf = [0; 5]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false); assert_eq!(reader.read(&mut buf[..0]).unwrap(), 0); assert_eq!(reader.read(&mut buf).unwrap(), 4); } } zip-0.6.6/src/lib.rs000064400000000000000000000031460072674642500123760ustar 00000000000000//! A library for reading and writing ZIP archives. //! ZIP is a format designed for cross-platform file "archiving". //! That is, storing a collection of files in a single datastream //! to make them easier to share between computers. //! Additionally, ZIP is able to compress and encrypt files in its //! archives. //! //! The current implementation is based on [PKWARE's APPNOTE.TXT v6.3.9](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT) //! //! --- //! //! [`zip`](`crate`) has support for the most common ZIP archives found in common use. //! However, in special cases, //! there are some zip archives that are difficult to read or write. //! //! This is a list of supported features: //! //! | | Reading | Writing | //! | ------- | ------ | ------- | //! | Deflate | ✅ [->](`crate::ZipArchive::by_name`) | ✅ [->](`crate::write::FileOptions::compression_method`) | //! //! //! 
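//!
//! A minimal example of creating an archive (a sketch; the file names are illustrative and
//! error handling is reduced to `?`):
//!
//! ```no_run
//! use std::io::Write;
//!
//! fn write_zip() -> zip::result::ZipResult<()> {
//!     let file = std::fs::File::create("example.zip")?;
//!     let mut writer = zip::ZipWriter::new(file);
//!     // Default options use the default compression method for the enabled features.
//!     writer.start_file("hello.txt", zip::write::FileOptions::default())?;
//!     writer.write_all(b"hello, world!\n")?;
//!     writer.finish()?;
//!     Ok(())
//! }
//! ```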
#![warn(missing_docs)] pub use crate::compression::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS}; pub use crate::read::ZipArchive; pub use crate::types::DateTime; pub use crate::write::ZipWriter; #[cfg(feature = "aes-crypto")] mod aes; #[cfg(feature = "aes-crypto")] mod aes_ctr; mod compression; mod cp437; mod crc32; pub mod read; pub mod result; mod spec; mod types; pub mod write; mod zipcrypto; /// Unstable APIs /// /// All APIs accessible by importing this module are unstable; They may be changed in patch releases. /// You MUST you an exact version specifier in `Cargo.toml`, to indicate the version of this API you're using: /// /// ```toml /// [dependencies] /// zip = "=0.6.6" /// ``` pub mod unstable; zip-0.6.6/src/read/stream.rs000064400000000000000000000272540072674642500140440ustar 00000000000000use std::fs; use std::io::{self, Read}; use std::path::Path; use super::{ central_header_to_zip_file_inner, read_zipfile_from_stream, spec, ZipError, ZipFile, ZipFileData, ZipResult, }; use byteorder::{LittleEndian, ReadBytesExt}; /// Stream decoder for zip. #[derive(Debug)] pub struct ZipStreamReader(R); impl ZipStreamReader { /// Create a new ZipStreamReader pub fn new(reader: R) -> Self { Self(reader) } } impl ZipStreamReader { fn parse_central_directory(&mut self) -> ZipResult> { // Give archive_offset and central_header_start dummy value 0, since // they are not used in the output. let archive_offset = 0; let central_header_start = 0; // Parse central header let signature = self.0.read_u32::()?; if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE { Ok(None) } else { central_header_to_zip_file_inner(&mut self.0, archive_offset, central_header_start) .map(ZipStreamFileMetadata) .map(Some) } } /// Iteraate over the stream and extract all file and their /// metadata. pub fn visit(mut self, visitor: &mut V) -> ZipResult<()> { while let Some(mut file) = read_zipfile_from_stream(&mut self.0)? { visitor.visit_file(&mut file)?; } while let Some(metadata) = self.parse_central_directory()? { visitor.visit_additional_metadata(&metadata)?; } Ok(()) } /// Extract a Zip archive into a directory, overwriting files if they /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`]. /// /// Extraction is not atomic; If an error is encountered, some of the files /// may be left on disk. 
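    ///
    /// A minimal usage sketch (the archive path and output directory are illustrative):
    ///
    /// ```ignore
    /// let archive = std::fs::File::open("archive.zip")?;
    /// ZipStreamReader::new(archive).extract("out/")?;
    /// ```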
pub fn extract>(self, directory: P) -> ZipResult<()> { struct Extractor<'a>(&'a Path); impl ZipStreamVisitor for Extractor<'_> { fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> { let filepath = file .enclosed_name() .ok_or(ZipError::InvalidArchive("Invalid file path"))?; let outpath = self.0.join(filepath); if file.name().ends_with('/') { fs::create_dir_all(&outpath)?; } else { if let Some(p) = outpath.parent() { fs::create_dir_all(p)?; } let mut outfile = fs::File::create(&outpath)?; io::copy(file, &mut outfile)?; } Ok(()) } #[allow(unused)] fn visit_additional_metadata( &mut self, metadata: &ZipStreamFileMetadata, ) -> ZipResult<()> { #[cfg(unix)] { let filepath = metadata .enclosed_name() .ok_or(ZipError::InvalidArchive("Invalid file path"))?; let outpath = self.0.join(filepath); use std::os::unix::fs::PermissionsExt; if let Some(mode) = metadata.unix_mode() { fs::set_permissions(outpath, fs::Permissions::from_mode(mode))?; } } Ok(()) } } self.visit(&mut Extractor(directory.as_ref())) } } /// Visitor for ZipStreamReader pub trait ZipStreamVisitor { /// * `file` - contains the content of the file and most of the metadata, /// except: /// - `comment`: set to an empty string /// - `data_start`: set to 0 /// - `external_attributes`: `unix_mode()`: will return None fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()>; /// This function is guranteed to be called after all `visit_file`s. /// /// * `metadata` - Provides missing metadata in `visit_file`. fn visit_additional_metadata(&mut self, metadata: &ZipStreamFileMetadata) -> ZipResult<()>; } /// Additional metadata for the file. #[derive(Debug)] pub struct ZipStreamFileMetadata(ZipFileData); impl ZipStreamFileMetadata { /// Get the name of the file /// /// # Warnings /// /// It is dangerous to use this name directly when extracting an archive. /// It may contain an absolute path (`/etc/shadow`), or break out of the /// current directory (`../runtime`). Carelessly writing to these paths /// allows an attacker to craft a ZIP archive that will overwrite critical /// files. /// /// You can use the [`ZipFile::enclosed_name`] method to validate the name /// as a safe path. pub fn name(&self) -> &str { &self.0.file_name } /// Get the name of the file, in the raw (internal) byte representation. /// /// The encoding of this data is currently undefined. pub fn name_raw(&self) -> &[u8] { &self.0.file_name_raw } /// Rewrite the path, ignoring any path components with special meaning. /// /// - Absolute paths are made relative /// - [`ParentDir`]s are ignored /// - Truncates the filename at a NULL byte /// /// This is appropriate if you need to be able to extract *something* from /// any archive, but will easily misrepresent trivial paths like /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this, /// [`ZipFile::enclosed_name`] is the better option in most scenarios. /// /// [`ParentDir`]: `Component::ParentDir` pub fn mangled_name(&self) -> ::std::path::PathBuf { self.0.file_name_sanitized() } /// Ensure the file path is safe to use as a [`Path`]. /// /// - It can't contain NULL bytes /// - It can't resolve to a path outside the current directory /// > `foo/../bar` is fine, `foo/../../bar` is not. /// - It can't be an absolute path /// /// This will read well-formed ZIP files correctly, and is resistant /// to path-based exploits. It is recommended over /// [`ZipFile::mangled_name`]. 
pub fn enclosed_name(&self) -> Option<&Path> { self.0.enclosed_name() } /// Returns whether the file is actually a directory pub fn is_dir(&self) -> bool { self.name() .chars() .rev() .next() .map_or(false, |c| c == '/' || c == '\\') } /// Returns whether the file is a regular file pub fn is_file(&self) -> bool { !self.is_dir() } /// Get the comment of the file pub fn comment(&self) -> &str { &self.0.file_comment } /// Get the starting offset of the data of the compressed file pub fn data_start(&self) -> u64 { self.0.data_start.load() } /// Get unix mode for the file pub fn unix_mode(&self) -> Option { self.0.unix_mode() } } #[cfg(test)] mod test { use super::*; use std::collections::BTreeSet; use std::io; struct DummyVisitor; impl ZipStreamVisitor for DummyVisitor { fn visit_file(&mut self, _file: &mut ZipFile<'_>) -> ZipResult<()> { Ok(()) } fn visit_additional_metadata( &mut self, _metadata: &ZipStreamFileMetadata, ) -> ZipResult<()> { Ok(()) } } #[derive(Default, Debug, Eq, PartialEq)] struct CounterVisitor(u64, u64); impl ZipStreamVisitor for CounterVisitor { fn visit_file(&mut self, _file: &mut ZipFile<'_>) -> ZipResult<()> { self.0 += 1; Ok(()) } fn visit_additional_metadata( &mut self, _metadata: &ZipStreamFileMetadata, ) -> ZipResult<()> { self.1 += 1; Ok(()) } } #[test] fn invalid_offset() { ZipStreamReader::new(io::Cursor::new(include_bytes!( "../../tests/data/invalid_offset.zip" ))) .visit(&mut DummyVisitor) .unwrap_err(); } #[test] fn invalid_offset2() { ZipStreamReader::new(io::Cursor::new(include_bytes!( "../../tests/data/invalid_offset2.zip" ))) .visit(&mut DummyVisitor) .unwrap_err(); } #[test] fn zip_read_streaming() { let reader = ZipStreamReader::new(io::Cursor::new(include_bytes!( "../../tests/data/mimetype.zip" ))); #[derive(Default)] struct V { filenames: BTreeSet>, } impl ZipStreamVisitor for V { fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> { if file.is_file() { self.filenames.insert(file.name().into()); } Ok(()) } fn visit_additional_metadata( &mut self, metadata: &ZipStreamFileMetadata, ) -> ZipResult<()> { if metadata.is_file() { assert!( self.filenames.contains(metadata.name()), "{} is missing its file content", metadata.name() ); } Ok(()) } } reader.visit(&mut V::default()).unwrap(); } #[test] fn file_and_dir_predicates() { let reader = ZipStreamReader::new(io::Cursor::new(include_bytes!( "../../tests/data/files_and_dirs.zip" ))); #[derive(Default)] struct V { filenames: BTreeSet>, } impl ZipStreamVisitor for V { fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> { let full_name = file.enclosed_name().unwrap(); let file_name = full_name.file_name().unwrap().to_str().unwrap(); assert!( (file_name.starts_with("dir") && file.is_dir()) || (file_name.starts_with("file") && file.is_file()) ); if file.is_file() { self.filenames.insert(file.name().into()); } Ok(()) } fn visit_additional_metadata( &mut self, metadata: &ZipStreamFileMetadata, ) -> ZipResult<()> { if metadata.is_file() { assert!( self.filenames.contains(metadata.name()), "{} is missing its file content", metadata.name() ); } Ok(()) } } reader.visit(&mut V::default()).unwrap(); } /// test case to ensure we don't preemptively over allocate based on the /// declared number of files in the CDE of an invalid zip when the number of /// files declared is more than the alleged offset in the CDE #[test] fn invalid_cde_number_of_files_allocation_smaller_offset() { ZipStreamReader::new(io::Cursor::new(include_bytes!( 
"../../tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip" ))) .visit(&mut DummyVisitor) .unwrap_err(); } /// test case to ensure we don't preemptively over allocate based on the /// declared number of files in the CDE of an invalid zip when the number of /// files declared is less than the alleged offset in the CDE #[test] fn invalid_cde_number_of_files_allocation_greater_offset() { ZipStreamReader::new(io::Cursor::new(include_bytes!( "../../tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip" ))) .visit(&mut DummyVisitor) .unwrap_err(); } } zip-0.6.6/src/read.rs000064400000000000000000001333720072674642500125500ustar 00000000000000//! Types for reading ZIP archives #[cfg(feature = "aes-crypto")] use crate::aes::{AesReader, AesReaderValid}; use crate::compression::CompressionMethod; use crate::cp437::FromCp437; use crate::crc32::Crc32Reader; use crate::result::{InvalidPassword, ZipError, ZipResult}; use crate::spec; use crate::types::{AesMode, AesVendorVersion, AtomicU64, DateTime, System, ZipFileData}; use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator}; use byteorder::{LittleEndian, ReadBytesExt}; use std::borrow::Cow; use std::collections::HashMap; use std::io::{self, prelude::*}; use std::path::Path; use std::sync::Arc; #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] use flate2::read::DeflateDecoder; #[cfg(feature = "bzip2")] use bzip2::read::BzDecoder; #[cfg(feature = "zstd")] use zstd::stream::read::Decoder as ZstdDecoder; /// Provides high level API for reading from a stream. pub(crate) mod stream; // Put the struct declaration in a private module to convince rustdoc to display ZipArchive nicely pub(crate) mod zip_archive { /// Extract immutable data from `ZipArchive` to make it cheap to clone #[derive(Debug)] pub(crate) struct Shared { pub(super) files: Vec, pub(super) names_map: super::HashMap, pub(super) offset: u64, pub(super) comment: Vec, } /// ZIP archive reader /// /// At the moment, this type is cheap to clone if this is the case for the /// reader it uses. However, this is not guaranteed by this crate and it may /// change in the future. /// /// ```no_run /// use std::io::prelude::*; /// fn list_zip_contents(reader: impl Read + Seek) -> zip::result::ZipResult<()> { /// let mut zip = zip::ZipArchive::new(reader)?; /// /// for i in 0..zip.len() { /// let mut file = zip.by_index(i)?; /// println!("Filename: {}", file.name()); /// std::io::copy(&mut file, &mut std::io::stdout()); /// } /// /// Ok(()) /// } /// ``` #[derive(Clone, Debug)] pub struct ZipArchive { pub(super) reader: R, pub(super) shared: super::Arc, } } pub use zip_archive::ZipArchive; #[allow(clippy::large_enum_variant)] enum CryptoReader<'a> { Plaintext(io::Take<&'a mut dyn Read>), ZipCrypto(ZipCryptoReaderValid>), #[cfg(feature = "aes-crypto")] Aes { reader: AesReaderValid>, vendor_version: AesVendorVersion, }, } impl<'a> Read for CryptoReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self { CryptoReader::Plaintext(r) => r.read(buf), CryptoReader::ZipCrypto(r) => r.read(buf), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. } => r.read(buf), } } } impl<'a> CryptoReader<'a> { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> io::Take<&'a mut dyn Read> { match self { CryptoReader::Plaintext(r) => r, CryptoReader::ZipCrypto(r) => r.into_inner(), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. 
} => r.into_inner(), } } /// Returns `true` if the data is encrypted using AE2. pub fn is_ae2_encrypted(&self) -> bool { #[cfg(feature = "aes-crypto")] return matches!( self, CryptoReader::Aes { vendor_version: AesVendorVersion::Ae2, .. } ); #[cfg(not(feature = "aes-crypto"))] false } } enum ZipFileReader<'a> { NoReader, Raw(io::Take<&'a mut dyn io::Read>), Stored(Crc32Reader>), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflated(Crc32Reader>>), #[cfg(feature = "bzip2")] Bzip2(Crc32Reader>>), #[cfg(feature = "zstd")] Zstd(Crc32Reader>>>), } impl<'a> Read for ZipFileReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self { ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"), ZipFileReader::Raw(r) => r.read(buf), ZipFileReader::Stored(r) => r.read(buf), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] ZipFileReader::Deflated(r) => r.read(buf), #[cfg(feature = "bzip2")] ZipFileReader::Bzip2(r) => r.read(buf), #[cfg(feature = "zstd")] ZipFileReader::Zstd(r) => r.read(buf), } } } impl<'a> ZipFileReader<'a> { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> io::Take<&'a mut dyn Read> { match self { ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"), ZipFileReader::Raw(r) => r, ZipFileReader::Stored(r) => r.into_inner().into_inner(), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] ZipFileReader::Deflated(r) => r.into_inner().into_inner().into_inner(), #[cfg(feature = "bzip2")] ZipFileReader::Bzip2(r) => r.into_inner().into_inner().into_inner(), #[cfg(feature = "zstd")] ZipFileReader::Zstd(r) => r.into_inner().finish().into_inner().into_inner(), } } } /// A struct for reading a zip file pub struct ZipFile<'a> { data: Cow<'a, ZipFileData>, crypto_reader: Option>, reader: ZipFileReader<'a>, } fn find_content<'a>( data: &ZipFileData, reader: &'a mut (impl Read + Seek), ) -> ZipResult> { // Parse local header reader.seek(io::SeekFrom::Start(data.header_start))?; let signature = reader.read_u32::()?; if signature != spec::LOCAL_FILE_HEADER_SIGNATURE { return Err(ZipError::InvalidArchive("Invalid local file header")); } reader.seek(io::SeekFrom::Current(22))?; let file_name_length = reader.read_u16::()? as u64; let extra_field_length = reader.read_u16::()? 
as u64; let magic_and_header = 4 + 22 + 2 + 2; let data_start = data.header_start + magic_and_header + file_name_length + extra_field_length; data.data_start.store(data_start); reader.seek(io::SeekFrom::Start(data_start))?; Ok((reader as &mut dyn Read).take(data.compressed_size)) } #[allow(clippy::too_many_arguments)] fn make_crypto_reader<'a>( compression_method: crate::compression::CompressionMethod, crc32: u32, last_modified_time: DateTime, using_data_descriptor: bool, reader: io::Take<&'a mut dyn io::Read>, password: Option<&[u8]>, aes_info: Option<(AesMode, AesVendorVersion)>, #[cfg(feature = "aes-crypto")] compressed_size: u64, ) -> ZipResult, InvalidPassword>> { #[allow(deprecated)] { if let CompressionMethod::Unsupported(_) = compression_method { return unsupported_zip_error("Compression method not supported"); } } let reader = match (password, aes_info) { #[cfg(not(feature = "aes-crypto"))] (Some(_), Some(_)) => { return Err(ZipError::UnsupportedArchive( "AES encrypted files cannot be decrypted without the aes-crypto feature.", )) } #[cfg(feature = "aes-crypto")] (Some(password), Some((aes_mode, vendor_version))) => { match AesReader::new(reader, aes_mode, compressed_size).validate(password)? { None => return Ok(Err(InvalidPassword)), Some(r) => CryptoReader::Aes { reader: r, vendor_version, }, } } (Some(password), None) => { let validator = if using_data_descriptor { ZipCryptoValidator::InfoZipMsdosTime(last_modified_time.timepart()) } else { ZipCryptoValidator::PkzipCrc32(crc32) }; match ZipCryptoReader::new(reader, password).validate(validator)? { None => return Ok(Err(InvalidPassword)), Some(r) => CryptoReader::ZipCrypto(r), } } (None, Some(_)) => return Ok(Err(InvalidPassword)), (None, None) => CryptoReader::Plaintext(reader), }; Ok(Ok(reader)) } fn make_reader( compression_method: CompressionMethod, crc32: u32, reader: CryptoReader, ) -> ZipFileReader { let ae2_encrypted = reader.is_ae2_encrypted(); match compression_method { CompressionMethod::Stored => { ZipFileReader::Stored(Crc32Reader::new(reader, crc32, ae2_encrypted)) } #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => { let deflate_reader = DeflateDecoder::new(reader); ZipFileReader::Deflated(Crc32Reader::new(deflate_reader, crc32, ae2_encrypted)) } #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => { let bzip2_reader = BzDecoder::new(reader); ZipFileReader::Bzip2(Crc32Reader::new(bzip2_reader, crc32, ae2_encrypted)) } #[cfg(feature = "zstd")] CompressionMethod::Zstd => { let zstd_reader = ZstdDecoder::new(reader).unwrap(); ZipFileReader::Zstd(Crc32Reader::new(zstd_reader, crc32, ae2_encrypted)) } _ => panic!("Compression method not supported"), } } impl ZipArchive { /// Get the directory start offset and number of files. This is done in a /// separate function to ease the control flow design. pub(crate) fn get_directory_counts( reader: &mut R, footer: &spec::CentralDirectoryEnd, cde_start_pos: u64, ) -> ZipResult<(u64, u64, usize)> { // See if there's a ZIP64 footer. The ZIP64 locator if present will // have its signature 20 bytes in front of the standard footer. The // standard footer, in turn, is 22+N bytes large, where N is the // comment length. Therefore: let zip64locator = if reader .seek(io::SeekFrom::End( -(20 + 22 + footer.zip_file_comment.len() as i64), )) .is_ok() { match spec::Zip64CentralDirectoryEndLocator::parse(reader) { Ok(loc) => Some(loc), Err(ZipError::InvalidArchive(_)) => { // No ZIP64 header; that's actually fine. 
We're done here. None } Err(e) => { // Yikes, a real problem return Err(e); } } } else { // Empty Zip files will have nothing else so this error might be fine. If // not, we'll find out soon. None }; match zip64locator { None => { // Some zip files have data prepended to them, resulting in the // offsets all being too small. Get the amount of error by comparing // the actual file position we found the CDE at with the offset // recorded in the CDE. let archive_offset = cde_start_pos .checked_sub(footer.central_directory_size as u64) .and_then(|x| x.checked_sub(footer.central_directory_offset as u64)) .ok_or(ZipError::InvalidArchive( "Invalid central directory size or offset", ))?; let directory_start = footer.central_directory_offset as u64 + archive_offset; let number_of_files = footer.number_of_files_on_this_disk as usize; Ok((archive_offset, directory_start, number_of_files)) } Some(locator64) => { // If we got here, this is indeed a ZIP64 file. if !footer.record_too_small() && footer.disk_number as u32 != locator64.disk_with_central_directory { return unsupported_zip_error( "Support for multi-disk files is not implemented", ); } // We need to reassess `archive_offset`. We know where the ZIP64 // central-directory-end structure *should* be, but unfortunately we // don't know how to precisely relate that location to our current // actual offset in the file, since there may be junk at its // beginning. Therefore we need to perform another search, as in // read::CentralDirectoryEnd::find_and_parse, except now we search // forward. let search_upper_bound = cde_start_pos .checked_sub(60) // minimum size of Zip64CentralDirectoryEnd + Zip64CentralDirectoryEndLocator .ok_or(ZipError::InvalidArchive( "File cannot contain ZIP64 central directory end", ))?; let (footer, archive_offset) = spec::Zip64CentralDirectoryEnd::find_and_parse( reader, locator64.end_of_central_directory_offset, search_upper_bound, )?; if footer.disk_number != footer.disk_with_central_directory { return unsupported_zip_error( "Support for multi-disk files is not implemented", ); } let directory_start = footer .central_directory_offset .checked_add(archive_offset) .ok_or({ ZipError::InvalidArchive("Invalid central directory size or offset") })?; Ok(( archive_offset, directory_start, footer.number_of_files as usize, )) } } } /// Read a ZIP archive, collecting the files it contains /// /// This uses the central directory record of the ZIP file, and ignores local file headers pub fn new(mut reader: R) -> ZipResult> { let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut reader)?; if !footer.record_too_small() && footer.disk_number != footer.disk_with_central_directory { return unsupported_zip_error("Support for multi-disk files is not implemented"); } let (archive_offset, directory_start, number_of_files) = Self::get_directory_counts(&mut reader, &footer, cde_start_pos)?; // If the parsed number of files is greater than the offset then // something fishy is going on and we shouldn't trust number_of_files. 
let file_capacity = if number_of_files > cde_start_pos as usize { 0 } else { number_of_files }; let mut files = Vec::with_capacity(file_capacity); let mut names_map = HashMap::with_capacity(file_capacity); if reader.seek(io::SeekFrom::Start(directory_start)).is_err() { return Err(ZipError::InvalidArchive( "Could not seek to start of central directory", )); } for _ in 0..number_of_files { let file = central_header_to_zip_file(&mut reader, archive_offset)?; names_map.insert(file.file_name.clone(), files.len()); files.push(file); } let shared = Arc::new(zip_archive::Shared { files, names_map, offset: archive_offset, comment: footer.zip_file_comment, }); Ok(ZipArchive { reader, shared }) } /// Extract a Zip archive into a directory, overwriting files if they /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`]. /// /// Extraction is not atomic; If an error is encountered, some of the files /// may be left on disk. pub fn extract>(&mut self, directory: P) -> ZipResult<()> { use std::fs; for i in 0..self.len() { let mut file = self.by_index(i)?; let filepath = file .enclosed_name() .ok_or(ZipError::InvalidArchive("Invalid file path"))?; let outpath = directory.as_ref().join(filepath); if file.name().ends_with('/') { fs::create_dir_all(&outpath)?; } else { if let Some(p) = outpath.parent() { if !p.exists() { fs::create_dir_all(p)?; } } let mut outfile = fs::File::create(&outpath)?; io::copy(&mut file, &mut outfile)?; } // Get and Set permissions #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; if let Some(mode) = file.unix_mode() { fs::set_permissions(&outpath, fs::Permissions::from_mode(mode))?; } } } Ok(()) } /// Number of files contained in this zip. pub fn len(&self) -> usize { self.shared.files.len() } /// Whether this zip archive contains no files pub fn is_empty(&self) -> bool { self.len() == 0 } /// Get the offset from the beginning of the underlying reader that this zip begins at, in bytes. /// /// Normally this value is zero, but if the zip has arbitrary data prepended to it, then this value will be the size /// of that prepended data. pub fn offset(&self) -> u64 { self.shared.offset } /// Get the comment of the zip archive. pub fn comment(&self) -> &[u8] { &self.shared.comment } /// Returns an iterator over all the file and directory names in this archive. pub fn file_names(&self) -> impl Iterator { self.shared.names_map.keys().map(|s| s.as_str()) } /// Search for a file entry by name, decrypt with given password /// /// # Warning /// /// The implementation of the cryptographic algorithms has not /// gone through a correctness review, and you should assume it is insecure: /// passwords used with this API may be compromised. /// /// This function sometimes accepts wrong password. This is because the ZIP spec only allows us /// to check for a 1/256 chance that the password is correct. /// There are many passwords out there that will also pass the validity checks /// we are able to perform. This is a weakness of the ZipCrypto algorithm, /// due to its fairly primitive approach to cryptography. 
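    ///
    /// A sketch of the call pattern (the archive path, entry name and password
    /// below are placeholders): the outer `ZipResult` carries I/O and format
    /// errors, while the inner `Result` reports a rejected password.
    ///
    /// ```no_run
    /// # use std::io::Read;
    /// # fn run() -> zip::result::ZipResult<()> {
    /// let mut archive = zip::ZipArchive::new(std::fs::File::open("archive.zip")?)?;
    /// match archive.by_name_decrypt("secret.txt", b"password")? {
    ///     Ok(mut file) => {
    ///         let mut contents = String::new();
    ///         file.read_to_string(&mut contents)?;
    ///     }
    ///     Err(zip::result::InvalidPassword) => eprintln!("wrong password"),
    /// }
    /// # Ok(())
    /// # }
    /// ```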
pub fn by_name_decrypt<'a>( &'a mut self, name: &str, password: &[u8], ) -> ZipResult, InvalidPassword>> { self.by_name_with_optional_password(name, Some(password)) } /// Search for a file entry by name pub fn by_name<'a>(&'a mut self, name: &str) -> ZipResult> { Ok(self.by_name_with_optional_password(name, None)?.unwrap()) } fn by_name_with_optional_password<'a>( &'a mut self, name: &str, password: Option<&[u8]>, ) -> ZipResult, InvalidPassword>> { let index = match self.shared.names_map.get(name) { Some(index) => *index, None => { return Err(ZipError::FileNotFound); } }; self.by_index_with_optional_password(index, password) } /// Get a contained file by index, decrypt with given password /// /// # Warning /// /// The implementation of the cryptographic algorithms has not /// gone through a correctness review, and you should assume it is insecure: /// passwords used with this API may be compromised. /// /// This function sometimes accepts wrong password. This is because the ZIP spec only allows us /// to check for a 1/256 chance that the password is correct. /// There are many passwords out there that will also pass the validity checks /// we are able to perform. This is a weakness of the ZipCrypto algorithm, /// due to its fairly primitive approach to cryptography. pub fn by_index_decrypt<'a>( &'a mut self, file_number: usize, password: &[u8], ) -> ZipResult, InvalidPassword>> { self.by_index_with_optional_password(file_number, Some(password)) } /// Get a contained file by index pub fn by_index(&mut self, file_number: usize) -> ZipResult> { Ok(self .by_index_with_optional_password(file_number, None)? .unwrap()) } /// Get a contained file by index without decompressing it pub fn by_index_raw(&mut self, file_number: usize) -> ZipResult> { let reader = &mut self.reader; self.shared .files .get(file_number) .ok_or(ZipError::FileNotFound) .and_then(move |data| { Ok(ZipFile { crypto_reader: None, reader: ZipFileReader::Raw(find_content(data, reader)?), data: Cow::Borrowed(data), }) }) } fn by_index_with_optional_password<'a>( &'a mut self, file_number: usize, mut password: Option<&[u8]>, ) -> ZipResult, InvalidPassword>> { let data = self .shared .files .get(file_number) .ok_or(ZipError::FileNotFound)?; match (password, data.encrypted) { (None, true) => return Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)), (Some(_), false) => password = None, //Password supplied, but none needed! Discard. _ => {} } let limit_reader = find_content(data, &mut self.reader)?; match make_crypto_reader( data.compression_method, data.crc32, data.last_modified_time, data.using_data_descriptor, limit_reader, password, data.aes_mode, #[cfg(feature = "aes-crypto")] data.compressed_size, ) { Ok(Ok(crypto_reader)) => Ok(Ok(ZipFile { crypto_reader: Some(crypto_reader), reader: ZipFileReader::NoReader, data: Cow::Borrowed(data), })), Err(e) => Err(e), Ok(Err(e)) => Ok(Err(e)), } } /// Unwrap and return the inner reader object /// /// The position of the reader is undefined. pub fn into_inner(self) -> R { self.reader } } fn unsupported_zip_error(detail: &'static str) -> ZipResult { Err(ZipError::UnsupportedArchive(detail)) } /// Parse a central directory entry to collect the information for the file. 
pub(crate) fn central_header_to_zip_file( reader: &mut R, archive_offset: u64, ) -> ZipResult { let central_header_start = reader.stream_position()?; // Parse central header let signature = reader.read_u32::()?; if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE { Err(ZipError::InvalidArchive("Invalid Central Directory header")) } else { central_header_to_zip_file_inner(reader, archive_offset, central_header_start) } } /// Parse a central directory entry to collect the information for the file. fn central_header_to_zip_file_inner( reader: &mut R, archive_offset: u64, central_header_start: u64, ) -> ZipResult { let version_made_by = reader.read_u16::()?; let _version_to_extract = reader.read_u16::()?; let flags = reader.read_u16::()?; let encrypted = flags & 1 == 1; let is_utf8 = flags & (1 << 11) != 0; let using_data_descriptor = flags & (1 << 3) != 0; let compression_method = reader.read_u16::()?; let last_mod_time = reader.read_u16::()?; let last_mod_date = reader.read_u16::()?; let crc32 = reader.read_u32::()?; let compressed_size = reader.read_u32::()?; let uncompressed_size = reader.read_u32::()?; let file_name_length = reader.read_u16::()? as usize; let extra_field_length = reader.read_u16::()? as usize; let file_comment_length = reader.read_u16::()? as usize; let _disk_number = reader.read_u16::()?; let _internal_file_attributes = reader.read_u16::()?; let external_file_attributes = reader.read_u32::()?; let offset = reader.read_u32::()? as u64; let mut file_name_raw = vec![0; file_name_length]; reader.read_exact(&mut file_name_raw)?; let mut extra_field = vec![0; extra_field_length]; reader.read_exact(&mut extra_field)?; let mut file_comment_raw = vec![0; file_comment_length]; reader.read_exact(&mut file_comment_raw)?; let file_name = match is_utf8 { true => String::from_utf8_lossy(&file_name_raw).into_owned(), false => file_name_raw.clone().from_cp437(), }; let file_comment = match is_utf8 { true => String::from_utf8_lossy(&file_comment_raw).into_owned(), false => file_comment_raw.from_cp437(), }; // Construct the result let mut result = ZipFileData { system: System::from_u8((version_made_by >> 8) as u8), version_made_by: version_made_by as u8, encrypted, using_data_descriptor, compression_method: { #[allow(deprecated)] CompressionMethod::from_u16(compression_method) }, compression_level: None, last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time), crc32, compressed_size: compressed_size as u64, uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, extra_field, file_comment, header_start: offset, central_header_start, data_start: AtomicU64::new(0), external_attributes: external_file_attributes, large_file: false, aes_mode: None, }; match parse_extra_field(&mut result) { Ok(..) | Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } let aes_enabled = result.compression_method == CompressionMethod::AES; if aes_enabled && result.aes_mode.is_none() { return Err(ZipError::InvalidArchive( "AES encryption without AES extra data field", )); } // Account for shifted zip offsets. 
result.header_start = result .header_start .checked_add(archive_offset) .ok_or(ZipError::InvalidArchive("Archive header is too large"))?; Ok(result) } fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> { let mut reader = io::Cursor::new(&file.extra_field); while (reader.position() as usize) < file.extra_field.len() { let kind = reader.read_u16::()?; let len = reader.read_u16::()?; let mut len_left = len as i64; match kind { // Zip64 extended information extra field 0x0001 => { if file.uncompressed_size == spec::ZIP64_BYTES_THR { file.large_file = true; file.uncompressed_size = reader.read_u64::()?; len_left -= 8; } if file.compressed_size == spec::ZIP64_BYTES_THR { file.large_file = true; file.compressed_size = reader.read_u64::()?; len_left -= 8; } if file.header_start == spec::ZIP64_BYTES_THR { file.header_start = reader.read_u64::()?; len_left -= 8; } } 0x9901 => { // AES if len != 7 { return Err(ZipError::UnsupportedArchive( "AES extra data field has an unsupported length", )); } let vendor_version = reader.read_u16::()?; let vendor_id = reader.read_u16::()?; let aes_mode = reader.read_u8()?; let compression_method = reader.read_u16::()?; if vendor_id != 0x4541 { return Err(ZipError::InvalidArchive("Invalid AES vendor")); } let vendor_version = match vendor_version { 0x0001 => AesVendorVersion::Ae1, 0x0002 => AesVendorVersion::Ae2, _ => return Err(ZipError::InvalidArchive("Invalid AES vendor version")), }; match aes_mode { 0x01 => file.aes_mode = Some((AesMode::Aes128, vendor_version)), 0x02 => file.aes_mode = Some((AesMode::Aes192, vendor_version)), 0x03 => file.aes_mode = Some((AesMode::Aes256, vendor_version)), _ => return Err(ZipError::InvalidArchive("Invalid AES encryption strength")), }; file.compression_method = { #[allow(deprecated)] CompressionMethod::from_u16(compression_method) }; } _ => { // Other fields are ignored } } // We could also check for < 0 to check for errors if len_left > 0 { reader.seek(io::SeekFrom::Current(len_left))?; } } Ok(()) } /// Methods for retrieving information on zip files impl<'a> ZipFile<'a> { fn get_reader(&mut self) -> &mut ZipFileReader<'a> { if let ZipFileReader::NoReader = self.reader { let data = &self.data; let crypto_reader = self.crypto_reader.take().expect("Invalid reader state"); self.reader = make_reader(data.compression_method, data.crc32, crypto_reader) } &mut self.reader } pub(crate) fn get_raw_reader(&mut self) -> &mut dyn Read { if let ZipFileReader::NoReader = self.reader { let crypto_reader = self.crypto_reader.take().expect("Invalid reader state"); self.reader = ZipFileReader::Raw(crypto_reader.into_inner()) } &mut self.reader } /// Get the version of the file pub fn version_made_by(&self) -> (u8, u8) { ( self.data.version_made_by / 10, self.data.version_made_by % 10, ) } /// Get the name of the file /// /// # Warnings /// /// It is dangerous to use this name directly when extracting an archive. /// It may contain an absolute path (`/etc/shadow`), or break out of the /// current directory (`../runtime`). Carelessly writing to these paths /// allows an attacker to craft a ZIP archive that will overwrite critical /// files. /// /// You can use the [`ZipFile::enclosed_name`] method to validate the name /// as a safe path. pub fn name(&self) -> &str { &self.data.file_name } /// Get the name of the file, in the raw (internal) byte representation. /// /// The encoding of this data is currently undefined. pub fn name_raw(&self) -> &[u8] { &self.data.file_name_raw } /// Get the name of the file in a sanitized form. 
It truncates the name to the first NULL byte, /// removes a leading '/' and removes '..' parts. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. `mangled_name` can be used if this behaviour is desirable" )] pub fn sanitized_name(&self) -> ::std::path::PathBuf { self.mangled_name() } /// Rewrite the path, ignoring any path components with special meaning. /// /// - Absolute paths are made relative /// - [`ParentDir`]s are ignored /// - Truncates the filename at a NULL byte /// /// This is appropriate if you need to be able to extract *something* from /// any archive, but will easily misrepresent trivial paths like /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this, /// [`ZipFile::enclosed_name`] is the better option in most scenarios. /// /// [`ParentDir`]: `Component::ParentDir` pub fn mangled_name(&self) -> ::std::path::PathBuf { self.data.file_name_sanitized() } /// Ensure the file path is safe to use as a [`Path`]. /// /// - It can't contain NULL bytes /// - It can't resolve to a path outside the current directory /// > `foo/../bar` is fine, `foo/../../bar` is not. /// - It can't be an absolute path /// /// This will read well-formed ZIP files correctly, and is resistant /// to path-based exploits. It is recommended over /// [`ZipFile::mangled_name`]. pub fn enclosed_name(&self) -> Option<&Path> { self.data.enclosed_name() } /// Get the comment of the file pub fn comment(&self) -> &str { &self.data.file_comment } /// Get the compression method used to store the file pub fn compression(&self) -> CompressionMethod { self.data.compression_method } /// Get the size of the file, in bytes, in the archive pub fn compressed_size(&self) -> u64 { self.data.compressed_size } /// Get the size of the file, in bytes, when uncompressed pub fn size(&self) -> u64 { self.data.uncompressed_size } /// Get the time the file was last modified pub fn last_modified(&self) -> DateTime { self.data.last_modified_time } /// Returns whether the file is actually a directory pub fn is_dir(&self) -> bool { self.name() .chars() .rev() .next() .map_or(false, |c| c == '/' || c == '\\') } /// Returns whether the file is a regular file pub fn is_file(&self) -> bool { !self.is_dir() } /// Get unix mode for the file pub fn unix_mode(&self) -> Option { self.data.unix_mode() } /// Get the CRC32 hash of the original file pub fn crc32(&self) -> u32 { self.data.crc32 } /// Get the extra data of the zip header for this file pub fn extra_data(&self) -> &[u8] { &self.data.extra_field } /// Get the starting offset of the data of the compressed file pub fn data_start(&self) -> u64 { self.data.data_start.load() } /// Get the starting offset of the zip header for this file pub fn header_start(&self) -> u64 { self.data.header_start } /// Get the starting offset of the zip header in the central directory for this file pub fn central_header_start(&self) -> u64 { self.data.central_header_start } } impl<'a> Read for ZipFile<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.get_reader().read(buf) } } impl<'a> Drop for ZipFile<'a> { fn drop(&mut self) { // self.data is Owned, this reader is constructed by a streaming reader. // In this case, we want to exhaust the reader so that the next file is accessible. if let Cow::Owned(_) = self.data { let mut buffer = [0; 1 << 16]; // Get the inner `Take` reader so all decryption, decompression and CRC calculation is skipped. 
let mut reader: std::io::Take<&mut dyn std::io::Read> = match &mut self.reader { ZipFileReader::NoReader => { let innerreader = ::std::mem::replace(&mut self.crypto_reader, None); innerreader.expect("Invalid reader state").into_inner() } reader => { let innerreader = ::std::mem::replace(reader, ZipFileReader::NoReader); innerreader.into_inner() } }; loop { match reader.read(&mut buffer) { Ok(0) => break, Ok(_) => (), Err(e) => { panic!("Could not consume all of the output of the current ZipFile: {e:?}") } } } } } } /// Read ZipFile structures from a non-seekable reader. /// /// This is an alternative method to read a zip file. If possible, use the ZipArchive functions /// as some information will be missing when reading this manner. /// /// Reads a file header from the start of the stream. Will return `Ok(Some(..))` if a file is /// present at the start of the stream. Returns `Ok(None)` if the start of the central directory /// is encountered. No more files should be read after this. /// /// The Drop implementation of ZipFile ensures that the reader will be correctly positioned after /// the structure is done. /// /// Missing fields are: /// * `comment`: set to an empty string /// * `data_start`: set to 0 /// * `external_attributes`: `unix_mode()`: will return None pub fn read_zipfile_from_stream<'a, R: io::Read>( reader: &'a mut R, ) -> ZipResult>> { let signature = reader.read_u32::()?; match signature { spec::LOCAL_FILE_HEADER_SIGNATURE => (), spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE => return Ok(None), _ => return Err(ZipError::InvalidArchive("Invalid local file header")), } let version_made_by = reader.read_u16::()?; let flags = reader.read_u16::()?; let encrypted = flags & 1 == 1; let is_utf8 = flags & (1 << 11) != 0; let using_data_descriptor = flags & (1 << 3) != 0; #[allow(deprecated)] let compression_method = CompressionMethod::from_u16(reader.read_u16::()?); let last_mod_time = reader.read_u16::()?; let last_mod_date = reader.read_u16::()?; let crc32 = reader.read_u32::()?; let compressed_size = reader.read_u32::()?; let uncompressed_size = reader.read_u32::()?; let file_name_length = reader.read_u16::()? as usize; let extra_field_length = reader.read_u16::()? as usize; let mut file_name_raw = vec![0; file_name_length]; reader.read_exact(&mut file_name_raw)?; let mut extra_field = vec![0; extra_field_length]; reader.read_exact(&mut extra_field)?; let file_name = match is_utf8 { true => String::from_utf8_lossy(&file_name_raw).into_owned(), false => file_name_raw.clone().from_cp437(), }; let mut result = ZipFileData { system: System::from_u8((version_made_by >> 8) as u8), version_made_by: version_made_by as u8, encrypted, using_data_descriptor, compression_method, compression_level: None, last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time), crc32, compressed_size: compressed_size as u64, uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, extra_field, file_comment: String::new(), // file comment is only available in the central directory // header_start and data start are not available, but also don't matter, since seeking is // not available. header_start: 0, data_start: AtomicU64::new(0), central_header_start: 0, // The external_attributes field is only available in the central directory. // We set this to zero, which should be valid as the docs state 'If input came // from standard input, this field is set to zero.' external_attributes: 0, large_file: false, aes_mode: None, }; match parse_extra_field(&mut result) { Ok(..) 
| Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } if encrypted { return unsupported_zip_error("Encrypted files are not supported"); } if using_data_descriptor { return unsupported_zip_error("The file length is not available in the local header"); } let limit_reader = (reader as &'a mut dyn io::Read).take(result.compressed_size); let result_crc32 = result.crc32; let result_compression_method = result.compression_method; let crypto_reader = make_crypto_reader( result_compression_method, result_crc32, result.last_modified_time, result.using_data_descriptor, limit_reader, None, None, #[cfg(feature = "aes-crypto")] result.compressed_size, )? .unwrap(); Ok(Some(ZipFile { data: Cow::Owned(result), crypto_reader: None, reader: make_reader(result_compression_method, result_crc32, crypto_reader), })) } #[cfg(test)] mod test { #[test] fn invalid_offset() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/invalid_offset.zip")); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } #[test] fn invalid_offset2() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/invalid_offset2.zip")); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } #[test] fn zip64_with_leading_junk() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/zip64_demo.zip")); let reader = ZipArchive::new(io::Cursor::new(v)).unwrap(); assert_eq!(reader.len(), 1); } #[test] fn zip_contents() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader = ZipArchive::new(io::Cursor::new(v)).unwrap(); assert_eq!(reader.comment(), b""); assert_eq!(reader.by_index(0).unwrap().central_header_start(), 77); } #[test] fn zip_read_streaming() { use super::read_zipfile_from_stream; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader = io::Cursor::new(v); loop { if read_zipfile_from_stream(&mut reader).unwrap().is_none() { break; } } } #[test] fn zip_clone() { use super::ZipArchive; use std::io::{self, Read}; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader1 = ZipArchive::new(io::Cursor::new(v)).unwrap(); let mut reader2 = reader1.clone(); let mut file1 = reader1.by_index(0).unwrap(); let mut file2 = reader2.by_index(0).unwrap(); let t = file1.last_modified(); assert_eq!( ( t.year(), t.month(), t.day(), t.hour(), t.minute(), t.second() ), (1980, 1, 1, 0, 0, 0) ); let mut buf1 = [0; 5]; let mut buf2 = [0; 5]; let mut buf3 = [0; 5]; let mut buf4 = [0; 5]; file1.read_exact(&mut buf1).unwrap(); file2.read_exact(&mut buf2).unwrap(); file1.read_exact(&mut buf3).unwrap(); file2.read_exact(&mut buf4).unwrap(); assert_eq!(buf1, buf2); assert_eq!(buf3, buf4); assert_ne!(buf1, buf3); } #[test] fn file_and_dir_predicates() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/files_and_dirs.zip")); let mut zip = ZipArchive::new(io::Cursor::new(v)).unwrap(); for i in 0..zip.len() { let zip_file = zip.by_index(i).unwrap(); let full_name = zip_file.enclosed_name().unwrap(); let file_name = full_name.file_name().unwrap().to_str().unwrap(); assert!( (file_name.starts_with("dir") && zip_file.is_dir()) || (file_name.starts_with("file") && zip_file.is_file()) ); } } /// test 
case to ensure we don't preemptively over allocate based on the /// declared number of files in the CDE of an invalid zip when the number of /// files declared is more than the alleged offset in the CDE #[test] fn invalid_cde_number_of_files_allocation_smaller_offset() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!( "../tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip" )); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } /// test case to ensure we don't preemptively over allocate based on the /// declared number of files in the CDE of an invalid zip when the number of /// files declared is less than the alleged offset in the CDE #[test] fn invalid_cde_number_of_files_allocation_greater_offset() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!( "../tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip" )); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } } zip-0.6.6/src/result.rs000064400000000000000000000051730072674642500131500ustar 00000000000000//! Error types that can be emitted from this library use std::error::Error; use std::fmt; use std::io; /// Generic result type with ZipError as its error variant pub type ZipResult = Result; /// The given password is wrong #[derive(Debug)] pub struct InvalidPassword; impl fmt::Display for InvalidPassword { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "invalid password for file in archive") } } impl Error for InvalidPassword {} /// Error type for Zip #[derive(Debug)] pub enum ZipError { /// An Error caused by I/O Io(io::Error), /// This file is probably not a zip archive InvalidArchive(&'static str), /// This archive is not supported UnsupportedArchive(&'static str), /// The requested file could not be found in the archive FileNotFound, } impl From for ZipError { fn from(err: io::Error) -> ZipError { ZipError::Io(err) } } impl fmt::Display for ZipError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match self { ZipError::Io(err) => write!(fmt, "{err}"), ZipError::InvalidArchive(err) => write!(fmt, "invalid Zip archive: {err}"), ZipError::UnsupportedArchive(err) => write!(fmt, "unsupported Zip archive: {err}"), ZipError::FileNotFound => write!(fmt, "specified file not found in archive"), } } } impl Error for ZipError { fn source(&self) -> Option<&(dyn Error + 'static)> { match self { ZipError::Io(err) => Some(err), _ => None, } } } impl ZipError { /// The text used as an error when a password is required and not supplied /// /// ```rust,no_run /// # use zip::result::ZipError; /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap(); /// match archive.by_index(1) { /// Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)) => eprintln!("a password is needed to unzip this file"), /// _ => (), /// } /// # () /// ``` pub const PASSWORD_REQUIRED: &'static str = "Password required to decrypt file"; } impl From for io::Error { fn from(err: ZipError) -> io::Error { io::Error::new(io::ErrorKind::Other, err) } } /// Error type for time parsing #[derive(Debug)] pub struct DateTimeRangeError; impl fmt::Display for DateTimeRangeError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!( fmt, "a date could not be represented within the bounds the MS-DOS date range (1980-2107)" ) } } impl Error for DateTimeRangeError {} 
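// An illustrative call-site sketch (not part of the crate): distinguishing the
// `ZipError` variants when opening an archive; `reader` stands in for any
// `Read + Seek` source.
//
//     use zip::result::ZipError;
//     match zip::ZipArchive::new(reader) {
//         Ok(archive) => println!("{} entries", archive.len()),
//         Err(ZipError::Io(e)) => eprintln!("I/O error: {e}"),
//         Err(ZipError::InvalidArchive(msg)) => eprintln!("invalid Zip archive: {msg}"),
//         Err(ZipError::UnsupportedArchive(msg)) => eprintln!("unsupported Zip archive: {msg}"),
//         Err(ZipError::FileNotFound) => eprintln!("file not found in archive"),
//     }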
zip-0.6.6/src/spec.rs000064400000000000000000000221540072674642500125620ustar 00000000000000use crate::result::{ZipError, ZipResult}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use std::io; use std::io::prelude::*; pub const LOCAL_FILE_HEADER_SIGNATURE: u32 = 0x04034b50; pub const CENTRAL_DIRECTORY_HEADER_SIGNATURE: u32 = 0x02014b50; const CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06054b50; pub const ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06064b50; const ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE: u32 = 0x07064b50; pub const ZIP64_BYTES_THR: u64 = u32::MAX as u64; pub const ZIP64_ENTRY_THR: usize = u16::MAX as usize; pub struct CentralDirectoryEnd { pub disk_number: u16, pub disk_with_central_directory: u16, pub number_of_files_on_this_disk: u16, pub number_of_files: u16, pub central_directory_size: u32, pub central_directory_offset: u32, pub zip_file_comment: Vec, } impl CentralDirectoryEnd { // Per spec 4.4.1.4 - a CentralDirectoryEnd field might be insufficient to hold the // required data. In this case the file SHOULD contain a ZIP64 format record // and the field of this record will be set to -1 pub(crate) fn record_too_small(&self) -> bool { self.disk_number == 0xFFFF || self.disk_with_central_directory == 0xFFFF || self.number_of_files_on_this_disk == 0xFFFF || self.number_of_files == 0xFFFF || self.central_directory_size == 0xFFFFFFFF || self.central_directory_offset == 0xFFFFFFFF } pub fn parse(reader: &mut T) -> ZipResult { let magic = reader.read_u32::()?; if magic != CENTRAL_DIRECTORY_END_SIGNATURE { return Err(ZipError::InvalidArchive("Invalid digital signature header")); } let disk_number = reader.read_u16::()?; let disk_with_central_directory = reader.read_u16::()?; let number_of_files_on_this_disk = reader.read_u16::()?; let number_of_files = reader.read_u16::()?; let central_directory_size = reader.read_u32::()?; let central_directory_offset = reader.read_u32::()?; let zip_file_comment_length = reader.read_u16::()? as usize; let mut zip_file_comment = vec![0; zip_file_comment_length]; reader.read_exact(&mut zip_file_comment)?; Ok(CentralDirectoryEnd { disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment, }) } pub fn find_and_parse( reader: &mut T, ) -> ZipResult<(CentralDirectoryEnd, u64)> { const HEADER_SIZE: u64 = 22; const BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE: u64 = HEADER_SIZE - 6; let file_length = reader.seek(io::SeekFrom::End(0))?; let search_upper_bound = file_length.saturating_sub(HEADER_SIZE + ::std::u16::MAX as u64); if file_length < HEADER_SIZE { return Err(ZipError::InvalidArchive("Invalid zip header")); } let mut pos = file_length - HEADER_SIZE; while pos >= search_upper_bound { reader.seek(io::SeekFrom::Start(pos))?; if reader.read_u32::()? 
== CENTRAL_DIRECTORY_END_SIGNATURE { reader.seek(io::SeekFrom::Current( BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE as i64, ))?; let cde_start_pos = reader.seek(io::SeekFrom::Start(pos))?; return CentralDirectoryEnd::parse(reader).map(|cde| (cde, cde_start_pos)); } pos = match pos.checked_sub(1) { Some(p) => p, None => break, }; } Err(ZipError::InvalidArchive( "Could not find central directory end", )) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(CENTRAL_DIRECTORY_END_SIGNATURE)?; writer.write_u16::(self.disk_number)?; writer.write_u16::(self.disk_with_central_directory)?; writer.write_u16::(self.number_of_files_on_this_disk)?; writer.write_u16::(self.number_of_files)?; writer.write_u32::(self.central_directory_size)?; writer.write_u32::(self.central_directory_offset)?; writer.write_u16::(self.zip_file_comment.len() as u16)?; writer.write_all(&self.zip_file_comment)?; Ok(()) } } pub struct Zip64CentralDirectoryEndLocator { pub disk_with_central_directory: u32, pub end_of_central_directory_offset: u64, pub number_of_disks: u32, } impl Zip64CentralDirectoryEndLocator { pub fn parse(reader: &mut T) -> ZipResult { let magic = reader.read_u32::()?; if magic != ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE { return Err(ZipError::InvalidArchive( "Invalid zip64 locator digital signature header", )); } let disk_with_central_directory = reader.read_u32::()?; let end_of_central_directory_offset = reader.read_u64::()?; let number_of_disks = reader.read_u32::()?; Ok(Zip64CentralDirectoryEndLocator { disk_with_central_directory, end_of_central_directory_offset, number_of_disks, }) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?; writer.write_u32::(self.disk_with_central_directory)?; writer.write_u64::(self.end_of_central_directory_offset)?; writer.write_u32::(self.number_of_disks)?; Ok(()) } } pub struct Zip64CentralDirectoryEnd { pub version_made_by: u16, pub version_needed_to_extract: u16, pub disk_number: u32, pub disk_with_central_directory: u32, pub number_of_files_on_this_disk: u64, pub number_of_files: u64, pub central_directory_size: u64, pub central_directory_offset: u64, //pub extensible_data_sector: Vec, <-- We don't do anything with this at the moment. } impl Zip64CentralDirectoryEnd { pub fn find_and_parse( reader: &mut T, nominal_offset: u64, search_upper_bound: u64, ) -> ZipResult<(Zip64CentralDirectoryEnd, u64)> { let mut pos = nominal_offset; while pos <= search_upper_bound { reader.seek(io::SeekFrom::Start(pos))?; if reader.read_u32::()? == ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE { let archive_offset = pos - nominal_offset; let _record_size = reader.read_u64::()?; // We would use this value if we did anything with the "zip64 extensible data sector". 
let version_made_by = reader.read_u16::()?; let version_needed_to_extract = reader.read_u16::()?; let disk_number = reader.read_u32::()?; let disk_with_central_directory = reader.read_u32::()?; let number_of_files_on_this_disk = reader.read_u64::()?; let number_of_files = reader.read_u64::()?; let central_directory_size = reader.read_u64::()?; let central_directory_offset = reader.read_u64::()?; return Ok(( Zip64CentralDirectoryEnd { version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, }, archive_offset, )); } pos += 1; } Err(ZipError::InvalidArchive( "Could not find ZIP64 central directory end", )) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?; writer.write_u64::(44)?; // record size writer.write_u16::(self.version_made_by)?; writer.write_u16::(self.version_needed_to_extract)?; writer.write_u32::(self.disk_number)?; writer.write_u32::(self.disk_with_central_directory)?; writer.write_u64::(self.number_of_files_on_this_disk)?; writer.write_u64::(self.number_of_files)?; writer.write_u64::(self.central_directory_size)?; writer.write_u64::(self.central_directory_offset)?; Ok(()) } } zip-0.6.6/src/types.rs000064400000000000000000000451000072674642500127700ustar 00000000000000//! Types that specify what is contained in a ZIP. use std::path; #[cfg(not(any( all(target_arch = "arm", target_pointer_width = "32"), target_arch = "mips", target_arch = "powerpc" )))] use std::sync::atomic; #[cfg(not(feature = "time"))] use std::time::SystemTime; #[cfg(doc)] use {crate::read::ZipFile, crate::write::FileOptions}; mod ffi { pub const S_IFDIR: u32 = 0o0040000; pub const S_IFREG: u32 = 0o0100000; } #[cfg(any( all(target_arch = "arm", target_pointer_width = "32"), target_arch = "mips", target_arch = "powerpc" ))] mod atomic { use crossbeam_utils::sync::ShardedLock; pub use std::sync::atomic::Ordering; #[derive(Debug, Default)] pub struct AtomicU64 { value: ShardedLock, } impl AtomicU64 { pub fn new(v: u64) -> Self { Self { value: ShardedLock::new(v), } } pub fn get_mut(&mut self) -> &mut u64 { self.value.get_mut().unwrap() } pub fn load(&self, _: Ordering) -> u64 { *self.value.read().unwrap() } pub fn store(&self, value: u64, _: Ordering) { *self.value.write().unwrap() = value; } } } #[cfg(feature = "time")] use crate::result::DateTimeRangeError; #[cfg(feature = "time")] use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time}; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum System { Dos = 0, Unix = 3, Unknown, } impl System { pub fn from_u8(system: u8) -> System { use self::System::*; match system { 0 => Dos, 3 => Unix, _ => Unknown, } } } /// Representation of a moment in time. /// /// Zip files use an old format from DOS to store timestamps, /// with its own set of peculiarities. /// For example, it has a resolution of 2 seconds! /// /// A [`DateTime`] can be stored directly in a zipfile with [`FileOptions::last_modified_time`], /// or read from one with [`ZipFile::last_modified`] /// /// # Warning /// /// Because there is no timezone associated with the [`DateTime`], they should ideally only /// be used for user-facing descriptions. This also means [`DateTime::to_time`] returns an /// [`OffsetDateTime`] (which is the equivalent of chrono's `NaiveDateTime`). 
/// /// Modern zip files store more precise timestamps, which are ignored by [`crate::read::ZipArchive`], /// so keep in mind that these timestamps are unreliable. [We're working on this](https://github.com/zip-rs/zip/issues/156#issuecomment-652981904). #[derive(Debug, Clone, Copy)] pub struct DateTime { year: u16, month: u8, day: u8, hour: u8, minute: u8, second: u8, } impl ::std::default::Default for DateTime { /// Constructs an 'default' datetime of 1980-01-01 00:00:00 fn default() -> DateTime { DateTime { year: 1980, month: 1, day: 1, hour: 0, minute: 0, second: 0, } } } impl DateTime { /// Converts an msdos (u16, u16) pair to a DateTime object pub fn from_msdos(datepart: u16, timepart: u16) -> DateTime { let seconds = (timepart & 0b0000000000011111) << 1; let minutes = (timepart & 0b0000011111100000) >> 5; let hours = (timepart & 0b1111100000000000) >> 11; let days = datepart & 0b0000000000011111; let months = (datepart & 0b0000000111100000) >> 5; let years = (datepart & 0b1111111000000000) >> 9; DateTime { year: years + 1980, month: months as u8, day: days as u8, hour: hours as u8, minute: minutes as u8, second: seconds as u8, } } /// Constructs a DateTime from a specific date and time /// /// The bounds are: /// * year: [1980, 2107] /// * month: [1, 12] /// * day: [1, 31] /// * hour: [0, 23] /// * minute: [0, 59] /// * second: [0, 60] #[allow(clippy::result_unit_err)] pub fn from_date_and_time( year: u16, month: u8, day: u8, hour: u8, minute: u8, second: u8, ) -> Result { if (1980..=2107).contains(&year) && (1..=12).contains(&month) && (1..=31).contains(&day) && hour <= 23 && minute <= 59 && second <= 60 { Ok(DateTime { year, month, day, hour, minute, second, }) } else { Err(()) } } #[cfg(feature = "time")] /// Converts a OffsetDateTime object to a DateTime /// /// Returns `Err` when this object is out of bounds #[allow(clippy::result_unit_err)] #[deprecated(note = "use `DateTime::try_from()`")] pub fn from_time(dt: OffsetDateTime) -> Result { dt.try_into().map_err(|_err| ()) } /// Gets the time portion of this datetime in the msdos representation pub fn timepart(&self) -> u16 { ((self.second as u16) >> 1) | ((self.minute as u16) << 5) | ((self.hour as u16) << 11) } /// Gets the date portion of this datetime in the msdos representation pub fn datepart(&self) -> u16 { (self.day as u16) | ((self.month as u16) << 5) | ((self.year - 1980) << 9) } #[cfg(feature = "time")] /// Converts the DateTime to a OffsetDateTime structure pub fn to_time(&self) -> Result { let date = Date::from_calendar_date(self.year as i32, Month::try_from(self.month)?, self.day)?; let time = Time::from_hms(self.hour, self.minute, self.second)?; Ok(PrimitiveDateTime::new(date, time).assume_utc()) } /// Get the year. There is no epoch, i.e. 2018 will be returned as 2018. 
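    ///
    /// For example (an illustrative round trip using values within this
    /// module's documented bounds):
    ///
    /// ```
    /// let dt = zip::DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap();
    /// assert_eq!(dt.year(), 2018);
    /// assert_eq!(dt.month(), 11);
    /// ```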
pub fn year(&self) -> u16 { self.year } /// Get the month, where 1 = january and 12 = december /// /// # Warning /// /// When read from a zip file, this may not be a reasonable value pub fn month(&self) -> u8 { self.month } /// Get the day /// /// # Warning /// /// When read from a zip file, this may not be a reasonable value pub fn day(&self) -> u8 { self.day } /// Get the hour /// /// # Warning /// /// When read from a zip file, this may not be a reasonable value pub fn hour(&self) -> u8 { self.hour } /// Get the minute /// /// # Warning /// /// When read from a zip file, this may not be a reasonable value pub fn minute(&self) -> u8 { self.minute } /// Get the second /// /// # Warning /// /// When read from a zip file, this may not be a reasonable value pub fn second(&self) -> u8 { self.second } } #[cfg(feature = "time")] impl TryFrom for DateTime { type Error = DateTimeRangeError; fn try_from(dt: OffsetDateTime) -> Result { if dt.year() >= 1980 && dt.year() <= 2107 { Ok(DateTime { year: (dt.year()) as u16, month: (dt.month()) as u8, day: dt.day(), hour: dt.hour(), minute: dt.minute(), second: dt.second(), }) } else { Err(DateTimeRangeError) } } } pub const DEFAULT_VERSION: u8 = 46; /// A type like `AtomicU64` except it implements `Clone` and has predefined /// ordering. /// /// It uses `Relaxed` ordering because it is not used for synchronisation. #[derive(Debug)] pub struct AtomicU64(atomic::AtomicU64); impl AtomicU64 { pub fn new(v: u64) -> Self { Self(atomic::AtomicU64::new(v)) } pub fn load(&self) -> u64 { self.0.load(atomic::Ordering::Relaxed) } pub fn store(&self, val: u64) { self.0.store(val, atomic::Ordering::Relaxed) } pub fn get_mut(&mut self) -> &mut u64 { self.0.get_mut() } } impl Clone for AtomicU64 { fn clone(&self) -> Self { Self(atomic::AtomicU64::new(self.load())) } } /// Structure representing a ZIP file. #[derive(Debug, Clone)] pub struct ZipFileData { /// Compatibility of the file attribute information pub system: System, /// Specification version pub version_made_by: u8, /// True if the file is encrypted. pub encrypted: bool, /// True if the file uses a data-descriptor section pub using_data_descriptor: bool, /// Compression method used to store the file pub compression_method: crate::compression::CompressionMethod, /// Compression level to store the file pub compression_level: Option, /// Last modified time. This will only have a 2 second precision. pub last_modified_time: DateTime, /// CRC32 checksum pub crc32: u32, /// Size of the file in the ZIP pub compressed_size: u64, /// Size of the file when extracted pub uncompressed_size: u64, /// Name of the file pub file_name: String, /// Raw file name. To be used when file_name was incorrectly decoded. 
pub file_name_raw: Vec, /// Extra field usually used for storage expansion pub extra_field: Vec, /// File comment pub file_comment: String, /// Specifies where the local header of the file starts pub header_start: u64, /// Specifies where the central header of the file starts /// /// Note that when this is not known, it is set to 0 pub central_header_start: u64, /// Specifies where the compressed data of the file starts pub data_start: AtomicU64, /// External file attributes pub external_attributes: u32, /// Reserve local ZIP64 extra field pub large_file: bool, /// AES mode if applicable pub aes_mode: Option<(AesMode, AesVendorVersion)>, } impl ZipFileData { pub fn file_name_sanitized(&self) -> ::std::path::PathBuf { let no_null_filename = match self.file_name.find('\0') { Some(index) => &self.file_name[0..index], None => &self.file_name, } .to_string(); // zip files can contain both / and \ as separators regardless of the OS // and as we want to return a sanitized PathBuf that only supports the // OS separator let's convert incompatible separators to compatible ones let separator = ::std::path::MAIN_SEPARATOR; let opposite_separator = match separator { '/' => '\\', _ => '/', }; let filename = no_null_filename.replace(&opposite_separator.to_string(), &separator.to_string()); ::std::path::Path::new(&filename) .components() .filter(|component| matches!(*component, ::std::path::Component::Normal(..))) .fold(::std::path::PathBuf::new(), |mut path, ref cur| { path.push(cur.as_os_str()); path }) } pub(crate) fn enclosed_name(&self) -> Option<&path::Path> { if self.file_name.contains('\0') { return None; } let path = path::Path::new(&self.file_name); let mut depth = 0usize; for component in path.components() { match component { path::Component::Prefix(_) | path::Component::RootDir => return None, path::Component::ParentDir => depth = depth.checked_sub(1)?, path::Component::Normal(_) => depth += 1, path::Component::CurDir => (), } } Some(path) } /// Get unix mode for the file pub(crate) fn unix_mode(&self) -> Option { if self.external_attributes == 0 { return None; } match self.system { System::Unix => Some(self.external_attributes >> 16), System::Dos => { // Interpret MS-DOS directory bit let mut mode = if 0x10 == (self.external_attributes & 0x10) { ffi::S_IFDIR | 0o0775 } else { ffi::S_IFREG | 0o0664 }; if 0x01 == (self.external_attributes & 0x01) { // Read-only bit; strip write permissions mode &= 0o0555; } Some(mode) } _ => None, } } pub fn zip64_extension(&self) -> bool { self.uncompressed_size > 0xFFFFFFFF || self.compressed_size > 0xFFFFFFFF || self.header_start > 0xFFFFFFFF } pub fn version_needed(&self) -> u16 { // higher versions matched first match (self.zip64_extension(), self.compression_method) { #[cfg(feature = "bzip2")] (_, crate::compression::CompressionMethod::Bzip2) => 46, (true, _) => 45, _ => 20, } } } /// The encryption specification used to encrypt a file with AES. /// /// According to the [specification](https://www.winzip.com/win/en/aes_info.html#winzip11) AE-2 /// does not make use of the CRC check. #[derive(Copy, Clone, Debug)] pub enum AesVendorVersion { Ae1, Ae2, } /// AES variant used. 
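// For AES-encrypted entries, `AesVendorVersion` above distinguishes AE-1 (which still
// stores a CRC-32) from AE-2 (which does not), and the `AesMode` enum that follows maps
// each variant to its key size: 16, 24 or 32 bytes for AES-128/192/256, with a salt of
// half the key length (8, 12 or 16 bytes). A rough sketch of that relationship,
// assuming the "aes-crypto" feature is enabled (illustration only):
//
//     assert_eq!(AesMode::Aes128.key_length(), 16);
//     assert_eq!(AesMode::Aes128.salt_length(), 8);
//     assert_eq!(AesMode::Aes256.salt_length(), 16);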
#[derive(Copy, Clone, Debug)] pub enum AesMode { Aes128, Aes192, Aes256, } #[cfg(feature = "aes-crypto")] impl AesMode { pub fn salt_length(&self) -> usize { self.key_length() / 2 } pub fn key_length(&self) -> usize { match self { Self::Aes128 => 16, Self::Aes192 => 24, Self::Aes256 => 32, } } } #[cfg(test)] mod test { #[test] fn system() { use super::System; assert_eq!(System::Dos as u16, 0u16); assert_eq!(System::Unix as u16, 3u16); assert_eq!(System::from_u8(0), System::Dos); assert_eq!(System::from_u8(3), System::Unix); } #[test] fn sanitize() { use super::*; let file_name = "/path/../../../../etc/./passwd\0/etc/shadow".to_string(); let data = ZipFileData { system: System::Dos, version_made_by: 0, encrypted: false, using_data_descriptor: false, compression_method: crate::compression::CompressionMethod::Stored, compression_level: None, last_modified_time: DateTime::default(), crc32: 0, compressed_size: 0, uncompressed_size: 0, file_name: file_name.clone(), file_name_raw: file_name.into_bytes(), extra_field: Vec::new(), file_comment: String::new(), header_start: 0, data_start: AtomicU64::new(0), central_header_start: 0, external_attributes: 0, large_file: false, aes_mode: None, }; assert_eq!( data.file_name_sanitized(), ::std::path::PathBuf::from("path/etc/passwd") ); } #[test] #[allow(clippy::unusual_byte_groupings)] fn datetime_default() { use super::DateTime; let dt = DateTime::default(); assert_eq!(dt.timepart(), 0); assert_eq!(dt.datepart(), 0b0000000_0001_00001); } #[test] #[allow(clippy::unusual_byte_groupings)] fn datetime_max() { use super::DateTime; let dt = DateTime::from_date_and_time(2107, 12, 31, 23, 59, 60).unwrap(); assert_eq!(dt.timepart(), 0b10111_111011_11110); assert_eq!(dt.datepart(), 0b1111111_1100_11111); } #[test] fn datetime_bounds() { use super::DateTime; assert!(DateTime::from_date_and_time(2000, 1, 1, 23, 59, 60).is_ok()); assert!(DateTime::from_date_and_time(2000, 1, 1, 24, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 60, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 0, 61).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1980, 1, 1, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1979, 1, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 0, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 1, 0, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2108, 12, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 13, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 32, 0, 0, 0).is_err()); } #[cfg(feature = "time")] use time::{format_description::well_known::Rfc3339, OffsetDateTime}; #[cfg(feature = "time")] #[test] fn datetime_try_from_bounds() { use std::convert::TryFrom; use super::DateTime; use time::macros::datetime; // 1979-12-31 23:59:59 assert!(DateTime::try_from(datetime!(1979-12-31 23:59:59 UTC)).is_err()); // 1980-01-01 00:00:00 assert!(DateTime::try_from(datetime!(1980-01-01 00:00:00 UTC)).is_ok()); // 2107-12-31 23:59:59 assert!(DateTime::try_from(datetime!(2107-12-31 23:59:59 UTC)).is_ok()); // 2108-01-01 00:00:00 assert!(DateTime::try_from(datetime!(2108-01-01 00:00:00 UTC)).is_err()); } #[test] fn time_conversion() { use super::DateTime; let dt = DateTime::from_msdos(0x4D71, 0x54CF); assert_eq!(dt.year(), 2018); assert_eq!(dt.month(), 11); assert_eq!(dt.day(), 17); assert_eq!(dt.hour(), 10); assert_eq!(dt.minute(), 38); assert_eq!(dt.second(), 30); 
#[cfg(feature = "time")] assert_eq!( dt.to_time().unwrap().format(&Rfc3339).unwrap(), "2018-11-17T10:38:30Z" ); } #[test] fn time_out_of_bounds() { use super::DateTime; let dt = DateTime::from_msdos(0xFFFF, 0xFFFF); assert_eq!(dt.year(), 2107); assert_eq!(dt.month(), 15); assert_eq!(dt.day(), 31); assert_eq!(dt.hour(), 31); assert_eq!(dt.minute(), 63); assert_eq!(dt.second(), 62); #[cfg(feature = "time")] assert!(dt.to_time().is_err()); let dt = DateTime::from_msdos(0x0000, 0x0000); assert_eq!(dt.year(), 1980); assert_eq!(dt.month(), 0); assert_eq!(dt.day(), 0); assert_eq!(dt.hour(), 0); assert_eq!(dt.minute(), 0); assert_eq!(dt.second(), 0); #[cfg(feature = "time")] assert!(dt.to_time().is_err()); } #[cfg(feature = "time")] #[test] fn time_at_january() { use super::DateTime; use std::convert::TryFrom; // 2020-01-01 00:00:00 let clock = OffsetDateTime::from_unix_timestamp(1_577_836_800).unwrap(); assert!(DateTime::try_from(clock).is_ok()); } } zip-0.6.6/src/unstable.rs000064400000000000000000000013230072674642500134400ustar 00000000000000/// Provides high level API for reading from a stream. pub mod stream { pub use crate::read::stream::*; } /// Types for creating ZIP archives. pub mod write { use crate::write::FileOptions; /// Unstable methods for [`FileOptions`]. pub trait FileOptionsExt { /// Write the file with the given password using the deprecated ZipCrypto algorithm. /// /// This is not recommended for new archives, as ZipCrypto is not secure. fn with_deprecated_encryption(self, password: &[u8]) -> Self; } impl FileOptionsExt for FileOptions { fn with_deprecated_encryption(self, password: &[u8]) -> Self { self.with_deprecated_encryption(password) } } }zip-0.6.6/src/write.rs000064400000000000000000001541330072674642500127650ustar 00000000000000//! Types for creating ZIP archives use crate::compression::CompressionMethod; use crate::read::{central_header_to_zip_file, ZipArchive, ZipFile}; use crate::result::{ZipError, ZipResult}; use crate::spec; use crate::types::{AtomicU64, DateTime, System, ZipFileData, DEFAULT_VERSION}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crc32fast::Hasher; use std::convert::TryInto; use std::default::Default; use std::io; use std::io::prelude::*; use std::mem; #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] use flate2::write::DeflateEncoder; #[cfg(feature = "bzip2")] use bzip2::write::BzEncoder; #[cfg(feature = "time")] use time::OffsetDateTime; #[cfg(feature = "zstd")] use zstd::stream::write::Encoder as ZstdEncoder; enum MaybeEncrypted { Unencrypted(W), Encrypted(crate::zipcrypto::ZipCryptoWriter), } impl Write for MaybeEncrypted { fn write(&mut self, buf: &[u8]) -> io::Result { match self { MaybeEncrypted::Unencrypted(w) => w.write(buf), MaybeEncrypted::Encrypted(w) => w.write(buf), } } fn flush(&mut self) -> io::Result<()> { match self { MaybeEncrypted::Unencrypted(w) => w.flush(), MaybeEncrypted::Encrypted(w) => w.flush(), } } } enum GenericZipWriter { Closed, Storer(MaybeEncrypted), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflater(DeflateEncoder>), #[cfg(feature = "bzip2")] Bzip2(BzEncoder>), #[cfg(feature = "zstd")] Zstd(ZstdEncoder<'static, MaybeEncrypted>), } // Put the struct declaration in a private module to convince rustdoc to display ZipWriter nicely pub(crate) mod zip_writer { use super::*; /// ZIP archive generator /// /// Handles the bookkeeping involved in building an archive, and provides an /// API to edit its contents. 
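    // Besides the plain-text example below, entries can also be written with the legacy
    // ZipCrypto scheme through the `FileOptionsExt` trait in `zip::unstable::write`
    // shown earlier in this crate. A rough sketch, not a doc-test (ZipCrypto is weak and
    // only useful for compatibility with old readers; file name and password are
    // placeholders):
    //
    //     use zip::unstable::write::FileOptionsExt;
    //     let options = zip::write::FileOptions::default()
    //         .with_deprecated_encryption(b"password");
    //     zip.start_file("secret.txt", options)?;
    //     zip.write_all(b"hello")?;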
/// /// ``` /// # fn doit() -> zip::result::ZipResult<()> /// # { /// # use zip::ZipWriter; /// use std::io::Write; /// use zip::write::FileOptions; /// /// // We use a buffer here, though you'd normally use a `File` /// let mut buf = [0; 65536]; /// let mut zip = zip::ZipWriter::new(std::io::Cursor::new(&mut buf[..])); /// /// let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); /// zip.start_file("hello_world.txt", options)?; /// zip.write(b"Hello, World!")?; /// /// // Apply the changes you've made. /// // Dropping the `ZipWriter` will have the same effect, but may silently fail /// zip.finish()?; /// /// # Ok(()) /// # } /// # doit().unwrap(); /// ``` pub struct ZipWriter { pub(super) inner: GenericZipWriter, pub(super) files: Vec, pub(super) stats: ZipWriterStats, pub(super) writing_to_file: bool, pub(super) writing_to_extra_field: bool, pub(super) writing_to_central_extra_field_only: bool, pub(super) writing_raw: bool, pub(super) comment: Vec, } } pub use zip_writer::ZipWriter; #[derive(Default)] struct ZipWriterStats { hasher: Hasher, start: u64, bytes_written: u64, } struct ZipRawValues { crc32: u32, compressed_size: u64, uncompressed_size: u64, } /// Metadata for a file to be written #[derive(Copy, Clone)] pub struct FileOptions { compression_method: CompressionMethod, compression_level: Option, last_modified_time: DateTime, permissions: Option, large_file: bool, encrypt_with: Option, } impl FileOptions { /// Set the compression method for the new file /// /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is /// disabled, `CompressionMethod::Stored` becomes the default. #[must_use] pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions { self.compression_method = method; self } /// Set the compression level for the new file /// /// `None` value specifies default compression level. /// /// Range of values depends on compression method: /// * `Deflated`: 0 - 9. Default is 6 /// * `Bzip2`: 0 - 9. Default is 6 /// * `Zstd`: -7 - 22, with zero being mapped to default level. Default is 3 /// * others: only `None` is allowed #[must_use] pub fn compression_level(mut self, level: Option) -> FileOptions { self.compression_level = level; self } /// Set the last modified time /// /// The default is the current timestamp if the 'time' feature is enabled, and 1980-01-01 /// otherwise #[must_use] pub fn last_modified_time(mut self, mod_time: DateTime) -> FileOptions { self.last_modified_time = mod_time; self } /// Set the permissions for the new file. /// /// The format is represented with unix-style permissions. /// The default is `0o644`, which represents `rw-r--r--` for files, /// and `0o755`, which represents `rwxr-xr-x` for directories. /// /// This method only preserves the file permissions bits (via a `& 0o777`) and discards /// higher file mode bits. So it cannot be used to denote an entry as a directory, /// symlink, or other special file type. #[must_use] pub fn unix_permissions(mut self, mode: u32) -> FileOptions { self.permissions = Some(mode & 0o777); self } /// Set whether the new file's compressed and uncompressed size is less than 4 GiB. /// /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true`, /// readers will require ZIP64 support and if the file does not exceed the limit, 20 B are /// wasted. The default is `false`. 
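    // The builder methods above can be chained; the values below are arbitrary and only
    // meant to illustrate the shape of a typical configuration, assuming one of the
    // deflate features is enabled (a sketch, not a doc-test):
    //
    //     let options = FileOptions::default()
    //         .compression_method(CompressionMethod::Deflated)
    //         .compression_level(Some(9))
    //         .unix_permissions(0o755)
    //         .large_file(true); // only needed if this entry may exceed 4 GiB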
#[must_use] pub fn large_file(mut self, large: bool) -> FileOptions { self.large_file = large; self } pub(crate) fn with_deprecated_encryption(mut self, password: &[u8]) -> FileOptions { self.encrypt_with = Some(crate::zipcrypto::ZipCryptoKeys::derive(password)); self } } impl Default for FileOptions { /// Construct a new FileOptions object fn default() -> Self { Self { #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] compression_method: CompressionMethod::Deflated, #[cfg(not(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" )))] compression_method: CompressionMethod::Stored, compression_level: None, #[cfg(feature = "time")] last_modified_time: OffsetDateTime::now_utc().try_into().unwrap_or_default(), #[cfg(not(feature = "time"))] last_modified_time: DateTime::default(), permissions: None, large_file: false, encrypt_with: None, } } } impl Write for ZipWriter { fn write(&mut self, buf: &[u8]) -> io::Result { if !self.writing_to_file { return Err(io::Error::new( io::ErrorKind::Other, "No file has been started", )); } match self.inner.ref_mut() { Some(ref mut w) => { if self.writing_to_extra_field { self.files.last_mut().unwrap().extra_field.write(buf) } else { let write_result = w.write(buf); if let Ok(count) = write_result { self.stats.update(&buf[0..count]); if self.stats.bytes_written > spec::ZIP64_BYTES_THR && !self.files.last_mut().unwrap().large_file { let _inner = mem::replace(&mut self.inner, GenericZipWriter::Closed); return Err(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", )); } } write_result } } None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", )), } } fn flush(&mut self) -> io::Result<()> { match self.inner.ref_mut() { Some(ref mut w) => w.flush(), None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", )), } } } impl ZipWriterStats { fn update(&mut self, buf: &[u8]) { self.hasher.update(buf); self.bytes_written += buf.len() as u64; } } impl ZipWriter { /// Initializes the archive from an existing ZIP archive, making it ready for append. pub fn new_append(mut readwriter: A) -> ZipResult> { let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut readwriter)?; if footer.disk_number != footer.disk_with_central_directory { return Err(ZipError::UnsupportedArchive( "Support for multi-disk files is not implemented", )); } let (archive_offset, directory_start, number_of_files) = ZipArchive::get_directory_counts(&mut readwriter, &footer, cde_start_pos)?; if readwriter .seek(io::SeekFrom::Start(directory_start)) .is_err() { return Err(ZipError::InvalidArchive( "Could not seek to start of central directory", )); } let files = (0..number_of_files) .map(|_| central_header_to_zip_file(&mut readwriter, archive_offset)) .collect::, _>>()?; let _ = readwriter.seek(io::SeekFrom::Start(directory_start)); // seek directory_start to overwrite it Ok(ZipWriter { inner: GenericZipWriter::Storer(MaybeEncrypted::Unencrypted(readwriter)), files, stats: Default::default(), writing_to_file: false, writing_to_extra_field: false, writing_to_central_extra_field_only: false, comment: footer.zip_file_comment, writing_raw: true, // avoid recomputing the last file's header }) } } impl ZipWriter { /// Initializes the archive. /// /// Before writing to this object, the [`ZipWriter::start_file`] function should be called. 
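    // `new` starts an archive from scratch, while `new_append` above re-opens an existing
    // archive so further entries can be written behind the ones already present. A rough
    // usage sketch inside a function returning `ZipResult<()>`; the path `archive.zip` is
    // a placeholder, not part of this crate:
    //
    //     use std::fs::OpenOptions;
    //     use std::io::Write;
    //     let file = OpenOptions::new().read(true).write(true).open("archive.zip")?;
    //     let mut zip = zip::ZipWriter::new_append(file)?;
    //     zip.start_file("appended.txt", zip::write::FileOptions::default())?;
    //     zip.write_all(b"added later")?;
    //     zip.finish()?;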
pub fn new(inner: W) -> ZipWriter { ZipWriter { inner: GenericZipWriter::Storer(MaybeEncrypted::Unencrypted(inner)), files: Vec::new(), stats: Default::default(), writing_to_file: false, writing_to_extra_field: false, writing_to_central_extra_field_only: false, writing_raw: false, comment: Vec::new(), } } /// Set ZIP archive comment. pub fn set_comment(&mut self, comment: S) where S: Into, { self.set_raw_comment(comment.into().into()) } /// Set ZIP archive comment. /// /// This sets the raw bytes of the comment. The comment /// is typically expected to be encoded in UTF-8 pub fn set_raw_comment(&mut self, comment: Vec) { self.comment = comment; } /// Start a new file for with the requested options. fn start_entry( &mut self, name: S, options: FileOptions, raw_values: Option, ) -> ZipResult<()> where S: Into, { self.finish_file()?; let raw_values = raw_values.unwrap_or(ZipRawValues { crc32: 0, compressed_size: 0, uncompressed_size: 0, }); { let writer = self.inner.get_plain(); let header_start = writer.stream_position()?; let permissions = options.permissions.unwrap_or(0o100644); let mut file = ZipFileData { system: System::Unix, version_made_by: DEFAULT_VERSION, encrypted: options.encrypt_with.is_some(), using_data_descriptor: false, compression_method: options.compression_method, compression_level: options.compression_level, last_modified_time: options.last_modified_time, crc32: raw_values.crc32, compressed_size: raw_values.compressed_size, uncompressed_size: raw_values.uncompressed_size, file_name: name.into(), file_name_raw: Vec::new(), // Never used for saving extra_field: Vec::new(), file_comment: String::new(), header_start, data_start: AtomicU64::new(0), central_header_start: 0, external_attributes: permissions << 16, large_file: options.large_file, aes_mode: None, }; write_local_file_header(writer, &file)?; let header_end = writer.stream_position()?; self.stats.start = header_end; *file.data_start.get_mut() = header_end; self.stats.bytes_written = 0; self.stats.hasher = Hasher::new(); self.files.push(file); } if let Some(keys) = options.encrypt_with { let mut zipwriter = crate::zipcrypto::ZipCryptoWriter { writer: core::mem::replace(&mut self.inner, GenericZipWriter::Closed).unwrap(), buffer: vec![], keys }; let mut crypto_header = [0u8; 12]; zipwriter.write_all(&crypto_header)?; self.inner = GenericZipWriter::Storer(MaybeEncrypted::Encrypted(zipwriter)); } Ok(()) } fn finish_file(&mut self) -> ZipResult<()> { if self.writing_to_extra_field { // Implicitly calling [`ZipWriter::end_extra_data`] for empty files. 
self.end_extra_data()?; } self.inner.switch_to(CompressionMethod::Stored, None)?; match core::mem::replace(&mut self.inner, GenericZipWriter::Closed) { GenericZipWriter::Storer(MaybeEncrypted::Encrypted(writer)) => { let crc32 = self.stats.hasher.clone().finalize(); self.inner = GenericZipWriter::Storer(MaybeEncrypted::Unencrypted(writer.finish(crc32)?)) } GenericZipWriter::Storer(w) => self.inner = GenericZipWriter::Storer(w), _ => unreachable!() } let writer = self.inner.get_plain(); if !self.writing_raw { let file = match self.files.last_mut() { None => return Ok(()), Some(f) => f, }; file.crc32 = self.stats.hasher.clone().finalize(); file.uncompressed_size = self.stats.bytes_written; let file_end = writer.stream_position()?; file.compressed_size = file_end - self.stats.start; update_local_file_header(writer, file)?; writer.seek(io::SeekFrom::Start(file_end))?; } self.writing_to_file = false; self.writing_raw = false; Ok(()) } /// Create a file in the archive and start writing its' contents. /// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] pub fn start_file(&mut self, name: S, mut options: FileOptions) -> ZipResult<()> where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; self.start_entry(name, options, None)?; self.inner .switch_to(options.compression_method, options.compression_level)?; self.writing_to_file = true; Ok(()) } /// Starts a file, taking a Path as argument. /// /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal' /// Components, such as a starting '/' or '..' and '.'. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. Use `start_file` instead." )] pub fn start_file_from_path( &mut self, path: &std::path::Path, options: FileOptions, ) -> ZipResult<()> { self.start_file(path_to_string(path), options) } /// Create an aligned file in the archive and start writing its' contents. /// /// Returns the number of padding bytes required to align the file. /// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] pub fn start_file_aligned( &mut self, name: S, options: FileOptions, align: u16, ) -> Result where S: Into, { let data_start = self.start_file_with_extra_data(name, options)?; let align = align as u64; if align > 1 && data_start % align != 0 { let pad_length = (align - (data_start + 4) % align) % align; let pad = vec![0; pad_length as usize]; self.write_all(b"za").map_err(ZipError::from)?; // 0x617a self.write_u16::(pad.len() as u16) .map_err(ZipError::from)?; self.write_all(&pad).map_err(ZipError::from)?; assert_eq!(self.end_local_start_central_extra_data()? % align, 0); } let extra_data_end = self.end_extra_data()?; Ok(extra_data_end - data_start) } /// Create a file in the archive and start writing its extra data first. /// /// Finish writing extra data and start writing file data with [`ZipWriter::end_extra_data`]. /// Optionally, distinguish local from central extra data with /// [`ZipWriter::end_local_start_central_extra_data`]. /// /// Returns the preliminary starting offset of the file data without any extra data allowing to /// align the file data by calculating a pad length to be prepended as part of the extra data. 
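    // `start_file_aligned` above derives its padding from exactly this offset: it reserves
    // 4 bytes for its own extra-data header (id 0x617a plus a u16 length) and then pads
    // with `(align - (data_start + 4) % align) % align` zero bytes, which is the same
    // arithmetic the doc-test below walks through. For instance, with `data_start + 4 == 41`
    // and `align == 64` the pad length would be `(64 - 41 % 64) % 64 == 23` (numbers chosen
    // only for illustration).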
/// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] /// /// ``` /// use byteorder::{LittleEndian, WriteBytesExt}; /// use zip::{ZipArchive, ZipWriter, result::ZipResult}; /// use zip::{write::FileOptions, CompressionMethod}; /// use std::io::{Write, Cursor}; /// /// # fn main() -> ZipResult<()> { /// let mut archive = Cursor::new(Vec::new()); /// /// { /// let mut zip = ZipWriter::new(&mut archive); /// let options = FileOptions::default() /// .compression_method(CompressionMethod::Stored); /// /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?; /// let extra_data = b"local and central extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; /// zip.write_all(b"file data")?; /// /// let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?; /// let extra_data = b"local extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// let data_start = data_start as usize + 4 + extra_data.len() + 4; /// let align = 64; /// let pad_length = (align - data_start % align) % align; /// assert_eq!(pad_length, 19); /// zip.write_u16::(0xdead)?; /// zip.write_u16::(pad_length as u16)?; /// zip.write_all(&vec![0; pad_length])?; /// let data_start = zip.end_local_start_central_extra_data()?; /// assert_eq!(data_start as usize % align, 0); /// let extra_data = b"central extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; /// zip.write_all(b"file data")?; /// /// zip.finish()?; /// } /// /// let mut zip = ZipArchive::new(archive)?; /// assert_eq!(&zip.by_index(0)?.extra_data()[4..], b"local and central extra data"); /// assert_eq!(&zip.by_index(1)?.extra_data()[4..], b"central extra data"); /// # Ok(()) /// # } /// ``` pub fn start_file_with_extra_data( &mut self, name: S, mut options: FileOptions, ) -> ZipResult where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; self.start_entry(name, options, None)?; self.writing_to_file = true; self.writing_to_extra_field = true; Ok(self.files.last().unwrap().data_start.load()) } /// End local and start central extra data. Requires [`ZipWriter::start_file_with_extra_data`]. /// /// Returns the final starting offset of the file data. pub fn end_local_start_central_extra_data(&mut self) -> ZipResult { let data_start = self.end_extra_data()?; self.files.last_mut().unwrap().extra_field.clear(); self.writing_to_extra_field = true; self.writing_to_central_extra_field_only = true; Ok(data_start) } /// End extra data and start file data. Requires [`ZipWriter::start_file_with_extra_data`]. /// /// Returns the final starting offset of the file data. pub fn end_extra_data(&mut self) -> ZipResult { // Require `start_file_with_extra_data()`. Ensures `file` is some. if !self.writing_to_extra_field { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Not writing to extra field", ))); } let file = self.files.last_mut().unwrap(); validate_extra_data(file)?; let data_start = file.data_start.get_mut(); if !self.writing_to_central_extra_field_only { let writer = self.inner.get_plain(); // Append extra data to local file header and keep it for central file header. writer.write_all(&file.extra_field)?; // Update final `data_start`. 
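                // At this point the writer has emitted the local file header, the file name
                // (plus the fixed ZIP64 field for large files) and, just above, the extra
                // field. The file data therefore begins immediately after the extra field,
                // which is why `data_start` is advanced by `extra_field.len()` below before
                // the stored extra-field length is patched up in the local header.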
let header_end = *data_start + file.extra_field.len() as u64; self.stats.start = header_end; *data_start = header_end; // Update extra field length in local file header. let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; writer.seek(io::SeekFrom::Start(file.header_start + 28))?; writer.write_u16::(extra_field_length)?; writer.seek(io::SeekFrom::Start(header_end))?; self.inner .switch_to(file.compression_method, file.compression_level)?; } self.writing_to_extra_field = false; self.writing_to_central_extra_field_only = false; Ok(*data_start) } /// Add a new file using the already compressed data from a ZIP file being read and renames it, this /// allows faster copies of the `ZipFile` since there is no need to decompress and compress it again. /// Any `ZipFile` metadata is copied and not checked, for example the file CRC. /// ```no_run /// use std::fs::File; /// use std::io::{Read, Seek, Write}; /// use zip::{ZipArchive, ZipWriter}; /// /// fn copy_rename( /// src: &mut ZipArchive, /// dst: &mut ZipWriter, /// ) -> zip::result::ZipResult<()> /// where /// R: Read + Seek, /// W: Write + Seek, /// { /// // Retrieve file entry by name /// let file = src.by_name("src_file.txt")?; /// /// // Copy and rename the previously obtained file entry to the destination zip archive /// dst.raw_copy_file_rename(file, "new_name.txt")?; /// /// Ok(()) /// } /// ``` pub fn raw_copy_file_rename(&mut self, mut file: ZipFile, name: S) -> ZipResult<()> where S: Into, { let mut options = FileOptions::default() .large_file(file.compressed_size().max(file.size()) > spec::ZIP64_BYTES_THR) .last_modified_time(file.last_modified()) .compression_method(file.compression()); if let Some(perms) = file.unix_mode() { options = options.unix_permissions(perms); } let raw_values = ZipRawValues { crc32: file.crc32(), compressed_size: file.compressed_size(), uncompressed_size: file.size(), }; self.start_entry(name, options, Some(raw_values))?; self.writing_to_file = true; self.writing_raw = true; io::copy(file.get_raw_reader(), self)?; Ok(()) } /// Add a new file using the already compressed data from a ZIP file being read, this allows faster /// copies of the `ZipFile` since there is no need to decompress and compress it again. Any `ZipFile` /// metadata is copied and not checked, for example the file CRC. /// /// ```no_run /// use std::fs::File; /// use std::io::{Read, Seek, Write}; /// use zip::{ZipArchive, ZipWriter}; /// /// fn copy(src: &mut ZipArchive, dst: &mut ZipWriter) -> zip::result::ZipResult<()> /// where /// R: Read + Seek, /// W: Write + Seek, /// { /// // Retrieve file entry by name /// let file = src.by_name("src_file.txt")?; /// /// // Copy the previously obtained file entry to the destination zip archive /// dst.raw_copy_file(file)?; /// /// Ok(()) /// } /// ``` pub fn raw_copy_file(&mut self, file: ZipFile) -> ZipResult<()> { let name = file.name().to_owned(); self.raw_copy_file_rename(file, name) } /// Add a directory entry. /// /// As directories have no content, you must not call [`ZipWriter::write`] before adding a new file. pub fn add_directory(&mut self, name: S, mut options: FileOptions) -> ZipResult<()> where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o755); } *options.permissions.as_mut().unwrap() |= 0o40000; options.compression_method = CompressionMethod::Stored; let name_as_string = name.into(); // Append a slash to the filename if it does not end with it. 
let name_with_slash = match name_as_string.chars().last() { Some('/') | Some('\\') => name_as_string, _ => name_as_string + "/", }; self.start_entry(name_with_slash, options, None)?; self.writing_to_file = false; Ok(()) } /// Add a directory entry, taking a Path as argument. /// /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal' /// Components, such as a starting '/' or '..' and '.'. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. Use `add_directory` instead." )] pub fn add_directory_from_path( &mut self, path: &std::path::Path, options: FileOptions, ) -> ZipResult<()> { self.add_directory(path_to_string(path), options) } /// Finish the last file and write all other zip-structures /// /// This will return the writer, but one should normally not append any data to the end of the file. /// Note that the zipfile will also be finished on drop. pub fn finish(&mut self) -> ZipResult { self.finalize()?; let inner = mem::replace(&mut self.inner, GenericZipWriter::Closed); Ok(inner.unwrap()) } /// Add a symlink entry. /// /// The zip archive will contain an entry for path `name` which is a symlink to `target`. /// /// No validation or normalization of the paths is performed. For best results, /// callers should normalize `\` to `/` and ensure symlinks are relative to other /// paths within the zip archive. /// /// WARNING: not all zip implementations preserve symlinks on extract. Some zip /// implementations may materialize a symlink as a regular file, possibly with the /// content incorrectly set to the symlink target. For maximum portability, consider /// storing a regular file instead. pub fn add_symlink( &mut self, name: N, target: T, mut options: FileOptions, ) -> ZipResult<()> where N: Into, T: Into, { if options.permissions.is_none() { options.permissions = Some(0o777); } *options.permissions.as_mut().unwrap() |= 0o120000; // The symlink target is stored as file content. And compressing the target path // likely wastes space. So always store. options.compression_method = CompressionMethod::Stored; self.start_entry(name, options, None)?; self.writing_to_file = true; self.write_all(target.into().as_bytes())?; self.writing_to_file = false; Ok(()) } fn finalize(&mut self) -> ZipResult<()> { self.finish_file()?; { let writer = self.inner.get_plain(); let central_start = writer.stream_position()?; for file in self.files.iter() { write_central_directory_header(writer, file)?; } let central_size = writer.stream_position()? 
- central_start; if self.files.len() > spec::ZIP64_ENTRY_THR || central_size.max(central_start) > spec::ZIP64_BYTES_THR { let zip64_footer = spec::Zip64CentralDirectoryEnd { version_made_by: DEFAULT_VERSION as u16, version_needed_to_extract: DEFAULT_VERSION as u16, disk_number: 0, disk_with_central_directory: 0, number_of_files_on_this_disk: self.files.len() as u64, number_of_files: self.files.len() as u64, central_directory_size: central_size, central_directory_offset: central_start, }; zip64_footer.write(writer)?; let zip64_footer = spec::Zip64CentralDirectoryEndLocator { disk_with_central_directory: 0, end_of_central_directory_offset: central_start + central_size, number_of_disks: 1, }; zip64_footer.write(writer)?; } let number_of_files = self.files.len().min(spec::ZIP64_ENTRY_THR) as u16; let footer = spec::CentralDirectoryEnd { disk_number: 0, disk_with_central_directory: 0, zip_file_comment: self.comment.clone(), number_of_files_on_this_disk: number_of_files, number_of_files, central_directory_size: central_size.min(spec::ZIP64_BYTES_THR) as u32, central_directory_offset: central_start.min(spec::ZIP64_BYTES_THR) as u32, }; footer.write(writer)?; } Ok(()) } } impl Drop for ZipWriter { fn drop(&mut self) { if !self.inner.is_closed() { if let Err(e) = self.finalize() { let _ = write!(io::stderr(), "ZipWriter drop failed: {e:?}"); } } } } impl GenericZipWriter { fn switch_to( &mut self, compression: CompressionMethod, compression_level: Option, ) -> ZipResult<()> { match self.current_compression() { Some(method) if method == compression => return Ok(()), None => { return Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", ) .into()) } _ => {} } let bare = match mem::replace(self, GenericZipWriter::Closed) { GenericZipWriter::Storer(w) => w, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(w) => w.finish()?, #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(w) => w.finish()?, #[cfg(feature = "zstd")] GenericZipWriter::Zstd(w) => w.finish()?, GenericZipWriter::Closed => { return Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", ) .into()) } }; *self = { #[allow(deprecated)] match compression { CompressionMethod::Stored => { if compression_level.is_some() { return Err(ZipError::UnsupportedArchive( "Unsupported compression level", )); } GenericZipWriter::Storer(bare) } #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => GenericZipWriter::Deflater(DeflateEncoder::new( bare, flate2::Compression::new( clamp_opt( compression_level .unwrap_or(flate2::Compression::default().level() as i32), deflate_compression_level_range(), ) .ok_or(ZipError::UnsupportedArchive( "Unsupported compression level", ))? as u32, ), )), #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => GenericZipWriter::Bzip2(BzEncoder::new( bare, bzip2::Compression::new( clamp_opt( compression_level .unwrap_or(bzip2::Compression::default().level() as i32), bzip2_compression_level_range(), ) .ok_or(ZipError::UnsupportedArchive( "Unsupported compression level", ))? 
as u32, ), )), CompressionMethod::AES => { return Err(ZipError::UnsupportedArchive( "AES compression is not supported for writing", )) } #[cfg(feature = "zstd")] CompressionMethod::Zstd => GenericZipWriter::Zstd( ZstdEncoder::new( bare, clamp_opt( compression_level.unwrap_or(zstd::DEFAULT_COMPRESSION_LEVEL), zstd::compression_level_range(), ) .ok_or(ZipError::UnsupportedArchive( "Unsupported compression level", ))?, ) .unwrap(), ), CompressionMethod::Unsupported(..) => { return Err(ZipError::UnsupportedArchive("Unsupported compression")) } } }; Ok(()) } fn ref_mut(&mut self) -> Option<&mut dyn Write> { match *self { GenericZipWriter::Storer(ref mut w) => Some(w as &mut dyn Write), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "zstd")] GenericZipWriter::Zstd(ref mut w) => Some(w as &mut dyn Write), GenericZipWriter::Closed => None, } } fn is_closed(&self) -> bool { matches!(*self, GenericZipWriter::Closed) } fn get_plain(&mut self) -> &mut W { match *self { GenericZipWriter::Storer(MaybeEncrypted::Unencrypted(ref mut w)) => w, _ => panic!("Should have switched to stored and unencrypted beforehand"), } } fn current_compression(&self) -> Option { match *self { GenericZipWriter::Storer(..) => Some(CompressionMethod::Stored), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(..) => Some(CompressionMethod::Deflated), #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(..) => Some(CompressionMethod::Bzip2), #[cfg(feature = "zstd")] GenericZipWriter::Zstd(..) => Some(CompressionMethod::Zstd), GenericZipWriter::Closed => None, } } fn unwrap(self) -> W { match self { GenericZipWriter::Storer(MaybeEncrypted::Unencrypted(w)) => w, _ => panic!("Should have switched to stored and unencrypted beforehand"), } } } #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] fn deflate_compression_level_range() -> std::ops::RangeInclusive { let min = flate2::Compression::none().level() as i32; let max = flate2::Compression::best().level() as i32; min..=max } #[cfg(feature = "bzip2")] fn bzip2_compression_level_range() -> std::ops::RangeInclusive { let min = bzip2::Compression::none().level() as i32; let max = bzip2::Compression::best().level() as i32; min..=max } #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib", feature = "bzip2", feature = "zstd" ))] fn clamp_opt(value: T, range: std::ops::RangeInclusive) -> Option { if range.contains(&value) { Some(value) } else { None } } fn write_local_file_header(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { // local file header signature writer.write_u32::(spec::LOCAL_FILE_HEADER_SIGNATURE)?; // version needed to extract writer.write_u16::(file.version_needed())?; // general purpose bit flag let flag = if !file.file_name.is_ascii() { 1u16 << 11 } else { 0 } | if file.encrypted { 1u16 << 0 } else { 0 }; writer.write_u16::(flag)?; // Compression method #[allow(deprecated)] writer.write_u16::(file.compression_method.to_u16())?; // last mod file time and last mod file date writer.write_u16::(file.last_modified_time.timepart())?; writer.write_u16::(file.last_modified_time.datepart())?; // crc-32 writer.write_u32::(file.crc32)?; // compressed size and uncompressed size if file.large_file { writer.write_u32::(spec::ZIP64_BYTES_THR 
as u32)?; writer.write_u32::(spec::ZIP64_BYTES_THR as u32)?; } else { writer.write_u32::(file.compressed_size as u32)?; writer.write_u32::(file.uncompressed_size as u32)?; } // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; writer.write_u16::(extra_field_length)?; // file name writer.write_all(file.file_name.as_bytes())?; // zip64 extra field if file.large_file { write_local_zip64_extra_field(writer, file)?; } Ok(()) } fn update_local_file_header( writer: &mut T, file: &ZipFileData, ) -> ZipResult<()> { const CRC32_OFFSET: u64 = 14; writer.seek(io::SeekFrom::Start(file.header_start + CRC32_OFFSET))?; writer.write_u32::(file.crc32)?; if file.large_file { update_local_zip64_extra_field(writer, file)?; } else { // check compressed size as well as it can also be slightly larger than uncompressed size if file.compressed_size > spec::ZIP64_BYTES_THR { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", ))); } writer.write_u32::(file.compressed_size as u32)?; // uncompressed size is already checked on write to catch it as soon as possible writer.write_u32::(file.uncompressed_size as u32)?; } Ok(()) } fn write_central_directory_header(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { // buffer zip64 extra field to determine its variable length let mut zip64_extra_field = [0; 28]; let zip64_extra_field_length = write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?; // central file header signature writer.write_u32::(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?; // version made by let version_made_by = (file.system as u16) << 8 | (file.version_made_by as u16); writer.write_u16::(version_made_by)?; // version needed to extract writer.write_u16::(file.version_needed())?; // general puprose bit flag let flag = if !file.file_name.is_ascii() { 1u16 << 11 } else { 0 } | if file.encrypted { 1u16 << 0 } else { 0 }; writer.write_u16::(flag)?; // compression method #[allow(deprecated)] writer.write_u16::(file.compression_method.to_u16())?; // last mod file time + date writer.write_u16::(file.last_modified_time.timepart())?; writer.write_u16::(file.last_modified_time.datepart())?; // crc-32 writer.write_u32::(file.crc32)?; // compressed size writer.write_u32::(file.compressed_size.min(spec::ZIP64_BYTES_THR) as u32)?; // uncompressed size writer.write_u32::(file.uncompressed_size.min(spec::ZIP64_BYTES_THR) as u32)?; // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length writer.write_u16::(zip64_extra_field_length + file.extra_field.len() as u16)?; // file comment length writer.write_u16::(0)?; // disk number start writer.write_u16::(0)?; // internal file attribytes writer.write_u16::(0)?; // external file attributes writer.write_u32::(file.external_attributes)?; // relative offset of local header writer.write_u32::(file.header_start.min(spec::ZIP64_BYTES_THR) as u32)?; // file name writer.write_all(file.file_name.as_bytes())?; // zip64 extra field writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?; // extra field writer.write_all(&file.extra_field)?; // file comment // Ok(()) } fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> { let mut data = file.extra_field.as_slice(); if data.len() > spec::ZIP64_ENTRY_THR { return Err(ZipError::Io(io::Error::new( io::ErrorKind::InvalidData, "Extra data exceeds extra field", ))); } 
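    // The loop below walks the extra field as a sequence of little-endian
    // (header id: u16, data size: u16) blocks, each followed by `size` bytes of payload.
    // Id 0x0001 is reserved for the ZIP64 record that this writer emits itself, and
    // (without the "unreserved" feature) ids at or below 31 or listed in
    // EXTRA_FIELD_MAPPING are rejected because they are claimed by the ZIP specification
    // or by other tools.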
while !data.is_empty() { let left = data.len(); if left < 4 { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Incomplete extra data header", ))); } let kind = data.read_u16::()?; let size = data.read_u16::()? as usize; let left = left - 4; if kind == 0x0001 { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "No custom ZIP64 extra data allowed", ))); } #[cfg(not(feature = "unreserved"))] { if kind <= 31 || EXTRA_FIELD_MAPPING.iter().any(|&mapped| mapped == kind) { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, format!( "Extra data header ID {kind:#06} requires crate feature \"unreserved\"", ), ))); } } if size > left { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Extra data size exceeds extra field", ))); } data = &data[size..]; } Ok(()) } fn write_local_zip64_extra_field(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { // This entry in the Local header MUST include BOTH original // and compressed file size fields. writer.write_u16::(0x0001)?; writer.write_u16::(16)?; writer.write_u64::(file.uncompressed_size)?; writer.write_u64::(file.compressed_size)?; // Excluded fields: // u32: disk start number Ok(()) } fn update_local_zip64_extra_field( writer: &mut T, file: &ZipFileData, ) -> ZipResult<()> { let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64; writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?; writer.write_u64::(file.uncompressed_size)?; writer.write_u64::(file.compressed_size)?; // Excluded fields: // u32: disk start number Ok(()) } fn write_central_zip64_extra_field(writer: &mut T, file: &ZipFileData) -> ZipResult { // The order of the fields in the zip64 extended // information record is fixed, but the fields MUST // only appear if the corresponding Local or Central // directory record field is set to 0xFFFF or 0xFFFFFFFF. let mut size = 0; let uncompressed_size = file.uncompressed_size > spec::ZIP64_BYTES_THR; let compressed_size = file.compressed_size > spec::ZIP64_BYTES_THR; let header_start = file.header_start > spec::ZIP64_BYTES_THR; if uncompressed_size { size += 8; } if compressed_size { size += 8; } if header_start { size += 8; } if size > 0 { writer.write_u16::(0x0001)?; writer.write_u16::(size)?; size += 4; if uncompressed_size { writer.write_u64::(file.uncompressed_size)?; } if compressed_size { writer.write_u64::(file.compressed_size)?; } if header_start { writer.write_u64::(file.header_start)?; } // Excluded fields: // u32: disk start number } Ok(size) } fn path_to_string(path: &std::path::Path) -> String { let mut path_str = String::new(); for component in path.components() { if let std::path::Component::Normal(os_str) = component { if !path_str.is_empty() { path_str.push('/'); } path_str.push_str(&os_str.to_string_lossy()); } } path_str } #[cfg(test)] mod test { use super::{FileOptions, ZipWriter}; use crate::compression::CompressionMethod; use crate::types::DateTime; use std::io; use std::io::Write; #[test] fn write_empty_zip() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer.set_comment("ZIP"); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 25); assert_eq!( *result.get_ref(), [80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 90, 73, 80] ); } #[test] fn unix_permissions_bitmask() { // unix_permissions() throws away upper bits. 
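        // For example, 0o120777 carries the symlink file-type bits (0o120000) on top of
        // rwxrwxrwx; only the low permission bits survive the `& 0o777` mask, as asserted
        // below.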
let options = FileOptions::default().unix_permissions(0o120777); assert_eq!(options.permissions, Some(0o777)); } #[test] fn write_zip_dir() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer .add_directory( "test", FileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a directory is not allowed, and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 108); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 51, 0, 0, 0, 35, 0, 0, 0, 0, 0, ] as &[u8] ); } #[test] fn write_symlink_simple() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer .add_symlink( "name", "target", FileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a symlink is not allowed and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 112); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0, 110, 97, 109, 101, 116, 97, 114, 103, 101, 116, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 110, 97, 109, 101, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 50, 0, 0, 0, 40, 0, 0, 0, 0, 0 ] as &[u8], ); } #[test] fn write_symlink_wonky_paths() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer .add_symlink( "directory\\link", "/absolute/symlink\\with\\mixed/slashes", FileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a symlink is not allowed and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 162); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105, 110, 107, 47, 97, 98, 115, 111, 108, 117, 116, 101, 47, 115, 121, 109, 108, 105, 110, 107, 92, 119, 105, 116, 104, 92, 109, 105, 120, 101, 100, 47, 115, 108, 97, 115, 104, 101, 115, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105, 110, 107, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 60, 0, 0, 0, 80, 0, 0, 0, 0, 0 ] as &[u8], ); } #[test] fn write_mimetype_zip() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); let options = FileOptions { compression_method: CompressionMethod::Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, encrypt_with: None, }; writer.start_file("mimetype", options).unwrap(); writer .write_all(b"application/vnd.oasis.opendocument.text") .unwrap(); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 153); let 
mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); assert_eq!(result.get_ref(), &v); } #[test] fn path_to_string() { let mut path = std::path::PathBuf::new(); #[cfg(windows)] path.push(r"C:\"); #[cfg(unix)] path.push("/"); path.push("windows"); path.push(".."); path.push("."); path.push("system32"); let path_str = super::path_to_string(&path); assert_eq!(path_str, "windows/system32"); } } #[cfg(not(feature = "unreserved"))] const EXTRA_FIELD_MAPPING: [u16; 49] = [ 0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8, 0x2605, 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49, 0x4f4c, 0x5356, 0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 0x756e, 0x7855, 0xa11e, 0xa220, 0xfd4a, 0x9901, 0x9902, ]; zip-0.6.6/src/zipcrypto.rs000064400000000000000000000217670072674642500137040ustar 00000000000000//! Implementation of the ZipCrypto algorithm //! //! The following paper was used to implement the ZipCrypto algorithm: //! [https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf](https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf) use std::num::Wrapping; /// A container to hold the current key state #[derive(Clone, Copy)] pub(crate) struct ZipCryptoKeys { key_0: Wrapping, key_1: Wrapping, key_2: Wrapping, } impl ZipCryptoKeys { fn new() -> ZipCryptoKeys { ZipCryptoKeys { key_0: Wrapping(0x12345678), key_1: Wrapping(0x23456789), key_2: Wrapping(0x34567890), } } fn update(&mut self, input: u8) { self.key_0 = ZipCryptoKeys::crc32(self.key_0, input); self.key_1 = (self.key_1 + (self.key_0 & Wrapping(0xff))) * Wrapping(0x08088405) + Wrapping(1); self.key_2 = ZipCryptoKeys::crc32(self.key_2, (self.key_1 >> 24).0 as u8); } fn stream_byte(&mut self) -> u8 { let temp: Wrapping = Wrapping(self.key_2.0 as u16) | Wrapping(3); ((temp * (temp ^ Wrapping(1))) >> 8).0 as u8 } fn decrypt_byte(&mut self, cipher_byte: u8) -> u8 { let plain_byte: u8 = self.stream_byte() ^ cipher_byte; self.update(plain_byte); plain_byte } #[allow(dead_code)] fn encrypt_byte(&mut self, plain_byte: u8) -> u8 { let cipher_byte: u8 = self.stream_byte() ^ plain_byte; self.update(plain_byte); cipher_byte } fn crc32(crc: Wrapping, input: u8) -> Wrapping { (crc >> 8) ^ Wrapping(CRCTABLE[((crc & Wrapping(0xff)).0 as u8 ^ input) as usize]) } pub(crate) fn derive(password: &[u8]) -> ZipCryptoKeys { let mut keys = ZipCryptoKeys::new(); for byte in password.iter() { keys.update(*byte); } keys } } /// A ZipCrypto reader with unverified password pub struct ZipCryptoReader { file: R, keys: ZipCryptoKeys, } pub enum ZipCryptoValidator { PkzipCrc32(u32), InfoZipMsdosTime(u16), } impl ZipCryptoReader { /// Note: The password is `&[u8]` and not `&str` because the /// [zip specification](https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.3.TXT) /// does not specify password encoding (see function `update_keys` in the specification). /// Therefore, if `&str` was used, the password would be UTF-8 and it /// would be impossible to decrypt files that were encrypted with a /// password byte sequence that is unrepresentable in UTF-8. pub fn new(file: R, password: &[u8]) -> ZipCryptoReader { ZipCryptoReader { file, keys: ZipCryptoKeys::derive(password), } } /// Read the ZipCrypto header bytes and validate the password. 
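    // A rough sketch of how this two-step API is used inside the crate: construct the
    // reader with the raw password bytes, then check it against one of the validators
    // before any file data is decrypted (illustration only; `encrypted_reader` and
    // `expected_crc32` are placeholders):
    //
    //     let reader = ZipCryptoReader::new(encrypted_reader, b"password");
    //     match reader.validate(ZipCryptoValidator::PkzipCrc32(expected_crc32))? {
    //         Some(valid) => { /* read plaintext through `valid` */ }
    //         None => { /* wrong password */ }
    //     }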
pub fn validate( mut self, validator: ZipCryptoValidator, ) -> Result>, std::io::Error> { // ZipCrypto prefixes a file with a 12 byte header let mut header_buf = [0u8; 12]; self.file.read_exact(&mut header_buf)?; for byte in header_buf.iter_mut() { *byte = self.keys.decrypt_byte(*byte); } match validator { ZipCryptoValidator::PkzipCrc32(crc32_plaintext) => { // PKZIP before 2.0 used 2 byte CRC check. // PKZIP 2.0+ used 1 byte CRC check. It's more secure. // We also use 1 byte CRC. if (crc32_plaintext >> 24) as u8 != header_buf[11] { return Ok(None); // Wrong password } } ZipCryptoValidator::InfoZipMsdosTime(last_mod_time) => { // Info-ZIP modification to ZipCrypto format: // If bit 3 of the general purpose bit flag is set // (indicates that the file uses a data-descriptor section), // it uses high byte of 16-bit File Time. // Info-ZIP code probably writes 2 bytes of File Time. // We check only 1 byte. if (last_mod_time >> 8) as u8 != header_buf[11] { return Ok(None); // Wrong password } } } Ok(Some(ZipCryptoReaderValid { reader: self })) } } pub(crate) struct ZipCryptoWriter { pub(crate) writer: W, pub(crate) buffer: Vec, pub(crate) keys: ZipCryptoKeys, } impl ZipCryptoWriter { pub(crate) fn finish(mut self, crc32: u32) -> std::io::Result { self.buffer[11] = (crc32 >> 24) as u8; for byte in self.buffer.iter_mut() { *byte = self.keys.encrypt_byte(*byte); } self.writer.write_all(&self.buffer)?; self.writer.flush()?; Ok(self.writer) } } impl std::io::Write for ZipCryptoWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result { self.buffer.extend_from_slice(buf); Ok(buf.len()) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } /// A ZipCrypto reader with verified password pub struct ZipCryptoReaderValid { reader: ZipCryptoReader, } impl std::io::Read for ZipCryptoReaderValid { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { // Note: There might be potential for optimization. Inspiration can be found at: // https://github.com/kornelski/7z/blob/master/CPP/7zip/Crypto/ZipCrypto.cpp let result = self.reader.file.read(buf); for byte in buf.iter_mut() { *byte = self.reader.keys.decrypt_byte(*byte); } result } } impl ZipCryptoReaderValid { /// Consumes this decoder, returning the underlying reader. 
pub fn into_inner(self) -> R { self.reader.file } } static CRCTABLE: [u32; 256] = [ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, ]; zip-0.6.6/tests/aes_encryption.rs000064400000000000000000000051730072674642500152270ustar 00000000000000#![cfg(feature = "aes-crypto")] use std::io::{self, Read}; use zip::ZipArchive; const SECRET_CONTENT: &str = "Lorem ipsum dolor sit amet"; const PASSWORD: &[u8] = b"helloworld"; #[test] fn aes256_encrypted_uncompressed_file() { let mut v = Vec::new(); 
v.extend_from_slice(include_bytes!("data/aes_archive.zip")); let mut archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); let mut file = archive .by_name_decrypt("secret_data_256_uncompressed", PASSWORD) .expect("couldn't find file in archive") .expect("invalid password"); assert_eq!("secret_data_256_uncompressed", file.name()); let mut content = String::new(); file.read_to_string(&mut content) .expect("couldn't read encrypted file"); assert_eq!(SECRET_CONTENT, content); } #[test] fn aes256_encrypted_file() { let mut v = Vec::new(); v.extend_from_slice(include_bytes!("data/aes_archive.zip")); let mut archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); let mut file = archive .by_name_decrypt("secret_data_256", PASSWORD) .expect("couldn't find file in archive") .expect("invalid password"); assert_eq!("secret_data_256", file.name()); let mut content = String::new(); file.read_to_string(&mut content) .expect("couldn't read encrypted and compressed file"); assert_eq!(SECRET_CONTENT, content); } #[test] fn aes192_encrypted_file() { let mut v = Vec::new(); v.extend_from_slice(include_bytes!("data/aes_archive.zip")); let mut archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); let mut file = archive .by_name_decrypt("secret_data_192", PASSWORD) .expect("couldn't find file in archive") .expect("invalid password"); assert_eq!("secret_data_192", file.name()); let mut content = String::new(); file.read_to_string(&mut content) .expect("couldn't read encrypted file"); assert_eq!(SECRET_CONTENT, content); } #[test] fn aes128_encrypted_file() { let mut v = Vec::new(); v.extend_from_slice(include_bytes!("data/aes_archive.zip")); let mut archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); let mut file = archive .by_name_decrypt("secret_data_128", PASSWORD) .expect("couldn't find file in archive") .expect("invalid password"); assert_eq!("secret_data_128", file.name()); let mut content = String::new(); file.read_to_string(&mut content) .expect("couldn't read encrypted file"); assert_eq!(SECRET_CONTENT, content); } zip-0.6.6/tests/data/aes_archive.zip000064400000000000000000000016140072674642500155410ustar 00000000000000PK3cp9T. secret_data_128AE՟(@i I5.<)FoXggPK3cs9T2 secret_data_192AEP)tuatj휘RfW fт6w#`hhwTPK3cv9T6 secret_data_256AE{3i5jmk&yݺaf7  xGuDҮPi=βPK3cy9T6 secret_data_256_uncompressedAEabCL/t _lDl/eiu <  Z HܽH?PK?3cp9T./ secret_data_128 /./AEPK?3cs9T2/ fsecret_data_192 /> ./AEPK?3cv9T6/ secret_data_256 /"./AEPK?3cy9T6/ >secret_data_256_uncompressed  /. /AEPKzip-0.6.6/tests/data/comment_garbage.zip000064400000000000000000000000560072674642500164010ustar 00000000000000PKshort.omment bla bla blazip-0.6.6/tests/data/files_and_dirs.zip000064400000000000000000000007160072674642500162370ustar 00000000000000PK x;N!X file0.txtUT M\R\ux File at the root. 
PK x;Ndir1/UT M\$R\ux PK x;Ndir2/UT M\$R\ux PK x;N!X file0.txtUTM\ux PK x;NAUdir1/UTM\ux PK x;NAdir2/UTM\ux PKzip-0.6.6/tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip000064400000000000000000000001740072674642500262330ustar 00000000000000PKLg+]PKPK Kzip-0.6.6/tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip000064400000000000000000000003240072674642500262360ustar 00000000000000PKPKPKvvvvvvvvrvvvvvvvvvvvvvPKvvvvvvvvvvvvvvvvvvvv-vvvvvvvPKPKzip-0.6.6/tests/data/invalid_offset.zip000064400000000000000000000023310072674642500162610ustar 00000000000000PK pJzip/UT PY"PYux PKpJ zip/.DS_StoreUT PYPYux 1 @E -- ` z/ (؈HSy.f~Q4X) &ivvf8qF'ㅄemZ/cQ!B᰿f~ iO6>OўtmKhG{:u47-a|qB1G{:B (z!c|9ZĊ9[aA S<:Z!>PK\gJӆmzip/Cargo.tomlUT PYlPYux 1 Ew:WIPN@Mz:UmvP܄zip/.DS_StoreUTPYux PK\gJӆm[zip/Cargo.tomlUTPYux PK zhJ A@zip/corpus/UTPYux PKzhJL{K3zip/corpus/9fdad349dac578687a62907dd7ba4295801fa566UTPYux PKhJ2 zip/read.rsUTOPYux PK qJ /seeds/UT"PYux PKpJjmzip/seeds/.DS_StoreUTPYux PK~zip-0.6.6/tests/data/invalid_offset2.zip000064400000000000000000000001650072674642500163460ustar 00000000000000PK'PKP)PK,PKP)PK=%zip-0.6.6/tests/data/mimetype.zip000064400000000000000000000002310072674642500151130ustar 00000000000000PK!^2 ''mimetypeapplication/vnd.oasis.opendocument.textPK.!^2 ''mimetypePK6Mzip-0.6.6/tests/data/zip64_demo.zip000064400000000000000000000003400072674642500152430ustar 00000000000000Leading junk. PK-q KU{-Hello, world! PK-q KU{-PK,-/APKpPK/Azip-0.6.6/tests/end_to_end.rs000064400000000000000000000163650072674642500143100ustar 00000000000000use byteorder::{LittleEndian, WriteBytesExt}; use std::collections::HashSet; use std::io::prelude::*; use std::io::{Cursor, Seek}; use std::iter::FromIterator; use zip::write::FileOptions; use zip::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS}; // This test asserts that after creating a zip file, then reading its contents back out, // the extracted data will *always* be exactly the same as the original data. #[test] fn end_to_end() { for &method in SUPPORTED_COMPRESSION_METHODS { let file = &mut Cursor::new(Vec::new()); println!("Writing file with {method} compression"); write_test_archive(file, method).expect("Couldn't write test zip archive"); println!("Checking file contents"); check_archive_file(file, ENTRY_NAME, Some(method), LOREM_IPSUM); } } // This test asserts that after copying a `ZipFile` to a new `ZipWriter`, then reading its // contents back out, the extracted data will *always* be exactly the same as the original data. 
#[test]
fn copy() {
    for &method in SUPPORTED_COMPRESSION_METHODS {
        let src_file = &mut Cursor::new(Vec::new());
        write_test_archive(src_file, method).expect("Couldn't write to test file");

        let mut tgt_file = &mut Cursor::new(Vec::new());

        {
            let mut src_archive = zip::ZipArchive::new(src_file).unwrap();
            let mut zip = zip::ZipWriter::new(&mut tgt_file);

            {
                let file = src_archive
                    .by_name(ENTRY_NAME)
                    .expect("Missing expected file");
                zip.raw_copy_file(file).expect("Couldn't copy file");
            }

            {
                let file = src_archive
                    .by_name(ENTRY_NAME)
                    .expect("Missing expected file");
                zip.raw_copy_file_rename(file, COPY_ENTRY_NAME)
                    .expect("Couldn't copy and rename file");
            }
        }

        let mut tgt_archive = zip::ZipArchive::new(tgt_file).unwrap();

        check_archive_file_contents(&mut tgt_archive, ENTRY_NAME, LOREM_IPSUM);
        check_archive_file_contents(&mut tgt_archive, COPY_ENTRY_NAME, LOREM_IPSUM);
    }
}

// This test asserts that after appending to a `ZipWriter`, then reading its contents back out,
// both the prior data and the appended data will be exactly the same as their originals.
#[test]
fn append() {
    for &method in SUPPORTED_COMPRESSION_METHODS {
        let mut file = &mut Cursor::new(Vec::new());
        write_test_archive(file, method).expect("Couldn't write to test file");

        {
            let mut zip = zip::ZipWriter::new_append(&mut file).unwrap();
            zip.start_file(
                COPY_ENTRY_NAME,
                FileOptions::default().compression_method(method),
            )
            .unwrap();
            zip.write_all(LOREM_IPSUM).unwrap();
            zip.finish().unwrap();
        }

        let mut zip = zip::ZipArchive::new(&mut file).unwrap();
        check_archive_file_contents(&mut zip, ENTRY_NAME, LOREM_IPSUM);
        check_archive_file_contents(&mut zip, COPY_ENTRY_NAME, LOREM_IPSUM);
    }
}

// Write a test zip archive to buffer.
fn write_test_archive(
    file: &mut Cursor<Vec<u8>>,
    method: CompressionMethod,
) -> zip::result::ZipResult<()> {
    let mut zip = zip::ZipWriter::new(file);

    zip.add_directory("test/", Default::default())?;

    let options = FileOptions::default()
        .compression_method(method)
        .unix_permissions(0o755);

    zip.start_file("test/☃.txt", options)?;
    zip.write_all(b"Hello, World!\n")?;

    zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", options)?;
    zip.write_u16::<LittleEndian>(0xbeef)?;
    zip.write_u16::<LittleEndian>(EXTRA_DATA.len() as u16)?;
    zip.write_all(EXTRA_DATA)?;
    zip.end_extra_data()?;
    zip.write_all(b"Hello, World! Again.\n")?;

    zip.start_file(ENTRY_NAME, options)?;
    zip.write_all(LOREM_IPSUM)?;

    zip.finish()?;
    Ok(())
}

// Load an archive from buffer and check for test data.
fn check_test_archive<R: Read + Seek>(zip_file: R) -> zip::result::ZipResult<zip::ZipArchive<R>> {
    let mut archive = zip::ZipArchive::new(zip_file).unwrap();

    // Check archive contains expected file names.
    {
        let expected_file_names = [
            "test/",
            "test/☃.txt",
            "test_with_extra_data/🐢.txt",
            ENTRY_NAME,
        ];
        let expected_file_names = HashSet::from_iter(expected_file_names.iter().copied());
        let file_names = archive.file_names().collect::<HashSet<_>>();
        assert_eq!(file_names, expected_file_names);
    }

    // Check an archive file for extra data field contents.
    {
        let file_with_extra_data = archive.by_name("test_with_extra_data/🐢.txt")?;
        let mut extra_data = Vec::new();
        extra_data.write_u16::<LittleEndian>(0xbeef)?;
        extra_data.write_u16::<LittleEndian>(EXTRA_DATA.len() as u16)?;
        extra_data.write_all(EXTRA_DATA)?;
        assert_eq!(file_with_extra_data.extra_data(), extra_data.as_slice());
    }

    Ok(archive)
}
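// A minimal sketch of an index-based variant of the checks above: instead of looking
// entries up by name, walk the archive with `len`/`by_index` and make sure every
// non-directory entry can be read in full. The helper name is illustrative only and
// is not called by the tests in this file.
#[allow(dead_code)]
fn check_all_entries_readable<R: Read + Seek>(
    archive: &mut zip::ZipArchive<R>,
) -> zip::result::ZipResult<()> {
    for i in 0..archive.len() {
        let mut file = archive.by_index(i)?;
        if file.is_dir() {
            // Directory entries carry no data.
            continue;
        }
        let mut contents = Vec::new();
        file.read_to_end(&mut contents)?;
        // The uncompressed size recorded in the archive should match what was read.
        assert_eq!(contents.len() as u64, file.size());
    }
    Ok(())
}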
// Read a file in the archive as a string.
fn read_archive_file<R: Read + Seek>(
    archive: &mut zip::ZipArchive<R>,
    name: &str,
) -> zip::result::ZipResult<String> {
    let mut file = archive.by_name(name)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents).unwrap();
    Ok(contents)
}

// Check a file in the archive contains expected data and properties.
fn check_archive_file(
    zip_file: &mut Cursor<Vec<u8>>,
    name: &str,
    expected_method: Option<CompressionMethod>,
    expected_data: &[u8],
) {
    let mut archive = check_test_archive(zip_file).unwrap();

    if let Some(expected_method) = expected_method {
        // Check the file's compression method.
        let file = archive.by_name(name).unwrap();
        let real_method = file.compression();

        assert_eq!(
            expected_method, real_method,
            "File does not have expected compression method"
        );
    }

    check_archive_file_contents(&mut archive, name, expected_data);
}

// Check a file in the archive contains the given data.
fn check_archive_file_contents<R: Read + Seek>(
    archive: &mut zip::ZipArchive<R>,
    name: &str,
    expected: &[u8],
) {
    let file_contents: String = read_archive_file(archive, name).unwrap();
    assert_eq!(file_contents.as_bytes(), expected);
}

const LOREM_IPSUM : &[u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tellus elit, tristique vitae mattis egestas, ultricies vitae risus. Quisque sit amet quam ut urna aliquet molestie. Proin blandit ornare dui, a tempor nisl accumsan in. Praesent a consequat felis. Morbi metus diam, auctor in auctor vel, feugiat id odio. Curabitur ex ex, dictum quis auctor quis, suscipit id lorem. Aliquam vestibulum dolor nec enim vehicula, porta tristique augue tincidunt. Vivamus ut gravida est. Sed pellentesque, dolor vitae tristique consectetur, neque lectus pulvinar dui, sed feugiat purus diam id lectus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Maecenas feugiat velit in ex ultrices scelerisque id id neque.
"; const EXTRA_DATA: &[u8] = b"Extra Data"; const ENTRY_NAME: &str = "test/lorem_ipsum.txt"; const COPY_ENTRY_NAME: &str = "test/lorem_ipsum_renamed.txt"; zip-0.6.6/tests/invalid_date.rs000064400000000000000000000026500072674642500146250ustar 00000000000000use std::io::Cursor; use zip::read::ZipArchive; const BUF: &[u8] = &[ 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x1c, 0x00, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0xf4, 0x5c, 0x88, 0x5a, 0xf4, 0x5c, 0x88, 0x5a, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // time part: 0 seconds, 0 minutes, 0 hours 0x00, 0x00, // date part: day 0 (invalid), month 0 (invalid), year 0 (1980) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0xf4, 0x5c, 0x88, 0x5a, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x58, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, ]; #[test] fn invalid_date() { let _archive = ZipArchive::new(Cursor::new(BUF)).unwrap(); } zip-0.6.6/tests/issue_234.rs000064400000000000000000000036570072674642500137320ustar 00000000000000use zip::result::ZipError; const BUF: &[u8] = &[ 0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0, 0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221, 221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34, 219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124, 221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0, 0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0, 221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8, 0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221, 221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, ]; #[test] fn invalid_header() { let reader = std::io::Cursor::new(&BUF); let archive = zip::ZipArchive::new(reader); match archive { Err(ZipError::InvalidArchive(_)) => {} 
value => panic!("Unexpected value: {value:?}"), } } zip-0.6.6/tests/zip64_large.rs000064400000000000000000000264730072674642500143410ustar 00000000000000// The following is a hexdump of a zip64 file containing the following files: // zero4400: 4400 MB of zeroes // zero100: 100 MB of zeroes // zero4400_2: 4400 MB of zeroes // // 00000000 50 4b 03 04 2d 00 00 00 00 00 1b 6e 51 4d 66 82 |PK..-......nQMf.| // 00000010 13 da ff ff ff ff ff ff ff ff 08 00 30 00 7a 65 |............0.ze| // 00000020 72 6f 34 34 30 30 55 54 09 00 03 a5 21 c7 5b db |ro4400UT....!.[.| // 00000030 21 c7 5b 75 78 0b 00 01 04 e8 03 00 00 04 e8 03 |!.[ux...........| // 00000040 00 00 01 00 10 00 00 00 00 13 01 00 00 00 00 00 |................| // 00000050 00 13 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 113000050 00 00 00 00 00 00 50 4b 03 04 0a 00 00 00 00 00 |......PK........| // 113000060 2b 6e 51 4d 98 23 28 4b 00 00 40 06 00 00 40 06 |+nQM.#(K..@...@.| // 113000070 07 00 1c 00 7a 65 72 6f 31 30 30 55 54 09 00 03 |....zero100UT...| // 113000080 c2 21 c7 5b c2 21 c7 5b 75 78 0b 00 01 04 e8 03 |.!.[.!.[ux......| // 113000090 00 00 04 e8 03 00 00 00 00 00 00 00 00 00 00 00 |................| // 1130000a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 119400090 00 00 00 00 00 00 00 50 4b 03 04 2d 00 00 00 00 |.......PK..-....| // 1194000a0 00 3b 6e 51 4d 66 82 13 da ff ff ff ff ff ff ff |.;nQMf..........| // 1194000b0 ff 0a 00 30 00 7a 65 72 6f 34 34 30 30 5f 32 55 |...0.zero4400_2U| // 1194000c0 54 09 00 03 e2 21 c7 5b db 21 c7 5b 75 78 0b 00 |T....!.[.!.[ux..| // 1194000d0 01 04 e8 03 00 00 04 e8 03 00 00 01 00 10 00 00 |................| // 1194000e0 00 00 13 01 00 00 00 00 00 00 13 01 00 00 00 00 |................| // 1194000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 22c4000e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 50 |...............P| // 22c4000f0 4b 01 02 1e 03 2d 00 00 00 00 00 1b 6e 51 4d 66 |K....-......nQMf| // 22c400100 82 13 da ff ff ff ff ff ff ff ff 08 00 2c 00 00 |.............,..| // 22c400110 00 00 00 00 00 00 00 a4 81 00 00 00 00 7a 65 72 |.............zer| // 22c400120 6f 34 34 30 30 55 54 05 00 03 a5 21 c7 5b 75 78 |o4400UT....!.[ux| // 22c400130 0b 00 01 04 e8 03 00 00 04 e8 03 00 00 01 00 10 |................| // 22c400140 00 00 00 00 13 01 00 00 00 00 00 00 13 01 00 00 |................| // 22c400150 00 50 4b 01 02 1e 03 0a 00 00 00 00 00 2b 6e 51 |.PK..........+nQ| // 22c400160 4d 98 23 28 4b 00 00 40 06 00 00 40 06 07 00 24 |M.#(K..@...@...$| // 22c400170 00 00 00 00 00 00 00 00 00 a4 81 ff ff ff ff 7a |...............z| // 22c400180 65 72 6f 31 30 30 55 54 05 00 03 c2 21 c7 5b 75 |ero100UT....!.[u| // 22c400190 78 0b 00 01 04 e8 03 00 00 04 e8 03 00 00 01 00 |x...............| // 22c4001a0 08 00 56 00 00 13 01 00 00 00 50 4b 01 02 1e 03 |..V.......PK....| // 22c4001b0 2d 00 00 00 00 00 3b 6e 51 4d 66 82 13 da ff ff |-.....;nQMf.....| // 22c4001c0 ff ff ff ff ff ff 0a 00 34 00 00 00 00 00 00 00 |........4.......| // 22c4001d0 00 00 a4 81 ff ff ff ff 7a 65 72 6f 34 34 30 30 |........zero4400| // 22c4001e0 5f 32 55 54 05 00 03 e2 21 c7 5b 75 78 0b 00 01 |_2UT....!.[ux...| // 22c4001f0 04 e8 03 00 00 04 e8 03 00 00 01 00 18 00 00 00 |................| // 22c400200 00 13 01 00 00 00 00 00 00 13 01 00 00 00 97 00 |................| // 22c400210 40 19 01 00 00 00 50 4b 06 06 2c 00 00 00 00 00 |@.....PK..,.....| // 22c400220 00 00 
1e 03 2d 00 00 00 00 00 00 00 00 00 03 00 |....-...........| // 22c400230 00 00 00 00 00 00 03 00 00 00 00 00 00 00 27 01 |..............'.| // 22c400240 00 00 00 00 00 00 ef 00 40 2c 02 00 00 00 50 4b |........@,....PK| // 22c400250 06 07 00 00 00 00 16 02 40 2c 02 00 00 00 01 00 |........@,......| // 22c400260 00 00 50 4b 05 06 00 00 00 00 03 00 03 00 27 01 |..PK..........'.| // 22c400270 00 00 ff ff ff ff 00 00 |........| // 22c400278 use std::io::{self, Read, Seek, SeekFrom}; const BLOCK1_LENGTH: u64 = 0x60; const BLOCK1: [u8; BLOCK1_LENGTH as usize] = [ 0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x00, 0x30, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x55, 0x54, 0x09, 0x00, 0x03, 0xa5, 0x21, 0xc7, 0x5b, 0xdb, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK2_LENGTH: u64 = 0x50; const BLOCK2: [u8; BLOCK2_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x6e, 0x51, 0x4d, 0x98, 0x23, 0x28, 0x4b, 0x00, 0x00, 0x40, 0x06, 0x00, 0x00, 0x40, 0x06, 0x07, 0x00, 0x1c, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x31, 0x30, 0x30, 0x55, 0x54, 0x09, 0x00, 0x03, 0xc2, 0x21, 0xc7, 0x5b, 0xc2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK3_LENGTH: u64 = 0x60; const BLOCK3: [u8; BLOCK3_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x30, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x5f, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03, 0xe2, 0x21, 0xc7, 0x5b, 0xdb, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK4_LENGTH: u64 = 0x198; const BLOCK4: [u8; BLOCK4_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x55, 0x54, 0x05, 0x00, 0x03, 0xa5, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x6e, 0x51, 0x4d, 0x98, 0x23, 0x28, 0x4b, 0x00, 0x00, 0x40, 0x06, 0x00, 0x00, 0x40, 0x06, 0x07, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x81, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x65, 0x72, 0x6f, 0x31, 0x30, 0x30, 0x55, 0x54, 0x05, 0x00, 0x03, 0xc2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 
    0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00,
    0x08, 0x00, 0x56, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03,
    0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff,
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0xa4, 0x81, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30,
    0x5f, 0x32, 0x55, 0x54, 0x05, 0x00, 0x03, 0xe2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01,
    0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00,
    0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x97, 0x00,
    0x40, 0x19, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x06, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x01,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, 0x40, 0x2c, 0x02, 0x00, 0x00, 0x00, 0x50, 0x4b,
    0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x16, 0x02, 0x40, 0x2c, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00,
    0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x27, 0x01,
    0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
];

const BLOCK1_START: u64 = 0x000000000;
const BLOCK2_START: u64 = 0x113000050;
const BLOCK3_START: u64 = 0x119400090;
const BLOCK4_START: u64 = 0x22c4000e0;

const BLOCK1_END: u64 = BLOCK1_START + BLOCK1_LENGTH - 1;
const BLOCK2_END: u64 = BLOCK2_START + BLOCK2_LENGTH - 1;
const BLOCK3_END: u64 = BLOCK3_START + BLOCK3_LENGTH - 1;
const BLOCK4_END: u64 = BLOCK4_START + BLOCK4_LENGTH - 1;

const TOTAL_LENGTH: u64 = BLOCK4_START + BLOCK4_LENGTH;

struct Zip64File {
    pointer: u64,
}

impl Zip64File {
    fn new() -> Self {
        Zip64File { pointer: 0 }
    }
}

impl Seek for Zip64File {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        match pos {
            SeekFrom::Start(offset) => {
                self.pointer = offset;
            }
            SeekFrom::End(offset) => {
                if offset > 0 || offset < -(TOTAL_LENGTH as i64) {
                    return Err(io::Error::new(io::ErrorKind::Other, "Invalid seek offset"));
                }
                self.pointer = (TOTAL_LENGTH as i64 + offset) as u64;
            }
            SeekFrom::Current(offset) => {
                let seekpos = self.pointer as i64 + offset;
                if seekpos < 0 || seekpos as u64 > TOTAL_LENGTH {
                    return Err(io::Error::new(io::ErrorKind::Other, "Invalid seek offset"));
                }
                self.pointer = seekpos as u64;
            }
        }
        Ok(self.pointer)
    }
}

impl Read for Zip64File {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.pointer >= TOTAL_LENGTH {
            return Ok(0);
        }
        match self.pointer {
            BLOCK1_START..=BLOCK1_END => {
                buf[0] = BLOCK1[(self.pointer - BLOCK1_START) as usize];
            }
            BLOCK2_START..=BLOCK2_END => {
                buf[0] = BLOCK2[(self.pointer - BLOCK2_START) as usize];
            }
            BLOCK3_START..=BLOCK3_END => {
                buf[0] = BLOCK3[(self.pointer - BLOCK3_START) as usize];
            }
            BLOCK4_START..=BLOCK4_END => {
                buf[0] = BLOCK4[(self.pointer - BLOCK4_START) as usize];
            }
            _ => {
                buf[0] = 0;
            }
        }
        self.pointer += 1;
        Ok(1)
    }
}

#[test]
fn zip64_large() {
    let zipfile = Zip64File::new();
    let mut archive = zip::ZipArchive::new(zipfile).unwrap();
    let mut buf = [0u8; 32];

    for i in 0..archive.len() {
        let mut file = archive.by_index(i).unwrap();
        let outpath = file.enclosed_name().unwrap();
        println!(
            "Entry {} has name \"{}\" ({} bytes)",
            i,
            outpath.display(),
            file.size()
        );

        match file.read_exact(&mut buf) {
            Ok(()) => println!("The first {} bytes are: {:?}", buf.len(), buf),
            Err(e) => println!("Could not read the file:
{e:?}"), }; } } zip-0.6.6/tests/zip_comment_garbage.rs000064400000000000000000000017670072674642500162060ustar 00000000000000// Some zip files can contain garbage after the comment. For example, python zipfile generates // it when opening a zip in 'a' mode: // // >>> from zipfile import ZipFile // >>> with ZipFile('comment_garbage.zip', 'a') as z: // ... z.comment = b'long comment bla bla bla' // ... // >>> with ZipFile('comment_garbage.zip', 'a') as z: // ... z.comment = b'short.' // ... // >>> // // Hexdump: // // 00000000 50 4b 05 06 00 00 00 00 00 00 00 00 00 00 00 00 |PK..............| // 00000010 00 00 00 00 06 00 73 68 6f 72 74 2e 6f 6d 6d 65 |......short.omme| // 00000020 6e 74 20 62 6c 61 20 62 6c 61 20 62 6c 61 |nt bla bla bla| // 0000002e use std::io; use zip::ZipArchive; #[test] fn correctly_handle_zip_with_garbage_after_comment() { let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/comment_garbage.zip")); let archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); assert_eq!(archive.comment(), "short.".as_bytes()); } zip-0.6.6/tests/zip_crypto.rs000064400000000000000000000114560072674642500144100ustar 00000000000000// The following is a hexdump of a zip file containing the following // ZipCrypto encrypted file: // test.txt: 35 bytes, contents: `abcdefghijklmnopqrstuvwxyz123456789`, password: `test` // // 00000000 50 4b 03 04 14 00 01 00 00 00 54 bd b5 50 2f 20 |PK........T..P/ | // 00000010 79 55 2f 00 00 00 23 00 00 00 08 00 00 00 74 65 |yU/...#.......te| // 00000020 73 74 2e 74 78 74 ca 2d 1d 27 19 19 63 43 77 9a |st.txt.-.'..cCw.| // 00000030 71 76 c9 ec d1 6f d9 f5 22 67 b3 8f 52 b5 41 bc |qv...o.."g..R.A.| // 00000040 5c 36 f2 1d 84 c3 c0 28 3b fd e1 70 c2 cc 0c 11 |\6.....(;..p....| // 00000050 0c c5 95 2f a4 50 4b 01 02 3f 00 14 00 01 00 00 |.../.PK..?......| // 00000060 00 54 bd b5 50 2f 20 79 55 2f 00 00 00 23 00 00 |.T..P/ yU/...#..| // 00000070 00 08 00 24 00 00 00 00 00 00 00 20 00 00 00 00 |...$....... ....| // 00000080 00 00 00 74 65 73 74 2e 74 78 74 0a 00 20 00 00 |...test.txt.. 
..| // 00000090 00 00 00 01 00 18 00 31 b2 3b bf b8 2f d6 01 31 |.......1.;../..1| // 000000a0 b2 3b bf b8 2f d6 01 a8 c4 45 bd b8 2f d6 01 50 |.;../....E../..P| // 000000b0 4b 05 06 00 00 00 00 01 00 01 00 5a 00 00 00 55 |K..........Z...U| // 000000c0 00 00 00 00 00 |.....| // 000000c5 use std::io::Cursor; use std::io::Read; #[test] fn encrypting_file() { use zip::unstable::write::FileOptionsExt; use std::io::{Read, Write}; let mut buf = vec![0; 2048]; let mut archive = zip::write::ZipWriter::new(std::io::Cursor::new(&mut buf)); archive.start_file("name", zip::write::FileOptions::default().with_deprecated_encryption(b"password")).unwrap(); archive.write_all(b"test").unwrap(); archive.finish().unwrap(); drop(archive); let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&mut buf)).unwrap(); let mut file = archive.by_index_decrypt(0, b"password").unwrap().unwrap(); let mut buf = Vec::new(); file.read_to_end(&mut buf).unwrap(); assert_eq!(buf, b"test"); } #[test] fn encrypted_file() { let zip_file_bytes = &mut Cursor::new(vec![ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x54, 0xbd, 0xb5, 0x50, 0x2f, 0x20, 0x79, 0x55, 0x2f, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, 0x74, 0xca, 0x2d, 0x1d, 0x27, 0x19, 0x19, 0x63, 0x43, 0x77, 0x9a, 0x71, 0x76, 0xc9, 0xec, 0xd1, 0x6f, 0xd9, 0xf5, 0x22, 0x67, 0xb3, 0x8f, 0x52, 0xb5, 0x41, 0xbc, 0x5c, 0x36, 0xf2, 0x1d, 0x84, 0xc3, 0xc0, 0x28, 0x3b, 0xfd, 0xe1, 0x70, 0xc2, 0xcc, 0x0c, 0x11, 0x0c, 0xc5, 0x95, 0x2f, 0xa4, 0x50, 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x54, 0xbd, 0xb5, 0x50, 0x2f, 0x20, 0x79, 0x55, 0x2f, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x08, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x31, 0xb2, 0x3b, 0xbf, 0xb8, 0x2f, 0xd6, 0x01, 0x31, 0xb2, 0x3b, 0xbf, 0xb8, 0x2f, 0xd6, 0x01, 0xa8, 0xc4, 0x45, 0xbd, 0xb8, 0x2f, 0xd6, 0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, ]); let mut archive = zip::ZipArchive::new(zip_file_bytes).unwrap(); assert_eq!(archive.len(), 1); //Only one file inside archive: `test.txt` { // No password let file = archive.by_index(0); match file { Err(zip::result::ZipError::UnsupportedArchive( zip::result::ZipError::PASSWORD_REQUIRED, )) => (), Err(_) => panic!( "Expected PasswordRequired error when opening encrypted file without password" ), Ok(_) => panic!("Error: Successfully opened encrypted file without password?!"), } } { // Wrong password let file = archive.by_index_decrypt(0, b"wrong password"); match file { Ok(Err(zip::result::InvalidPassword)) => (), Err(_) => panic!( "Expected InvalidPassword error when opening encrypted file with wrong password" ), Ok(Ok(_)) => panic!("Error: Successfully opened encrypted file with wrong password?!"), } } { // Correct password, read contents let mut file = archive .by_index_decrypt(0, "test".as_bytes()) .unwrap() .unwrap(); let file_name = file.enclosed_name().unwrap(); assert_eq!(file_name, std::path::PathBuf::from("test.txt")); let mut data = Vec::new(); file.read_to_end(&mut data).unwrap(); assert_eq!(data, "abcdefghijklmnopqrstuvwxyz123456789".as_bytes()); } }
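// A minimal companion sketch to the tests above: entries written with the deprecated
// ZipCrypto encryption (`with_deprecated_encryption`) can also be located by name via
// `by_name_decrypt`, the same accessor the AES test data uses elsewhere in this suite.
// The entry name "by_name.txt" is illustrative only.
#[test]
fn encrypting_file_by_name() {
    use std::io::Write;
    use zip::unstable::write::FileOptionsExt;

    // Write a single ZipCrypto-encrypted entry into an in-memory buffer,
    // mirroring the `encrypting_file` test above.
    let mut buf = vec![0; 2048];
    let mut archive = zip::write::ZipWriter::new(std::io::Cursor::new(&mut buf));
    archive
        .start_file(
            "by_name.txt",
            zip::write::FileOptions::default().with_deprecated_encryption(b"password"),
        )
        .unwrap();
    archive.write_all(b"test").unwrap();
    archive.finish().unwrap();
    drop(archive);

    // Read it back by name. `by_name_decrypt` returns Ok(Err(InvalidPassword)) when the
    // password fails the ZipCrypto one-byte check, mirroring `by_index_decrypt` above.
    let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&mut buf)).unwrap();
    let mut file = archive
        .by_name_decrypt("by_name.txt", b"password")
        .unwrap()
        .unwrap();
    let mut contents = Vec::new();
    file.read_to_end(&mut contents).unwrap();
    assert_eq!(contents, b"test");
}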