zip-0.5.13/.cargo_vcs_info.json0000644000000001120000000000000117150ustar { "git": { "sha1": "7edf2489d5cff8b80f02ee6fc5febf3efd0a9442" } } zip-0.5.13/.github/dependabot.yml000064400000000000000000000001770000000000000146660ustar 00000000000000version: 2 updates: - package-ecosystem: cargo directory: "/" schedule: interval: daily open-pull-requests-limit: 10 zip-0.5.13/.github/workflows/ci.yaml000064400000000000000000000020640000000000000153470ustar 00000000000000name: CI on: pull_request: push: branches: - master env: RUSTFLAGS: -Dwarnings jobs: build_and_test: name: Build and test runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macOS-latest, windows-latest] rust: [stable, 1.36.0] steps: - uses: actions/checkout@master - name: Install ${{ matrix.rust }} uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.rust }} override: true - name: check uses: actions-rs/cargo@v1 with: command: check args: --all --bins --examples - name: tests uses: actions-rs/cargo@v1 with: command: test args: --all check_fmt_and_docs: name: Checking fmt and docs runs-on: ubuntu-latest steps: - uses: actions/checkout@master - uses: actions-rs/toolchain@v1 with: toolchain: nightly components: rustfmt, clippy override: true - name: fmt run: cargo fmt --all -- --check - name: Docs run: cargo doczip-0.5.13/.gitignore000064400000000000000000000000330000000000000124550ustar 00000000000000Cargo.lock target \.idea/ zip-0.5.13/CODE_OF_CONDUCT.md000064400000000000000000000064530000000000000133000ustar 00000000000000 # Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
## Scope This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at ryan.levick@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq zip-0.5.13/Cargo.lock0000644000000202410000000000000076750ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. [[package]] name = "adler32" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "bencher" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" [[package]] name = "byteorder" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" [[package]] name = "bzip2" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abf8012c8a15d5df745fcf258d93e6149dcf102882c8d8702d9cff778eab43a8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.10+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17fa3d1ac1ca21c5c4e36a97f3c3eb25084576f6fc47bf0139c1123434216c6c" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crc32fast" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "flate2" version = "1.0.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" dependencies = [ "cfg-if 0.1.10", "crc32fast", "libc", "libz-sys", "miniz_oxide", ] [[package]] name = "getrandom" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "libc" version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "libz-sys" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "miniz_oxide" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" dependencies = [ "adler32", ] [[package]] name = "pkg-config" version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "ppv-lite86" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro2" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" dependencies = [ "unicode-xid", ] [[package]] name = "quote" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", "rand_chacha", "rand_core", "rand_hc", ] [[package]] name = "rand_chacha" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ "rand_core", ] [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "syn" version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] [[package]] name = "thiserror" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "time" version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "unicode-xid" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "vcpkg" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "walkdir" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", "winapi", "winapi-util", ] [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "zip" version = "0.5.13" dependencies = [ "bencher", "byteorder", "bzip2", "crc32fast", "flate2", "rand", "thiserror", "time", "walkdir", ] zip-0.5.13/Cargo.toml0000644000000030010000000000000077130ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] edition = "2018" name = "zip" version = "0.5.13" authors = ["Mathijs van de Nes ", "Marli Frost ", "Ryan Levick "] description = "Library to support the reading and writing of zip files.\n" keywords = ["zip", "archive"] license = "MIT" repository = "https://github.com/zip-rs/zip.git" [[bench]] name = "read_entry" harness = false [dependencies.byteorder] version = "1.3" [dependencies.bzip2] version = "0.4" optional = true [dependencies.crc32fast] version = "1.0" [dependencies.flate2] version = "1.0.0" optional = true default-features = false [dependencies.thiserror] version = "1.0" [dependencies.time] version = "0.1" optional = true [dev-dependencies.bencher] version = "0.1" [dev-dependencies.rand] version = "0.7" [dev-dependencies.walkdir] version = "2" [features] default = ["bzip2", "deflate", "time"] deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] unreserved = [] zip-0.5.13/Cargo.toml.orig000064400000000000000000000015500000000000000133610ustar 00000000000000[package] name = "zip" version = "0.5.13" authors = ["Mathijs van de Nes ", "Marli Frost ", "Ryan Levick "] license = "MIT" repository = "https://github.com/zip-rs/zip.git" keywords = ["zip", "archive"] description = """ Library to support the reading and writing of zip files. """ edition = "2018" [dependencies] flate2 = { version = "1.0.0", default-features = false, optional = true } time = { version = "0.1", optional = true } byteorder = "1.3" bzip2 = { version = "0.4", optional = true } crc32fast = "1.0" thiserror = "1.0" [dev-dependencies] bencher = "0.1" rand = "0.7" walkdir = "2" [features] deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] unreserved = [] default = ["bzip2", "deflate", "time"] [[bench]] name = "read_entry" harness = false zip-0.5.13/LICENSE000064400000000000000000000020740000000000000115010ustar 00000000000000The MIT License (MIT) Copyright (c) 2014 Mathijs van de Nes Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

zip-0.5.13/README.md

zip-rs
======

[![Build Status](https://img.shields.io/github/workflow/status/zip-rs/zip/CI)](https://github.com/zip-rs/zip/actions?query=branch%3Amaster+workflow%3ACI)
[![Crates.io version](https://img.shields.io/crates/v/zip.svg)](https://crates.io/crates/zip)

[Documentation](https://docs.rs/zip/0.5.13/zip/)

Info
----

A ZIP library for Rust which supports reading and writing of simple ZIP files.

Supported compression formats:

* stored (i.e. none)
* deflate
* bzip2

Currently unsupported zip extensions:

* Encryption
* Multi-disk

Usage
-----

With all default features:

```toml
[dependencies]
zip = "0.5"
```

Without the default features:

```toml
[dependencies]
zip = { version = "0.5", default-features = false }
```

The features available are:

* `deflate`: Enables the deflate compression algorithm, which is the default for ZIP files.
* `bzip2`: Enables the BZip2 compression algorithm.
* `time`: Enables features using the [time](https://github.com/rust-lang-deprecated/time) crate.

All of these are enabled by default.

MSRV
----

Our current Minimum Supported Rust Version is **1.36.0**. When adding features, we will follow these guidelines:

- We will always support the latest four minor Rust versions. This gives you a 6-month window to upgrade your compiler.
- Any change to the MSRV will be accompanied by a **minor** version bump.
  - While the crate is pre-1.0, this will be a change to the PATCH version.

Examples
--------

See the [examples directory](examples) for:

* How to write a file to a zip.
* How to write a directory of files to a zip (using [walkdir](https://github.com/BurntSushi/walkdir)).
* How to extract a zip file.
* How to extract a single file from a zip.
* How to read a zip from the standard input.
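As a quick orientation, here is a minimal sketch of reading an archive with the high-level API; the path `archive.zip` is a placeholder and errors are simply propagated with `?`:

```rust
use std::fs::File;
use std::io::Read;

fn main() -> zip::result::ZipResult<()> {
    // Open the archive; `ZipArchive::new` reads the central directory.
    let file = File::open("archive.zip")?;
    let mut archive = zip::ZipArchive::new(file)?;

    // Walk every entry and read its contents into memory.
    for i in 0..archive.len() {
        let mut entry = archive.by_index(i)?;
        println!("{} ({} bytes)", entry.name(), entry.size());

        let mut contents = Vec::new();
        entry.read_to_end(&mut contents)?;
    }
    Ok(())
}
```

Writing archives works the same way through `zip::ZipWriter`; see `examples/write_sample.rs` for a complete program.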
zip-0.5.13/benches/read_entry.rs000064400000000000000000000022420000000000000146020ustar 00000000000000use bencher::{benchmark_group, benchmark_main}; use std::io::{Cursor, Read, Write}; use bencher::Bencher; use rand::Rng; use zip::{ZipArchive, ZipWriter}; fn generate_random_archive(size: usize) -> Vec { let data = Vec::new(); let mut writer = ZipWriter::new(Cursor::new(data)); let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); writer.start_file("random.dat", options).unwrap(); let mut bytes = vec![0u8; size]; rand::thread_rng().fill_bytes(&mut bytes); writer.write_all(&bytes).unwrap(); writer.finish().unwrap().into_inner() } fn read_entry(bench: &mut Bencher) { let size = 1024 * 1024; let bytes = generate_random_archive(size); let mut archive = ZipArchive::new(Cursor::new(bytes.as_slice())).unwrap(); bench.iter(|| { let mut file = archive.by_name("random.dat").unwrap(); let mut buf = [0u8; 1024]; loop { let n = file.read(&mut buf).unwrap(); if n == 0 { break; } } }); bench.bytes = size as u64; } benchmark_group!(benches, read_entry); benchmark_main!(benches); zip-0.5.13/examples/extract.rs000064400000000000000000000033600000000000000143310ustar 00000000000000use std::fs; use std::io; fn main() { std::process::exit(real_main()); } fn real_main() -> i32 { let args: Vec<_> = std::env::args().collect(); if args.len() < 2 { println!("Usage: {} ", args[0]); return 1; } let fname = std::path::Path::new(&*args[1]); let file = fs::File::open(&fname).unwrap(); let mut archive = zip::ZipArchive::new(file).unwrap(); for i in 0..archive.len() { let mut file = archive.by_index(i).unwrap(); let outpath = match file.enclosed_name() { Some(path) => path.to_owned(), None => continue, }; { let comment = file.comment(); if !comment.is_empty() { println!("File {} comment: {}", i, comment); } } if (&*file.name()).ends_with('/') { println!("File {} extracted to \"{}\"", i, outpath.display()); fs::create_dir_all(&outpath).unwrap(); } else { println!( "File {} extracted to \"{}\" ({} bytes)", i, outpath.display(), file.size() ); if let Some(p) = outpath.parent() { if !p.exists() { fs::create_dir_all(&p).unwrap(); } } let mut outfile = fs::File::create(&outpath).unwrap(); io::copy(&mut file, &mut outfile).unwrap(); } // Get and Set permissions #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; if let Some(mode) = file.unix_mode() { fs::set_permissions(&outpath, fs::Permissions::from_mode(mode)).unwrap(); } } } return 0; } zip-0.5.13/examples/extract_lorem.rs000064400000000000000000000014020000000000000155220ustar 00000000000000use std::io::prelude::*; fn main() { std::process::exit(real_main()); } fn real_main() -> i32 { let args: Vec<_> = std::env::args().collect(); if args.len() < 2 { println!("Usage: {} ", args[0]); return 1; } let fname = std::path::Path::new(&*args[1]); let zipfile = std::fs::File::open(&fname).unwrap(); let mut archive = zip::ZipArchive::new(zipfile).unwrap(); let mut file = match archive.by_name("test/lorem_ipsum.txt") { Ok(file) => file, Err(..) 
=> { println!("File test/lorem_ipsum.txt not found"); return 2; } }; let mut contents = String::new(); file.read_to_string(&mut contents).unwrap(); println!("{}", contents); return 0; } zip-0.5.13/examples/file_info.rs000064400000000000000000000025420000000000000146120ustar 00000000000000use std::fs; use std::io::BufReader; fn main() { std::process::exit(real_main()); } fn real_main() -> i32 { let args: Vec<_> = std::env::args().collect(); if args.len() < 2 { println!("Usage: {} ", args[0]); return 1; } let fname = std::path::Path::new(&*args[1]); let file = fs::File::open(&fname).unwrap(); let reader = BufReader::new(file); let mut archive = zip::ZipArchive::new(reader).unwrap(); for i in 0..archive.len() { let file = archive.by_index(i).unwrap(); let outpath = match file.enclosed_name() { Some(path) => path, None => { println!("Entry {} has a suspicious path", file.name()); continue; } }; { let comment = file.comment(); if !comment.is_empty() { println!("Entry {} comment: {}", i, comment); } } if (&*file.name()).ends_with('/') { println!( "Entry {} is a directory with name \"{}\"", i, outpath.display() ); } else { println!( "Entry {} is a file with name \"{}\" ({} bytes)", i, outpath.display(), file.size() ); } } return 0; } zip-0.5.13/examples/stdin_info.rs000064400000000000000000000017120000000000000150120ustar 00000000000000use std::io::{self, Read}; fn main() { std::process::exit(real_main()); } fn real_main() -> i32 { let stdin = io::stdin(); let mut stdin_handle = stdin.lock(); let mut buf = [0u8; 16]; loop { match zip::read::read_zipfile_from_stream(&mut stdin_handle) { Ok(Some(mut file)) => { println!( "{}: {} bytes ({} bytes packed)", file.name(), file.size(), file.compressed_size() ); match file.read(&mut buf) { Ok(n) => println!("The first {} bytes are: {:?}", n, &buf[0..n]), Err(e) => println!("Could not read the file: {:?}", e), }; } Ok(None) => break, Err(e) => { println!("Error encountered while reading zip: {:?}", e); return 1; } } } return 0; } zip-0.5.13/examples/write_dir.rs000064400000000000000000000064700000000000000146540ustar 00000000000000use std::io::prelude::*; use std::io::{Seek, Write}; use std::iter::Iterator; use zip::result::ZipError; use zip::write::FileOptions; use std::fs::File; use std::path::Path; use walkdir::{DirEntry, WalkDir}; fn main() { std::process::exit(real_main()); } const METHOD_STORED: Option = Some(zip::CompressionMethod::Stored); #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] const METHOD_DEFLATED: Option = Some(zip::CompressionMethod::Deflated); #[cfg(not(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" )))] const METHOD_DEFLATED: Option = None; #[cfg(feature = "bzip2")] const METHOD_BZIP2: Option = Some(zip::CompressionMethod::Bzip2); #[cfg(not(feature = "bzip2"))] const METHOD_BZIP2: Option = None; fn real_main() -> i32 { let args: Vec<_> = std::env::args().collect(); if args.len() < 3 { println!( "Usage: {} ", args[0] ); return 1; } let src_dir = &*args[1]; let dst_file = &*args[2]; for &method in [METHOD_STORED, METHOD_DEFLATED, METHOD_BZIP2].iter() { if method.is_none() { continue; } match doit(src_dir, dst_file, method.unwrap()) { Ok(_) => println!("done: {} written to {}", src_dir, dst_file), Err(e) => println!("Error: {:?}", e), } } return 0; } fn zip_dir( it: &mut dyn Iterator, prefix: &str, writer: T, method: zip::CompressionMethod, ) -> zip::result::ZipResult<()> where T: Write + Seek, { let mut zip = zip::ZipWriter::new(writer); let options = 
FileOptions::default() .compression_method(method) .unix_permissions(0o755); let mut buffer = Vec::new(); for entry in it { let path = entry.path(); let name = path.strip_prefix(Path::new(prefix)).unwrap(); // Write file or directory explicitly // Some unzip tools unzip files with directory paths correctly, some do not! if path.is_file() { println!("adding file {:?} as {:?} ...", path, name); #[allow(deprecated)] zip.start_file_from_path(name, options)?; let mut f = File::open(path)?; f.read_to_end(&mut buffer)?; zip.write_all(&*buffer)?; buffer.clear(); } else if name.as_os_str().len() != 0 { // Only if not root! Avoids path spec / warning // and mapname conversion failed error on unzip println!("adding dir {:?} as {:?} ...", path, name); #[allow(deprecated)] zip.add_directory_from_path(name, options)?; } } zip.finish()?; Result::Ok(()) } fn doit( src_dir: &str, dst_file: &str, method: zip::CompressionMethod, ) -> zip::result::ZipResult<()> { if !Path::new(src_dir).is_dir() { return Err(ZipError::FileNotFound); } let path = Path::new(dst_file); let file = File::create(&path).unwrap(); let walkdir = WalkDir::new(src_dir.to_string()); let it = walkdir.into_iter(); zip_dir(&mut it.filter_map(|e| e.ok()), src_dir, file, method)?; Ok(()) } zip-0.5.13/examples/write_sample.rs000064400000000000000000000105130000000000000153500ustar 00000000000000use std::io::prelude::*; use zip::write::FileOptions; fn main() { std::process::exit(real_main()); } fn real_main() -> i32 { let args: Vec<_> = std::env::args().collect(); if args.len() < 2 { println!("Usage: {} ", args[0]); return 1; } let filename = &*args[1]; match doit(filename) { Ok(_) => println!("File written to {}", filename), Err(e) => println!("Error: {:?}", e), } return 0; } fn doit(filename: &str) -> zip::result::ZipResult<()> { let path = std::path::Path::new(filename); let file = std::fs::File::create(&path).unwrap(); let mut zip = zip::ZipWriter::new(file); zip.add_directory("test/", Default::default())?; let options = FileOptions::default() .compression_method(zip::CompressionMethod::Stored) .unix_permissions(0o755); zip.start_file("test/☃.txt", options)?; zip.write_all(b"Hello, World!\n")?; zip.start_file("test/lorem_ipsum.txt", Default::default())?; zip.write_all(LOREM_IPSUM)?; zip.finish()?; Ok(()) } const LOREM_IPSUM : &'static [u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tellus elit, tristique vitae mattis egestas, ultricies vitae risus. Quisque sit amet quam ut urna aliquet molestie. Proin blandit ornare dui, a tempor nisl accumsan in. Praesent a consequat felis. Morbi metus diam, auctor in auctor vel, feugiat id odio. Curabitur ex ex, dictum quis auctor quis, suscipit id lorem. Aliquam vestibulum dolor nec enim vehicula, porta tristique augue tincidunt. Vivamus ut gravida est. Sed pellentesque, dolor vitae tristique consectetur, neque lectus pulvinar dui, sed feugiat purus diam id lectus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Maecenas feugiat velit in ex ultrices scelerisque id id neque. Phasellus sed nisi in augue sodales pulvinar ut et leo. Pellentesque eget leo vitae massa bibendum sollicitudin. Curabitur erat lectus, congue quis auctor sed, aliquet bibendum est. Ut porta ultricies turpis at maximus. Cras non lobortis justo. Duis rutrum magna sed velit facilisis, et sagittis metus laoreet. Pellentesque quam ligula, dapibus vitae mauris quis, dapibus cursus leo. Sed sit amet condimentum eros. 
Nulla vestibulum enim sit amet lorem pharetra, eu fringilla nisl posuere. Sed tristique non nibh at viverra. Vivamus sed accumsan lacus, nec pretium eros. Mauris elementum arcu eu risus fermentum, tempor ullamcorper neque aliquam. Sed tempor in erat eu suscipit. In euismod in libero in facilisis. Donec sagittis, odio et fermentum dignissim, risus justo pretium nibh, eget vestibulum lectus metus vel lacus. Quisque feugiat, magna ac feugiat ullamcorper, augue justo consequat felis, ut fermentum arcu lorem vitae ligula. Quisque iaculis tempor maximus. In quis eros ac tellus aliquam placerat quis id tellus. Donec non gravida nulla. Morbi faucibus neque sed faucibus aliquam. Sed accumsan mattis nunc, non interdum justo. Cras vitae facilisis leo. Fusce sollicitudin ultrices sagittis. Maecenas eget massa id lorem dignissim ultrices non et ligula. Pellentesque aliquam mi ac neque tempus ornare. Morbi non enim vulputate quam ullamcorper finibus id non neque. Quisque malesuada commodo lorem, ut ornare velit iaculis rhoncus. Mauris vel maximus ex. Morbi eleifend blandit diam, non vulputate ante iaculis in. Donec pellentesque augue id enim suscipit, eget suscipit lacus commodo. Ut vel ex vitae elit imperdiet vulputate. Nunc eu mattis orci, ut pretium sem. Nam vitae purus mollis ante tempus malesuada a at magna. Integer mattis lectus non luctus lobortis. In a cursus quam, eget faucibus sem. Donec vitae condimentum nisi, non efficitur massa. Praesent sed mi in massa sollicitudin iaculis. Pellentesque a libero ultrices, sodales lacus eu, ornare dui. In laoreet est nec dolor aliquam consectetur. Integer iaculis felis venenatis libero pulvinar, ut pretium odio interdum. Donec in nisi eu dolor varius vestibulum eget vel nunc. Morbi a venenatis quam, in vehicula justo. Nam risus dui, auctor eu accumsan at, sagittis ac lectus. Mauris iaculis dignissim interdum. Cras cursus dapibus auctor. Donec sagittis massa vitae tortor viverra vehicula. Mauris fringilla nunc eu lorem ultrices placerat. Maecenas posuere porta quam at semper. Praesent eu bibendum eros. Nunc congue sollicitudin ante, sollicitudin lacinia magna cursus vitae. "; zip-0.5.13/src/compression.rs000064400000000000000000000135240000000000000141740ustar 00000000000000//! Possible ZIP compression methods. use std::fmt; #[allow(deprecated)] /// Identifies the storage format used to compress a file within a ZIP archive. /// /// Each file's compression method is stored alongside it, allowing the /// contents to be read without context. 
/// /// When creating ZIP files, you may choose the method to use with /// [`zip::write::FileOptions::compression_method`] #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum CompressionMethod { /// Store the file as is Stored, /// Compress the file using Deflate #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflated, /// Compress the file using BZIP2 #[cfg(feature = "bzip2")] Bzip2, /// Unsupported compression method #[deprecated(since = "0.5.7", note = "use the constants instead")] Unsupported(u16), } #[allow(deprecated, missing_docs)] /// All compression methods defined for the ZIP format impl CompressionMethod { pub const STORE: Self = CompressionMethod::Stored; pub const SHRINK: Self = CompressionMethod::Unsupported(1); pub const REDUCE_1: Self = CompressionMethod::Unsupported(2); pub const REDUCE_2: Self = CompressionMethod::Unsupported(3); pub const REDUCE_3: Self = CompressionMethod::Unsupported(4); pub const REDUCE_4: Self = CompressionMethod::Unsupported(5); pub const IMPLODE: Self = CompressionMethod::Unsupported(6); #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] pub const DEFLATE: Self = CompressionMethod::Deflated; #[cfg(not(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" )))] pub const DEFLATE: Self = CompressionMethod::Unsupported(8); pub const DEFLATE64: Self = CompressionMethod::Unsupported(9); pub const PKWARE_IMPLODE: Self = CompressionMethod::Unsupported(10); #[cfg(feature = "bzip2")] pub const BZIP2: Self = CompressionMethod::Bzip2; #[cfg(not(feature = "bzip2"))] pub const BZIP2: Self = CompressionMethod::Unsupported(12); pub const LZMA: Self = CompressionMethod::Unsupported(14); pub const IBM_ZOS_CMPSC: Self = CompressionMethod::Unsupported(16); pub const IBM_TERSE: Self = CompressionMethod::Unsupported(18); pub const ZSTD_DEPRECATED: Self = CompressionMethod::Unsupported(20); pub const ZSTD: Self = CompressionMethod::Unsupported(93); pub const MP3: Self = CompressionMethod::Unsupported(94); pub const XZ: Self = CompressionMethod::Unsupported(95); pub const JPEG: Self = CompressionMethod::Unsupported(96); pub const WAVPACK: Self = CompressionMethod::Unsupported(97); pub const PPMD: Self = CompressionMethod::Unsupported(98); } impl CompressionMethod { /// Converts an u16 to its corresponding CompressionMethod #[deprecated( since = "0.5.7", note = "use a constant to construct a compression method" )] pub fn from_u16(val: u16) -> CompressionMethod { #[allow(deprecated)] match val { 0 => CompressionMethod::Stored, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] 8 => CompressionMethod::Deflated, #[cfg(feature = "bzip2")] 12 => CompressionMethod::Bzip2, v => CompressionMethod::Unsupported(v), } } /// Converts a CompressionMethod to a u16 #[deprecated( since = "0.5.7", note = "to match on other compression methods, use a constant" )] pub fn to_u16(self) -> u16 { #[allow(deprecated)] match self { CompressionMethod::Stored => 0, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => 8, #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => 12, CompressionMethod::Unsupported(v) => v, } } } impl fmt::Display for CompressionMethod { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Just duplicate what the Debug format looks like, i.e, the enum key: write!(f, "{:?}", self) } } #[cfg(test)] mod test { use super::CompressionMethod; #[test] fn from_eq_to() { 
for v in 0..(::std::u16::MAX as u32 + 1) { #[allow(deprecated)] let from = CompressionMethod::from_u16(v as u16); #[allow(deprecated)] let to = from.to_u16() as u32; assert_eq!(v, to); } } fn methods() -> Vec { let mut methods = Vec::new(); methods.push(CompressionMethod::Stored); #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] methods.push(CompressionMethod::Deflated); #[cfg(feature = "bzip2")] methods.push(CompressionMethod::Bzip2); methods } #[test] fn to_eq_from() { fn check_match(method: CompressionMethod) { #[allow(deprecated)] let to = method.to_u16(); #[allow(deprecated)] let from = CompressionMethod::from_u16(to); #[allow(deprecated)] let back = from.to_u16(); assert_eq!(to, back); } for method in methods() { check_match(method); } } #[test] fn to_display_fmt() { fn check_match(method: CompressionMethod) { let debug_str = format!("{:?}", method); let display_str = format!("{}", method); assert_eq!(debug_str, display_str); } for method in methods() { check_match(method); } } } zip-0.5.13/src/cp437.rs000064400000000000000000000114640000000000000124740ustar 00000000000000//! Convert a string in IBM codepage 437 to UTF-8 /// Trait to convert IBM codepage 437 to the target type pub trait FromCp437 { /// Target type type Target; /// Function that does the conversion from cp437. /// Gennerally allocations will be avoided if all data falls into the ASCII range. fn from_cp437(self) -> Self::Target; } impl<'a> FromCp437 for &'a [u8] { type Target = ::std::borrow::Cow<'a, str>; fn from_cp437(self) -> Self::Target { if self.iter().all(|c| *c < 0x80) { ::std::str::from_utf8(self).unwrap().into() } else { self.iter().map(|c| to_char(*c)).collect::().into() } } } impl FromCp437 for Vec { type Target = String; fn from_cp437(self) -> Self::Target { if self.iter().all(|c| *c < 0x80) { String::from_utf8(self).unwrap() } else { self.into_iter().map(to_char).collect() } } } fn to_char(input: u8) -> char { let output = match input { 0x00..=0x7f => input as u32, 0x80 => 0x00c7, 0x81 => 0x00fc, 0x82 => 0x00e9, 0x83 => 0x00e2, 0x84 => 0x00e4, 0x85 => 0x00e0, 0x86 => 0x00e5, 0x87 => 0x00e7, 0x88 => 0x00ea, 0x89 => 0x00eb, 0x8a => 0x00e8, 0x8b => 0x00ef, 0x8c => 0x00ee, 0x8d => 0x00ec, 0x8e => 0x00c4, 0x8f => 0x00c5, 0x90 => 0x00c9, 0x91 => 0x00e6, 0x92 => 0x00c6, 0x93 => 0x00f4, 0x94 => 0x00f6, 0x95 => 0x00f2, 0x96 => 0x00fb, 0x97 => 0x00f9, 0x98 => 0x00ff, 0x99 => 0x00d6, 0x9a => 0x00dc, 0x9b => 0x00a2, 0x9c => 0x00a3, 0x9d => 0x00a5, 0x9e => 0x20a7, 0x9f => 0x0192, 0xa0 => 0x00e1, 0xa1 => 0x00ed, 0xa2 => 0x00f3, 0xa3 => 0x00fa, 0xa4 => 0x00f1, 0xa5 => 0x00d1, 0xa6 => 0x00aa, 0xa7 => 0x00ba, 0xa8 => 0x00bf, 0xa9 => 0x2310, 0xaa => 0x00ac, 0xab => 0x00bd, 0xac => 0x00bc, 0xad => 0x00a1, 0xae => 0x00ab, 0xaf => 0x00bb, 0xb0 => 0x2591, 0xb1 => 0x2592, 0xb2 => 0x2593, 0xb3 => 0x2502, 0xb4 => 0x2524, 0xb5 => 0x2561, 0xb6 => 0x2562, 0xb7 => 0x2556, 0xb8 => 0x2555, 0xb9 => 0x2563, 0xba => 0x2551, 0xbb => 0x2557, 0xbc => 0x255d, 0xbd => 0x255c, 0xbe => 0x255b, 0xbf => 0x2510, 0xc0 => 0x2514, 0xc1 => 0x2534, 0xc2 => 0x252c, 0xc3 => 0x251c, 0xc4 => 0x2500, 0xc5 => 0x253c, 0xc6 => 0x255e, 0xc7 => 0x255f, 0xc8 => 0x255a, 0xc9 => 0x2554, 0xca => 0x2569, 0xcb => 0x2566, 0xcc => 0x2560, 0xcd => 0x2550, 0xce => 0x256c, 0xcf => 0x2567, 0xd0 => 0x2568, 0xd1 => 0x2564, 0xd2 => 0x2565, 0xd3 => 0x2559, 0xd4 => 0x2558, 0xd5 => 0x2552, 0xd6 => 0x2553, 0xd7 => 0x256b, 0xd8 => 0x256a, 0xd9 => 0x2518, 0xda => 0x250c, 0xdb => 0x2588, 0xdc => 0x2584, 0xdd => 0x258c, 0xde => 0x2590, 0xdf => 
0x2580, 0xe0 => 0x03b1, 0xe1 => 0x00df, 0xe2 => 0x0393, 0xe3 => 0x03c0, 0xe4 => 0x03a3, 0xe5 => 0x03c3, 0xe6 => 0x00b5, 0xe7 => 0x03c4, 0xe8 => 0x03a6, 0xe9 => 0x0398, 0xea => 0x03a9, 0xeb => 0x03b4, 0xec => 0x221e, 0xed => 0x03c6, 0xee => 0x03b5, 0xef => 0x2229, 0xf0 => 0x2261, 0xf1 => 0x00b1, 0xf2 => 0x2265, 0xf3 => 0x2264, 0xf4 => 0x2320, 0xf5 => 0x2321, 0xf6 => 0x00f7, 0xf7 => 0x2248, 0xf8 => 0x00b0, 0xf9 => 0x2219, 0xfa => 0x00b7, 0xfb => 0x221a, 0xfc => 0x207f, 0xfd => 0x00b2, 0xfe => 0x25a0, 0xff => 0x00a0, }; ::std::char::from_u32(output).unwrap() } #[cfg(test)] mod test { #[test] fn to_char_valid() { for i in 0x00_u32..0x100 { super::to_char(i as u8); } } #[test] fn ascii() { for i in 0x00..0x80 { assert_eq!(super::to_char(i), i as char); } } #[test] fn example_slice() { use super::FromCp437; let data = b"Cura\x87ao"; assert!(::std::str::from_utf8(data).is_err()); assert_eq!(data.from_cp437(), "Curaçao"); } #[test] fn example_vec() { use super::FromCp437; let data = vec![0xCC, 0xCD, 0xCD, 0xB9]; assert!(String::from_utf8(data.clone()).is_err()); assert_eq!(&data.from_cp437(), "╠══╣"); } } zip-0.5.13/src/crc32.rs000064400000000000000000000046620000000000000125520ustar 00000000000000//! Helper module to compute a CRC32 checksum use std::io; use std::io::prelude::*; use crc32fast::Hasher; /// Reader that validates the CRC32 when it reaches the EOF. pub struct Crc32Reader { inner: R, hasher: Hasher, check: u32, } impl Crc32Reader { /// Get a new Crc32Reader which check the inner reader against checksum. pub fn new(inner: R, checksum: u32) -> Crc32Reader { Crc32Reader { inner, hasher: Hasher::new(), check: checksum, } } fn check_matches(&self) -> bool { self.check == self.hasher.clone().finalize() } pub fn into_inner(self) -> R { self.inner } } impl Read for Crc32Reader { fn read(&mut self, buf: &mut [u8]) -> io::Result { let count = match self.inner.read(buf) { Ok(0) if !buf.is_empty() && !self.check_matches() => { return Err(io::Error::new(io::ErrorKind::Other, "Invalid checksum")) } Ok(n) => n, Err(e) => return Err(e), }; self.hasher.update(&buf[0..count]); Ok(count) } } #[cfg(test)] mod test { use super::*; use std::io::Read; #[test] fn test_empty_reader() { let data: &[u8] = b""; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0); assert_eq!(reader.read(&mut buf).unwrap(), 0); let mut reader = Crc32Reader::new(data, 1); assert!(reader .read(&mut buf) .unwrap_err() .to_string() .contains("Invalid checksum")); } #[test] fn test_byte_by_byte() { let data: &[u8] = b"1234"; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 0); // Can keep reading 0 bytes after the end assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn test_zero_read() { let data: &[u8] = b"1234"; let mut buf = [0; 5]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3); assert_eq!(reader.read(&mut buf[..0]).unwrap(), 0); assert_eq!(reader.read(&mut buf).unwrap(), 4); } } zip-0.5.13/src/lib.rs000064400000000000000000000011160000000000000123730ustar 00000000000000//! An ergonomic API for reading and writing ZIP files. //! //! 
The current implementation is based on [PKWARE's APPNOTE.TXT v6.3.9](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT) // TODO(#184): Decide on the crate's bias: Do we prioritise permissiveness/correctness/speed/ergonomics? #![warn(missing_docs)] pub use crate::compression::CompressionMethod; pub use crate::read::ZipArchive; pub use crate::types::DateTime; pub use crate::write::ZipWriter; mod compression; mod cp437; mod crc32; pub mod read; pub mod result; mod spec; mod types; pub mod write; mod zipcrypto; zip-0.5.13/src/read.rs000064400000000000000000001142600000000000000125450ustar 00000000000000//! Types for reading ZIP archives use crate::compression::CompressionMethod; use crate::crc32::Crc32Reader; use crate::result::{InvalidPassword, ZipError, ZipResult}; use crate::spec; use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator}; use std::borrow::Cow; use std::collections::HashMap; use std::io::{self, prelude::*}; use std::path::{Component, Path}; use crate::cp437::FromCp437; use crate::types::{DateTime, System, ZipFileData}; use byteorder::{LittleEndian, ReadBytesExt}; #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] use flate2::read::DeflateDecoder; #[cfg(feature = "bzip2")] use bzip2::read::BzDecoder; mod ffi { pub const S_IFDIR: u32 = 0o0040000; pub const S_IFREG: u32 = 0o0100000; } /// ZIP archive reader /// /// ```no_run /// use std::io::prelude::*; /// fn list_zip_contents(reader: impl Read + Seek) -> zip::result::ZipResult<()> { /// let mut zip = zip::ZipArchive::new(reader)?; /// /// for i in 0..zip.len() { /// let mut file = zip.by_index(i)?; /// println!("Filename: {}", file.name()); /// std::io::copy(&mut file, &mut std::io::stdout()); /// } /// /// Ok(()) /// } /// ``` #[derive(Clone, Debug)] pub struct ZipArchive { reader: R, files: Vec, names_map: HashMap, offset: u64, comment: Vec, } enum CryptoReader<'a> { Plaintext(io::Take<&'a mut dyn Read>), ZipCrypto(ZipCryptoReaderValid>), } impl<'a> Read for CryptoReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self { CryptoReader::Plaintext(r) => r.read(buf), CryptoReader::ZipCrypto(r) => r.read(buf), } } } impl<'a> CryptoReader<'a> { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> io::Take<&'a mut dyn Read> { match self { CryptoReader::Plaintext(r) => r, CryptoReader::ZipCrypto(r) => r.into_inner(), } } } enum ZipFileReader<'a> { NoReader, Raw(io::Take<&'a mut dyn io::Read>), Stored(Crc32Reader>), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflated(Crc32Reader>>), #[cfg(feature = "bzip2")] Bzip2(Crc32Reader>>), } impl<'a> Read for ZipFileReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self { ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"), ZipFileReader::Raw(r) => r.read(buf), ZipFileReader::Stored(r) => r.read(buf), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] ZipFileReader::Deflated(r) => r.read(buf), #[cfg(feature = "bzip2")] ZipFileReader::Bzip2(r) => r.read(buf), } } } impl<'a> ZipFileReader<'a> { /// Consumes this decoder, returning the underlying reader. 
pub fn into_inner(self) -> io::Take<&'a mut dyn Read> { match self { ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"), ZipFileReader::Raw(r) => r, ZipFileReader::Stored(r) => r.into_inner().into_inner(), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] ZipFileReader::Deflated(r) => r.into_inner().into_inner().into_inner(), #[cfg(feature = "bzip2")] ZipFileReader::Bzip2(r) => r.into_inner().into_inner().into_inner(), } } } /// A struct for reading a zip file pub struct ZipFile<'a> { data: Cow<'a, ZipFileData>, crypto_reader: Option>, reader: ZipFileReader<'a>, } fn find_content<'a>( data: &mut ZipFileData, reader: &'a mut (impl Read + Seek), ) -> ZipResult> { // Parse local header reader.seek(io::SeekFrom::Start(data.header_start))?; let signature = reader.read_u32::()?; if signature != spec::LOCAL_FILE_HEADER_SIGNATURE { return Err(ZipError::InvalidArchive("Invalid local file header")); } reader.seek(io::SeekFrom::Current(22))?; let file_name_length = reader.read_u16::()? as u64; let extra_field_length = reader.read_u16::()? as u64; let magic_and_header = 4 + 22 + 2 + 2; data.data_start = data.header_start + magic_and_header + file_name_length + extra_field_length; reader.seek(io::SeekFrom::Start(data.data_start))?; Ok((reader as &mut dyn Read).take(data.compressed_size)) } fn make_crypto_reader<'a>( compression_method: crate::compression::CompressionMethod, crc32: u32, last_modified_time: DateTime, using_data_descriptor: bool, reader: io::Take<&'a mut dyn io::Read>, password: Option<&[u8]>, ) -> ZipResult, InvalidPassword>> { #[allow(deprecated)] { if let CompressionMethod::Unsupported(_) = compression_method { return unsupported_zip_error("Compression method not supported"); } } let reader = match password { None => CryptoReader::Plaintext(reader), Some(password) => { let validator = if using_data_descriptor { ZipCryptoValidator::InfoZipMsdosTime(last_modified_time.timepart()) } else { ZipCryptoValidator::PkzipCrc32(crc32) }; match ZipCryptoReader::new(reader, password).validate(validator)? { None => return Ok(Err(InvalidPassword)), Some(r) => CryptoReader::ZipCrypto(r), } } }; Ok(Ok(reader)) } fn make_reader<'a>( compression_method: CompressionMethod, crc32: u32, reader: CryptoReader<'a>, ) -> ZipFileReader<'a> { match compression_method { CompressionMethod::Stored => ZipFileReader::Stored(Crc32Reader::new(reader, crc32)), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => { let deflate_reader = DeflateDecoder::new(reader); ZipFileReader::Deflated(Crc32Reader::new(deflate_reader, crc32)) } #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => { let bzip2_reader = BzDecoder::new(reader); ZipFileReader::Bzip2(Crc32Reader::new(bzip2_reader, crc32)) } _ => panic!("Compression method not supported"), } } impl ZipArchive { /// Get the directory start offset and number of files. This is done in a /// separate function to ease the control flow design. pub(crate) fn get_directory_counts( reader: &mut R, footer: &spec::CentralDirectoryEnd, cde_start_pos: u64, ) -> ZipResult<(u64, u64, usize)> { // See if there's a ZIP64 footer. The ZIP64 locator if present will // have its signature 20 bytes in front of the standard footer. The // standard footer, in turn, is 22+N bytes large, where N is the // comment length. 
Therefore: let zip64locator = if reader .seek(io::SeekFrom::End( -(20 + 22 + footer.zip_file_comment.len() as i64), )) .is_ok() { match spec::Zip64CentralDirectoryEndLocator::parse(reader) { Ok(loc) => Some(loc), Err(ZipError::InvalidArchive(_)) => { // No ZIP64 header; that's actually fine. We're done here. None } Err(e) => { // Yikes, a real problem return Err(e); } } } else { // Empty Zip files will have nothing else so this error might be fine. If // not, we'll find out soon. None }; match zip64locator { None => { // Some zip files have data prepended to them, resulting in the // offsets all being too small. Get the amount of error by comparing // the actual file position we found the CDE at with the offset // recorded in the CDE. let archive_offset = cde_start_pos .checked_sub(footer.central_directory_size as u64) .and_then(|x| x.checked_sub(footer.central_directory_offset as u64)) .ok_or(ZipError::InvalidArchive( "Invalid central directory size or offset", ))?; let directory_start = footer.central_directory_offset as u64 + archive_offset; let number_of_files = footer.number_of_files_on_this_disk as usize; Ok((archive_offset, directory_start, number_of_files)) } Some(locator64) => { // If we got here, this is indeed a ZIP64 file. if footer.disk_number as u32 != locator64.disk_with_central_directory { return unsupported_zip_error( "Support for multi-disk files is not implemented", ); } // We need to reassess `archive_offset`. We know where the ZIP64 // central-directory-end structure *should* be, but unfortunately we // don't know how to precisely relate that location to our current // actual offset in the file, since there may be junk at its // beginning. Therefore we need to perform another search, as in // read::CentralDirectoryEnd::find_and_parse, except now we search // forward. 
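// For reference, the end of a ZIP archive is laid out as follows (per APPNOTE;
// fixed sizes shown, the EOCD may additionally carry an up-to-64 KiB comment):
//
//   [ central directory ]
//   [ zip64 end of central directory record  ]  >= 56 bytes
//   [ zip64 end of central directory locator ]  20 bytes
//   [ end of central directory record (EOCD) ]  22 bytes + comment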
let search_upper_bound = cde_start_pos .checked_sub(60) // minimum size of Zip64CentralDirectoryEnd + Zip64CentralDirectoryEndLocator .ok_or(ZipError::InvalidArchive( "File cannot contain ZIP64 central directory end", ))?; let (footer, archive_offset) = spec::Zip64CentralDirectoryEnd::find_and_parse( reader, locator64.end_of_central_directory_offset, search_upper_bound, )?; if footer.disk_number != footer.disk_with_central_directory { return unsupported_zip_error( "Support for multi-disk files is not implemented", ); } let directory_start = footer .central_directory_offset .checked_add(archive_offset) .ok_or_else(|| { ZipError::InvalidArchive("Invalid central directory size or offset") })?; Ok(( archive_offset, directory_start, footer.number_of_files as usize, )) } } } /// Read a ZIP archive, collecting the files it contains /// /// This uses the central directory record of the ZIP file, and ignores local file headers pub fn new(mut reader: R) -> ZipResult> { let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut reader)?; if footer.disk_number != footer.disk_with_central_directory { return unsupported_zip_error("Support for multi-disk files is not implemented"); } let (archive_offset, directory_start, number_of_files) = Self::get_directory_counts(&mut reader, &footer, cde_start_pos)?; let mut files = Vec::new(); let mut names_map = HashMap::new(); if let Err(_) = reader.seek(io::SeekFrom::Start(directory_start)) { return Err(ZipError::InvalidArchive( "Could not seek to start of central directory", )); } for _ in 0..number_of_files { let file = central_header_to_zip_file(&mut reader, archive_offset)?; names_map.insert(file.file_name.clone(), files.len()); files.push(file); } Ok(ZipArchive { reader, files, names_map, offset: archive_offset, comment: footer.zip_file_comment, }) } /// Extract a Zip archive into a directory, overwriting files if they /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`]. /// /// Extraction is not atomic; If an error is encountered, some of the files /// may be left on disk. pub fn extract>(&mut self, directory: P) -> ZipResult<()> { use std::fs; for i in 0..self.len() { let mut file = self.by_index(i)?; let filepath = file .enclosed_name() .ok_or(ZipError::InvalidArchive("Invalid file path"))?; let outpath = directory.as_ref().join(filepath); if file.name().ends_with('/') { fs::create_dir_all(&outpath)?; } else { if let Some(p) = outpath.parent() { if !p.exists() { fs::create_dir_all(&p)?; } } let mut outfile = fs::File::create(&outpath)?; io::copy(&mut file, &mut outfile)?; } // Get and Set permissions #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; if let Some(mode) = file.unix_mode() { fs::set_permissions(&outpath, fs::Permissions::from_mode(mode))?; } } } Ok(()) } /// Number of files contained in this zip. pub fn len(&self) -> usize { self.files.len() } /// Whether this zip archive contains no files pub fn is_empty(&self) -> bool { self.len() == 0 } /// Get the offset from the beginning of the underlying reader that this zip begins at, in bytes. /// /// Normally this value is zero, but if the zip has arbitrary data prepended to it, then this value will be the size /// of that prepended data. pub fn offset(&self) -> u64 { self.offset } /// Get the comment of the zip archive. pub fn comment(&self) -> &[u8] { &self.comment } /// Returns an iterator over all the file and directory names in this archive. 
pub fn file_names(&self) -> impl Iterator { self.names_map.keys().map(|s| s.as_str()) } /// Search for a file entry by name, decrypt with given password pub fn by_name_decrypt<'a>( &'a mut self, name: &str, password: &[u8], ) -> ZipResult, InvalidPassword>> { self.by_name_with_optional_password(name, Some(password)) } /// Search for a file entry by name pub fn by_name<'a>(&'a mut self, name: &str) -> ZipResult> { Ok(self.by_name_with_optional_password(name, None)?.unwrap()) } fn by_name_with_optional_password<'a>( &'a mut self, name: &str, password: Option<&[u8]>, ) -> ZipResult, InvalidPassword>> { let index = match self.names_map.get(name) { Some(index) => *index, None => { return Err(ZipError::FileNotFound); } }; self.by_index_with_optional_password(index, password) } /// Get a contained file by index, decrypt with given password pub fn by_index_decrypt<'a>( &'a mut self, file_number: usize, password: &[u8], ) -> ZipResult, InvalidPassword>> { self.by_index_with_optional_password(file_number, Some(password)) } /// Get a contained file by index pub fn by_index<'a>(&'a mut self, file_number: usize) -> ZipResult> { Ok(self .by_index_with_optional_password(file_number, None)? .unwrap()) } /// Get a contained file by index without decompressing it pub fn by_index_raw<'a>(&'a mut self, file_number: usize) -> ZipResult> { let reader = &mut self.reader; self.files .get_mut(file_number) .ok_or(ZipError::FileNotFound) .and_then(move |data| { Ok(ZipFile { crypto_reader: None, reader: ZipFileReader::Raw(find_content(data, reader)?), data: Cow::Borrowed(data), }) }) } fn by_index_with_optional_password<'a>( &'a mut self, file_number: usize, mut password: Option<&[u8]>, ) -> ZipResult, InvalidPassword>> { if file_number >= self.files.len() { return Err(ZipError::FileNotFound); } let data = &mut self.files[file_number]; match (password, data.encrypted) { (None, true) => return Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)), (Some(_), false) => password = None, //Password supplied, but none needed! Discard. _ => {} } let limit_reader = find_content(data, &mut self.reader)?; match make_crypto_reader( data.compression_method, data.crc32, data.last_modified_time, data.using_data_descriptor, limit_reader, password, ) { Ok(Ok(crypto_reader)) => Ok(Ok(ZipFile { crypto_reader: Some(crypto_reader), reader: ZipFileReader::NoReader, data: Cow::Borrowed(data), })), Err(e) => Err(e), Ok(Err(e)) => Ok(Err(e)), } } /// Unwrap and return the inner reader object /// /// The position of the reader is undefined. pub fn into_inner(self) -> R { self.reader } } fn unsupported_zip_error(detail: &'static str) -> ZipResult { Err(ZipError::UnsupportedArchive(detail)) } /// Parse a central directory entry to collect the information for the file. 
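///
/// The names are yielded in arbitrary order, as they are backed by an
/// internal `HashMap`.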
pub(crate) fn central_header_to_zip_file( reader: &mut R, archive_offset: u64, ) -> ZipResult { let central_header_start = reader.seek(io::SeekFrom::Current(0))?; // Parse central header let signature = reader.read_u32::()?; if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE { return Err(ZipError::InvalidArchive("Invalid Central Directory header")); } let version_made_by = reader.read_u16::()?; let _version_to_extract = reader.read_u16::()?; let flags = reader.read_u16::()?; let encrypted = flags & 1 == 1; let is_utf8 = flags & (1 << 11) != 0; let using_data_descriptor = flags & (1 << 3) != 0; let compression_method = reader.read_u16::()?; let last_mod_time = reader.read_u16::()?; let last_mod_date = reader.read_u16::()?; let crc32 = reader.read_u32::()?; let compressed_size = reader.read_u32::()?; let uncompressed_size = reader.read_u32::()?; let file_name_length = reader.read_u16::()? as usize; let extra_field_length = reader.read_u16::()? as usize; let file_comment_length = reader.read_u16::()? as usize; let _disk_number = reader.read_u16::()?; let _internal_file_attributes = reader.read_u16::()?; let external_file_attributes = reader.read_u32::()?; let offset = reader.read_u32::()? as u64; let mut file_name_raw = vec![0; file_name_length]; reader.read_exact(&mut file_name_raw)?; let mut extra_field = vec![0; extra_field_length]; reader.read_exact(&mut extra_field)?; let mut file_comment_raw = vec![0; file_comment_length]; reader.read_exact(&mut file_comment_raw)?; let file_name = match is_utf8 { true => String::from_utf8_lossy(&*file_name_raw).into_owned(), false => file_name_raw.clone().from_cp437(), }; let file_comment = match is_utf8 { true => String::from_utf8_lossy(&*file_comment_raw).into_owned(), false => file_comment_raw.from_cp437(), }; // Construct the result let mut result = ZipFileData { system: System::from_u8((version_made_by >> 8) as u8), version_made_by: version_made_by as u8, encrypted, using_data_descriptor, compression_method: { #[allow(deprecated)] CompressionMethod::from_u16(compression_method) }, last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time), crc32, compressed_size: compressed_size as u64, uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, extra_field, file_comment, header_start: offset, central_header_start, data_start: 0, external_attributes: external_file_attributes, large_file: false, }; match parse_extra_field(&mut result) { Ok(..) | Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } // Account for shifted zip offsets. 
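///
/// Expects `reader` to be positioned at the start of a central directory
/// header. `archive_offset` is the number of bytes prepended to the archive
/// and is added to the stored local header offset so it can later be used
/// for seeking.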
result.header_start += archive_offset; Ok(result) } fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> { let mut reader = io::Cursor::new(&file.extra_field); while (reader.position() as usize) < file.extra_field.len() { let kind = reader.read_u16::()?; let len = reader.read_u16::()?; let mut len_left = len as i64; // Zip64 extended information extra field if kind == 0x0001 { if file.uncompressed_size == 0xFFFFFFFF { file.large_file = true; file.uncompressed_size = reader.read_u64::()?; len_left -= 8; } if file.compressed_size == 0xFFFFFFFF { file.large_file = true; file.compressed_size = reader.read_u64::()?; len_left -= 8; } if file.header_start == 0xFFFFFFFF { file.header_start = reader.read_u64::()?; len_left -= 8; } // Unparsed fields: // u32: disk start number } // We could also check for < 0 to check for errors if len_left > 0 { reader.seek(io::SeekFrom::Current(len_left))?; } } Ok(()) } /// Methods for retrieving information on zip files impl<'a> ZipFile<'a> { fn get_reader(&mut self) -> &mut ZipFileReader<'a> { if let ZipFileReader::NoReader = self.reader { let data = &self.data; let crypto_reader = self.crypto_reader.take().expect("Invalid reader state"); self.reader = make_reader(data.compression_method, data.crc32, crypto_reader) } &mut self.reader } pub(crate) fn get_raw_reader(&mut self) -> &mut dyn Read { if let ZipFileReader::NoReader = self.reader { let crypto_reader = self.crypto_reader.take().expect("Invalid reader state"); self.reader = ZipFileReader::Raw(crypto_reader.into_inner()) } &mut self.reader } /// Get the version of the file pub fn version_made_by(&self) -> (u8, u8) { ( self.data.version_made_by / 10, self.data.version_made_by % 10, ) } /// Get the name of the file /// /// # Warnings /// /// It is dangerous to use this name directly when extracting an archive. /// It may contain an absolute path (`/etc/shadow`), or break out of the /// current directory (`../runtime`). Carelessly writing to these paths /// allows an attacker to craft a ZIP archive that will overwrite critical /// files. /// /// You can use the [`ZipFile::enclosed_name`] method to validate the name /// as a safe path. pub fn name(&self) -> &str { &self.data.file_name } /// Get the name of the file, in the raw (internal) byte representation. /// /// The encoding of this data is currently undefined. pub fn name_raw(&self) -> &[u8] { &self.data.file_name_raw } /// Get the name of the file in a sanitized form. It truncates the name to the first NULL byte, /// removes a leading '/' and removes '..' parts. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. `mangled_name` can be used if this behaviour is desirable" )] pub fn sanitized_name(&self) -> ::std::path::PathBuf { self.mangled_name() } /// Rewrite the path, ignoring any path components with special meaning. /// /// - Absolute paths are made relative /// - [`ParentDir`]s are ignored /// - Truncates the filename at a NULL byte /// /// This is appropriate if you need to be able to extract *something* from /// any archive, but will easily misrepresent trivial paths like /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this, /// [`ZipFile::enclosed_name`] is the better option in most scenarios. /// /// [`ParentDir`]: `Component::ParentDir` pub fn mangled_name(&self) -> ::std::path::PathBuf { self.data.file_name_sanitized() } /// Ensure the file path is safe to use as a [`Path`]. 
/// /// - It can't contain NULL bytes /// - It can't resolve to a path outside the current directory /// > `foo/../bar` is fine, `foo/../../bar` is not. /// - It can't be an absolute path /// /// This will read well-formed ZIP files correctly, and is resistant /// to path-based exploits. It is recommended over /// [`ZipFile::mangled_name`]. pub fn enclosed_name(&self) -> Option<&Path> { if self.data.file_name.contains('\0') { return None; } let path = Path::new(&self.data.file_name); let mut depth = 0usize; for component in path.components() { match component { Component::Prefix(_) | Component::RootDir => return None, Component::ParentDir => depth = depth.checked_sub(1)?, Component::Normal(_) => depth += 1, Component::CurDir => (), } } Some(path) } /// Get the comment of the file pub fn comment(&self) -> &str { &self.data.file_comment } /// Get the compression method used to store the file pub fn compression(&self) -> CompressionMethod { self.data.compression_method } /// Get the size of the file in the archive pub fn compressed_size(&self) -> u64 { self.data.compressed_size } /// Get the size of the file when uncompressed pub fn size(&self) -> u64 { self.data.uncompressed_size } /// Get the time the file was last modified pub fn last_modified(&self) -> DateTime { self.data.last_modified_time } /// Returns whether the file is actually a directory pub fn is_dir(&self) -> bool { self.name() .chars() .rev() .next() .map_or(false, |c| c == '/' || c == '\\') } /// Returns whether the file is a regular file pub fn is_file(&self) -> bool { !self.is_dir() } /// Get unix mode for the file pub fn unix_mode(&self) -> Option { if self.data.external_attributes == 0 { return None; } match self.data.system { System::Unix => Some(self.data.external_attributes >> 16), System::Dos => { // Interpret MSDOS directory bit let mut mode = if 0x10 == (self.data.external_attributes & 0x10) { ffi::S_IFDIR | 0o0775 } else { ffi::S_IFREG | 0o0664 }; if 0x01 == (self.data.external_attributes & 0x01) { // Read-only bit; strip write permissions mode &= 0o0555; } Some(mode) } _ => None, } } /// Get the CRC32 hash of the original file pub fn crc32(&self) -> u32 { self.data.crc32 } /// Get the extra data of the zip header for this file pub fn extra_data(&self) -> &[u8] { &self.data.extra_field } /// Get the starting offset of the data of the compressed file pub fn data_start(&self) -> u64 { self.data.data_start } /// Get the starting offset of the zip header for this file pub fn header_start(&self) -> u64 { self.data.header_start } /// Get the starting offset of the zip header in the central directory for this file pub fn central_header_start(&self) -> u64 { self.data.central_header_start } } impl<'a> Read for ZipFile<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.get_reader().read(buf) } } impl<'a> Drop for ZipFile<'a> { fn drop(&mut self) { // self.data is Owned, this reader is constructed by a streaming reader. // In this case, we want to exhaust the reader so that the next file is accessible. if let Cow::Owned(_) = self.data { let mut buffer = [0; 1 << 16]; // Get the inner `Take` reader so all decryption, decompression and CRC calculation is skipped. 
let mut reader: std::io::Take<&mut dyn std::io::Read> = match &mut self.reader { ZipFileReader::NoReader => { let innerreader = ::std::mem::replace(&mut self.crypto_reader, None); innerreader.expect("Invalid reader state").into_inner() } reader => { let innerreader = ::std::mem::replace(reader, ZipFileReader::NoReader); innerreader.into_inner() } }; loop { match reader.read(&mut buffer) { Ok(0) => break, Ok(_) => (), Err(e) => panic!( "Could not consume all of the output of the current ZipFile: {:?}", e ), } } } } } /// Read ZipFile structures from a non-seekable reader. /// /// This is an alternative method to read a zip file. If possible, use the ZipArchive functions /// as some information will be missing when reading this manner. /// /// Reads a file header from the start of the stream. Will return `Ok(Some(..))` if a file is /// present at the start of the stream. Returns `Ok(None)` if the start of the central directory /// is encountered. No more files should be read after this. /// /// The Drop implementation of ZipFile ensures that the reader will be correctly positioned after /// the structure is done. /// /// Missing fields are: /// * `comment`: set to an empty string /// * `data_start`: set to 0 /// * `external_attributes`: `unix_mode()`: will return None pub fn read_zipfile_from_stream<'a, R: io::Read>( reader: &'a mut R, ) -> ZipResult>> { let signature = reader.read_u32::()?; match signature { spec::LOCAL_FILE_HEADER_SIGNATURE => (), spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE => return Ok(None), _ => return Err(ZipError::InvalidArchive("Invalid local file header")), } let version_made_by = reader.read_u16::()?; let flags = reader.read_u16::()?; let encrypted = flags & 1 == 1; let is_utf8 = flags & (1 << 11) != 0; let using_data_descriptor = flags & (1 << 3) != 0; #[allow(deprecated)] let compression_method = CompressionMethod::from_u16(reader.read_u16::()?); let last_mod_time = reader.read_u16::()?; let last_mod_date = reader.read_u16::()?; let crc32 = reader.read_u32::()?; let compressed_size = reader.read_u32::()?; let uncompressed_size = reader.read_u32::()?; let file_name_length = reader.read_u16::()? as usize; let extra_field_length = reader.read_u16::()? as usize; let mut file_name_raw = vec![0; file_name_length]; reader.read_exact(&mut file_name_raw)?; let mut extra_field = vec![0; extra_field_length]; reader.read_exact(&mut extra_field)?; let file_name = match is_utf8 { true => String::from_utf8_lossy(&*file_name_raw).into_owned(), false => file_name_raw.clone().from_cp437(), }; let mut result = ZipFileData { system: System::from_u8((version_made_by >> 8) as u8), version_made_by: version_made_by as u8, encrypted, using_data_descriptor, compression_method, last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time), crc32, compressed_size: compressed_size as u64, uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, extra_field, file_comment: String::new(), // file comment is only available in the central directory // header_start and data start are not available, but also don't matter, since seeking is // not available. header_start: 0, data_start: 0, central_header_start: 0, // The external_attributes field is only available in the central directory. // We set this to zero, which should be valid as the docs state 'If input came // from standard input, this field is set to zero.' external_attributes: 0, large_file: false, }; match parse_extra_field(&mut result) { Ok(..) 
| Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } if encrypted { return unsupported_zip_error("Encrypted files are not supported"); } if using_data_descriptor { return unsupported_zip_error("The file length is not available in the local header"); } let limit_reader = (reader as &'a mut dyn io::Read).take(result.compressed_size as u64); let result_crc32 = result.crc32; let result_compression_method = result.compression_method; let crypto_reader = make_crypto_reader( result_compression_method, result_crc32, result.last_modified_time, result.using_data_descriptor, limit_reader, None, )? .unwrap(); Ok(Some(ZipFile { data: Cow::Owned(result), crypto_reader: None, reader: make_reader(result_compression_method, result_crc32, crypto_reader), })) } #[cfg(test)] mod test { #[test] fn invalid_offset() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/invalid_offset.zip")); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } #[test] fn invalid_offset2() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/invalid_offset2.zip")); let reader = ZipArchive::new(io::Cursor::new(v)); assert!(reader.is_err()); } #[test] fn zip64_with_leading_junk() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/zip64_demo.zip")); let reader = ZipArchive::new(io::Cursor::new(v)).unwrap(); assert!(reader.len() == 1); } #[test] fn zip_contents() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader = ZipArchive::new(io::Cursor::new(v)).unwrap(); assert!(reader.comment() == b""); assert_eq!(reader.by_index(0).unwrap().central_header_start(), 77); } #[test] fn zip_read_streaming() { use super::read_zipfile_from_stream; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader = io::Cursor::new(v); loop { match read_zipfile_from_stream(&mut reader).unwrap() { None => break, _ => (), } } } #[test] fn zip_clone() { use super::ZipArchive; use std::io::{self, Read}; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); let mut reader1 = ZipArchive::new(io::Cursor::new(v)).unwrap(); let mut reader2 = reader1.clone(); let mut file1 = reader1.by_index(0).unwrap(); let mut file2 = reader2.by_index(0).unwrap(); let t = file1.last_modified(); assert_eq!( ( t.year(), t.month(), t.day(), t.hour(), t.minute(), t.second() ), (1980, 1, 1, 0, 0, 0) ); let mut buf1 = [0; 5]; let mut buf2 = [0; 5]; let mut buf3 = [0; 5]; let mut buf4 = [0; 5]; file1.read(&mut buf1).unwrap(); file2.read(&mut buf2).unwrap(); file1.read(&mut buf3).unwrap(); file2.read(&mut buf4).unwrap(); assert_eq!(buf1, buf2); assert_eq!(buf3, buf4); assert!(buf1 != buf3); } #[test] fn file_and_dir_predicates() { use super::ZipArchive; use std::io; let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/files_and_dirs.zip")); let mut zip = ZipArchive::new(io::Cursor::new(v)).unwrap(); for i in 0..zip.len() { let zip_file = zip.by_index(i).unwrap(); let full_name = zip_file.enclosed_name().unwrap(); let file_name = full_name.file_name().unwrap().to_str().unwrap(); assert!( (file_name.starts_with("dir") && zip_file.is_dir()) || (file_name.starts_with("file") && zip_file.is_file()) ); } } } 
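// Illustrative usage sketch (not part of the upstream test suite): shows how
// `ZipArchive::new`, `file_names`, `by_name` and `ZipFile::enclosed_name` fit
// together when reading an archive. It reuses the bundled
// `tests/data/mimetype.zip` fixture that the tests above already rely on.
#[cfg(test)]
mod usage_sketch {
    use super::ZipArchive;
    use std::io::{self, Read};

    #[test]
    fn list_and_read_all_entries() {
        let data = include_bytes!("../tests/data/mimetype.zip").to_vec();
        let mut archive = ZipArchive::new(io::Cursor::new(data)).unwrap();

        // `file_names` borrows the archive immutably, so collect the names
        // before taking the mutable borrows that `by_name` needs.
        let names: Vec<String> = archive.file_names().map(str::to_owned).collect();
        assert!(!names.is_empty());

        for name in names {
            let mut file = archive.by_name(&name).unwrap();
            // `enclosed_name` returns `None` for absolute or `..`-escaping
            // paths; the fixture only contains safe names.
            assert!(file.enclosed_name().is_some());
            let mut contents = Vec::new();
            file.read_to_end(&mut contents).unwrap();
            assert_eq!(contents.len() as u64, file.size());
        }
    }
}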
zip-0.5.13/src/result.rs000064400000000000000000000030160000000000000131440ustar 00000000000000//! Error types that can be emitted from this library use std::io; use thiserror::Error; /// Generic result type with ZipError as its error variant pub type ZipResult<T> = Result<T, ZipError>; /// The given password is wrong #[derive(Error, Debug)] #[error("invalid password for file in archive")] pub struct InvalidPassword; /// Error type for Zip #[derive(Debug, Error)] pub enum ZipError { /// An Error caused by I/O #[error(transparent)] Io(#[from] io::Error), /// This file is probably not a zip archive #[error("invalid Zip archive")] InvalidArchive(&'static str), /// This archive is not supported #[error("unsupported Zip archive")] UnsupportedArchive(&'static str), /// The requested file could not be found in the archive #[error("specified file not found in archive")] FileNotFound, } impl ZipError { /// The text used as an error when a password is required and not supplied /// /// ```rust,no_run /// # use zip::result::ZipError; /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap(); /// match archive.by_index(1) { /// Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)) => eprintln!("a password is needed to unzip this file"), /// _ => (), /// } /// # () /// ``` pub const PASSWORD_REQUIRED: &'static str = "Password required to decrypt file"; } impl From<ZipError> for io::Error { fn from(err: ZipError) -> io::Error { io::Error::new(io::ErrorKind::Other, err) } } zip-0.5.13/src/spec.rs000064400000000000000000000206750000000000000125700ustar 00000000000000use crate::result::{ZipError, ZipResult}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use std::io; use std::io::prelude::*; pub const LOCAL_FILE_HEADER_SIGNATURE: u32 = 0x04034b50; pub const CENTRAL_DIRECTORY_HEADER_SIGNATURE: u32 = 0x02014b50; const CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06054b50; pub const ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06064b50; const ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE: u32 = 0x07064b50; pub struct CentralDirectoryEnd { pub disk_number: u16, pub disk_with_central_directory: u16, pub number_of_files_on_this_disk: u16, pub number_of_files: u16, pub central_directory_size: u32, pub central_directory_offset: u32, pub zip_file_comment: Vec<u8>, } impl CentralDirectoryEnd { pub fn parse<T: Read>(reader: &mut T) -> ZipResult<CentralDirectoryEnd> { let magic = reader.read_u32::<LittleEndian>()?; if magic != CENTRAL_DIRECTORY_END_SIGNATURE { return Err(ZipError::InvalidArchive("Invalid digital signature header")); } let disk_number = reader.read_u16::<LittleEndian>()?; let disk_with_central_directory = reader.read_u16::<LittleEndian>()?; let number_of_files_on_this_disk = reader.read_u16::<LittleEndian>()?; let number_of_files = reader.read_u16::<LittleEndian>()?; let central_directory_size = reader.read_u32::<LittleEndian>()?; let central_directory_offset = reader.read_u32::<LittleEndian>()?; let zip_file_comment_length = reader.read_u16::<LittleEndian>()?
as usize; let mut zip_file_comment = vec![0; zip_file_comment_length]; reader.read_exact(&mut zip_file_comment)?; Ok(CentralDirectoryEnd { disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment, }) } pub fn find_and_parse( reader: &mut T, ) -> ZipResult<(CentralDirectoryEnd, u64)> { const HEADER_SIZE: u64 = 22; const BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE: u64 = HEADER_SIZE - 6; let file_length = reader.seek(io::SeekFrom::End(0))?; let search_upper_bound = file_length.saturating_sub(HEADER_SIZE + ::std::u16::MAX as u64); if file_length < HEADER_SIZE { return Err(ZipError::InvalidArchive("Invalid zip header")); } let mut pos = file_length - HEADER_SIZE; while pos >= search_upper_bound { reader.seek(io::SeekFrom::Start(pos as u64))?; if reader.read_u32::()? == CENTRAL_DIRECTORY_END_SIGNATURE { reader.seek(io::SeekFrom::Current( BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE as i64, ))?; let cde_start_pos = reader.seek(io::SeekFrom::Start(pos as u64))?; return CentralDirectoryEnd::parse(reader).map(|cde| (cde, cde_start_pos)); } pos = match pos.checked_sub(1) { Some(p) => p, None => break, }; } Err(ZipError::InvalidArchive( "Could not find central directory end", )) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(CENTRAL_DIRECTORY_END_SIGNATURE)?; writer.write_u16::(self.disk_number)?; writer.write_u16::(self.disk_with_central_directory)?; writer.write_u16::(self.number_of_files_on_this_disk)?; writer.write_u16::(self.number_of_files)?; writer.write_u32::(self.central_directory_size)?; writer.write_u32::(self.central_directory_offset)?; writer.write_u16::(self.zip_file_comment.len() as u16)?; writer.write_all(&self.zip_file_comment)?; Ok(()) } } pub struct Zip64CentralDirectoryEndLocator { pub disk_with_central_directory: u32, pub end_of_central_directory_offset: u64, pub number_of_disks: u32, } impl Zip64CentralDirectoryEndLocator { pub fn parse(reader: &mut T) -> ZipResult { let magic = reader.read_u32::()?; if magic != ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE { return Err(ZipError::InvalidArchive( "Invalid zip64 locator digital signature header", )); } let disk_with_central_directory = reader.read_u32::()?; let end_of_central_directory_offset = reader.read_u64::()?; let number_of_disks = reader.read_u32::()?; Ok(Zip64CentralDirectoryEndLocator { disk_with_central_directory, end_of_central_directory_offset, number_of_disks, }) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?; writer.write_u32::(self.disk_with_central_directory)?; writer.write_u64::(self.end_of_central_directory_offset)?; writer.write_u32::(self.number_of_disks)?; Ok(()) } } pub struct Zip64CentralDirectoryEnd { pub version_made_by: u16, pub version_needed_to_extract: u16, pub disk_number: u32, pub disk_with_central_directory: u32, pub number_of_files_on_this_disk: u64, pub number_of_files: u64, pub central_directory_size: u64, pub central_directory_offset: u64, //pub extensible_data_sector: Vec, <-- We don't do anything with this at the moment. } impl Zip64CentralDirectoryEnd { pub fn find_and_parse( reader: &mut T, nominal_offset: u64, search_upper_bound: u64, ) -> ZipResult<(Zip64CentralDirectoryEnd, u64)> { let mut pos = nominal_offset; while pos <= search_upper_bound { reader.seek(io::SeekFrom::Start(pos))?; if reader.read_u32::()? 
== ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE { let archive_offset = pos - nominal_offset; let _record_size = reader.read_u64::()?; // We would use this value if we did anything with the "zip64 extensible data sector". let version_made_by = reader.read_u16::()?; let version_needed_to_extract = reader.read_u16::()?; let disk_number = reader.read_u32::()?; let disk_with_central_directory = reader.read_u32::()?; let number_of_files_on_this_disk = reader.read_u64::()?; let number_of_files = reader.read_u64::()?; let central_directory_size = reader.read_u64::()?; let central_directory_offset = reader.read_u64::()?; return Ok(( Zip64CentralDirectoryEnd { version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, }, archive_offset, )); } pos += 1; } Err(ZipError::InvalidArchive( "Could not find ZIP64 central directory end", )) } pub fn write(&self, writer: &mut T) -> ZipResult<()> { writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?; writer.write_u64::(44)?; // record size writer.write_u16::(self.version_made_by)?; writer.write_u16::(self.version_needed_to_extract)?; writer.write_u32::(self.disk_number)?; writer.write_u32::(self.disk_with_central_directory)?; writer.write_u64::(self.number_of_files_on_this_disk)?; writer.write_u64::(self.number_of_files)?; writer.write_u64::(self.central_directory_size)?; writer.write_u64::(self.central_directory_offset)?; Ok(()) } } zip-0.5.13/src/types.rs000064400000000000000000000350500000000000000127750ustar 00000000000000//! Types that specify what is contained in a ZIP. #[derive(Clone, Copy, Debug, PartialEq)] pub enum System { Dos = 0, Unix = 3, Unknown, } impl System { pub fn from_u8(system: u8) -> System { use self::System::*; match system { 0 => Dos, 3 => Unix, _ => Unknown, } } } /// A DateTime field to be used for storing timestamps in a zip file /// /// This structure does bounds checking to ensure the date is able to be stored in a zip file. /// /// When constructed manually from a date and time, it will also check if the input is sensible /// (e.g. months are from [1, 12]), but when read from a zip some parts may be out of their normal /// bounds (e.g. month 0, or hour 31). /// /// # Warning /// /// Some utilities use alternative timestamps to improve the accuracy of their /// ZIPs, but we don't parse them yet. [We're working on this](https://github.com/zip-rs/zip/issues/156#issuecomment-652981904), /// however this API shouldn't be considered complete. 
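///
/// # Example
///
/// An illustrative round trip through the MS-DOS representation; the values
/// mirror the `time_conversion` test in this module:
///
/// ```
/// let dt = zip::DateTime::from_msdos(0x4D71, 0x54CF);
/// assert_eq!((dt.year(), dt.month(), dt.day()), (2018, 11, 17));
/// assert_eq!((dt.hour(), dt.minute(), dt.second()), (10, 38, 30));
/// // Re-encoding reproduces the original date and time words.
/// assert_eq!((dt.datepart(), dt.timepart()), (0x4D71, 0x54CF));
/// ```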
#[derive(Debug, Clone, Copy)] pub struct DateTime { year: u16, month: u8, day: u8, hour: u8, minute: u8, second: u8, } impl ::std::default::Default for DateTime { /// Constructs an 'default' datetime of 1980-01-01 00:00:00 fn default() -> DateTime { DateTime { year: 1980, month: 1, day: 1, hour: 0, minute: 0, second: 0, } } } impl DateTime { /// Converts an msdos (u16, u16) pair to a DateTime object pub fn from_msdos(datepart: u16, timepart: u16) -> DateTime { let seconds = (timepart & 0b0000000000011111) << 1; let minutes = (timepart & 0b0000011111100000) >> 5; let hours = (timepart & 0b1111100000000000) >> 11; let days = (datepart & 0b0000000000011111) >> 0; let months = (datepart & 0b0000000111100000) >> 5; let years = (datepart & 0b1111111000000000) >> 9; DateTime { year: (years + 1980) as u16, month: months as u8, day: days as u8, hour: hours as u8, minute: minutes as u8, second: seconds as u8, } } /// Constructs a DateTime from a specific date and time /// /// The bounds are: /// * year: [1980, 2107] /// * month: [1, 12] /// * day: [1, 31] /// * hour: [0, 23] /// * minute: [0, 59] /// * second: [0, 60] pub fn from_date_and_time( year: u16, month: u8, day: u8, hour: u8, minute: u8, second: u8, ) -> Result { if year >= 1980 && year <= 2107 && month >= 1 && month <= 12 && day >= 1 && day <= 31 && hour <= 23 && minute <= 59 && second <= 60 { Ok(DateTime { year, month, day, hour, minute, second, }) } else { Err(()) } } #[cfg(feature = "time")] /// Converts a ::time::Tm object to a DateTime /// /// Returns `Err` when this object is out of bounds pub fn from_time(tm: ::time::Tm) -> Result { if tm.tm_year >= 80 && tm.tm_year <= 207 && tm.tm_mon >= 0 && tm.tm_mon <= 11 && tm.tm_mday >= 1 && tm.tm_mday <= 31 && tm.tm_hour >= 0 && tm.tm_hour <= 23 && tm.tm_min >= 0 && tm.tm_min <= 59 && tm.tm_sec >= 0 && tm.tm_sec <= 60 { Ok(DateTime { year: (tm.tm_year + 1900) as u16, month: (tm.tm_mon + 1) as u8, day: tm.tm_mday as u8, hour: tm.tm_hour as u8, minute: tm.tm_min as u8, second: tm.tm_sec as u8, }) } else { Err(()) } } /// Gets the time portion of this datetime in the msdos representation pub fn timepart(&self) -> u16 { ((self.second as u16) >> 1) | ((self.minute as u16) << 5) | ((self.hour as u16) << 11) } /// Gets the date portion of this datetime in the msdos representation pub fn datepart(&self) -> u16 { (self.day as u16) | ((self.month as u16) << 5) | ((self.year - 1980) << 9) } #[cfg(feature = "time")] /// Converts the datetime to a Tm structure /// /// The fields `tm_wday`, `tm_yday`, `tm_utcoff` and `tm_nsec` are set to their defaults. pub fn to_time(&self) -> ::time::Tm { ::time::Tm { tm_sec: self.second as i32, tm_min: self.minute as i32, tm_hour: self.hour as i32, tm_mday: self.day as i32, tm_mon: self.month as i32 - 1, tm_year: self.year as i32 - 1900, tm_isdst: -1, ..::time::empty_tm() } } /// Get the year. There is no epoch, i.e. 2018 will be returned as 2018. pub fn year(&self) -> u16 { self.year } /// Get the month, where 1 = january and 12 = december pub fn month(&self) -> u8 { self.month } /// Get the day pub fn day(&self) -> u8 { self.day } /// Get the hour pub fn hour(&self) -> u8 { self.hour } /// Get the minute pub fn minute(&self) -> u8 { self.minute } /// Get the second pub fn second(&self) -> u8 { self.second } } pub const DEFAULT_VERSION: u8 = 46; /// Structure representing a ZIP file. 
#[derive(Debug, Clone)] pub struct ZipFileData { /// Compatibility of the file attribute information pub system: System, /// Specification version pub version_made_by: u8, /// True if the file is encrypted. pub encrypted: bool, /// True if the file uses a data-descriptor section pub using_data_descriptor: bool, /// Compression method used to store the file pub compression_method: crate::compression::CompressionMethod, /// Last modified time. This will only have a 2 second precision. pub last_modified_time: DateTime, /// CRC32 checksum pub crc32: u32, /// Size of the file in the ZIP pub compressed_size: u64, /// Size of the file when extracted pub uncompressed_size: u64, /// Name of the file pub file_name: String, /// Raw file name. To be used when file_name was incorrectly decoded. pub file_name_raw: Vec<u8>, /// Extra field usually used for storage expansion pub extra_field: Vec<u8>, /// File comment pub file_comment: String, /// Specifies where the local header of the file starts pub header_start: u64, /// Specifies where the central header of the file starts /// /// Note that when this is not known, it is set to 0 pub central_header_start: u64, /// Specifies where the compressed data of the file starts pub data_start: u64, /// External file attributes pub external_attributes: u32, /// Reserve local ZIP64 extra field pub large_file: bool, } impl ZipFileData { pub fn file_name_sanitized(&self) -> ::std::path::PathBuf { let no_null_filename = match self.file_name.find('\0') { Some(index) => &self.file_name[0..index], None => &self.file_name, } .to_string(); // zip files can contain both / and \ as separators regardless of the OS // and as we want to return a sanitized PathBuf that only supports the // OS separator let's convert incompatible separators to compatible ones let separator = ::std::path::MAIN_SEPARATOR; let opposite_separator = match separator { '/' => '\\', _ => '/', }; let filename = no_null_filename.replace(&opposite_separator.to_string(), &separator.to_string()); ::std::path::Path::new(&filename) .components() .filter(|component| match *component { ::std::path::Component::Normal(..)
=> true, _ => false, }) .fold(::std::path::PathBuf::new(), |mut path, ref cur| { path.push(cur.as_os_str()); path }) } pub fn zip64_extension(&self) -> bool { self.uncompressed_size > 0xFFFFFFFF || self.compressed_size > 0xFFFFFFFF || self.header_start > 0xFFFFFFFF } pub fn version_needed(&self) -> u16 { // higher versions matched first match (self.zip64_extension(), self.compression_method) { #[cfg(feature = "bzip2")] (_, crate::compression::CompressionMethod::Bzip2) => 46, (true, _) => 45, _ => 20, } } } #[cfg(test)] mod test { #[test] fn system() { use super::System; assert_eq!(System::Dos as u16, 0u16); assert_eq!(System::Unix as u16, 3u16); assert_eq!(System::from_u8(0), System::Dos); assert_eq!(System::from_u8(3), System::Unix); } #[test] fn sanitize() { use super::*; let file_name = "/path/../../../../etc/./passwd\0/etc/shadow".to_string(); let data = ZipFileData { system: System::Dos, version_made_by: 0, encrypted: false, using_data_descriptor: false, compression_method: crate::compression::CompressionMethod::Stored, last_modified_time: DateTime::default(), crc32: 0, compressed_size: 0, uncompressed_size: 0, file_name: file_name.clone(), file_name_raw: file_name.into_bytes(), extra_field: Vec::new(), file_comment: String::new(), header_start: 0, data_start: 0, central_header_start: 0, external_attributes: 0, large_file: false, }; assert_eq!( data.file_name_sanitized(), ::std::path::PathBuf::from("path/etc/passwd") ); } #[test] fn datetime_default() { use super::DateTime; let dt = DateTime::default(); assert_eq!(dt.timepart(), 0); assert_eq!(dt.datepart(), 0b0000000_0001_00001); } #[test] fn datetime_max() { use super::DateTime; let dt = DateTime::from_date_and_time(2107, 12, 31, 23, 59, 60).unwrap(); assert_eq!(dt.timepart(), 0b10111_111011_11110); assert_eq!(dt.datepart(), 0b1111111_1100_11111); } #[test] fn datetime_bounds() { use super::DateTime; assert!(DateTime::from_date_and_time(2000, 1, 1, 23, 59, 60).is_ok()); assert!(DateTime::from_date_and_time(2000, 1, 1, 24, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 60, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 0, 61).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1980, 1, 1, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1979, 1, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 0, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 1, 0, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2108, 12, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 13, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 32, 0, 0, 0).is_err()); } #[cfg(feature = "time")] #[test] fn datetime_from_time_bounds() { use super::DateTime; // 1979-12-31 23:59:59 assert!(DateTime::from_time(::time::Tm { tm_sec: 59, tm_min: 59, tm_hour: 23, tm_mday: 31, tm_mon: 11, // tm_mon has number range [0, 11] tm_year: 79, // 1979 - 1900 = 79 ..::time::empty_tm() }) .is_err()); // 1980-01-01 00:00:00 assert!(DateTime::from_time(::time::Tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 1, tm_mon: 0, // tm_mon has number range [0, 11] tm_year: 80, // 1980 - 1900 = 80 ..::time::empty_tm() }) .is_ok()); // 2107-12-31 23:59:59 assert!(DateTime::from_time(::time::Tm { tm_sec: 59, tm_min: 59, tm_hour: 23, tm_mday: 31, tm_mon: 11, // tm_mon has number range [0, 11] tm_year: 207, // 2107 - 1900 = 207 ..::time::empty_tm() }) .is_ok()); // 2108-01-01 00:00:00 
assert!(DateTime::from_time(::time::Tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 1, tm_mon: 0, // tm_mon has number range [0, 11] tm_year: 208, // 2108 - 1900 = 208 ..::time::empty_tm() }) .is_err()); } #[test] fn time_conversion() { use super::DateTime; let dt = DateTime::from_msdos(0x4D71, 0x54CF); assert_eq!(dt.year(), 2018); assert_eq!(dt.month(), 11); assert_eq!(dt.day(), 17); assert_eq!(dt.hour(), 10); assert_eq!(dt.minute(), 38); assert_eq!(dt.second(), 30); #[cfg(feature = "time")] assert_eq!( format!("{}", dt.to_time().rfc3339()), "2018-11-17T10:38:30Z" ); } #[test] fn time_out_of_bounds() { use super::DateTime; let dt = DateTime::from_msdos(0xFFFF, 0xFFFF); assert_eq!(dt.year(), 2107); assert_eq!(dt.month(), 15); assert_eq!(dt.day(), 31); assert_eq!(dt.hour(), 31); assert_eq!(dt.minute(), 63); assert_eq!(dt.second(), 62); #[cfg(feature = "time")] assert_eq!( format!("{}", dt.to_time().rfc3339()), "2107-15-31T31:63:62Z" ); let dt = DateTime::from_msdos(0x0000, 0x0000); assert_eq!(dt.year(), 1980); assert_eq!(dt.month(), 0); assert_eq!(dt.day(), 0); assert_eq!(dt.hour(), 0); assert_eq!(dt.minute(), 0); assert_eq!(dt.second(), 0); #[cfg(feature = "time")] assert_eq!( format!("{}", dt.to_time().rfc3339()), "1980-00-00T00:00:00Z" ); } #[cfg(feature = "time")] #[test] fn time_at_january() { use super::DateTime; // 2020-01-01 00:00:00 let clock = ::time::Timespec::new(1577836800, 0); let tm = ::time::at_utc(clock); assert!(DateTime::from_time(tm).is_ok()); } } zip-0.5.13/src/write.rs000064400000000000000000001264020000000000000127650ustar 00000000000000//! Types for creating ZIP archives use crate::compression::CompressionMethod; use crate::read::{central_header_to_zip_file, ZipArchive, ZipFile}; use crate::result::{ZipError, ZipResult}; use crate::spec; use crate::types::{DateTime, System, ZipFileData, DEFAULT_VERSION}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crc32fast::Hasher; use std::default::Default; use std::io; use std::io::prelude::*; use std::mem; #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] use flate2::write::DeflateEncoder; #[cfg(feature = "bzip2")] use bzip2::write::BzEncoder; enum GenericZipWriter { Closed, Storer(W), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] Deflater(DeflateEncoder), #[cfg(feature = "bzip2")] Bzip2(BzEncoder), } /// ZIP archive generator /// /// Handles the bookkeeping involved in building an archive, and provides an /// API to edit its contents. /// /// ``` /// # fn doit() -> zip::result::ZipResult<()> /// # { /// # use zip::ZipWriter; /// use std::io::Write; /// use zip::write::FileOptions; /// /// // We use a buffer here, though you'd normally use a `File` /// let mut buf = [0; 65536]; /// let mut zip = zip::ZipWriter::new(std::io::Cursor::new(&mut buf[..])); /// /// let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); /// zip.start_file("hello_world.txt", options)?; /// zip.write(b"Hello, World!")?; /// /// // Apply the changes you've made. 
/// // Dropping the `ZipWriter` will have the same effect, but may silently fail /// zip.finish()?; /// /// # Ok(()) /// # } /// # doit().unwrap(); /// ``` pub struct ZipWriter { inner: GenericZipWriter, files: Vec, stats: ZipWriterStats, writing_to_file: bool, writing_to_extra_field: bool, writing_to_central_extra_field_only: bool, writing_raw: bool, comment: Vec, } #[derive(Default)] struct ZipWriterStats { hasher: Hasher, start: u64, bytes_written: u64, } struct ZipRawValues { crc32: u32, compressed_size: u64, uncompressed_size: u64, } /// Metadata for a file to be written #[derive(Copy, Clone)] pub struct FileOptions { compression_method: CompressionMethod, last_modified_time: DateTime, permissions: Option, large_file: bool, } impl FileOptions { /// Construct a new FileOptions object pub fn default() -> FileOptions { FileOptions { #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] compression_method: CompressionMethod::Deflated, #[cfg(not(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" )))] compression_method: CompressionMethod::Stored, #[cfg(feature = "time")] last_modified_time: DateTime::from_time(time::now()).unwrap_or_default(), #[cfg(not(feature = "time"))] last_modified_time: DateTime::default(), permissions: None, large_file: false, } } /// Set the compression method for the new file /// /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is /// disabled, `CompressionMethod::Stored` becomes the default. pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions { self.compression_method = method; self } /// Set the last modified time /// /// The default is the current timestamp if the 'time' feature is enabled, and 1980-01-01 /// otherwise pub fn last_modified_time(mut self, mod_time: DateTime) -> FileOptions { self.last_modified_time = mod_time; self } /// Set the permissions for the new file. /// /// The format is represented with unix-style permissions. /// The default is `0o644`, which represents `rw-r--r--` for files, /// and `0o755`, which represents `rwxr-xr-x` for directories pub fn unix_permissions(mut self, mode: u32) -> FileOptions { self.permissions = Some(mode & 0o777); self } /// Set whether the new file's compressed and uncompressed size is less than 4 GiB. /// /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true`, /// readers will require ZIP64 support and if the file does not exceed the limit, 20 B are /// wasted. The default is `false`. 
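///
/// A minimal sketch of opting in; the file name and payload are illustrative
/// only:
///
/// ```
/// # fn doit() -> zip::result::ZipResult<()> {
/// use std::io::Write;
/// use zip::write::FileOptions;
///
/// let mut zip = zip::ZipWriter::new(std::io::Cursor::new(Vec::new()));
/// // Entries that may grow past 4 GiB need the ZIP64 extra field up front.
/// let options = FileOptions::default().large_file(true);
/// zip.start_file("huge.bin", options)?;
/// zip.write_all(b"payload")?;
/// zip.finish()?;
/// # Ok(())
/// # }
/// # doit().unwrap();
/// ```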
pub fn large_file(mut self, large: bool) -> FileOptions { self.large_file = large; self } } impl Default for FileOptions { fn default() -> Self { Self::default() } } impl Write for ZipWriter { fn write(&mut self, buf: &[u8]) -> io::Result { if !self.writing_to_file { return Err(io::Error::new( io::ErrorKind::Other, "No file has been started", )); } match self.inner.ref_mut() { Some(ref mut w) => { if self.writing_to_extra_field { self.files.last_mut().unwrap().extra_field.write(buf) } else { let write_result = w.write(buf); if let Ok(count) = write_result { self.stats.update(&buf[0..count]); if self.stats.bytes_written > 0xFFFFFFFF && !self.files.last_mut().unwrap().large_file { let _inner = mem::replace(&mut self.inner, GenericZipWriter::Closed); return Err(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", )); } } write_result } } None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", )), } } fn flush(&mut self) -> io::Result<()> { match self.inner.ref_mut() { Some(ref mut w) => w.flush(), None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", )), } } } impl ZipWriterStats { fn update(&mut self, buf: &[u8]) { self.hasher.update(buf); self.bytes_written += buf.len() as u64; } } impl ZipWriter { /// Initializes the archive from an existing ZIP archive, making it ready for append. pub fn new_append(mut readwriter: A) -> ZipResult> { let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut readwriter)?; if footer.disk_number != footer.disk_with_central_directory { return Err(ZipError::UnsupportedArchive( "Support for multi-disk files is not implemented", )); } let (archive_offset, directory_start, number_of_files) = ZipArchive::get_directory_counts(&mut readwriter, &footer, cde_start_pos)?; if let Err(_) = readwriter.seek(io::SeekFrom::Start(directory_start)) { return Err(ZipError::InvalidArchive( "Could not seek to start of central directory", )); } let files = (0..number_of_files) .map(|_| central_header_to_zip_file(&mut readwriter, archive_offset)) .collect::, _>>()?; let _ = readwriter.seek(io::SeekFrom::Start(directory_start)); // seek directory_start to overwrite it Ok(ZipWriter { inner: GenericZipWriter::Storer(readwriter), files, stats: Default::default(), writing_to_file: false, writing_to_extra_field: false, writing_to_central_extra_field_only: false, comment: footer.zip_file_comment, writing_raw: true, // avoid recomputing the last file's header }) } } impl ZipWriter { /// Initializes the archive. /// /// Before writing to this object, the [`ZipWriter::start_file`] function should be called. pub fn new(inner: W) -> ZipWriter { ZipWriter { inner: GenericZipWriter::Storer(inner), files: Vec::new(), stats: Default::default(), writing_to_file: false, writing_to_extra_field: false, writing_to_central_extra_field_only: false, writing_raw: false, comment: Vec::new(), } } /// Set ZIP archive comment. pub fn set_comment(&mut self, comment: S) where S: Into, { self.set_raw_comment(comment.into().into()) } /// Set ZIP archive comment. /// /// This sets the raw bytes of the comment. The comment /// is typically expected to be encoded in UTF-8 pub fn set_raw_comment(&mut self, comment: Vec) { self.comment = comment; } /// Start a new file for with the requested options. 
fn start_entry( &mut self, name: S, options: FileOptions, raw_values: Option, ) -> ZipResult<()> where S: Into, { self.finish_file()?; let raw_values = raw_values.unwrap_or_else(|| ZipRawValues { crc32: 0, compressed_size: 0, uncompressed_size: 0, }); { let writer = self.inner.get_plain(); let header_start = writer.seek(io::SeekFrom::Current(0))?; let permissions = options.permissions.unwrap_or(0o100644); let mut file = ZipFileData { system: System::Unix, version_made_by: DEFAULT_VERSION, encrypted: false, using_data_descriptor: false, compression_method: options.compression_method, last_modified_time: options.last_modified_time, crc32: raw_values.crc32, compressed_size: raw_values.compressed_size, uncompressed_size: raw_values.uncompressed_size, file_name: name.into(), file_name_raw: Vec::new(), // Never used for saving extra_field: Vec::new(), file_comment: String::new(), header_start, data_start: 0, central_header_start: 0, external_attributes: permissions << 16, large_file: options.large_file, }; write_local_file_header(writer, &file)?; let header_end = writer.seek(io::SeekFrom::Current(0))?; self.stats.start = header_end; file.data_start = header_end; self.stats.bytes_written = 0; self.stats.hasher = Hasher::new(); self.files.push(file); } Ok(()) } fn finish_file(&mut self) -> ZipResult<()> { if self.writing_to_extra_field { // Implicitly calling [`ZipWriter::end_extra_data`] for empty files. self.end_extra_data()?; } self.inner.switch_to(CompressionMethod::Stored)?; let writer = self.inner.get_plain(); if !self.writing_raw { let file = match self.files.last_mut() { None => return Ok(()), Some(f) => f, }; file.crc32 = self.stats.hasher.clone().finalize(); file.uncompressed_size = self.stats.bytes_written; let file_end = writer.seek(io::SeekFrom::Current(0))?; file.compressed_size = file_end - self.stats.start; update_local_file_header(writer, file)?; writer.seek(io::SeekFrom::Start(file_end))?; } self.writing_to_file = false; self.writing_raw = false; Ok(()) } /// Create a file in the archive and start writing its' contents. /// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] pub fn start_file(&mut self, name: S, mut options: FileOptions) -> ZipResult<()> where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; self.start_entry(name, options, None)?; self.inner.switch_to(options.compression_method)?; self.writing_to_file = true; Ok(()) } /// Starts a file, taking a Path as argument. /// /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal' /// Components, such as a starting '/' or '..' and '.'. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. Use `start_file` instead." )] pub fn start_file_from_path( &mut self, path: &std::path::Path, options: FileOptions, ) -> ZipResult<()> { self.start_file(path_to_string(path), options) } /// Create an aligned file in the archive and start writing its' contents. /// /// Returns the number of padding bytes required to align the file. 
/// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] pub fn start_file_aligned( &mut self, name: S, options: FileOptions, align: u16, ) -> Result where S: Into, { let data_start = self.start_file_with_extra_data(name, options)?; let align = align as u64; if align > 1 && data_start % align != 0 { let pad_length = (align - (data_start + 4) % align) % align; let pad = vec![0; pad_length as usize]; self.write_all(b"za").map_err(ZipError::from)?; // 0x617a self.write_u16::(pad.len() as u16) .map_err(ZipError::from)?; self.write_all(&pad).map_err(ZipError::from)?; assert_eq!(self.end_local_start_central_extra_data()? % align, 0); } let extra_data_end = self.end_extra_data()?; Ok(extra_data_end - data_start) } /// Create a file in the archive and start writing its extra data first. /// /// Finish writing extra data and start writing file data with [`ZipWriter::end_extra_data`]. /// Optionally, distinguish local from central extra data with /// [`ZipWriter::end_local_start_central_extra_data`]. /// /// Returns the preliminary starting offset of the file data without any extra data allowing to /// align the file data by calculating a pad length to be prepended as part of the extra data. /// /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] /// /// ``` /// use byteorder::{LittleEndian, WriteBytesExt}; /// use zip::{ZipArchive, ZipWriter, result::ZipResult}; /// use zip::{write::FileOptions, CompressionMethod}; /// use std::io::{Write, Cursor}; /// /// # fn main() -> ZipResult<()> { /// let mut archive = Cursor::new(Vec::new()); /// /// { /// let mut zip = ZipWriter::new(&mut archive); /// let options = FileOptions::default() /// .compression_method(CompressionMethod::Stored); /// /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?; /// let extra_data = b"local and central extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; /// zip.write_all(b"file data")?; /// /// let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?; /// let extra_data = b"local extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// let data_start = data_start as usize + 4 + extra_data.len() + 4; /// let align = 64; /// let pad_length = (align - data_start % align) % align; /// assert_eq!(pad_length, 19); /// zip.write_u16::(0xdead)?; /// zip.write_u16::(pad_length as u16)?; /// zip.write_all(&vec![0; pad_length])?; /// let data_start = zip.end_local_start_central_extra_data()?; /// assert_eq!(data_start as usize % align, 0); /// let extra_data = b"central extra data"; /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; /// zip.write_all(b"file data")?; /// /// zip.finish()?; /// } /// /// let mut zip = ZipArchive::new(archive)?; /// assert_eq!(&zip.by_index(0)?.extra_data()[4..], b"local and central extra data"); /// assert_eq!(&zip.by_index(1)?.extra_data()[4..], b"central extra data"); /// # Ok(()) /// # } /// ``` pub fn start_file_with_extra_data( &mut self, name: S, mut options: FileOptions, ) -> ZipResult where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; self.start_entry(name, options, None)?; self.writing_to_file = true; self.writing_to_extra_field = 
true; Ok(self.files.last().unwrap().data_start) } /// End local and start central extra data. Requires [`ZipWriter::start_file_with_extra_data`]. /// /// Returns the final starting offset of the file data. pub fn end_local_start_central_extra_data(&mut self) -> ZipResult { let data_start = self.end_extra_data()?; self.files.last_mut().unwrap().extra_field.clear(); self.writing_to_extra_field = true; self.writing_to_central_extra_field_only = true; Ok(data_start) } /// End extra data and start file data. Requires [`ZipWriter::start_file_with_extra_data`]. /// /// Returns the final starting offset of the file data. pub fn end_extra_data(&mut self) -> ZipResult { // Require `start_file_with_extra_data()`. Ensures `file` is some. if !self.writing_to_extra_field { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Not writing to extra field", ))); } let file = self.files.last_mut().unwrap(); validate_extra_data(&file)?; if !self.writing_to_central_extra_field_only { let writer = self.inner.get_plain(); // Append extra data to local file header and keep it for central file header. writer.write_all(&file.extra_field)?; // Update final `data_start`. let header_end = file.data_start + file.extra_field.len() as u64; self.stats.start = header_end; file.data_start = header_end; // Update extra field length in local file header. let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; writer.seek(io::SeekFrom::Start(file.header_start + 28))?; writer.write_u16::(extra_field_length)?; writer.seek(io::SeekFrom::Start(header_end))?; self.inner.switch_to(file.compression_method)?; } self.writing_to_extra_field = false; self.writing_to_central_extra_field_only = false; Ok(file.data_start) } /// Add a new file using the already compressed data from a ZIP file being read and renames it, this /// allows faster copies of the `ZipFile` since there is no need to decompress and compress it again. /// Any `ZipFile` metadata is copied and not checked, for example the file CRC. /// ```no_run /// use std::fs::File; /// use std::io::{Read, Seek, Write}; /// use zip::{ZipArchive, ZipWriter}; /// /// fn copy_rename( /// src: &mut ZipArchive, /// dst: &mut ZipWriter, /// ) -> zip::result::ZipResult<()> /// where /// R: Read + Seek, /// W: Write + Seek, /// { /// // Retrieve file entry by name /// let file = src.by_name("src_file.txt")?; /// /// // Copy and rename the previously obtained file entry to the destination zip archive /// dst.raw_copy_file_rename(file, "new_name.txt")?; /// /// Ok(()) /// } /// ``` pub fn raw_copy_file_rename(&mut self, mut file: ZipFile, name: S) -> ZipResult<()> where S: Into, { let options = FileOptions::default() .last_modified_time(file.last_modified()) .compression_method(file.compression()); if let Some(perms) = file.unix_mode() { options.unix_permissions(perms); } let raw_values = ZipRawValues { crc32: file.crc32(), compressed_size: file.compressed_size(), uncompressed_size: file.size(), }; self.start_entry(name, options, Some(raw_values))?; self.writing_to_file = true; self.writing_raw = true; io::copy(file.get_raw_reader(), self)?; Ok(()) } /// Add a new file using the already compressed data from a ZIP file being read, this allows faster /// copies of the `ZipFile` since there is no need to decompress and compress it again. Any `ZipFile` /// metadata is copied and not checked, for example the file CRC. 
/// /// ```no_run /// use std::fs::File; /// use std::io::{Read, Seek, Write}; /// use zip::{ZipArchive, ZipWriter}; /// /// fn copy(src: &mut ZipArchive, dst: &mut ZipWriter) -> zip::result::ZipResult<()> /// where /// R: Read + Seek, /// W: Write + Seek, /// { /// // Retrieve file entry by name /// let file = src.by_name("src_file.txt")?; /// /// // Copy the previously obtained file entry to the destination zip archive /// dst.raw_copy_file(file)?; /// /// Ok(()) /// } /// ``` pub fn raw_copy_file(&mut self, file: ZipFile) -> ZipResult<()> { let name = file.name().to_owned(); self.raw_copy_file_rename(file, name) } /// Add a directory entry. /// /// You can't write data to the file afterwards. pub fn add_directory(&mut self, name: S, mut options: FileOptions) -> ZipResult<()> where S: Into, { if options.permissions.is_none() { options.permissions = Some(0o755); } *options.permissions.as_mut().unwrap() |= 0o40000; options.compression_method = CompressionMethod::Stored; let name_as_string = name.into(); // Append a slash to the filename if it does not end with it. let name_with_slash = match name_as_string.chars().last() { Some('/') | Some('\\') => name_as_string, _ => name_as_string + "/", }; self.start_entry(name_with_slash, options, None)?; self.writing_to_file = false; Ok(()) } /// Add a directory entry, taking a Path as argument. /// /// This function ensures that the '/' path seperator is used. It also ignores all non 'Normal' /// Components, such as a starting '/' or '..' and '.'. #[deprecated( since = "0.5.7", note = "by stripping `..`s from the path, the meaning of paths can change. Use `add_directory` instead." )] pub fn add_directory_from_path( &mut self, path: &std::path::Path, options: FileOptions, ) -> ZipResult<()> { self.add_directory(path_to_string(path), options) } /// Finish the last file and write all other zip-structures /// /// This will return the writer, but one should normally not append any data to the end of the file. /// Note that the zipfile will also be finished on drop. pub fn finish(&mut self) -> ZipResult { self.finalize()?; let inner = mem::replace(&mut self.inner, GenericZipWriter::Closed); Ok(inner.unwrap()) } fn finalize(&mut self) -> ZipResult<()> { self.finish_file()?; { let writer = self.inner.get_plain(); let central_start = writer.seek(io::SeekFrom::Current(0))?; for file in self.files.iter() { write_central_directory_header(writer, file)?; } let central_size = writer.seek(io::SeekFrom::Current(0))? 
- central_start; if self.files.len() > 0xFFFF || central_size > 0xFFFFFFFF || central_start > 0xFFFFFFFF { let zip64_footer = spec::Zip64CentralDirectoryEnd { version_made_by: DEFAULT_VERSION as u16, version_needed_to_extract: DEFAULT_VERSION as u16, disk_number: 0, disk_with_central_directory: 0, number_of_files_on_this_disk: self.files.len() as u64, number_of_files: self.files.len() as u64, central_directory_size: central_size, central_directory_offset: central_start, }; zip64_footer.write(writer)?; let zip64_footer = spec::Zip64CentralDirectoryEndLocator { disk_with_central_directory: 0, end_of_central_directory_offset: central_start + central_size, number_of_disks: 1, }; zip64_footer.write(writer)?; } let number_of_files = if self.files.len() > 0xFFFF { 0xFFFF } else { self.files.len() as u16 }; let footer = spec::CentralDirectoryEnd { disk_number: 0, disk_with_central_directory: 0, zip_file_comment: self.comment.clone(), number_of_files_on_this_disk: number_of_files, number_of_files, central_directory_size: if central_size > 0xFFFFFFFF { 0xFFFFFFFF } else { central_size as u32 }, central_directory_offset: if central_start > 0xFFFFFFFF { 0xFFFFFFFF } else { central_start as u32 }, }; footer.write(writer)?; } Ok(()) } } impl Drop for ZipWriter { fn drop(&mut self) { if !self.inner.is_closed() { if let Err(e) = self.finalize() { let _ = write!(&mut io::stderr(), "ZipWriter drop failed: {:?}", e); } } } } impl GenericZipWriter { fn switch_to(&mut self, compression: CompressionMethod) -> ZipResult<()> { match self.current_compression() { Some(method) if method == compression => return Ok(()), None => { return Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", ) .into()) } _ => {} } let bare = match mem::replace(self, GenericZipWriter::Closed) { GenericZipWriter::Storer(w) => w, #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(w) => w.finish()?, #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(w) => w.finish()?, GenericZipWriter::Closed => { return Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", ) .into()) } }; *self = { #[allow(deprecated)] match compression { CompressionMethod::Stored => GenericZipWriter::Storer(bare), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] CompressionMethod::Deflated => GenericZipWriter::Deflater(DeflateEncoder::new( bare, flate2::Compression::default(), )), #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => { GenericZipWriter::Bzip2(BzEncoder::new(bare, bzip2::Compression::default())) } CompressionMethod::Unsupported(..) => { return Err(ZipError::UnsupportedArchive("Unsupported compression")) } } }; Ok(()) } fn ref_mut(&mut self) -> Option<&mut dyn Write> { match *self { GenericZipWriter::Storer(ref mut w) => Some(w as &mut dyn Write), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(ref mut w) => Some(w as &mut dyn Write), GenericZipWriter::Closed => None, } } fn is_closed(&self) -> bool { match *self { GenericZipWriter::Closed => true, _ => false, } } fn get_plain(&mut self) -> &mut W { match *self { GenericZipWriter::Storer(ref mut w) => w, _ => panic!("Should have switched to stored beforehand"), } } fn current_compression(&self) -> Option { match *self { GenericZipWriter::Storer(..) 
=> Some(CompressionMethod::Stored), #[cfg(any( feature = "deflate", feature = "deflate-miniz", feature = "deflate-zlib" ))] GenericZipWriter::Deflater(..) => Some(CompressionMethod::Deflated), #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(..) => Some(CompressionMethod::Bzip2), GenericZipWriter::Closed => None, } } fn unwrap(self) -> W { match self { GenericZipWriter::Storer(w) => w, _ => panic!("Should have switched to stored beforehand"), } } } fn write_local_file_header(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { // local file header signature writer.write_u32::(spec::LOCAL_FILE_HEADER_SIGNATURE)?; // version needed to extract writer.write_u16::(file.version_needed())?; // general purpose bit flag let flag = if !file.file_name.is_ascii() { 1u16 << 11 } else { 0 }; writer.write_u16::(flag)?; // Compression method #[allow(deprecated)] writer.write_u16::(file.compression_method.to_u16())?; // last mod file time and last mod file date writer.write_u16::(file.last_modified_time.timepart())?; writer.write_u16::(file.last_modified_time.datepart())?; // crc-32 writer.write_u32::(file.crc32)?; // compressed size writer.write_u32::(if file.compressed_size > 0xFFFFFFFF { 0xFFFFFFFF } else { file.compressed_size as u32 })?; // uncompressed size writer.write_u32::(if file.uncompressed_size > 0xFFFFFFFF { 0xFFFFFFFF } else { file.uncompressed_size as u32 })?; // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; writer.write_u16::(extra_field_length)?; // file name writer.write_all(file.file_name.as_bytes())?; // zip64 extra field if file.large_file { write_local_zip64_extra_field(writer, &file)?; } Ok(()) } fn update_local_file_header( writer: &mut T, file: &ZipFileData, ) -> ZipResult<()> { const CRC32_OFFSET: u64 = 14; writer.seek(io::SeekFrom::Start(file.header_start + CRC32_OFFSET))?; writer.write_u32::(file.crc32)?; writer.write_u32::(if file.compressed_size > 0xFFFFFFFF { if file.large_file { 0xFFFFFFFF } else { // compressed size can be slightly larger than uncompressed size return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", ))); } } else { file.compressed_size as u32 })?; writer.write_u32::(if file.uncompressed_size > 0xFFFFFFFF { // uncompressed size is checked on write to catch it as soon as possible 0xFFFFFFFF } else { file.uncompressed_size as u32 })?; if file.large_file { update_local_zip64_extra_field(writer, file)?; } Ok(()) } fn write_central_directory_header(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { // buffer zip64 extra field to determine its variable length let mut zip64_extra_field = [0; 28]; let zip64_extra_field_length = write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?; // central file header signature writer.write_u32::(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?; // version made by let version_made_by = (file.system as u16) << 8 | (file.version_made_by as u16); writer.write_u16::(version_made_by)?; // version needed to extract writer.write_u16::(file.version_needed())?; // general puprose bit flag let flag = if !file.file_name.is_ascii() { 1u16 << 11 } else { 0 }; writer.write_u16::(flag)?; // compression method #[allow(deprecated)] writer.write_u16::(file.compression_method.to_u16())?; // last mod file time + date writer.write_u16::(file.last_modified_time.timepart())?; writer.write_u16::(file.last_modified_time.datepart())?; // crc-32 
    writer.write_u32::<LittleEndian>(file.crc32)?;
    // compressed size
    writer.write_u32::<LittleEndian>(if file.compressed_size > 0xFFFFFFFF {
        0xFFFFFFFF
    } else {
        file.compressed_size as u32
    })?;
    // uncompressed size
    writer.write_u32::<LittleEndian>(if file.uncompressed_size > 0xFFFFFFFF {
        0xFFFFFFFF
    } else {
        file.uncompressed_size as u32
    })?;
    // file name length
    writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
    // extra field length
    writer.write_u16::<LittleEndian>(zip64_extra_field_length + file.extra_field.len() as u16)?;
    // file comment length
    writer.write_u16::<LittleEndian>(0)?;
    // disk number start
    writer.write_u16::<LittleEndian>(0)?;
    // internal file attributes
    writer.write_u16::<LittleEndian>(0)?;
    // external file attributes
    writer.write_u32::<LittleEndian>(file.external_attributes)?;
    // relative offset of local header
    writer.write_u32::<LittleEndian>(if file.header_start > 0xFFFFFFFF {
        0xFFFFFFFF
    } else {
        file.header_start as u32
    })?;
    // file name
    writer.write_all(file.file_name.as_bytes())?;
    // zip64 extra field
    writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?;
    // extra field
    writer.write_all(&file.extra_field)?;
    // file comment
    // <none>

    Ok(())
}

fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> {
    let mut data = file.extra_field.as_slice();

    if data.len() > 0xFFFF {
        return Err(ZipError::Io(io::Error::new(
            io::ErrorKind::InvalidData,
            "Extra data exceeds extra field",
        )));
    }

    while data.len() > 0 {
        let left = data.len();
        if left < 4 {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "Incomplete extra data header",
            )));
        }
        let kind = data.read_u16::<LittleEndian>()?;
        let size = data.read_u16::<LittleEndian>()? as usize;
        let left = left - 4;

        if kind == 0x0001 {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "No custom ZIP64 extra data allowed",
            )));
        }

        #[cfg(not(feature = "unreserved"))]
        {
            if kind <= 31 || EXTRA_FIELD_MAPPING.iter().any(|&mapped| mapped == kind) {
                return Err(ZipError::Io(io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "Extra data header ID {:#06} requires crate feature \"unreserved\"",
                        kind,
                    ),
                )));
            }
        }

        if size > left {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "Extra data size exceeds extra field",
            )));
        }

        data = &data[size..];
    }

    Ok(())
}

fn write_local_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    // This entry in the Local header MUST include BOTH original
    // and compressed file size fields.
    writer.write_u16::<LittleEndian>(0x0001)?;
    writer.write_u16::<LittleEndian>(16)?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())
}

fn update_local_zip64_extra_field<T: Write + io::Seek>(
    writer: &mut T,
    file: &ZipFileData,
) -> ZipResult<()> {
    let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
    writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())
}

fn write_central_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<u16> {
    // The order of the fields in the zip64 extended
    // information record is fixed, but the fields MUST
    // only appear if the corresponding Local or Central
    // directory record field is set to 0xFFFF or 0xFFFFFFFF.
let mut size = 0; let uncompressed_size = file.uncompressed_size > 0xFFFFFFFF; let compressed_size = file.compressed_size > 0xFFFFFFFF; let header_start = file.header_start > 0xFFFFFFFF; if uncompressed_size { size += 8; } if compressed_size { size += 8; } if header_start { size += 8; } if size > 0 { writer.write_u16::(0x0001)?; writer.write_u16::(size)?; size += 4; if uncompressed_size { writer.write_u64::(file.uncompressed_size)?; } if compressed_size { writer.write_u64::(file.compressed_size)?; } if header_start { writer.write_u64::(file.header_start)?; } // Excluded fields: // u32: disk start number } Ok(size) } fn path_to_string(path: &std::path::Path) -> String { let mut path_str = String::new(); for component in path.components() { if let std::path::Component::Normal(os_str) = component { if !path_str.is_empty() { path_str.push('/'); } path_str.push_str(&*os_str.to_string_lossy()); } } path_str } #[cfg(test)] mod test { use super::{FileOptions, ZipWriter}; use crate::compression::CompressionMethod; use crate::types::DateTime; use std::io; use std::io::Write; #[test] fn write_empty_zip() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer.set_comment("ZIP"); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 25); assert_eq!( *result.get_ref(), [80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 90, 73, 80] ); } #[test] fn write_zip_dir() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); writer .add_directory( "test", FileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a directory is not allowed, and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 108); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 51, 0, 0, 0, 35, 0, 0, 0, 0, 0, ] as &[u8] ); } #[test] fn write_mimetype_zip() { let mut writer = ZipWriter::new(io::Cursor::new(Vec::new())); let options = FileOptions { compression_method: CompressionMethod::Stored, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, }; writer.start_file("mimetype", options).unwrap(); writer .write(b"application/vnd.oasis.opendocument.text") .unwrap(); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 153); let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); assert_eq!(result.get_ref(), &v); } #[test] fn path_to_string() { let mut path = std::path::PathBuf::new(); #[cfg(windows)] path.push(r"C:\"); #[cfg(unix)] path.push("/"); path.push("windows"); path.push(".."); path.push("."); path.push("system32"); let path_str = super::path_to_string(&path); assert_eq!(path_str, "windows/system32"); } } #[cfg(not(feature = "unreserved"))] const EXTRA_FIELD_MAPPING: [u16; 49] = [ 0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8, 0x2605, 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49, 0x4f4c, 0x5356, 0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 
0x756e, 0x7855, 0xa11e, 0xa220, 0xfd4a, 0x9901, 0x9902, ]; zip-0.5.13/src/zipcrypto.rs000064400000000000000000000203000000000000000136640ustar 00000000000000//! Implementation of the ZipCrypto algorithm //! //! The following paper was used to implement the ZipCrypto algorithm: //! [https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf](https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf) use std::num::Wrapping; /// A container to hold the current key state struct ZipCryptoKeys { key_0: Wrapping, key_1: Wrapping, key_2: Wrapping, } impl ZipCryptoKeys { fn new() -> ZipCryptoKeys { ZipCryptoKeys { key_0: Wrapping(0x12345678), key_1: Wrapping(0x23456789), key_2: Wrapping(0x34567890), } } fn update(&mut self, input: u8) { self.key_0 = ZipCryptoKeys::crc32(self.key_0, input); self.key_1 = (self.key_1 + (self.key_0 & Wrapping(0xff))) * Wrapping(0x08088405) + Wrapping(1); self.key_2 = ZipCryptoKeys::crc32(self.key_2, (self.key_1 >> 24).0 as u8); } fn stream_byte(&mut self) -> u8 { let temp: Wrapping = Wrapping(self.key_2.0 as u16) | Wrapping(3); ((temp * (temp ^ Wrapping(1))) >> 8).0 as u8 } fn decrypt_byte(&mut self, cipher_byte: u8) -> u8 { let plain_byte: u8 = self.stream_byte() ^ cipher_byte; self.update(plain_byte); plain_byte } #[allow(dead_code)] fn encrypt_byte(&mut self, plain_byte: u8) -> u8 { let cipher_byte: u8 = self.stream_byte() ^ plain_byte; self.update(plain_byte); cipher_byte } fn crc32(crc: Wrapping, input: u8) -> Wrapping { return (crc >> 8) ^ Wrapping(CRCTABLE[((crc & Wrapping(0xff)).0 as u8 ^ input) as usize]); } } /// A ZipCrypto reader with unverified password pub struct ZipCryptoReader { file: R, keys: ZipCryptoKeys, } pub enum ZipCryptoValidator { PkzipCrc32(u32), InfoZipMsdosTime(u16), } impl ZipCryptoReader { /// Note: The password is `&[u8]` and not `&str` because the /// [zip specification](https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.3.TXT) /// does not specify password encoding (see function `update_keys` in the specification). /// Therefore, if `&str` was used, the password would be UTF-8 and it /// would be impossible to decrypt files that were encrypted with a /// password byte sequence that is unrepresentable in UTF-8. pub fn new(file: R, password: &[u8]) -> ZipCryptoReader { let mut result = ZipCryptoReader { file: file, keys: ZipCryptoKeys::new(), }; // Key the cipher by updating the keys with the password. for byte in password.iter() { result.keys.update(*byte); } result } /// Read the ZipCrypto header bytes and validate the password. pub fn validate( mut self, validator: ZipCryptoValidator, ) -> Result>, std::io::Error> { // ZipCrypto prefixes a file with a 12 byte header let mut header_buf = [0u8; 12]; self.file.read_exact(&mut header_buf)?; for byte in header_buf.iter_mut() { *byte = self.keys.decrypt_byte(*byte); } match validator { ZipCryptoValidator::PkzipCrc32(crc32_plaintext) => { // PKZIP before 2.0 used 2 byte CRC check. // PKZIP 2.0+ used 1 byte CRC check. It's more secure. // We also use 1 byte CRC. if (crc32_plaintext >> 24) as u8 != header_buf[11] { return Ok(None); // Wrong password } } ZipCryptoValidator::InfoZipMsdosTime(last_mod_time) => { // Info-ZIP modification to ZipCrypto format: // If bit 3 of the general purpose bit flag is set // (indicates that the file uses a data-descriptor section), // it uses high byte of 16-bit File Time. // Info-ZIP code probably writes 2 bytes of File Time. // We check only 1 byte. 
if (last_mod_time >> 8) as u8 != header_buf[11] { return Ok(None); // Wrong password } } } Ok(Some(ZipCryptoReaderValid { reader: self })) } } /// A ZipCrypto reader with verified password pub struct ZipCryptoReaderValid { reader: ZipCryptoReader, } impl std::io::Read for ZipCryptoReaderValid { fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result { // Note: There might be potential for optimization. Inspiration can be found at: // https://github.com/kornelski/7z/blob/master/CPP/7zip/Crypto/ZipCrypto.cpp let result = self.reader.file.read(&mut buf); for byte in buf.iter_mut() { *byte = self.reader.keys.decrypt_byte(*byte); } result } } impl ZipCryptoReaderValid { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> R { self.reader.file } } static CRCTABLE: [u32; 256] = [ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 
0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, ]; zip-0.5.13/tests/data/comment_garbage.zip000064400000000000000000000000560000000000000164030ustar 00000000000000PKshort.omment bla bla blazip-0.5.13/tests/data/files_and_dirs.zip000064400000000000000000000007160000000000000162410ustar 00000000000000PK x;N!X file0.txtUT M\R\ux File at the root. PK x;Ndir1/UT M\$R\ux PK x;Ndir2/UT M\$R\ux PK x;N!X file0.txtUTM\ux PK x;NAUdir1/UTM\ux PK x;NAdir2/UTM\ux PKzip-0.5.13/tests/data/invalid_offset.zip000064400000000000000000000023310000000000000162630ustar 00000000000000PK pJzip/UT PY"PYux PKpJ zip/.DS_StoreUT PYPYux 1 @E -- ` z/ (؈HSy.f~Q4X) &ivvf8qF'ㅄemZ/cQ!B᰿f~ iO6>OўtmKhG{:u47-a|qB1G{:B (z!c|9ZĊ9[aA S<:Z!>PK\gJӆmzip/Cargo.tomlUT PYlPYux 1 Ew:WIPN@Mz:UmvP܄zip/.DS_StoreUTPYux PK\gJӆm[zip/Cargo.tomlUTPYux PK zhJ A@zip/corpus/UTPYux PKzhJL{K3zip/corpus/9fdad349dac578687a62907dd7ba4295801fa566UTPYux PKhJ2 zip/read.rsUTOPYux PK qJ /seeds/UT"PYux PKpJjmzip/seeds/.DS_StoreUTPYux PK~zip-0.5.13/tests/data/invalid_offset2.zip000064400000000000000000000001650000000000000163500ustar 00000000000000PK'PKP)PK,PKP)PK=%zip-0.5.13/tests/data/mimetype.zip000064400000000000000000000002310000000000000151150ustar 00000000000000PK!^2 ''mimetypeapplication/vnd.oasis.opendocument.textPK.!^2 ''mimetypePK6Mzip-0.5.13/tests/data/zip64_demo.zip000064400000000000000000000003400000000000000152450ustar 00000000000000Leading junk. PK-q KU{-Hello, world! PK-q KU{-PK,-/APKpPK/Azip-0.5.13/tests/end_to_end.rs000064400000000000000000000127440000000000000143070ustar 00000000000000use byteorder::{LittleEndian, WriteBytesExt}; use std::collections::HashSet; use std::io::prelude::*; use std::io::{Cursor, Seek}; use std::iter::FromIterator; use zip::write::FileOptions; use zip::CompressionMethod; // This test asserts that after creating a zip file, then reading its contents back out, // the extracted data will *always* be exactly the same as the original data. #[test] fn end_to_end() { let file = &mut Cursor::new(Vec::new()); write_to_zip(file).expect("file written"); check_zip_contents(file, ENTRY_NAME); } // This test asserts that after copying a `ZipFile` to a new `ZipWriter`, then reading its // contents back out, the extracted data will *always* be exactly the same as the original data. #[test] fn copy() { let src_file = &mut Cursor::new(Vec::new()); write_to_zip(src_file).expect("file written"); let mut tgt_file = &mut Cursor::new(Vec::new()); { let mut src_archive = zip::ZipArchive::new(src_file).unwrap(); let mut zip = zip::ZipWriter::new(&mut tgt_file); { let file = src_archive.by_name(ENTRY_NAME).expect("file found"); zip.raw_copy_file(file).unwrap(); } { let file = src_archive.by_name(ENTRY_NAME).expect("file found"); zip.raw_copy_file_rename(file, COPY_ENTRY_NAME).unwrap(); } } let mut tgt_archive = zip::ZipArchive::new(tgt_file).unwrap(); check_zip_file_contents(&mut tgt_archive, ENTRY_NAME); check_zip_file_contents(&mut tgt_archive, COPY_ENTRY_NAME); } // This test asserts that after appending to a `ZipWriter`, then reading its contents back out, // both the prior data and the appended data will be exactly the same as their originals. 
#[test] fn append() { let mut file = &mut Cursor::new(Vec::new()); write_to_zip(file).expect("file written"); { let mut zip = zip::ZipWriter::new_append(&mut file).unwrap(); zip.start_file(COPY_ENTRY_NAME, Default::default()).unwrap(); zip.write_all(LOREM_IPSUM).unwrap(); zip.finish().unwrap(); } let mut zip = zip::ZipArchive::new(&mut file).unwrap(); check_zip_file_contents(&mut zip, ENTRY_NAME); check_zip_file_contents(&mut zip, COPY_ENTRY_NAME); } fn write_to_zip(file: &mut Cursor>) -> zip::result::ZipResult<()> { let mut zip = zip::ZipWriter::new(file); zip.add_directory("test/", Default::default())?; let options = FileOptions::default() .compression_method(CompressionMethod::Stored) .unix_permissions(0o755); zip.start_file("test/☃.txt", options)?; zip.write_all(b"Hello, World!\n")?; zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", Default::default())?; zip.write_u16::(0xbeef)?; zip.write_u16::(EXTRA_DATA.len() as u16)?; zip.write_all(EXTRA_DATA)?; zip.end_extra_data()?; zip.write_all(b"Hello, World! Again.\n")?; zip.start_file(ENTRY_NAME, Default::default())?; zip.write_all(LOREM_IPSUM)?; zip.finish()?; Ok(()) } fn read_zip(zip_file: R) -> zip::result::ZipResult> { let mut archive = zip::ZipArchive::new(zip_file).unwrap(); let expected_file_names = [ "test/", "test/☃.txt", "test_with_extra_data/🐢.txt", ENTRY_NAME, ]; let expected_file_names = HashSet::from_iter(expected_file_names.iter().map(|&v| v)); let file_names = archive.file_names().collect::>(); assert_eq!(file_names, expected_file_names); { let file_with_extra_data = archive.by_name("test_with_extra_data/🐢.txt")?; let mut extra_data = Vec::new(); extra_data.write_u16::(0xbeef)?; extra_data.write_u16::(EXTRA_DATA.len() as u16)?; extra_data.write_all(EXTRA_DATA)?; assert_eq!(file_with_extra_data.extra_data(), extra_data.as_slice()); } Ok(archive) } fn read_zip_file( archive: &mut zip::ZipArchive, name: &str, ) -> zip::result::ZipResult { let mut file = archive.by_name(name)?; let mut contents = String::new(); file.read_to_string(&mut contents).unwrap(); Ok(contents) } fn check_zip_contents(zip_file: &mut Cursor>, name: &str) { let mut archive = read_zip(zip_file).unwrap(); check_zip_file_contents(&mut archive, name); } fn check_zip_file_contents(archive: &mut zip::ZipArchive, name: &str) { let file_contents: String = read_zip_file(archive, name).unwrap(); assert!(file_contents.as_bytes() == LOREM_IPSUM); } const LOREM_IPSUM : &'static [u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tellus elit, tristique vitae mattis egestas, ultricies vitae risus. Quisque sit amet quam ut urna aliquet molestie. Proin blandit ornare dui, a tempor nisl accumsan in. Praesent a consequat felis. Morbi metus diam, auctor in auctor vel, feugiat id odio. Curabitur ex ex, dictum quis auctor quis, suscipit id lorem. Aliquam vestibulum dolor nec enim vehicula, porta tristique augue tincidunt. Vivamus ut gravida est. Sed pellentesque, dolor vitae tristique consectetur, neque lectus pulvinar dui, sed feugiat purus diam id lectus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Maecenas feugiat velit in ex ultrices scelerisque id id neque. 
"; const EXTRA_DATA: &'static [u8] = b"Extra Data"; const ENTRY_NAME: &str = "test/lorem_ipsum.txt"; const COPY_ENTRY_NAME: &str = "test/lorem_ipsum_renamed.txt"; zip-0.5.13/tests/invalid_date.rs000064400000000000000000000026500000000000000146270ustar 00000000000000use std::io::Cursor; use zip::read::ZipArchive; const BUF: &[u8] = &[ 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x1c, 0x00, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0xf4, 0x5c, 0x88, 0x5a, 0xf4, 0x5c, 0x88, 0x5a, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // time part: 0 seconds, 0 minutes, 0 hours 0x00, 0x00, // date part: day 0 (invalid), month 0 (invalid), year 0 (1980) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0xf4, 0x5c, 0x88, 0x5a, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x58, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, ]; #[test] fn invalid_date() { let _archive = ZipArchive::new(Cursor::new(BUF)).unwrap(); } zip-0.5.13/tests/zip64_large.rs000064400000000000000000000264750000000000000143450ustar 00000000000000// The following is a hexdump of a zip64 file containing the following files: // zero4400: 4400 MB of zeroes // zero100: 100 MB of zeroes // zero4400_2: 4400 MB of zeroes // // 00000000 50 4b 03 04 2d 00 00 00 00 00 1b 6e 51 4d 66 82 |PK..-......nQMf.| // 00000010 13 da ff ff ff ff ff ff ff ff 08 00 30 00 7a 65 |............0.ze| // 00000020 72 6f 34 34 30 30 55 54 09 00 03 a5 21 c7 5b db |ro4400UT....!.[.| // 00000030 21 c7 5b 75 78 0b 00 01 04 e8 03 00 00 04 e8 03 |!.[ux...........| // 00000040 00 00 01 00 10 00 00 00 00 13 01 00 00 00 00 00 |................| // 00000050 00 13 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 113000050 00 00 00 00 00 00 50 4b 03 04 0a 00 00 00 00 00 |......PK........| // 113000060 2b 6e 51 4d 98 23 28 4b 00 00 40 06 00 00 40 06 |+nQM.#(K..@...@.| // 113000070 07 00 1c 00 7a 65 72 6f 31 30 30 55 54 09 00 03 |....zero100UT...| // 113000080 c2 21 c7 5b c2 21 c7 5b 75 78 0b 00 01 04 e8 03 |.!.[.!.[ux......| // 113000090 00 00 04 e8 03 00 00 00 00 00 00 00 00 00 00 00 |................| // 1130000a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 119400090 00 00 00 00 00 00 00 50 4b 03 04 2d 00 00 00 00 |.......PK..-....| // 1194000a0 00 3b 6e 51 4d 66 82 13 da ff ff ff ff ff ff ff |.;nQMf..........| // 1194000b0 ff 0a 00 30 00 7a 65 72 6f 34 34 30 30 5f 32 55 |...0.zero4400_2U| // 1194000c0 54 09 00 03 e2 21 c7 5b db 21 c7 5b 75 78 0b 00 |T....!.[.!.[ux..| // 1194000d0 01 04 e8 03 00 00 04 e8 03 00 00 01 00 10 00 00 |................| // 1194000e0 00 00 13 01 00 00 00 00 00 00 13 01 00 00 00 00 |................| // 1194000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| // * // 22c4000e0 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 50 |...............P| // 22c4000f0 4b 01 02 1e 03 2d 00 00 00 00 00 1b 6e 51 4d 66 |K....-......nQMf| // 22c400100 82 13 da ff ff ff ff ff ff ff ff 08 00 2c 00 00 |.............,..| // 22c400110 00 00 00 00 00 00 00 a4 81 00 00 00 00 7a 65 72 |.............zer| // 22c400120 6f 34 34 30 30 55 54 05 00 03 a5 21 c7 5b 75 78 |o4400UT....!.[ux| // 22c400130 0b 00 01 04 e8 03 00 00 04 e8 03 00 00 01 00 10 |................| // 22c400140 00 00 00 00 13 01 00 00 00 00 00 00 13 01 00 00 |................| // 22c400150 00 50 4b 01 02 1e 03 0a 00 00 00 00 00 2b 6e 51 |.PK..........+nQ| // 22c400160 4d 98 23 28 4b 00 00 40 06 00 00 40 06 07 00 24 |M.#(K..@...@...$| // 22c400170 00 00 00 00 00 00 00 00 00 a4 81 ff ff ff ff 7a |...............z| // 22c400180 65 72 6f 31 30 30 55 54 05 00 03 c2 21 c7 5b 75 |ero100UT....!.[u| // 22c400190 78 0b 00 01 04 e8 03 00 00 04 e8 03 00 00 01 00 |x...............| // 22c4001a0 08 00 56 00 00 13 01 00 00 00 50 4b 01 02 1e 03 |..V.......PK....| // 22c4001b0 2d 00 00 00 00 00 3b 6e 51 4d 66 82 13 da ff ff |-.....;nQMf.....| // 22c4001c0 ff ff ff ff ff ff 0a 00 34 00 00 00 00 00 00 00 |........4.......| // 22c4001d0 00 00 a4 81 ff ff ff ff 7a 65 72 6f 34 34 30 30 |........zero4400| // 22c4001e0 5f 32 55 54 05 00 03 e2 21 c7 5b 75 78 0b 00 01 |_2UT....!.[ux...| // 22c4001f0 04 e8 03 00 00 04 e8 03 00 00 01 00 18 00 00 00 |................| // 22c400200 00 13 01 00 00 00 00 00 00 13 01 00 00 00 97 00 |................| // 22c400210 40 19 01 00 00 00 50 4b 06 06 2c 00 00 00 00 00 |@.....PK..,.....| // 22c400220 00 00 1e 03 2d 00 00 00 00 00 00 00 00 00 03 00 |....-...........| // 22c400230 00 00 00 00 00 00 03 00 00 00 00 00 00 00 27 01 |..............'.| // 22c400240 00 00 00 00 00 00 ef 00 40 2c 02 00 00 00 50 4b |........@,....PK| // 22c400250 06 07 00 00 00 00 16 02 40 2c 02 00 00 00 01 00 |........@,......| // 22c400260 00 00 50 4b 05 06 00 00 00 00 03 00 03 00 27 01 |..PK..........'.| // 22c400270 00 00 ff ff ff ff 00 00 |........| // 22c400278 use std::io::{self, Read, Seek, SeekFrom}; const BLOCK1_LENGTH: u64 = 0x60; const BLOCK1: [u8; BLOCK1_LENGTH as usize] = [ 0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x00, 0x30, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x55, 0x54, 0x09, 0x00, 0x03, 0xa5, 0x21, 0xc7, 0x5b, 0xdb, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK2_LENGTH: u64 = 0x50; const BLOCK2: [u8; BLOCK2_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x6e, 0x51, 0x4d, 0x98, 0x23, 0x28, 0x4b, 0x00, 0x00, 0x40, 0x06, 0x00, 0x00, 0x40, 0x06, 0x07, 0x00, 0x1c, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x31, 0x30, 0x30, 0x55, 0x54, 0x09, 0x00, 0x03, 0xc2, 0x21, 0xc7, 0x5b, 0xc2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK3_LENGTH: u64 = 0x60; const BLOCK3: [u8; BLOCK3_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x30, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x5f, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03, 0xe2, 0x21, 0xc7, 0x5b, 0xdb, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, ]; const BLOCK4_LENGTH: u64 = 0x198; const BLOCK4: [u8; BLOCK4_LENGTH as usize] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x55, 0x54, 0x05, 0x00, 0x03, 0xa5, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x6e, 0x51, 0x4d, 0x98, 0x23, 0x28, 0x4b, 0x00, 0x00, 0x40, 0x06, 0x00, 0x00, 0x40, 0x06, 0x07, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x81, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x65, 0x72, 0x6f, 0x31, 0x30, 0x30, 0x55, 0x54, 0x05, 0x00, 0x03, 0xc2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x08, 0x00, 0x56, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x6e, 0x51, 0x4d, 0x66, 0x82, 0x13, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x81, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x65, 0x72, 0x6f, 0x34, 0x34, 0x30, 0x30, 0x5f, 0x32, 0x55, 0x54, 0x05, 0x00, 0x03, 0xe2, 0x21, 0xc7, 0x5b, 0x75, 0x78, 0x0b, 0x00, 0x01, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x04, 0xe8, 0x03, 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x97, 0x00, 0x40, 0x19, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x06, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, 0x40, 0x2c, 0x02, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x16, 0x02, 0x40, 0x2c, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x27, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, ]; const BLOCK1_START: u64 = 0x000000000; const BLOCK2_START: u64 = 0x113000050; const BLOCK3_START: u64 = 0x119400090; const BLOCK4_START: u64 = 0x22c4000e0; const BLOCK1_END: u64 = BLOCK1_START + BLOCK1_LENGTH - 1; const BLOCK2_END: u64 = BLOCK2_START + BLOCK2_LENGTH - 1; const BLOCK3_END: u64 = BLOCK3_START + BLOCK3_LENGTH - 1; const BLOCK4_END: u64 = BLOCK4_START + BLOCK4_LENGTH - 1; const TOTAL_LENGTH: u64 = BLOCK4_START + BLOCK4_LENGTH; struct Zip64File { pointer: u64, } impl Zip64File { fn new() -> Self { Zip64File { pointer: 0 } } } impl Seek for Zip64File { fn seek(&mut self, pos: SeekFrom) -> 
io::Result { match pos { SeekFrom::Start(offset) => { self.pointer = offset; } SeekFrom::End(offset) => { if offset > 0 || offset < -(TOTAL_LENGTH as i64) { return Err(io::Error::new(io::ErrorKind::Other, "Invalid seek offset")); } self.pointer = (TOTAL_LENGTH as i64 + offset) as u64; } SeekFrom::Current(offset) => { let seekpos = self.pointer as i64 + offset; if seekpos < 0 || seekpos as u64 > TOTAL_LENGTH { return Err(io::Error::new(io::ErrorKind::Other, "Invalid seek offset")); } self.pointer = seekpos as u64; } } Ok(self.pointer) } } impl Read for Zip64File { fn read(&mut self, buf: &mut [u8]) -> io::Result { if self.pointer >= TOTAL_LENGTH { return Ok(0); } match self.pointer { BLOCK1_START..=BLOCK1_END => { buf[0] = BLOCK1[(self.pointer - BLOCK1_START) as usize]; } BLOCK2_START..=BLOCK2_END => { buf[0] = BLOCK2[(self.pointer - BLOCK2_START) as usize]; } BLOCK3_START..=BLOCK3_END => { buf[0] = BLOCK3[(self.pointer - BLOCK3_START) as usize]; } BLOCK4_START..=BLOCK4_END => { buf[0] = BLOCK4[(self.pointer - BLOCK4_START) as usize]; } _ => { buf[0] = 0; } } self.pointer += 1; Ok(1) } } #[test] fn zip64_large() { let zipfile = Zip64File::new(); let mut archive = zip::ZipArchive::new(zipfile).unwrap(); let mut buf = [0u8; 32]; for i in 0..archive.len() { let mut file = archive.by_index(i).unwrap(); let outpath = file.enclosed_name().unwrap(); println!( "Entry {} has name \"{}\" ({} bytes)", i, outpath.display(), file.size() ); match file.read_exact(&mut buf) { Ok(()) => println!("The first {} bytes are: {:?}", buf.len(), buf), Err(e) => println!("Could not read the file: {:?}", e), }; } } zip-0.5.13/tests/zip_comment_garbage.rs000064400000000000000000000017670000000000000162100ustar 00000000000000// Some zip files can contain garbage after the comment. For example, python zipfile generates // it when opening a zip in 'a' mode: // // >>> from zipfile import ZipFile // >>> with ZipFile('comment_garbage.zip', 'a') as z: // ... z.comment = b'long comment bla bla bla' // ... // >>> with ZipFile('comment_garbage.zip', 'a') as z: // ... z.comment = b'short.' // ... 
// >>> // // Hexdump: // // 00000000 50 4b 05 06 00 00 00 00 00 00 00 00 00 00 00 00 |PK..............| // 00000010 00 00 00 00 06 00 73 68 6f 72 74 2e 6f 6d 6d 65 |......short.omme| // 00000020 6e 74 20 62 6c 61 20 62 6c 61 20 62 6c 61 |nt bla bla bla| // 0000002e use std::io; use zip::ZipArchive; #[test] fn correctly_handle_zip_with_garbage_after_comment() { let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/comment_garbage.zip")); let archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file"); assert_eq!(archive.comment(), "short.".as_bytes()); } zip-0.5.13/tests/zip_crypto.rs000064400000000000000000000101620000000000000144030ustar 00000000000000// The following is a hexdump of a zip file containing the following // ZipCrypto encrypted file: // test.txt: 35 bytes, contents: `abcdefghijklmnopqrstuvwxyz123456789`, password: `test` // // 00000000 50 4b 03 04 14 00 01 00 00 00 54 bd b5 50 2f 20 |PK........T..P/ | // 00000010 79 55 2f 00 00 00 23 00 00 00 08 00 00 00 74 65 |yU/...#.......te| // 00000020 73 74 2e 74 78 74 ca 2d 1d 27 19 19 63 43 77 9a |st.txt.-.'..cCw.| // 00000030 71 76 c9 ec d1 6f d9 f5 22 67 b3 8f 52 b5 41 bc |qv...o.."g..R.A.| // 00000040 5c 36 f2 1d 84 c3 c0 28 3b fd e1 70 c2 cc 0c 11 |\6.....(;..p....| // 00000050 0c c5 95 2f a4 50 4b 01 02 3f 00 14 00 01 00 00 |.../.PK..?......| // 00000060 00 54 bd b5 50 2f 20 79 55 2f 00 00 00 23 00 00 |.T..P/ yU/...#..| // 00000070 00 08 00 24 00 00 00 00 00 00 00 20 00 00 00 00 |...$....... ....| // 00000080 00 00 00 74 65 73 74 2e 74 78 74 0a 00 20 00 00 |...test.txt.. ..| // 00000090 00 00 00 01 00 18 00 31 b2 3b bf b8 2f d6 01 31 |.......1.;../..1| // 000000a0 b2 3b bf b8 2f d6 01 a8 c4 45 bd b8 2f d6 01 50 |.;../....E../..P| // 000000b0 4b 05 06 00 00 00 00 01 00 01 00 5a 00 00 00 55 |K..........Z...U| // 000000c0 00 00 00 00 00 |.....| // 000000c5 use std::io::Cursor; use std::io::Read; #[test] fn encrypted_file() { let zip_file_bytes = &mut Cursor::new(vec![ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x54, 0xbd, 0xb5, 0x50, 0x2f, 0x20, 0x79, 0x55, 0x2f, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, 0x74, 0xca, 0x2d, 0x1d, 0x27, 0x19, 0x19, 0x63, 0x43, 0x77, 0x9a, 0x71, 0x76, 0xc9, 0xec, 0xd1, 0x6f, 0xd9, 0xf5, 0x22, 0x67, 0xb3, 0x8f, 0x52, 0xb5, 0x41, 0xbc, 0x5c, 0x36, 0xf2, 0x1d, 0x84, 0xc3, 0xc0, 0x28, 0x3b, 0xfd, 0xe1, 0x70, 0xc2, 0xcc, 0x0c, 0x11, 0x0c, 0xc5, 0x95, 0x2f, 0xa4, 0x50, 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x54, 0xbd, 0xb5, 0x50, 0x2f, 0x20, 0x79, 0x55, 0x2f, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x08, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x31, 0xb2, 0x3b, 0xbf, 0xb8, 0x2f, 0xd6, 0x01, 0x31, 0xb2, 0x3b, 0xbf, 0xb8, 0x2f, 0xd6, 0x01, 0xa8, 0xc4, 0x45, 0xbd, 0xb8, 0x2f, 0xd6, 0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, ]); let mut archive = zip::ZipArchive::new(zip_file_bytes).unwrap(); assert_eq!(archive.len(), 1); //Only one file inside archive: `test.txt` { // No password let file = archive.by_index(0); match file { Err(zip::result::ZipError::UnsupportedArchive( zip::result::ZipError::PASSWORD_REQUIRED, )) => (), Err(_) => panic!( "Expected PasswordRequired error when opening 
encrypted file without password" ), Ok(_) => panic!("Error: Successfully opened encrypted file without password?!"), } } { // Wrong password let file = archive.by_index_decrypt(0, b"wrong password"); match file { Ok(Err(zip::result::InvalidPassword)) => (), Err(_) => panic!( "Expected InvalidPassword error when opening encrypted file with wrong password" ), Ok(Ok(_)) => panic!("Error: Successfully opened encrypted file with wrong password?!"), } } { // Correct password, read contents let mut file = archive .by_index_decrypt(0, "test".as_bytes()) .unwrap() .unwrap(); let file_name = file.enclosed_name().unwrap(); assert_eq!(file_name, std::path::PathBuf::from("test.txt")); let mut data = Vec::new(); file.read_to_end(&mut data).unwrap(); assert_eq!(data, "abcdefghijklmnopqrstuvwxyz123456789".as_bytes()); } }
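// A minimal write/read round-trip sketch, kept separate from the crate's own tests above.
// It uses only APIs exercised elsewhere in this package (ZipWriter, FileOptions,
// CompressionMethod, ZipArchive); the entry name "hello.txt", its contents, and the
// permission bits are illustrative assumptions, and the archive lives in an in-memory Cursor.

use std::io::{Cursor, Read, Write};

use zip::write::FileOptions;
use zip::{CompressionMethod, ZipArchive, ZipWriter};

fn roundtrip_example() -> zip::result::ZipResult<()> {
    // Build a small archive in memory with a single stored (uncompressed) entry.
    let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
    let options = FileOptions::default()
        .compression_method(CompressionMethod::Stored)
        .unix_permissions(0o644);
    writer.start_file("hello.txt", options)?;
    writer.write_all(b"Hello, world!\n")?;
    // finish() writes the central directory and hands back the underlying Cursor.
    let cursor = writer.finish()?;

    // Read the entry back out and check that the contents survived the round trip.
    let mut archive = ZipArchive::new(cursor)?;
    let mut contents = String::new();
    archive.by_name("hello.txt")?.read_to_string(&mut contents)?;
    assert_eq!(contents, "Hello, world!\n");
    Ok(())
}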