async_zip-0.0.16/.cargo_vcs_info.json0000644000000001360000000000100131170ustar { "git": { "sha1": "7ae5331e5de730cb87e02f6647bcaba278a8ddbf" }, "path_in_vcs": "" }async_zip-0.0.16/.github/dependabot.yml000064400000000000000000000004361046102023000161020ustar 00000000000000version: 2 updates: - package-ecosystem: "github-actions" # Workflow files stored in the # default location of `.github/workflows` directory: "/" schedule: interval: "daily" - package-ecosystem: "cargo" directory: "/" schedule: interval: "daily" async_zip-0.0.16/.github/workflows/ci-clippy.yml000064400000000000000000000004501046102023000177170ustar 00000000000000name: clippy (Linux) on: push: branches: [ main ] pull_request: branches: [ main ] env: CARGO_TERM_COLOR: always jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run clippy run: cargo clippy --all-features -- -D clippy::allasync_zip-0.0.16/.github/workflows/ci-fmt.yml000064400000000000000000000004161046102023000172070ustar 00000000000000name: rustfmt (Linux) on: push: branches: [ main ] pull_request: branches: [ main ] env: CARGO_TERM_COLOR: always jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run rustfmt run: cargo fmt --checkasync_zip-0.0.16/.github/workflows/ci-linux.yml000064400000000000000000000021611046102023000175570ustar 00000000000000name: Test (Linux) on: push: branches: [ main ] pull_request: branches: [ main ] env: CARGO_TERM_COLOR: always jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Test [no features] run: cargo test --verbose - name: Test ['chrono' feature] run: cargo test --verbose --features chrono - name: Test ['tokio' feature] run: cargo test --verbose --features tokio - name: Test ['tokio-fs' feature] run: cargo test --verbose --features tokio-fs - name: Test ['deflate' feature] run: cargo test --verbose --features deflate - name: Test ['bzip2' feature] run: cargo test --verbose --features bzip2 - name: Test ['lzma' feature] 
run: cargo test --verbose --features lzma - name: Test ['zstd' feature] run: cargo test --verbose --features zstd - name: Test ['xz' feature] run: cargo test --verbose --features xz - name: Test ['deflate64' feature] run: cargo test --verbose --features deflate64 - name: Test ['full' feature] run: cargo test --verbose --features full async_zip-0.0.16/.github/workflows/ci-typos.yml000064400000000000000000000005141046102023000175760ustar 00000000000000name: typos (Linux) on: push: branches: [ main ] pull_request: branches: [ main ] env: CARGO_TERM_COLOR: always jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install typos run: cargo install typos-cli - name: Run typos run: typos --format briefasync_zip-0.0.16/.github/workflows/ci-wasm.yml000064400000000000000000000010001046102023000173560ustar 00000000000000name: Build (WASM) on: push: branches: [ main ] pull_request: branches: [ main ] env: CARGO_TERM_COLOR: always jobs: build: name: Build ['full-wasm' feature] on ${{ matrix.target }} runs-on: ubuntu-latest strategy: matrix: target: - wasm32-wasi - wasm32-unknown-unknown steps: - uses: actions/checkout@v4 - run: rustup target add ${{ matrix.target }} - run: cargo build --verbose --target ${{ matrix.target }} --features full-wasm async_zip-0.0.16/.gitignore000064400000000000000000000006771046102023000137110ustar 00000000000000# Generated by Cargo # will have compiled files and executables /target/ /examples/**/target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html /Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk /examples/**/*.rs.bk # Ignore generated zip test file that is large /src/tests/read/zip64/zip64many.zip async_zip-0.0.16/Cargo.lock0000644000001442530000000000100111030ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "actix-codec" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a7559404a7f3573127aab53c08ce37a6c6a315c374a31070f3c91cd1b4a7fe" dependencies = [ "bitflags", "bytes", "futures-core", "futures-sink", "log", "memchr", "pin-project-lite", "tokio", "tokio-util", ] [[package]] name = "actix-http" version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2079246596c18b4a33e274ae10c0e50613f4d32a4198e09c7b93771013fed74" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "ahash 0.8.3", "base64", "bitflags", "brotli", "bytes", "bytestring", "derive_more", "encoding_rs", "flate2", "futures-core", "h2", "http", "httparse", "httpdate", "itoa", "language-tags", "local-channel", "mime", "percent-encoding", "pin-project-lite", "rand", "sha1", "smallvec", "tokio", "tokio-util", "tracing", "zstd 0.12.3+zstd.1.5.2", ] [[package]] name = "actix-macros" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6" dependencies = [ "quote", "syn", ] [[package]] name = "actix-multipart" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee489e3c01eae4d1c35b03c4493f71cb40d93f66b14558feb1b1a807671cc4e" dependencies = [ "actix-multipart-derive", "actix-utils", "actix-web", "bytes", "derive_more", "futures-core", "futures-util", "httparse", "local-waker", "log", "memchr", "mime", "serde", "serde_json", "serde_plain", "tempfile", "tokio", ] [[package]] name = "actix-multipart-derive" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ec592f234db8a253cf80531246a4407c8a70530423eea80688a6c5a44a110e7" dependencies = [ "darling", "parse-size", "proc-macro2", "quote", "syn", ] [[package]] name = "actix-router" version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d66ff4d247d2b160861fa2866457e85706833527840e4133f8f49aa423a38799" dependencies = [ "bytestring", "http", "regex", "serde", "tracing", ] [[package]] name = "actix-rt" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15265b6b8e2347670eb363c47fc8c75208b4a4994b27192f345fcbe707804f3e" dependencies = [ "futures-core", "tokio", ] [[package]] name = "actix-server" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e8613a75dd50cc45f473cee3c34d59ed677c0f7b44480ce3b8247d7dc519327" dependencies = [ "actix-rt", "actix-service", "actix-utils", "futures-core", "futures-util", "mio", "num_cpus", "socket2", "tokio", "tracing", ] [[package]] name = "actix-service" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" dependencies = [ "futures-core", "paste", "pin-project-lite", ] [[package]] name = "actix-utils" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" dependencies = [ "local-waker", "pin-project-lite", ] [[package]] name = "actix-web" version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3cb42f9566ab176e1ef0b8b3a896529062b4efc6be0123046095914c4c1c96" dependencies = [ "actix-codec", "actix-http", "actix-macros", "actix-router", "actix-rt", "actix-server", "actix-service", "actix-utils", "actix-web-codegen", "ahash 0.7.6", "bytes", "bytestring", "cfg-if", "cookie", "derive_more", "encoding_rs", "futures-core", "futures-util", "http", "itoa", "language-tags", "log", "mime", "once_cell", "pin-project-lite", "regex", "serde", "serde_json", "serde_urlencoded", "smallvec", "socket2", "time", "url", ] [[package]] name = "actix-web-codegen" version = "4.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9" dependencies = [ "actix-router", "proc-macro2", "quote", "syn", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aes" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", "cipher", "cpufeatures", "opaque-debug", ] [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom", "once_cell", "version_check", ] [[package]] name = "ahash" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", "getrandom", "once_cell", "version_check", ] [[package]] name = "aho-corasick" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "anyhow" version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "async-compression" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" dependencies = [ "bzip2", "deflate64", "flate2", "futures-core", "futures-io", "memchr", "pin-project-lite", "xz2", "zstd 0.12.3+zstd.1.5.2", "zstd-safe 6.0.5+zstd.1.5.4", ] [[package]] name = "async_zip" version = "0.0.16" dependencies = [ "actix-multipart", "actix-web", "anyhow", "async-compression", "chrono", "crc32fast", "derive_more", "env_logger", "futures", "futures-lite", "pin-project", "sanitize-filename", "thiserror", "tokio", "tokio-util", "uuid", "zip", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base64" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "brotli" version = "3.3.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", "brotli-decompressor", ] [[package]] name = "brotli-decompressor" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] [[package]] name = "bumpalo" version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bytestring" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "238e4886760d98c4f899360c834fa93e62cf7f721ac3c2da375cbdf4b8679aae" dependencies = [ "bytes", ] [[package]] name = "bzip2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", ] [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" dependencies = [ "jobserver", ] [[package]] name = "cfg-if" version = 
"1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "num-integer", "num-traits", "winapi", ] [[package]] name = "cipher" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ "generic-array", ] [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "cookie" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" dependencies = [ "percent-encoding", "time", "version_check", ] [[package]] name = "core-foundation-sys" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-utils" version = "0.8.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "darling" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", ] [[package]] name = "darling_core" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", "syn", ] [[package]] name = "darling_macro" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", "syn", ] [[package]] name = "deflate64" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30dc5bb425a582de72bb57130320aac133893ea85f6151f79bd9aa9067114f60" [[package]] name = "derive_more" version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", "proc-macro2", "quote", "rustc_version", "syn", ] [[package]] name = "digest" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer", "crypto-common", "subtle", ] [[package]] name = "encoding_rs" version = "0.8.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] [[package]] name = "env_logger" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.48.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "fastrand" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "flate2" version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ "percent-encoding", ] 
[[package]] name = "futures" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-lite" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" dependencies = [ "fastrand 2.0.1", "futures-core", "futures-io", "parking", "pin-project-lite", ] [[package]] name = "futures-macro" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = 
"futures-task" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "h2" version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hermit-abi" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "hermit-abi" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] name = "hmac" version = 
"0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ "digest", ] [[package]] name = "http" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "iana-time-zone" version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd911b35d940d2bd0bea0f9100068e5b97b51a1cbe13d13382f132e0365257a0" dependencies = [ "android_system_properties", "core-foundation-sys", "js-sys", "wasm-bindgen", "winapi", ] [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", ] [[package]] name = "instant" 
version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ "hermit-abi 0.3.1", "libc", "windows-sys 0.48.0", ] [[package]] name = "is-terminal" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", "windows-sys 0.48.0", ] [[package]] name = "itoa" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ "libc", ] [[package]] name = "js-sys" version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] [[package]] name = "language-tags" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.142" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" [[package]] name = "linux-raw-sys" version = 
"0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36eb31c1778188ae1e64398743890d0877fef36d11521ac60406b42016e8c2cf" [[package]] name = "local-channel" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f303ec0e94c6c54447f84f3b0ef7af769858a9c4ef56ef2a986d3dcd4c3fc9c" dependencies = [ "futures-core", "futures-sink", "futures-util", "local-waker", ] [[package]] name = "local-waker" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e34f76eb3611940e0e7d53a9aaa4e6a3151f69541a282fd0dad5571420c53ff1" [[package]] name = "lock_api" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] [[package]] name = "lzma-sys" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb4b7c3eddad11d3af9e86c487607d2d2442d185d848575365c4856ba96d619" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "mime" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi", "windows-sys 0.42.0", ] [[package]] name = "num-integer" version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", "num-traits", ] [[package]] name = "num-traits" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi 0.1.19", "libc", ] [[package]] name = "once_cell" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "parking" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", "redox_syscall 0.2.16", "smallvec", 
"windows-sys 0.42.0", ] [[package]] name = "parse-size" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "944553dd59c802559559161f9816429058b869003836120e262e8caec061b7ae" [[package]] name = "password-hash" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", "rand_core", "subtle", ] [[package]] name = "paste" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest", "hmac", "password-hash", "sha2", ] [[package]] name = "percent-encoding" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pin-project" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 
[[package]] name = "pkg-config" version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ 
"bitflags", ] [[package]] name = "regex" version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.37.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b864d3c18a5785a05953adeed93e2dca37ed30f18e69bba9f30079d51f363f" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", "windows-sys 0.48.0", ] [[package]] name = "ryu" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "sanitize-filename" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ed72fbaf78e6f2d41744923916966c4fbe3d7c74e3037a8ee482f1115572603" dependencies = [ "lazy_static", "regex", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" [[package]] name = "serde_json" version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "serde_plain" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae" dependencies = [ "serde", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "sha1" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "signal-hook-registry" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", ] [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand 1.9.0", "redox_syscall 0.3.5", "rustix", "windows-sys 0.45.0", ] [[package]] name = "termcolor" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] 
name = "time" version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.42.0", ] [[package]] name = "tokio-macros" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-util" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite", "tokio", 
"tracing", ] [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", ] [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "url" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "uuid" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" dependencies = [ "getrandom", "serde", ] [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" 
[[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.0", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "xz2" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c179869f34fc7c01830d3ce7ea2086bc3a07e0d35289b667d0a8bf910258926c" dependencies = [ "lzma-sys", ] [[package]] name = "zip" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0445d0fbc924bb93539b4316c11afb121ea39296f99a3c4c9edad09e3658cdef" dependencies = [ "aes", "byteorder", "bzip2", "constant_time_eq", "crc32fast", "crossbeam-utils", "flate2", "hmac", "pbkdf2", "sha1", "time", "zstd 0.11.2+zstd.1.5.2", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ "zstd-safe 5.0.2+zstd.1.5.2", ] [[package]] name = 
"zstd" version = "0.12.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" dependencies = [ "zstd-safe 6.0.5+zstd.1.5.4", ] [[package]] name = "zstd-safe" version = "5.0.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-safe" version = "6.0.5+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", "pkg-config", ] async_zip-0.0.16/Cargo.toml0000644000000052100000000000100111130ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "async_zip" version = "0.0.16" authors = ["Harry [hello@majored.pw]"] description = "An asynchronous ZIP archive reading/writing crate." 
homepage = "https://github.com/Majored/rs-async-zip" documentation = "https://docs.rs/async_zip/" readme = "README.md" keywords = [ "async", "zip", "archive", "tokio", ] categories = [ "asynchronous", "compression", ] license = "MIT" repository = "https://github.com/Majored/rs-async-zip" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [dependencies.async-compression] version = "0.4.2" features = ["futures-io"] optional = true default-features = false [dependencies.chrono] version = "0.4" features = ["clock"] optional = true default-features = false [dependencies.crc32fast] version = "1" [dependencies.futures-lite] version = "2.1.0" features = ["std"] default-features = false [dependencies.pin-project] version = "1" [dependencies.thiserror] version = "1" [dependencies.tokio] version = "1" optional = true default-features = false [dependencies.tokio-util] version = "0.7" features = ["compat"] optional = true [dev-dependencies.actix-multipart] version = "0.6" [dev-dependencies.actix-web] version = "4" [dev-dependencies.anyhow] version = "1" [dev-dependencies.derive_more] version = "0.99" [dev-dependencies.env_logger] version = "0.10.0" [dev-dependencies.futures] version = "0.3" [dev-dependencies.sanitize-filename] version = "0.5" [dev-dependencies.tokio] version = "1" features = ["full"] [dev-dependencies.tokio-util] version = "0.7" features = ["compat"] [dev-dependencies.uuid] version = "1" features = [ "v4", "serde", ] [dev-dependencies.zip] version = "0.6.3" [features] bzip2 = ["async-compression/bzip2"] deflate = ["async-compression/deflate"] deflate64 = ["async-compression/deflate64"] full = [ "chrono", "tokio-fs", "deflate", "bzip2", "lzma", "zstd", "xz", "deflate64", ] full-wasm = [ "chrono", "deflate", "zstd", ] lzma = ["async-compression/lzma"] tokio = [ "dep:tokio", "tokio-util", ] tokio-fs = ["tokio/fs"] xz = ["async-compression/xz"] zstd = ["async-compression/zstd"] 
async_zip-0.0.16/Cargo.toml.orig000064400000000000000000000036221046102023000146010ustar 00000000000000[package] name = "async_zip" version = "0.0.16" edition = "2021" authors = ["Harry [hello@majored.pw]"] repository = "https://github.com/Majored/rs-async-zip" description = "An asynchronous ZIP archive reading/writing crate." readme = "README.md" license = "MIT" documentation = "https://docs.rs/async_zip/" homepage = "https://github.com/Majored/rs-async-zip" keywords = ["async", "zip", "archive", "tokio"] categories = ["asynchronous", "compression"] [features] full = ["chrono", "tokio-fs", "deflate", "bzip2", "lzma", "zstd", "xz", "deflate64"] # All features that are compatible with WASM full-wasm = ["chrono", "deflate", "zstd"] tokio = ["dep:tokio", "tokio-util"] tokio-fs = ["tokio/fs"] deflate = ["async-compression/deflate"] bzip2 = ["async-compression/bzip2"] lzma = ["async-compression/lzma"] zstd = ["async-compression/zstd"] xz = ["async-compression/xz"] deflate64 = ["async-compression/deflate64"] [package.metadata.docs.rs] all-features = true # defines the configuration attribute `docsrs` rustdoc-args = ["--cfg", "docsrs"] [dependencies] crc32fast = "1" futures-lite = { version = "2.1.0", default-features = false, features = ["std"] } pin-project = "1" thiserror = "1" async-compression = { version = "0.4.2", default-features = false, features = ["futures-io"], optional = true } chrono = { version = "0.4", default-features = false, features = ["clock"], optional = true } tokio = { version = "1", default-features = false, optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } [dev-dependencies] # tests tokio = { version = "1", features = ["full"] } tokio-util = { version = "0.7", features = ["compat"] } env_logger = "0.10.0" zip = "0.6.3" # shared across multiple examples anyhow = "1" sanitize-filename = "0.5" # actix_multipart actix-web = "4" actix-multipart = "0.6" futures = "0.3" derive_more = "0.99" uuid = { version = 
"1", features = ["v4", "serde"] } async_zip-0.0.16/LICENSE000064400000000000000000000021041046102023000127110ustar 00000000000000MIT License Copyright (c) 2021 Harry Copyright (c) 2023 Cognite AS Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
async_zip-0.0.16/README.md000064400000000000000000000072371046102023000131770ustar 00000000000000# async_zip [![Crates.io](https://img.shields.io/crates/v/async_zip?style=flat-square)](https://crates.io/crates/async_zip) [![Crates.io](https://img.shields.io/crates/d/async_zip?style=flat-square)](https://crates.io/crates/async_zip) [![docs.rs](https://img.shields.io/docsrs/async_zip?style=flat-square)](https://docs.rs/async_zip/) [![GitHub Workflow Status (branch)](https://img.shields.io/github/actions/workflow/status/Majored/rs-async-zip/ci-linux.yml?branch=main&style=flat-square)](https://github.com/Majored/rs-async-zip/actions?query=branch%3Amain) [![GitHub](https://img.shields.io/github/license/Majored/rs-async-zip?style=flat-square)](https://github.com/Majored/rs-async-zip/blob/main/LICENSE) An asynchronous ZIP archive reading/writing crate. ## Features - A base implementation atop `futures`'s IO traits. - An extended implementation atop `tokio`'s IO traits. - Support for Stored, Deflate, bzip2, LZMA, zstd, and xz compression methods. - Various different reading approaches (seek, stream, filesystem, in-memory buffer, etc). - Support for writing complete data (u8 slices) or streams using data descriptors. - Initial support for ZIP64 reading and writing. - Aims for reasonable [specification](https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md) compliance. ## Installation & Basic Usage ```toml [dependencies] async_zip = { version = "0.0.16", features = ["full"] } ``` A (soon to be) extensive list of [examples](https://github.com/Majored/rs-async-zip/tree/main/examples) can be found under the `/examples` directory. ### Feature Flags - `full` - Enables all below features. - `full-wasm` - Enables all below features that are compatible with WASM. - `chrono` - Enables support for parsing dates via `chrono`. - `tokio` - Enables support for the `tokio` implementation module. - `tokio-fs` - Enables support for the `tokio::fs` reading module. 
- `deflate` - Enables support for the Deflate compression method. - `bzip2` - Enables support for the bzip2 compression method. - `lzma` - Enables support for the LZMA compression method. - `zstd` - Enables support for the zstd compression method. - `xz` - Enables support for the xz compression method. ### Reading ```rust use tokio::{io::AsyncReadExt, fs::File}; use async_zip::tokio::read::seek::ZipFileReader; ... let mut file = File::open("./Archive.zip").await?; let mut zip = ZipFileReader::with_tokio(&mut file).await?; let mut string = String::new(); let mut reader = zip.reader_with_entry(0).await?; reader.read_to_string_checked(&mut string).await?; println!("{}", string); ``` ### Writing ```rust use async_zip::tokio::write::ZipFileWriter; use async_zip::{Compression, ZipEntryBuilder}; use tokio::fs::File; ... let mut file = File::create("foo.zip").await?; let mut writer = ZipFileWriter::with_tokio(&mut file); let data = b"This is an example file."; let builder = ZipEntryBuilder::new("bar.txt".into(), Compression::Deflate); writer.write_entry_whole(builder, data).await?; writer.close().await?; ``` ## Contributions Whilst I will be continuing to maintain this crate myself, reasonable specification compliance is a huge undertaking for a single individual. As such, contributions will always be encouraged and appreciated. No contribution guidelines exist but additions should be developed with readability in mind, with appropriate comments, and make use of `rustfmt`. ## Issues & Support Whether you're wanting to report a bug you've come across during use of this crate or are seeking general help/assistance, please utilise the [issues tracker](https://github.com/Majored/rs-async-zip/issues) and provide as much detail as possible (eg. recreation steps). I try to respond to issues within a reasonable timeframe. 
async_zip-0.0.16/SPECIFICATION.md000064400000000000000000005222701046102023000142210ustar 00000000000000File: APPNOTE.TXT - .ZIP File Format Specification Version: 6.3.9 Status: FINAL - replaces version 6.3.8 Revised: July 15, 2020 Copyright (c) 1989 - 2014, 2018, 2019, 2020 PKWARE Inc., All Rights Reserved. ## 1.0 Introduction ## 1.1 Purpose ### 1.1.1 This specification is intended to define a cross-platform, interoperable file storage and transfer format. Since its first publication in 1989, PKWARE, Inc. ("PKWARE") has remained committed to ensuring the interoperability of the .ZIP file format through periodic publication and maintenance of this specification. We trust that all .ZIP compatible vendors and application developers that use and benefit from this format will share and support this commitment to interoperability. ## 1.2 Scope ### 1.2.1 ZIP is one of the most widely used compressed file formats. It is universally used to aggregate, compress, and encrypt files into a single interoperable container. No specific use or application need is defined by this format and no specific implementation guidance is provided. This document provides details on the storage format for creating ZIP files. Information is provided on the records and fields that describe what a ZIP file is. ## 1.3 Trademarks ### 1.3.1 PKWARE, PKZIP, Smartcrypt, SecureZIP, and PKSFX are registered trademarks of PKWARE, Inc. in the United States and elsewhere. PKPatchMaker, Deflate64, and ZIP64 are trademarks of PKWARE, Inc. Other marks referenced within this document appear for identification purposes only and are the property of their respective owners. ## 1.4 Permitted Use ### 1.4.1 This document, "APPNOTE.TXT - .ZIP File Format Specification" is the exclusive property of PKWARE. 
Use of the information contained in this document is permitted solely for the purpose of creating products, programs and processes that read and write files in the ZIP format subject to the terms and conditions herein. ### 1.4.2 Use of the content of this document within other publications is permitted only through reference to this document. Any reproduction or distribution of this document in whole or in part without prior written permission from PKWARE is strictly prohibited. ### 1.4.3 Certain technological components provided in this document are the patented proprietary technology of PKWARE and as such require a separate, executed license agreement from PKWARE. Applicable components are marked with the following, or similar, statement: 'Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information'. ## 1.5 Contacting PKWARE ### 1.5.1 If you have questions on this format, its use, or licensing, or if you wish to report defects, request changes or additions, please contact: PKWARE, Inc. 201 E. Pittsburgh Avenue, Suite 400 Milwaukee, WI 53204 +1-414-289-9788 +1-414-289-9789 FAX zipformat@pkware.com ### 1.5.2 Information about this format and a reference copy of this document is publicly available at: http://www.pkware.com/appnote ## 1.6 Disclaimer ### 1.6.1 Although PKWARE will attempt to supply current and accurate information relating to its file formats, algorithms, and the subject programs, the possibility of error or omission cannot be eliminated. PKWARE therefore expressly disclaims any warranty that the information contained in the associated materials relating to the subject programs and/or the format of the files created or accessed by the subject programs and/or the algorithms used by the subject programs, or any other matter, is current, correct or accurate as delivered. Any risk of damage due to any possible inaccurate information is assumed by the user of the information. 
Furthermore, the information relating to the subject programs and/or the file formats created or accessed by the subject programs and/or the algorithms used by the subject programs is subject to change without notice. ## 2.0 Revisions ## 2.1 Document Status ### 2.1.1 If the STATUS of this file is marked as DRAFT, the content defines proposed revisions to this specification which may consist of changes to the ZIP format itself, or that may consist of other content changes to this document. Versions of this document and the format in DRAFT form may be subject to modification prior to publication STATUS of FINAL. DRAFT versions are published periodically to provide notification to the ZIP community of pending changes and to provide opportunity for review and comment. ### 2.1.2 Versions of this document having a STATUS of FINAL are considered to be in the final form for that version of the document and are not subject to further change until a new, higher version numbered document is published. Newer versions of this format specification are intended to remain interoperable with all prior versions whenever technically possible. 
## 2.2 Change Log Version Change Description Date ------- ------------------ ---------- 5.2 -Single Password Symmetric Encryption 07/16/2003 storage 6.1.0 -Smartcard compatibility 01/20/2004 -Documentation on certificate storage 6.2.0 -Introduction of Central Directory 04/26/2004 Encryption for encrypting metadata -Added OS X to Version Made By values 6.2.1 -Added Extra Field placeholder for 04/01/2005 POSZIP using ID 0x4690 -Clarified size field on "zip64 end of central directory record" 6.2.2 -Documented Final Feature Specification 01/06/2006 for Strong Encryption -Clarifications and typographical corrections 6.3.0 -Added tape positioning storage 09/29/2006 parameters -Expanded list of supported hash algorithms -Expanded list of supported compression algorithms -Expanded list of supported encryption algorithms -Added option for Unicode filename storage -Clarifications for consistent use of Data Descriptor records -Added additional "Extra Field" definitions 6.3.1 -Corrected standard hash values for 04/11/2007 SHA-256/384/512 6.3.2 -Added compression method 97 09/28/2007 -Documented InfoZIP "Extra Field" values for UTF-8 file name and file comment storage 6.3.3 -Formatting changes to support 09/01/2012 easier referencing of this APPNOTE from other documents and standards 6.3.4 -Address change 10/01/2014 6.3.5 -Documented compression methods 16 11/31/2018 and 99 (4.4.5, 4.6.1, 5.11, 5.17, APPENDIX E) -Corrected several typographical errors (2.1.2, 3.2, 4.1.1, 10.2) -Marked legacy algorithms as no longer suitable for use (4.4.5.1) -Added clarity on MS DOS time format (4.4.6) -Assign extrafield ID for Timestamps (4.5.2) -Field code description correction (A.2) -More consistent use of MAY/SHOULD/MUST -Expanded 0x0065 record attribute codes (B.2) -Initial information on 0x0022 Extra Data 6.3.6 -Corrected typographical error 04/26/2019 (4.4.1.3) 6.3.7 -Added Zstandard compression method ID (4.4.5) -Corrected several reported typos -Marked intended use for general purpose 
bit 14 -Added Data Stream Alignment Extra Data info (4.6.11) 6.3.8 -Resolved Zstandard compression method ID conflict (4.4.5) -Added additional compression method ID values in use 6.3.9 -Corrected a typo in Data Stream Alignment description (4.6.11) ## 3.0 Notations 3.1 Use of the term MUST or SHALL indicates a required element. 3.2 MUST NOT or SHALL NOT indicates an element is prohibited from use. 3.3 SHOULD indicates a RECOMMENDED element. 3.4 SHOULD NOT indicates an element NOT RECOMMENDED for use. 3.5 MAY indicates an OPTIONAL element. ## 4.0 ZIP Files ## 4.1 What is a ZIP file ### 4.1.1 ZIP files MAY be identified by the standard .ZIP file extension although use of a file extension is not required. Use of the extension .ZIPX is also recognized and MAY be used for ZIP files. Other common file extensions using the ZIP format include .JAR, .WAR, .DOCX, .XLSX, .PPTX, .ODT, .ODS, .ODP and others. Programs reading or writing ZIP files SHOULD rely on internal record signatures described in this document to identify files in this format. ### 4.1.2 ZIP files SHOULD contain at least one file and MAY contain multiple files. ### 4.1.3 Data compression MAY be used to reduce the size of files placed into a ZIP file, but is not required. This format supports the use of multiple data compression algorithms. When compression is used, one of the documented compression algorithms MUST be used. Implementors are advised to experiment with their data to determine which of the available algorithms provides the best compression for their needs. Compression method 8 (Deflate) is the method used by default by most ZIP compatible application programs. ### 4.1.4 Data encryption MAY be used to protect files within a ZIP file. Keying methods supported for encryption within this format include passwords and public/private keys. Either MAY be used individually or in combination. Encryption MAY be applied to individual files. 
Additional security MAY be used through the encryption of ZIP file metadata stored within the Central Directory. See the section on the Strong Encryption Specification for information. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.1.5 Data integrity MUST be provided for each file using CRC32. ### 4.1.6 Additional data integrity MAY be included through the use of digital signatures. Individual files MAY be signed with one or more digital signatures. The Central Directory, if signed, MUST use a single signature. ### 4.1.7 Files MAY be placed within a ZIP file uncompressed or stored. The term "stored" as used in the context of this document means the file is copied into the ZIP file uncompressed. ### 4.1.8 Each data file placed into a ZIP file MAY be compressed, stored, encrypted or digitally signed independent of how other data files in the same ZIP file are archived. ### 4.1.9 ZIP files MAY be streamed, split into segments (on fixed or on removable media) or "self-extracting". Self-extracting ZIP files MUST include extraction code for a target platform within the ZIP file. ### 4.1.10 Extensibility is provided for platform or application specific needs through extra data fields that MAY be defined for custom purposes. Extra data definitions MUST NOT conflict with existing documented record definitions. ### 4.1.11 Common uses for ZIP MAY also include the use of manifest files. Manifest files store application specific information within a file stored within the ZIP file. This manifest file SHOULD be the first file in the ZIP file. This specification does not provide any information or guidance on the use of manifest files within ZIP files. Refer to the application developer for information on using manifest files and for any additional profile information on using ZIP within an application. ### 4.1.12 ZIP files MAY be placed within other ZIP files. 
## 4.2 ZIP Metadata ### 4.2.1 ZIP files are identified by metadata consisting of defined record types containing the storage information necessary for maintaining the files placed into a ZIP file. Each record type MUST be identified using a header signature that identifies the record type. Signature values begin with the two byte constant marker of 0x4b50, representing the characters "PK". ## 4.3 General Format of a .ZIP file ### 4.3.1 A ZIP file MUST contain an "end of central directory record". A ZIP file containing only an "end of central directory record" is considered an empty ZIP file. Files MAY be added or replaced within a ZIP file, or deleted. A ZIP file MUST have only one "end of central directory record". Other records defined in this specification MAY be used as needed to support storage requirements for individual ZIP files. ### 4.3.2 Each file placed into a ZIP file MUST be preceded by a "local file header" record for that file. Each "local file header" MUST be accompanied by a corresponding "central directory header" record within the central directory section of the ZIP file. ### 4.3.3 Files MAY be stored in arbitrary order within a ZIP file. A ZIP file MAY span multiple volumes or it MAY be split into user-defined segment sizes. All values MUST be stored in little-endian byte order unless otherwise specified in this document for a specific data element. ### 4.3.4 Compression MUST NOT be applied to a "local file header", an "encryption header", or an "end of central directory record". Individual "central directory records" MUST NOT be compressed, but the aggregate of all central directory records MAY be compressed. ### 4.3.5 File data MAY be followed by a "data descriptor" for the file. Data descriptors are used to facilitate ZIP file streaming. ### 4.3.6 Overall .ZIP file format: [local file header 1] [encryption header 1] [file data 1] [data descriptor 1] . . . 
[local file header n] [encryption header n] [file data n] [data descriptor n] [archive decryption header] [archive extra data record] [central directory header 1] . . . [central directory header n] [zip64 end of central directory record] [zip64 end of central directory locator] [end of central directory record] ### 4.3.7 Local file header: local file header signature 4 bytes (0x04034b50) version needed to extract 2 bytes general purpose bit flag 2 bytes compression method 2 bytes last mod file time 2 bytes last mod file date 2 bytes crc-32 4 bytes compressed size 4 bytes uncompressed size 4 bytes file name length 2 bytes extra field length 2 bytes file name (variable size) extra field (variable size) ### 4.3.8 File data Immediately following the local header for a file SHOULD be placed the compressed or stored data for the file. If the file is encrypted, the encryption header for the file SHOULD be placed after the local header and before the file data. The series of [local file header][encryption header] [file data][data descriptor] repeats for each file in the .ZIP archive. Zero-byte files, directories, and other file types that contain no content MUST NOT include file data. ### 4.3.9 Data descriptor: crc-32 4 bytes compressed size 4 bytes uncompressed size 4 bytes ### 4.3.9.1 This descriptor MUST exist if bit 3 of the general purpose bit flag is set (see below). It is byte aligned and immediately follows the last byte of compressed data. This descriptor SHOULD be used only when it was not possible to seek in the output .ZIP file, e.g., when the output .ZIP file was standard output or a non-seekable device. For ZIP64(tm) format archives, the compressed and uncompressed sizes are 8 bytes each. ### 4.3.9.2 When compressing files, compressed and uncompressed sizes SHOULD be stored in ZIP64 format (as 8 byte values) when a file's size exceeds 0xFFFFFFFF. However ZIP64 format MAY be used regardless of the size of a file. 
When extracting, if the zip64 extended information extra field is present for the file the compressed and uncompressed sizes will be 8 byte values. ### 4.3.9.3 Although not originally assigned a signature, the value 0x08074b50 has commonly been adopted as a signature value for the data descriptor record. Implementers SHOULD be aware that ZIP files MAY be encountered with or without this signature marking data descriptors and SHOULD account for either case when reading ZIP files to ensure compatibility. ### 4.3.9.4 When writing ZIP files, implementors SHOULD include the signature value marking the data descriptor record. When the signature is used, the fields currently defined for the data descriptor record will immediately follow the signature. ### 4.3.9.5 An extensible data descriptor will be released in a future version of this APPNOTE. This new record is intended to resolve conflicts with the use of this record going forward, and to provide better support for streamed file processing. ### 4.3.9.6 When the Central Directory Encryption method is used, the data descriptor record is not required, but MAY be used. If present, and bit 3 of the general purpose bit field is set to indicate its presence, the values in fields of the data descriptor record MUST be set to binary zeros. See the section on the Strong Encryption Specification for information. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.3.10 Archive decryption header: ### 4.3.10.1 The Archive Decryption Header is introduced in version 6.2 of the ZIP format specification. This record exists in support of the Central Directory Encryption Feature implemented as part of the Strong Encryption Specification as described in this document. When the Central Directory Structure is encrypted, this decryption header MUST precede the encrypted data segment. 
### 4.3.10.2 The encrypted data segment SHALL consist of the Archive extra data record (if present) and the encrypted Central Directory Structure data. The format of this data record is identical to the Decryption header record preceding compressed file data. If the central directory structure is encrypted, the location of the start of this data record is determined using the Start of Central Directory field in the Zip64 End of Central Directory record. See the section on the Strong Encryption Specification for information on the fields used in the Archive Decryption Header record. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.3.11 Archive extra data record: archive extra data signature 4 bytes (0x08064b50) extra field length 4 bytes extra field data (variable size) ### 4.3.11.1 The Archive Extra Data Record is introduced in version 6.2 of the ZIP format specification. This record MAY be used in support of the Central Directory Encryption Feature implemented as part of the Strong Encryption Specification as described in this document. When present, this record MUST immediately precede the central directory data structure. ### 4.3.11.2 The size of this data record SHALL be included in the Size of the Central Directory field in the End of Central Directory record. If the central directory structure is compressed, but not encrypted, the location of the start of this data record is determined using the Start of Central Directory field in the Zip64 End of Central Directory record. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.3.12 Central directory structure: [central directory header 1] . . . 
[central directory header n] [digital signature] File header: central file header signature 4 bytes (0x02014b50) version made by 2 bytes version needed to extract 2 bytes general purpose bit flag 2 bytes compression method 2 bytes last mod file time 2 bytes last mod file date 2 bytes crc-32 4 bytes compressed size 4 bytes uncompressed size 4 bytes file name length 2 bytes extra field length 2 bytes file comment length 2 bytes disk number start 2 bytes internal file attributes 2 bytes external file attributes 4 bytes relative offset of local header 4 bytes file name (variable size) extra field (variable size) file comment (variable size) ### 4.3.13 Digital signature: header signature 4 bytes (0x05054b50) size of data 2 bytes signature data (variable size) With the introduction of the Central Directory Encryption feature in version 6.2 of this specification, the Central Directory Structure MAY be stored both compressed and encrypted. Although not required, it is assumed when encrypting the Central Directory Structure, that it will be compressed for greater storage efficiency. Information on the Central Directory Encryption feature can be found in the section describing the Strong Encryption Specification. The Digital Signature record will be neither compressed nor encrypted. 
### 4.3.14 Zip64 end of central directory record zip64 end of central dir signature 4 bytes (0x06064b50) size of zip64 end of central directory record 8 bytes version made by 2 bytes version needed to extract 2 bytes number of this disk 4 bytes number of the disk with the start of the central directory 4 bytes total number of entries in the central directory on this disk 8 bytes total number of entries in the central directory 8 bytes size of the central directory 8 bytes offset of start of central directory with respect to the starting disk number 8 bytes zip64 extensible data sector (variable size) ### 4.3.14.1 The value stored into the "size of zip64 end of central directory record" SHOULD be the size of the remaining record and SHOULD NOT include the leading 12 bytes. Size = SizeOfFixedFields + SizeOfVariableData - 12. ### 4.3.14.2 The above record structure defines Version 1 of the zip64 end of central directory record. Version 1 was implemented in versions of this specification preceding 6.2 in support of the ZIP64 large file feature. The introduction of the Central Directory Encryption feature implemented in version 6.2 as part of the Strong Encryption Specification defines Version 2 of this record structure. Refer to the section describing the Strong Encryption Specification for details on the version 2 format for this record. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information applicable to use of Version 2 of this record. ### 4.3.14.3 Special purpose data MAY reside in the zip64 extensible data sector field following either a V1 or V2 version of this record. To ensure identification of this special purpose data it MUST include an identifying header block consisting of the following: Header ID - 2 bytes Data Size - 4 bytes The Header ID field indicates the type of data that is in the data block that follows. 
Data Size identifies the number of bytes that follow for this data block type. ### 4.3.14.4 Multiple special purpose data blocks MAY be present. Each MUST be preceded by a Header ID and Data Size field. Current mappings of Header ID values supported in this field are as defined in APPENDIX C. ### 4.3.15 Zip64 end of central directory locator zip64 end of central dir locator signature 4 bytes (0x07064b50) number of the disk with the start of the zip64 end of central directory 4 bytes relative offset of the zip64 end of central directory record 8 bytes total number of disks 4 bytes ### 4.3.16 End of central directory record: end of central dir signature 4 bytes (0x06054b50) number of this disk 2 bytes number of the disk with the start of the central directory 2 bytes total number of entries in the central directory on this disk 2 bytes total number of entries in the central directory 2 bytes size of the central directory 4 bytes offset of start of central directory with respect to the starting disk number 4 bytes .ZIP file comment length 2 bytes .ZIP file comment (variable size) ## 4.4 Explanation of fields ### 4.4.1 General notes on fields ### 4.4.1.1 All fields unless otherwise noted are unsigned and stored in Intel low-byte:high-byte, low-word:high-word order. ### 4.4.1.2 String fields are not null terminated, since the length is given explicitly. ### 4.4.1.3 The entries in the central directory MAY NOT necessarily be in the same order that files appear in the .ZIP file. ### 4.4.1.4 If one of the fields in the end of central directory record is too small to hold required data, the field SHOULD be set to -1 (0xFFFF or 0xFFFFFFFF) and the ZIP64 format record SHOULD be created. ### 4.4.1.5 The end of central directory record and the Zip64 end of central directory locator record MUST reside on the same disk when splitting or spanning an archive. 
### 4.4.2 version made by (2 bytes) ### 4.4.2.1 The upper byte indicates the compatibility of the file attribute information. If the external file attributes are compatible with MS-DOS and can be read by PKZIP for DOS version 2.04g then this value will be zero. If these attributes are not compatible, then this value will identify the host system on which the attributes are compatible. Software can use this information to determine the line record format for text files etc. ### 4.4.2.2 The current mappings are: 0 - MS-DOS and OS/2 (FAT / VFAT / FAT32 file systems) 1 - Amiga 2 - OpenVMS 3 - UNIX 4 - VM/CMS 5 - Atari ST 6 - OS/2 H.P.F.S. 7 - Macintosh 8 - Z-System 9 - CP/M 10 - Windows NTFS 11 - MVS (OS/390 - Z/OS) 12 - VSE 13 - Acorn Risc 14 - VFAT 15 - alternate MVS 16 - BeOS 17 - Tandem 18 - OS/400 19 - OS X (Darwin) 20 thru 255 - unused ### 4.4.2.3 The lower byte indicates the ZIP specification version (the version of this document) supported by the software used to encode the file. The value/10 indicates the major version number, and the value mod 10 is the minor version number. ### 4.4.3 version needed to extract (2 bytes) ### 4.4.3.1 The minimum supported ZIP specification version needed to extract the file, mapped as above. This value is based on the specific format features a ZIP program MUST support to be able to extract the file. If multiple features are applied to a file, the minimum version MUST be set to the feature having the highest value. New features or feature changes affecting the published format specification will be implemented using higher version numbers than the last published value to avoid conflict. 
### 4.4.3.2 Current minimum feature versions are as defined below: 1.0 - Default value 1.1 - File is a volume label 2.0 - File is a folder (directory) 2.0 - File is compressed using Deflate compression 2.0 - File is encrypted using traditional PKWARE encryption 2.1 - File is compressed using Deflate64(tm) 2.5 - File is compressed using PKWARE DCL Implode 2.7 - File is a patch data set 4.5 - File uses ZIP64 format extensions 4.6 - File is compressed using BZIP2 compression* 5.0 - File is encrypted using DES 5.0 - File is encrypted using 3DES 5.0 - File is encrypted using original RC2 encryption 5.0 - File is encrypted using RC4 encryption 5.1 - File is encrypted using AES encryption 5.1 - File is encrypted using corrected RC2 encryption** 5.2 - File is encrypted using corrected RC2-64 encryption** 6.1 - File is encrypted using non-OAEP key wrapping*** 6.2 - Central directory encryption 6.3 - File is compressed using LZMA 6.3 - File is compressed using PPMd+ 6.3 - File is encrypted using Blowfish 6.3 - File is encrypted using Twofish ### 4.4.3.3 Notes on version needed to extract * Early 7.x (pre-7.2) versions of PKZIP incorrectly set the version needed to extract for BZIP2 compression to be 50 when it SHOULD have been 46. ** Refer to the section on Strong Encryption Specification for additional information regarding RC2 corrections. *** Certificate encryption using non-OAEP key wrapping is the intended mode of operation for all versions beginning with 6.1. Support for OAEP key wrapping MUST only be used for backward compatibility when sending ZIP files to be opened by versions of PKZIP older than 6.1 (5.0 or 6.0). + Files compressed using PPMd MUST set the version needed to extract field to 6.3, however, not all ZIP programs enforce this and MAY be unable to decompress data files compressed using PPMd if this value is set. When using ZIP64 extensions, the corresponding value in the zip64 end of central directory record MUST also be set. 
This field SHOULD be set appropriately to indicate whether Version 1 or Version 2 format is in use. ### 4.4.4 general purpose bit flag: (2 bytes) Bit 0: If set, indicates that the file is encrypted. (For Method 6 - Imploding) Bit 1: If the compression method used was type 6, Imploding, then this bit, if set, indicates an 8K sliding dictionary was used. If clear, then a 4K sliding dictionary was used. Bit 2: If the compression method used was type 6, Imploding, then this bit, if set, indicates 3 Shannon-Fano trees were used to encode the sliding dictionary output. If clear, then 2 Shannon-Fano trees were used. (For Methods 8 and 9 - Deflating) Bit 2 Bit 1 0 0 Normal (-en) compression option was used. 0 1 Maximum (-exx/-ex) compression option was used. 1 0 Fast (-ef) compression option was used. 1 1 Super Fast (-es) compression option was used. (For Method 14 - LZMA) Bit 1: If the compression method used was type 14, LZMA, then this bit, if set, indicates an end-of-stream (EOS) marker is used to mark the end of the compressed data stream. If clear, then an EOS marker is not present and the compressed data size must be known to extract. Note: Bits 1 and 2 are undefined if the compression method is any other. Bit 3: If this bit is set, the fields crc-32, compressed size and uncompressed size are set to zero in the local header. The correct values are put in the data descriptor immediately following the compressed data. (Note: PKZIP version 2.04g for DOS only recognizes this bit for method 8 compression, newer versions of PKZIP recognize this bit for any compression method.) Bit 4: Reserved for use with method 8, for enhanced deflating. Bit 5: If this bit is set, this indicates that the file is compressed patched data. (Note: Requires PKZIP version 2.70 or greater) Bit 6: Strong encryption. If this bit is set, you MUST set the version needed to extract value to at least 50 and you MUST also set bit 0. 
If AES encryption is used, the version needed to extract value MUST be at least 51. See the section describing the Strong Encryption Specification for details. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. Bit 7: Currently unused. Bit 8: Currently unused. Bit 9: Currently unused. Bit 10: Currently unused. Bit 11: Language encoding flag (EFS). If this bit is set, the filename and comment fields for this file MUST be encoded using UTF-8. (see APPENDIX D) Bit 12: Reserved by PKWARE for enhanced compression. Bit 13: Set when encrypting the Central Directory to indicate selected data values in the Local Header are masked to hide their actual values. See the section describing the Strong Encryption Specification for details. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. Bit 14: Reserved by PKWARE for alternate streams. Bit 15: Reserved by PKWARE. 
### 4.4.5 compression method: (2 bytes) 0 - The file is stored (no compression) 1 - The file is Shrunk 2 - The file is Reduced with compression factor 1 3 - The file is Reduced with compression factor 2 4 - The file is Reduced with compression factor 3 5 - The file is Reduced with compression factor 4 6 - The file is Imploded 7 - Reserved for Tokenizing compression algorithm 8 - The file is Deflated 9 - Enhanced Deflating using Deflate64(tm) 10 - PKWARE Data Compression Library Imploding (old IBM TERSE) 11 - Reserved by PKWARE 12 - File is compressed using BZIP2 algorithm 13 - Reserved by PKWARE 14 - LZMA 15 - Reserved by PKWARE 16 - IBM z/OS CMPSC Compression 17 - Reserved by PKWARE 18 - File is compressed using IBM TERSE (new) 19 - IBM LZ77 z Architecture 20 - deprecated (use method 93 for zstd) 93 - Zstandard (zstd) Compression 94 - MP3 Compression 95 - XZ Compression 96 - JPEG variant 97 - WavPack compressed data 98 - PPMd version I, Rev 1 99 - AE-x encryption marker (see APPENDIX E) ### 4.4.5.1 Methods 1-6 are legacy algorithms and are no longer recommended for use when compressing files. ### 4.4.6 date and time fields: (2 bytes each) The date and time are encoded in standard MS-DOS format. If input came from standard input, the date and time are those at which compression was started for this data. If encrypting the central directory and general purpose bit flag 13 is set indicating masking, the value stored in the Local Header will be zero. MS-DOS time format is different from more commonly used computer time formats such as UTC. For example, MS-DOS uses year values relative to 1980 and 2 second precision. ### 4.4.7 CRC-32: (4 bytes) The CRC-32 algorithm was generously contributed by David Schwaderer and can be found in his excellent book "C Programmers Guide to NetBIOS" published by Howard W. Sams & Co. Inc. The 'magic number' for the CRC is 0xdebb20e3. 
The proper CRC pre and post conditioning is used, meaning that the CRC register is pre-conditioned with all ones (a starting value of 0xffffffff) and the value is post-conditioned by taking the one's complement of the CRC residual. If bit 3 of the general purpose flag is set, this field is set to zero in the local header and the correct value is put in the data descriptor and in the central directory. When encrypting the central directory, if the local header is not in ZIP64 format and general purpose bit flag 13 is set indicating masking, the value stored in the Local Header will be zero. ### 4.4.8 compressed size: (4 bytes) ### 4.4.9 uncompressed size: (4 bytes) The size of the file compressed (4.4.8) and uncompressed, (4.4.9) respectively. When a decryption header is present it will be placed in front of the file data and the value of the compressed file size will include the bytes of the decryption header. If bit 3 of the general purpose bit flag is set, these fields are set to zero in the local header and the correct values are put in the data descriptor and in the central directory. If an archive is in ZIP64 format and the value in this field is 0xFFFFFFFF, the size will be in the corresponding 8 byte ZIP64 extended information extra field. When encrypting the central directory, if the local header is not in ZIP64 format and general purpose bit flag 13 is set indicating masking, the value stored for the uncompressed size in the Local Header will be zero. ### 4.4.10 file name length: (2 bytes) ### 4.4.11 extra field length: (2 bytes) ### 4.4.12 file comment length: (2 bytes) The length of the file name, extra field, and comment fields respectively. The combined length of any directory record and these three fields SHOULD NOT generally exceed 65,535 bytes. If input came from standard input, the file name length is set to zero. ### 4.4.13 disk number start: (2 bytes) The number of the disk on which this file begins. 
If an archive is in ZIP64 format and the value in this field is 0xFFFF, the size will be in the corresponding 4 byte zip64 extended information extra field. ### 4.4.14 internal file attributes: (2 bytes) Bits 1 and 2 are reserved for use by PKWARE. ### 4.4.14.1 The lowest bit of this field indicates, if set, that the file is apparently an ASCII or text file. If not set, that the file apparently contains binary data. The remaining bits are unused in version 1.0. ### 4.4.14.2 The 0x0002 bit of this field indicates, if set, that a 4 byte variable record length control field precedes each logical record indicating the length of the record. The record length control field is stored in little-endian byte order. This flag is independent of text control characters, and if used in conjunction with text data, includes any control characters in the total length of the record. This value is provided for mainframe data transfer support. ### 4.4.15 external file attributes: (4 bytes) The mapping of the external attributes is host-system dependent (see 'version made by'). For MS-DOS, the low order byte is the MS-DOS directory attribute byte. If input came from standard input, this field is set to zero. ### 4.4.16 relative offset of local header: (4 bytes) This is the offset from the start of the first disk on which this file appears, to where the local header SHOULD be found. If an archive is in ZIP64 format and the value in this field is 0xFFFFFFFF, the size will be in the corresponding 8 byte zip64 extended information extra field. ### 4.4.17 file name: (Variable) ### 4.4.17.1 The name of the file, with optional relative path. The path stored MUST NOT contain a drive or device letter, or a leading slash. All slashes MUST be forward slashes '/' as opposed to backwards slashes '\' for compatibility with Amiga and UNIX file systems etc. If input came from standard input, there is no file name field. 
### 4.4.17.2 If using the Central Directory Encryption Feature and general purpose bit flag 13 is set indicating masking, the file name stored in the Local Header will not be the actual file name. A masking value consisting of a unique hexadecimal value will be stored. This value will be sequentially incremented for each file in the archive. See the section on the Strong Encryption Specification for details on retrieving the encrypted file name. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.4.18 file comment: (Variable) The comment for this file. ### 4.4.19 number of this disk: (2 bytes) The number of this disk, which contains central directory end record. If an archive is in ZIP64 format and the value in this field is 0xFFFF, the size will be in the corresponding 4 byte zip64 end of central directory field. ### 4.4.20 number of the disk with the start of the central directory: (2 bytes) The number of the disk on which the central directory starts. If an archive is in ZIP64 format and the value in this field is 0xFFFF, the size will be in the corresponding 4 byte zip64 end of central directory field. ### 4.4.21 total number of entries in the central dir on this disk: (2 bytes) The number of central directory entries on this disk. If an archive is in ZIP64 format and the value in this field is 0xFFFF, the size will be in the corresponding 8 byte zip64 end of central directory field. ### 4.4.22 total number of entries in the central dir: (2 bytes) The total number of files in the .ZIP file. If an archive is in ZIP64 format and the value in this field is 0xFFFF, the size will be in the corresponding 8 byte zip64 end of central directory field. ### 4.4.23 size of the central directory: (4 bytes) The size (in bytes) of the entire central directory. 
If an archive is in ZIP64 format and the value in this field is 0xFFFFFFFF, the size will be in the corresponding 8 byte zip64 end of central directory field. ### 4.4.24 offset of start of central directory with respect to the starting disk number: (4 bytes) Offset of the start of the central directory on the disk on which the central directory starts. If an archive is in ZIP64 format and the value in this field is 0xFFFFFFFF, the size will be in the corresponding 8 byte zip64 end of central directory field. ### 4.4.25 .ZIP file comment length: (2 bytes) The length of the comment for this .ZIP file. ### 4.4.26 .ZIP file comment: (Variable) The comment for this .ZIP file. ZIP file comment data is stored unsecured. No encryption or data authentication is applied to this area at this time. Confidential information SHOULD NOT be stored in this section. ### 4.4.27 zip64 extensible data sector (variable size) (currently reserved for use by PKWARE) ### 4.4.28 extra field: (Variable) This SHOULD be used for storage expansion. If additional information needs to be stored within a ZIP file for special application or platform needs, it SHOULD be stored here. Programs supporting earlier versions of this specification can then safely skip the file, and find the next file or header. This field will be 0 length in version 1.0. Existing extra fields are defined in the section Extensible data fields that follows. ## 4.5 Extensible data fields ### 4.5.1 In order to allow different programs and different types of information to be stored in the 'extra' field in .ZIP files, the following structure MUST be used for all programs storing data in this field: header1+data1 + header2+data2 . . . Each header MUST consist of: Header ID - 2 bytes Data Size - 2 bytes Note: all fields stored in Intel low-byte/high-byte order. The Header ID field indicates the type of data that is in the following data block. Header IDs of 0 thru 31 are reserved for use by PKWARE. 
The remaining IDs can be used by third party vendors for proprietary usage. ### 4.5.2 The current Header ID mappings defined by PKWARE are: 0x0001 Zip64 extended information extra field 0x0007 AV Info 0x0008 Reserved for extended language encoding data (PFS) (see APPENDIX D) 0x0009 OS/2 0x000a NTFS 0x000c OpenVMS 0x000d UNIX 0x000e Reserved for file stream and fork descriptors 0x000f Patch Descriptor 0x0014 PKCS#7 Store for X.509 Certificates 0x0015 X.509 Certificate ID and Signature for individual file 0x0016 X.509 Certificate ID for Central Directory 0x0017 Strong Encryption Header 0x0018 Record Management Controls 0x0019 PKCS#7 Encryption Recipient Certificate List 0x0020 Reserved for Timestamp record 0x0021 Policy Decryption Key Record 0x0022 Smartcrypt Key Provider Record 0x0023 Smartcrypt Policy Key Data Record 0x0065 IBM S/390 (Z390), AS/400 (I400) attributes - uncompressed 0x0066 Reserved for IBM S/390 (Z390), AS/400 (I400) attributes - compressed 0x4690 POSZIP 4690 (reserved) ### 4.5.3 -Zip64 Extended Information Extra Field (0x0001): The following is the layout of the zip64 extended information "extra" block. If one of the size or offset fields in the Local or Central directory record is too small to hold the required data, a Zip64 extended information record is created. The order of the fields in the zip64 extended information record is fixed, but the fields MUST only appear if the corresponding Local or Central directory record field is set to 0xFFFF or 0xFFFFFFFF. Note: all fields stored in Intel low-byte/high-byte order. 
Value Size Description ----- ---- ----------- (ZIP64) 0x0001 2 bytes Tag for this "extra" block type Size 2 bytes Size of this "extra" block Original Size 8 bytes Original uncompressed file size Compressed Size 8 bytes Size of compressed data Relative Header Offset 8 bytes Offset of local header record Disk Start Number 4 bytes Number of the disk on which this file starts This entry in the Local header MUST include BOTH original and compressed file size fields. If encrypting the central directory and bit 13 of the general purpose bit flag is set indicating masking, the value stored in the Local Header for the original file size will be zero. ### 4.5.4 -OS/2 Extra Field (0x0009): The following is the layout of the OS/2 attributes "extra" block. (Last Revision 09/05/95) Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (OS/2) 0x0009 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block BSize 4 bytes Uncompressed Block Size CType 2 bytes Compression type EACRC 4 bytes CRC value for uncompress block (var) variable Compressed block The OS/2 extended attribute structure (FEA2LIST) is compressed and then stored in its entirety within this structure. There will only ever be one "block" of data in VarFields[]. ### 4.5.5 -NTFS Extra Field (0x000a): The following is the layout of the NTFS attributes "extra" block. (Note: At this time the Mtime, Atime and Ctime values MAY be used on any WIN32 system.) Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (NTFS) 0x000a 2 bytes Tag for this "extra" block type TSize 2 bytes Size of the total "extra" block Reserved 4 bytes Reserved for future use Tag1 2 bytes NTFS attribute tag value #1 Size1 2 bytes Size of attribute #1, in bytes (var) Size1 Attribute #1 data . . . 
TagN 2 bytes NTFS attribute tag value #N SizeN 2 bytes Size of attribute #N, in bytes (var) SizeN Attribute #N data For NTFS, values for Tag1 through TagN are as follows: (currently only one set of attributes is defined for NTFS) Tag Size Description ----- ---- ----------- 0x0001 2 bytes Tag for attribute #1 Size1 2 bytes Size of attribute #1, in bytes Mtime 8 bytes File last modification time Atime 8 bytes File last access time Ctime 8 bytes File creation time ### 4.5.6 -OpenVMS Extra Field (0x000c): The following is the layout of the OpenVMS attributes "extra" block. Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (VMS) 0x000c 2 bytes Tag for this "extra" block type TSize 2 bytes Size of the total "extra" block CRC 4 bytes 32-bit CRC for remainder of the block Tag1 2 bytes OpenVMS attribute tag value #1 Size1 2 bytes Size of attribute #1, in bytes (var) Size1 Attribute #1 data . . . TagN 2 bytes OpenVMS attribute tag value #N SizeN 2 bytes Size of attribute #N, in bytes (var) SizeN Attribute #N data OpenVMS Extra Field Rules: ### 4.5.6.1. There will be one or more attributes present, which will each be preceded by the above TagX & SizeX values. These values are identical to the ATR$C_XXXX and ATR$S_XXXX constants which are defined in ATR.H under OpenVMS C. Neither of these values will ever be zero. ### 4.5.6.2. No word alignment or padding is performed. ### 4.5.6.3. A well-behaved PKZIP/OpenVMS program SHOULD NOT produce more than one sub-block with the same TagX value. Also, there MUST NOT be more than one "extra" block of type 0x000c in a particular directory record. ### 4.5.7 -UNIX Extra Field (0x000d): The following is the layout of the UNIX "extra" block. Note: all fields are stored in Intel low-byte/high-byte order. 
Value Size Description ----- ---- ----------- (UNIX) 0x000d 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block Atime 4 bytes File last access time Mtime 4 bytes File last modification time Uid 2 bytes File user ID Gid 2 bytes File group ID (var) variable Variable length data field The variable length data field will contain file type specific data. Currently the only values allowed are the original "linked to" file names for hard or symbolic links, and the major and minor device node numbers for character and block device nodes. Since device nodes cannot be either symbolic or hard links, only one set of variable length data is stored. Link files will have the name of the original file stored. This name is NOT NULL terminated. Its size can be determined by checking TSize - 12. Device entries will have eight bytes stored as two 4 byte entries (in little endian format). The first entry will be the major device number, and the second the minor device number. ### 4.5.8 -PATCH Descriptor Extra Field (0x000f): ### 4.5.8.1 The following is the layout of the Patch Descriptor "extra" block. Note: all fields stored in Intel low-byte/high-byte order. 
Value Size Description ----- ---- ----------- (Patch) 0x000f 2 bytes Tag for this "extra" block type TSize 2 bytes Size of the total "extra" block Version 2 bytes Version of the descriptor Flags 4 bytes Actions and reactions (see below) OldSize 4 bytes Size of the file about to be patched OldCRC 4 bytes 32-bit CRC of the file to be patched NewSize 4 bytes Size of the resulting file NewCRC 4 bytes 32-bit CRC of the resulting file ### 4.5.8.2 Actions and reactions Bits Description ---- ---------------- 0 Use for auto detection 1 Treat as a self-patch 2-3 RESERVED 4-5 Action (see below) 6-7 RESERVED 8-9 Reaction (see below) to absent file 10-11 Reaction (see below) to newer file 12-13 Reaction (see below) to unknown file 14-15 RESERVED 16-31 RESERVED ### 4.5.8.2.1 Actions Action Value ------ ----- none 0 add 1 delete 2 patch 3 ### 4.5.8.2.2 Reactions Reaction Value -------- ----- ask 0 skip 1 ignore 2 fail 3 ### 4.5.8.3 Patch support is provided by PKPatchMaker(tm) technology and is covered under U.S. Patents and Patents Pending. The use or implementation in a product of certain technological aspects set forth in the current APPNOTE, including those with regard to strong encryption or patching requires a license from PKWARE. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.5.9 -PKCS#7 Store for X.509 Certificates (0x0014): This field MUST contain information about each of the certificates files MAY be signed with. When the Central Directory Encryption feature is enabled for a ZIP file, this record will appear in the Archive Extra Data Record, otherwise it will appear in the first central directory record and will be ignored in any other record. Note: all fields stored in Intel low-byte/high-byte order. 
Value Size Description ----- ---- ----------- (Store) 0x0014 2 bytes Tag for this "extra" block type TSize 2 bytes Size of the store data TData TSize Data about the store ### 4.5.10 -X.509 Certificate ID and Signature for individual file (0x0015): This field contains the information about which certificate in the PKCS#7 store was used to sign a particular file. It also contains the signature data. This field can appear multiple times, but can only appear once per certificate. Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (CID) 0x0015 2 bytes Tag for this "extra" block type TSize 2 bytes Size of data that follows TData TSize Signature Data ### 4.5.11 -X.509 Certificate ID and Signature for central directory (0x0016): This field contains the information about which certificate in the PKCS#7 store was used to sign the central directory structure. When the Central Directory Encryption feature is enabled for a ZIP file, this record will appear in the Archive Extra Data Record, otherwise it will appear in the first central directory record. Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (CDID) 0x0016 2 bytes Tag for this "extra" block type TSize 2 bytes Size of data that follows TData TSize Data ### 4.5.12 -Strong Encryption Header (0x0017): Value Size Description ----- ---- ----------- 0x0017 2 bytes Tag for this "extra" block type TSize 2 bytes Size of data that follows Format 2 bytes Format definition for this record AlgID 2 bytes Encryption algorithm identifier Bitlen 2 bytes Bit length of encryption key Flags 2 bytes Processing flags CertData TSize-8 Certificate decryption extra field data (refer to the explanation for CertData in the section describing the Certificate Processing Method under the Strong Encryption Specification) See the section describing the Strong Encryption Specification for details. 
Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.5.13 -Record Management Controls (0x0018): Value Size Description ----- ---- ----------- (Rec-CTL) 0x0018 2 bytes Tag for this "extra" block type CSize 2 bytes Size of total extra block data Tag1 2 bytes Record control attribute 1 Size1 2 bytes Size of attribute 1, in bytes Data1 Size1 Attribute 1 data . . . TagN 2 bytes Record control attribute N SizeN 2 bytes Size of attribute N, in bytes DataN SizeN Attribute N data ### 4.5.14 -PKCS#7 Encryption Recipient Certificate List (0x0019): This field MAY contain information about each of the certificates used in encryption processing and it can be used to identify who is allowed to decrypt encrypted files. This field SHOULD only appear in the archive extra data record. This field is not required and serves only to aid archive modifications by preserving public encryption key data. Individual security requirements may dictate that this data be omitted to deter information exposure. Note: all fields stored in Intel low-byte/high-byte order. Value Size Description ----- ---- ----------- (CStore) 0x0019 2 bytes Tag for this "extra" block type TSize 2 bytes Size of the store data TData TSize Data about the store TData: Value Size Description ----- ---- ----------- Version 2 bytes Format version number - MUST be 0x0001 at this time CStore (var) PKCS#7 data blob See the section describing the Strong Encryption Specification for details. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ### 4.5.15 -MVS Extra Field (0x0065): The following is the layout of the MVS "extra" block. Note: Some fields are stored in Big Endian format. All text is in EBCDIC format unless otherwise specified. 
Value Size Description ----- ---- ----------- (MVS) 0x0065 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block ID 4 bytes EBCDIC "Z390" 0xE9F3F9F0 or "T4MV" for TargetFour (var) TSize-4 Attribute data (see APPENDIX B) ### 4.5.16 -OS/400 Extra Field (0x0065): The following is the layout of the OS/400 "extra" block. Note: Some fields are stored in Big Endian format. All text is in EBCDIC format unless otherwise specified. Value Size Description ----- ---- ----------- (OS400) 0x0065 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block ID 4 bytes EBCDIC "I400" 0xC9F4F0F0 or "T4MV" for TargetFour (var) TSize-4 Attribute data (see APPENDIX A) ### 4.5.17 -Policy Decryption Key Record Extra Field (0x0021): The following is the layout of the Policy Decryption Key "extra" block. TData is a variable length, variable content field. It holds information about encryptions and/or encryption key sources. Contact PKWARE for information on current TData structures. Information in this "extra" block may alternatively be placed within comment fields. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. Value Size Description ----- ---- ----------- 0x0021 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block TData TSize Data about the key ### 4.5.18 -Key Provider Record Extra Field (0x0022): The following is the layout of the Key Provider "extra" block. TData is a variable length, variable content field. It holds information about encryptions and/or encryption key sources. Contact PKWARE for information on current TData structures. Information in this "extra" block may alternatively be placed within comment fields. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. 
Value Size Description ----- ---- ----------- 0x0022 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block TData TSize Data about the key ### 4.5.19 -Policy Key Data Record Extra Field (0x0023): The following is the layout of the Policy Key Data "extra" block. TData is a variable length, variable content field. It holds information about encryptions and/or encryption key sources. Contact PKWARE for information on current TData structures. Information in this "extra" block may alternatively be placed within comment fields. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. Value Size Description ----- ---- ----------- 0x0023 2 bytes Tag for this "extra" block type TSize 2 bytes Size for the following data block TData TSize Data about the key ## 4.6 Third Party Mappings ### 4.6.1 Third party mappings commonly used are: 0x07c8 Macintosh 0x2605 ZipIt Macintosh 0x2705 ZipIt Macintosh 1.3.5+ 0x2805 ZipIt Macintosh 1.3.5+ 0x334d Info-ZIP Macintosh 0x4341 Acorn/SparkFS 0x4453 Windows NT security descriptor (binary ACL) 0x4704 VM/CMS 0x470f MVS 0x4b46 FWKCS MD5 (see below) 0x4c41 OS/2 access control list (text ACL) 0x4d49 Info-ZIP OpenVMS 0x4f4c Xceed original location extra field 0x5356 AOS/VS (ACL) 0x5455 extended timestamp 0x554e Xceed unicode extra field 0x5855 Info-ZIP UNIX (original, also OS/2, NT, etc) 0x6375 Info-ZIP Unicode Comment Extra Field 0x6542 BeOS/BeBox 0x7075 Info-ZIP Unicode Path Extra Field 0x756e ASi UNIX 0x7855 Info-ZIP UNIX (new) 0xa11e Data Stream Alignment (Apache Commons-Compress) 0xa220 Microsoft Open Packaging Growth Hint 0xfd4a SMS/QDOS 0x9901 AE-x encryption structure (see APPENDIX E) 0x9902 unknown Detailed descriptions of Extra Fields defined by third party mappings will be documented as information on these data structures is made available to PKWARE. 
PKWARE does not guarantee the accuracy of any published third party data. ### 4.6.2 Third-party Extra Fields MUST include a Header ID using the format defined in the section of this document titled Extensible Data Fields (section 4.5). The Data Size field indicates the size of the following data block. Programs can use this value to skip to the next header block, passing over any data blocks that are not of interest. Note: As stated above, the size of the entire .ZIP file header, including the file name, comment, and extra field SHOULD NOT exceed 64K in size. ### 4.6.3 In case two different programs appropriate the same Header ID value, it is strongly recommended that each program SHOULD place a unique signature of at least two bytes in size (and preferably 4 bytes or bigger) at the start of each data area. Every program SHOULD verify that its unique signature is present, in addition to the Header ID value being correct, before assuming that it is a block of known type. Third-party Mappings: ### 4.6.4 -ZipIt Macintosh Extra Field (long) (0x2605): The following is the layout of the ZipIt extra block for Macintosh. The local-header and central-header versions are identical. This block MUST be present if the file is stored MacBinary-encoded and it SHOULD NOT be used if the file is not stored MacBinary-encoded. Value Size Description ----- ---- ----------- (Mac2) 0x2605 Short tag for this extra block type TSize Short total data size for this block "ZPIT" beLong extra-field signature FnLen Byte length of FileName FileName variable full Macintosh filename FileType Byte[4] four-byte Mac file type string Creator Byte[4] four-byte Mac creator string ### 4.6.5 -ZipIt Macintosh Extra Field (short, for files) (0x2705): The following is the layout of a shortened variant of the ZipIt extra block for Macintosh (without "full name" entry). This variant is used by ZipIt 1.3.5 and newer for entries of files (not directories) that do not have a MacBinary encoded file. 
The local-header and central-header versions are identical. Value Size Description ----- ---- ----------- (Mac2b) 0x2705 Short tag for this extra block type TSize Short total data size for this block (12) "ZPIT" beLong extra-field signature FileType Byte[4] four-byte Mac file type string Creator Byte[4] four-byte Mac creator string fdFlags beShort attributes from FInfo.frFlags, MAY be omitted 0x0000 beShort reserved, MAY be omitted ### 4.6.6 -ZipIt Macintosh Extra Field (short, for directories) (0x2805): The following is the layout of a shortened variant of the ZipIt extra block for Macintosh used only for directory entries. This variant is used by ZipIt 1.3.5 and newer to save some optional Mac-specific information about directories. The local-header and central-header versions are identical. Value Size Description ----- ---- ----------- (Mac2c) 0x2805 Short tag for this extra block type TSize Short total data size for this block (12) "ZPIT" beLong extra-field signature frFlags beShort attributes from DInfo.frFlags, MAY be omitted View beShort ZipIt view flag, MAY be omitted The View field specifies ZipIt-internal settings as follows: Bits of the Flags: bit 0 if set, the folder is shown expanded (open) when the archive contents are viewed in ZipIt. bits 1-15 reserved, zero; ### 4.6.7 -FWKCS MD5 Extra Field (0x4b46): The FWKCS Contents_Signature System, used in automatically identifying files independent of file name, optionally adds and uses an extra field to support the rapid creation of an enhanced contents_signature: Header ID = 0x4b46 Data Size = 0x0013 Preface = 'M','D','5' followed by 16 bytes containing the uncompressed file's 128_bit MD5 hash(1), low byte first. When FWKCS revises a .ZIP file central directory to add this extra field for a file, it also replaces the central directory entry for that file's uncompressed file length with a measured value. FWKCS provides an option to strip this extra field, if present, from a .ZIP file central directory. 
In adding this extra field, FWKCS preserves .ZIP file Authenticity Verification; if stripping this extra field, FWKCS preserves all versions of AV through PKZIP version 2.04g. FWKCS, and FWKCS Contents_Signature System, are trademarks of Frederick W. Kantor. (1) R. Rivest, RFC1321.TXT, MIT Laboratory for Computer Science and RSA Data Security, Inc., April 1992. ll.76-77: "The MD5 algorithm is being placed in the public domain for review and possible adoption as a standard." ### 4.6.8 -Info-ZIP Unicode Comment Extra Field (0x6375): Stores the UTF-8 version of the file comment as stored in the central directory header. (Last Revision 20070912) Value Size Description ----- ---- ----------- (UCom) 0x6375 Short tag for this extra block type ("uc") TSize Short total data size for this block Version 1 byte version of this extra field, currently 1 ComCRC32 4 bytes Comment Field CRC32 Checksum UnicodeCom Variable UTF-8 version of the entry comment Currently Version is set to the number 1. If there is a need to change this field, the version will be incremented. Changes MAY NOT be backward compatible so this extra field SHOULD NOT be used if the version is not recognized. The ComCRC32 is the standard zip CRC32 checksum of the File Comment field in the central directory header. This is used to verify that the comment field has not changed since the Unicode Comment extra field was created. This can happen if a utility changes the File Comment field but does not update the UTF-8 Comment extra field. If the CRC check fails, this Unicode Comment extra field SHOULD be ignored and the File Comment field in the header SHOULD be used instead. The UnicodeCom field is the UTF-8 version of the File Comment field in the header. As UnicodeCom is defined to be UTF-8, no UTF-8 byte order mark (BOM) is used. The length of this field is determined by subtracting the size of the previous fields from TSize. 
If both the File Name and Comment fields are UTF-8, the new General Purpose Bit Flag, bit 11 (Language encoding flag (EFS)), can be used to indicate both the header File Name and Comment fields are UTF-8 and, in this case, the Unicode Path and Unicode Comment extra fields are not needed and SHOULD NOT be created. Note that, for backward compatibility, bit 11 SHOULD only be used if the native character set of the paths and comments being zipped up are already in UTF-8. It is expected that the same file comment storage method, either general purpose bit 11 or extra fields, be used in both the Local and Central Directory Header for a file. ### 4.6.9 -Info-ZIP Unicode Path Extra Field (0x7075): Stores the UTF-8 version of the file name field as stored in the local header and central directory header. (Last Revision 20070912) Value Size Description ----- ---- ----------- (UPath) 0x7075 Short tag for this extra block type ("up") TSize Short total data size for this block Version 1 byte version of this extra field, currently 1 NameCRC32 4 bytes File Name Field CRC32 Checksum UnicodeName Variable UTF-8 version of the entry File Name Currently Version is set to the number 1. If there is a need to change this field, the version will be incremented. Changes MAY NOT be backward compatible so this extra field SHOULD NOT be used if the version is not recognized. The NameCRC32 is the standard zip CRC32 checksum of the File Name field in the header. This is used to verify that the header File Name field has not changed since the Unicode Path extra field was created. This can happen if a utility renames the File Name but does not update the UTF-8 path extra field. If the CRC check fails, this UTF-8 Path Extra Field SHOULD be ignored and the File Name field in the header SHOULD be used instead. The UnicodeName is the UTF-8 version of the contents of the File Name field in the header. As UnicodeName is defined to be UTF-8, no UTF-8 byte order mark (BOM) is used. 
The length of this field is determined by subtracting the size of the previous fields from TSize. If both the File Name and Comment fields are UTF-8, the new General Purpose Bit Flag, bit 11 (Language encoding flag (EFS)), can be used to indicate that both the header File Name and Comment fields are UTF-8 and, in this case, the Unicode Path and Unicode Comment extra fields are not needed and SHOULD NOT be created. Note that, for backward compatibility, bit 11 SHOULD only be used if the native character set of the paths and comments being zipped up are already in UTF-8. It is expected that the same file name storage method, either general purpose bit 11 or extra fields, be used in both the Local and Central Directory Header for a file. ### 4.6.10 -Microsoft Open Packaging Growth Hint (0xa220): Value Size Description ----- ---- ----------- 0xa220 Short tag for this extra block type TSize Short size of Sig + PadVal + Padding Sig Short verification signature (A028) PadVal Short Initial padding value Padding variable filled with NULL characters ### 4.6.11 -Data Stream Alignment (Apache Commons-Compress) (0xa11e): (per Zbynek Vyskovsky) Defines alignment of data stream of this entry within the zip archive. Additionally, indicates whether the compression method should be kept when re-compressing the zip file. The purpose of this extra field is to align specific resources to word or page boundaries so they can be easily mapped into memory. Value Size Description ----- ---- ----------- 0xa11e Short tag for this extra block type TSize Short total data size for this block (2+padding) alignment Short required alignment and indicator 0x00 Variable padding The alignment field (lower 15 bits) defines the minimal alignment required by the data stream. Bit 15 of alignment field indicates whether the compression method of this entry can be changed when recompressing the zip file. The value 0 means the compression method should not be changed. 
The value 1 indicates the compression method may be changed. The padding field contains padding to ensure the correct alignment. It can be changed at any time when the offset or required alignment changes. (see https://issues.apache.org/jira/browse/COMPRESS-391) ## 4.7 Manifest Files ### 4.7.1 Applications using ZIP files MAY have a need for additional information that MUST be included with the files placed into a ZIP file. Application specific information that cannot be stored using the defined ZIP storage records SHOULD be stored using the extensible Extra Field convention defined in this document. However, some applications MAY use a manifest file as a means for storing additional information. One example is the META-INF/MANIFEST.MF file used in ZIP formatted files having the .JAR extension (JAR files). ### 4.7.2 A manifest file is a file created for the application process that requires this information. A manifest file MAY be of any file type required by the defining application process. It is placed within the same ZIP file as files to which this information applies. By convention, this file is typically the first file placed into the ZIP file and it MAY include a defined directory path. ### 4.7.3 Manifest files MAY be compressed or encrypted as needed for application processing of the files inside the ZIP files. Manifest files are outside of the scope of this specification. ## 5.0 Explanation of compression methods ## 5.1 UnShrinking - Method 1 ### 5.1.1 Shrinking is a Dynamic Ziv-Lempel-Welch compression algorithm with partial clearing. The initial code size is 9 bits, and the maximum code size is 13 bits. Shrinking differs from conventional Dynamic Ziv-Lempel-Welch implementations in several respects: ### 5.1.2 The code size is controlled by the compressor, and is not automatically increased when codes larger than the current code size are created (but not necessarily used). 
When the decompressor encounters the code sequence 256 (decimal) followed by 1, it SHOULD increase the code size read from the input stream to the next bit size. No blocking of the codes is performed, so the next code at the increased size SHOULD be read from the input stream immediately after where the previous code at the smaller bit size was read. Again, the decompressor SHOULD NOT increase the code size used until the sequence 256,1 is encountered. ### 5.1.3 When the table becomes full, total clearing is not performed. Rather, when the compressor emits the code sequence 256,2 (decimal), the decompressor SHOULD clear all leaf nodes from the Ziv-Lempel tree, and continue to use the current code size. The nodes that are cleared from the Ziv-Lempel tree are then re-used, with the lowest code value re-used first, and the highest code value re-used last. The compressor can emit the sequence 256,2 at any time. ## 5.2 Expanding - Methods 2-5 ### 5.2.1 The Reducing algorithm is actually a combination of two distinct algorithms. The first algorithm compresses repeated byte sequences, and the second algorithm takes the compressed stream from the first algorithm and applies a probabilistic compression method. ### 5.2.2 The probabilistic compression stores an array of 'follower sets' S(j), for j=0 to 255, corresponding to each possible ASCII character. Each set contains between 0 and 32 characters, to be denoted as S(j)[0],...,S(j)[m], where m<32. The sets are stored at the beginning of the data area for a Reduced file, in reverse order, with S(255) first, and S(0) last. ### 5.2.3 The sets are encoded as { N(j), S(j)[0],...,S(j)[N(j)-1] }, where N(j) is the size of set S(j). N(j) can be 0, in which case the follower set for S(j) is empty. Each N(j) value is encoded in 6 bits, followed by N(j) eight bit character values corresponding to S(j)[0] to S(j)[N(j)-1] respectively. If N(j) is 0, then no values for S(j) are stored, and the value for N(j-1) immediately follows. 
### 5.2.4 Immediately after the follower sets, is the compressed data stream. The compressed data stream can be interpreted for the probabilistic decompression as follows: let Last-Character <- 0. loop until done if the follower set S(Last-Character) is empty then read 8 bits from the input stream, and copy this value to the output stream. otherwise if the follower set S(Last-Character) is non-empty then read 1 bit from the input stream. if this bit is not zero then read 8 bits from the input stream, and copy this value to the output stream. otherwise if this bit is zero then read B(N(Last-Character)) bits from the input stream, and assign this value to I. Copy the value of S(Last-Character)[I] to the output stream. assign the last value placed on the output stream to Last-Character. end loop B(N(j)) is defined as the minimal number of bits required to encode the value N(j)-1. ### 5.2.5 The decompressed stream from above can then be expanded to re-create the original file as follows: let State <- 0. loop until done read 8 bits from the input stream into C. case State of 0: if C is not equal to DLE (144 decimal) then copy C to the output stream. otherwise if C is equal to DLE then let State <- 1. 1: if C is non-zero then let V <- C. let Len <- L(V) let State <- F(Len). otherwise if C is zero then copy the value 144 (decimal) to the output stream. let State <- 0 2: let Len <- Len + C let State <- 3. 3: move backwards D(V,C) bytes in the output stream (if this position is before the start of the output stream, then assume that all the data before the start of the output stream is filled with zeros). copy Len+3 bytes from this position to the output stream. let State <- 0. end case end loop The functions F,L, and D are dependent on the 'compression factor', 1 through 4, and are defined as follows: For compression factor 1: L(X) equals the lower 7 bits of X. F(X) equals 2 if X equals 127 otherwise F(X) equals 3. D(X,Y) equals the (upper 1 bit of X) * 256 + Y + 1. 
For compression factor 2: L(X) equals the lower 6 bits of X. F(X) equals 2 if X equals 63 otherwise F(X) equals 3. D(X,Y) equals the (upper 2 bits of X) * 256 + Y + 1. For compression factor 3: L(X) equals the lower 5 bits of X. F(X) equals 2 if X equals 31 otherwise F(X) equals 3. D(X,Y) equals the (upper 3 bits of X) * 256 + Y + 1. For compression factor 4: L(X) equals the lower 4 bits of X. F(X) equals 2 if X equals 15 otherwise F(X) equals 3. D(X,Y) equals the (upper 4 bits of X) * 256 + Y + 1. ## 5.3 Imploding - Method 6 ### 5.3.1 The Imploding algorithm is actually a combination of two distinct algorithms. The first algorithm compresses repeated byte sequences using a sliding dictionary. The second algorithm is used to compress the encoding of the sliding dictionary output, using multiple Shannon-Fano trees. ### 5.3.2 The Imploding algorithm can use a 4K or 8K sliding dictionary size. The dictionary size used can be determined by bit 1 in the general purpose flag word; a 0 bit indicates a 4K dictionary while a 1 bit indicates an 8K dictionary. ### 5.3.3 The Shannon-Fano trees are stored at the start of the compressed file. The number of trees stored is defined by bit 2 in the general purpose flag word; a 0 bit indicates two trees stored, a 1 bit indicates three trees are stored. If 3 trees are stored, the first Shannon-Fano tree represents the encoding of the Literal characters, the second tree represents the encoding of the Length information, the third represents the encoding of the Distance information. When 2 Shannon-Fano trees are stored, the Length tree is stored first, followed by the Distance tree. ### 5.3.4 The Literal Shannon-Fano tree, if present is used to represent the entire ASCII character set, and contains 256 values. This tree is used to compress any data not compressed by the sliding dictionary algorithm. When this tree is present, the Minimum Match Length for the sliding dictionary is 3. 
If this tree is not present, the Minimum Match Length is 2. ### 5.3.5 The Length Shannon-Fano tree is used to compress the Length part of the (length,distance) pairs from the sliding dictionary output. The Length tree contains 64 values, ranging from the Minimum Match Length, to 63 plus the Minimum Match Length. ### 5.3.6 The Distance Shannon-Fano tree is used to compress the Distance part of the (length,distance) pairs from the sliding dictionary output. The Distance tree contains 64 values, ranging from 0 to 63, representing the upper 6 bits of the distance value. The distance values themselves will be between 0 and the sliding dictionary size, either 4K or 8K. ### 5.3.7 The Shannon-Fano trees themselves are stored in a compressed format. The first byte of the tree data represents the number of bytes of data representing the (compressed) Shannon-Fano tree minus 1. The remaining bytes represent the Shannon-Fano tree data encoded as: High 4 bits: Number of values at this bit length + 1. (1 - 16) Low 4 bits: Bit Length needed to represent value + 1. (1 - 16) ### 5.3.8 The Shannon-Fano codes can be constructed from the bit lengths using the following algorithm: 1) Sort the Bit Lengths in ascending order, while retaining the order of the original lengths stored in the file. 2) Generate the Shannon-Fano trees: Code <- 0 CodeIncrement <- 0 LastBitLength <- 0 i <- number of Shannon-Fano codes - 1 (either 255 or 63) loop while i >= 0 Code = Code + CodeIncrement if BitLength(i) <> LastBitLength then LastBitLength=BitLength(i) CodeIncrement = 1 shifted left (16 - LastBitLength) ShannonCode(i) = Code i <- i - 1 end loop 3) Reverse the order of all the bits in the above ShannonCode() vector, so that the most significant bit becomes the least significant bit. For example, the value 0x1234 (hex) would become 0x2C48 (hex). 4) Restore the order of Shannon-Fano codes as originally stored within the file. 
Example: This example will show the encoding of a Shannon-Fano tree of size 8. Notice that the actual Shannon-Fano trees used for Imploding are either 64 or 256 entries in size. Example: 0x02, 0x42, 0x01, 0x13 The first byte indicates 3 values in this table. Decoding the bytes: 0x42 = 5 codes of 3 bits long 0x01 = 1 code of 2 bits long 0x13 = 2 codes of 4 bits long This would generate the original bit length array of: (3, 3, 3, 3, 3, 2, 4, 4) There are 8 codes in this table for the values 0 thru 7. Using the algorithm to obtain the Shannon-Fano codes produces: Reversed Order Original Val Sorted Constructed Code Value Restored Length --- ------ ----------------- -------- -------- ------ 0: 2 1100000000000000 11 101 3 1: 3 1010000000000000 101 001 3 2: 3 1000000000000000 001 110 3 3: 3 0110000000000000 110 010 3 4: 3 0100000000000000 010 100 3 5: 3 0010000000000000 100 11 2 6: 4 0001000000000000 1000 1000 4 7: 4 0000000000000000 0000 0000 4 The values in the Val, Order Restored and Original Length columns now represent the Shannon-Fano encoding tree that can be used for decoding the Shannon-Fano encoded data. How to parse the variable length Shannon-Fano values from the data stream is beyond the scope of this document. (See the references listed at the end of this document for more information.) However, traditional decoding schemes used for Huffman variable length decoding, such as the Greenlaw algorithm, can be successfully applied. ### 5.3.9 The compressed data stream begins immediately after the compressed Shannon-Fano data. The compressed data stream can be interpreted as follows: loop until done read 1 bit from input stream. if this bit is non-zero then (encoded data is literal data) if Literal Shannon-Fano tree is present read and decode character using Literal Shannon-Fano tree. otherwise read 8 bits from input stream. copy character to the output stream. 
otherwise (encoded data is sliding dictionary match) if 8K dictionary size read 7 bits for offset Distance (lower 7 bits of offset). otherwise read 6 bits for offset Distance (lower 6 bits of offset). using the Distance Shannon-Fano tree, read and decode the upper 6 bits of the Distance value. using the Length Shannon-Fano tree, read and decode the Length value. Length <- Length + Minimum Match Length if Length = 63 + Minimum Match Length read 8 bits from the input stream, add this value to Length. move backwards Distance+1 bytes in the output stream, and copy Length characters from this position to the output stream. (if this position is before the start of the output stream, then assume that all the data before the start of the output stream is filled with zeros). end loop ## 5.4 Tokenizing - Method 7 ### 5.4.1 This method is not used by PKZIP. ## 5.5 Deflating - Method 8 ### 5.5.1 The Deflate algorithm is similar to the Implode algorithm using a sliding dictionary of up to 32K with secondary compression from Huffman/Shannon-Fano codes. ### 5.5.2 The compressed data is stored in blocks with a header describing the block and the Huffman codes used in the data block. The header format is as follows: Bit 0: Last Block bit This bit is set to 1 if this is the last compressed block in the data. Bits 1-2: Block type 00 (0) - Block is stored - All stored data is byte aligned. Skip bits until next byte, then next word = block length, followed by the one's complement of the block length word. Remaining data in block is the stored data. 01 (1) - Use fixed Huffman codes for literal and distance codes. Lit Code Bits Dist Code Bits --------- ---- --------- ---- 0 - 143 8 0 - 31 5 144 - 255 9 256 - 279 7 280 - 287 8 Literal codes 286-287 and distance codes 30-31 are never used but participate in the huffman construction. 10 (2) - Dynamic Huffman codes. (See expanding Huffman codes) 11 (3) - Reserved - Flag an "Error in compressed data" if seen. 
### 5.5.3 Expanding Huffman Codes If the data block is stored with dynamic Huffman codes, the Huffman codes are sent in the following compressed format: 5 Bits: # of Literal codes sent - 256 (256 - 286) All other codes are never sent. 5 Bits: # of Dist codes - 1 (1 - 32) 4 Bits: # of Bit Length codes - 3 (3 - 19) The Huffman codes are sent as bit lengths and the codes are built as described in the implode algorithm. The bit lengths themselves are compressed with Huffman codes. There are 19 bit length codes: 0 - 15: Represent bit lengths of 0 - 15 16: Copy the previous bit length 3 - 6 times. The next 2 bits indicate repeat length (0 = 3, ... ,3 = 6) Example: Codes 8, 16 (+2 bits 11), 16 (+2 bits 10) will expand to 12 bit lengths of 8 (1 + 6 + 5) 17: Repeat a bit length of 0 for 3 - 10 times. (3 bits of length) 18: Repeat a bit length of 0 for 11 - 138 times (7 bits of length) The lengths of the bit length codes are sent packed 3 bits per value (0 - 7) in the following order: 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 The Huffman codes SHOULD be built as described in the Implode algorithm except codes are assigned starting at the shortest bit length, i.e. the shortest code SHOULD be all 0's rather than all 1's. Also, codes with a bit length of zero do not participate in the tree construction. The codes are then used to decode the bit lengths for the literal and distance tables. The bit lengths for the literal tables are sent first with the number of entries sent described by the 5 bits sent earlier. There are up to 286 literal characters; the first 256 represent the respective 8 bit character, code 256 represents the End-Of-Block code, the remaining 29 codes represent copy lengths of 3 thru 258. There are up to 30 distance codes representing distances from 1 thru 32k as described below. 
Length Codes ------------ Extra Extra Extra Extra Code Bits Length Code Bits Lengths Code Bits Lengths Code Bits Length(s) ---- ---- ------ ---- ---- ------- ---- ---- ------- ---- ---- --------- 257 0 3 265 1 11,12 273 3 35-42 281 5 131-162 258 0 4 266 1 13,14 274 3 43-50 282 5 163-194 259 0 5 267 1 15,16 275 3 51-58 283 5 195-226 260 0 6 268 1 17,18 276 3 59-66 284 5 227-257 261 0 7 269 2 19-22 277 4 67-82 285 0 258 262 0 8 270 2 23-26 278 4 83-98 263 0 9 271 2 27-30 279 4 99-114 264 0 10 272 2 31-34 280 4 115-130 Distance Codes -------------- Extra Extra Extra Extra Code Bits Dist Code Bits Dist Code Bits Distance Code Bits Distance ---- ---- ---- ---- ---- ------ ---- ---- -------- ---- ---- -------- 0 0 1 8 3 17-24 16 7 257-384 24 11 4097-6144 1 0 2 9 3 25-32 17 7 385-512 25 11 6145-8192 2 0 3 10 4 33-48 18 8 513-768 26 12 8193-12288 3 0 4 11 4 49-64 19 8 769-1024 27 12 12289-16384 4 1 5,6 12 5 65-96 20 9 1025-1536 28 13 16385-24576 5 1 7,8 13 5 97-128 21 9 1537-2048 29 13 24577-32768 6 2 9-12 14 6 129-192 22 10 2049-3072 7 2 13-16 15 6 193-256 23 10 3073-4096 ### 5.5.4 The compressed data stream begins immediately after the compressed header data. The compressed data stream can be interpreted as follows: do read header from input stream. if stored block skip bits until byte aligned read count and 1's compliment of count copy count bytes data block otherwise loop until end of block code sent decode literal character from input stream if literal < 256 copy character to the output stream otherwise if literal = end of block break from loop otherwise decode distance from input stream move backwards distance bytes in the output stream, and copy length characters from this position to the output stream. end loop while not last block if data descriptor exists skip bits until byte aligned read crc and sizes endif ## 5.6 Enhanced Deflating - Method 9 ### 5.6.1 The Enhanced Deflating algorithm is similar to Deflate but uses a sliding dictionary of up to 64K. 
Deflate64(tm) is supported by the Deflate extractor. ## 5.7 BZIP2 - Method 12 ### 5.7.1 BZIP2 is an open-source data compression algorithm developed by Julian Seward. Information and source code for this algorithm can be found on the internet. ## 5.8 LZMA - Method 14 ### 5.8.1 LZMA is a block-oriented, general purpose data compression algorithm developed and maintained by Igor Pavlov. It is a derivative of LZ77 that utilizes Markov chains and a range coder. Information and source code for this algorithm can be found on the internet. Consult with the author of this algorithm for information on terms or restrictions on use. Support for LZMA within the ZIP format is defined as follows: ### 5.8.2 The Compression method field within the ZIP Local and Central Header records will be set to the value 14 to indicate data was compressed using LZMA. ### 5.8.3 The Version needed to extract field within the ZIP Local and Central Header records will be set to 6.3 to indicate the minimum ZIP format version supporting this feature. ### 5.8.4 File data compressed using the LZMA algorithm MUST be placed immediately following the Local Header for the file. If a standard ZIP encryption header is required, it will follow the Local Header and will precede the LZMA compressed file data segment. The location of LZMA compressed data segment within the ZIP format will be as shown: [local header file 1] [encryption header file 1] [LZMA compressed data segment for file 1] [data descriptor 1] [local header file 2] ### 5.8.5 The encryption header and data descriptor records MAY be conditionally present. The LZMA Compressed Data Segment will consist of an LZMA Properties Header followed by the LZMA Compressed Data as shown: [LZMA properties header for file 1] [LZMA compressed data for file 1] ### 5.8.6 The LZMA Compressed Data will be stored as provided by the LZMA compression library. 
Compressed size, uncompressed size and other file characteristics about the file being compressed MUST be stored in standard ZIP storage format. ### 5.8.7 The LZMA Properties Header will store specific data required to decompress the LZMA compressed Data. This data is set by the LZMA compression engine using the function WriteCoderProperties() as documented within the LZMA SDK. ### 5.8.8 Storage fields for the property information within the LZMA Properties Header are as follows: LZMA Version Information 2 bytes LZMA Properties Size 2 bytes LZMA Properties Data variable, defined by "LZMA Properties Size" ### 5.8.8.1 LZMA Version Information - this field identifies which version of the LZMA SDK was used to compress a file. The first byte will store the major version number of the LZMA SDK and the second byte will store the minor number. ### 5.8.8.2 LZMA Properties Size - this field defines the size of the remaining property data. Typically this size SHOULD be determined by the version of the SDK. This size field is included as a convenience and to help avoid any ambiguity arising in the future due to changes in this compression algorithm. ### 5.8.8.3 LZMA Property Data - this variable sized field records the required values for the decompressor as defined by the LZMA SDK. The data stored in this field SHOULD be obtained using the WriteCoderProperties() in the version of the SDK defined by the "LZMA Version Information" field. ### 5.8.8.4 The layout of the "LZMA Properties Data" field is a function of the LZMA compression algorithm. It is possible that this layout MAY be changed by the author over time. The data layout in version 4.3 of the LZMA SDK defines a 5 byte array that uses 4 bytes to store the dictionary size in little-endian order. 
This is preceded by a single packed byte as the first element of the array that contains the following fields: PosStateBits LiteralPosStateBits LiteralContextBits Refer to the LZMA documentation for a more detailed explanation of these fields. ### 5.8.9 Data compressed with method 14, LZMA, MAY include an end-of-stream (EOS) marker ending the compressed data stream. This marker is not required, but its use is highly recommended to facilitate processing and implementers SHOULD include the EOS marker whenever possible. When the EOS marker is used, general purpose bit 1 MUST be set. If general purpose bit 1 is not set, the EOS marker is not present. ## 5.9 WavPack - Method 97 ### 5.9.1 Information describing the use of compression method 97 is provided by WinZIP International, LLC. This method relies on the open source WavPack audio compression utility developed by David Bryant. Information on WavPack is available at www.wavpack.com. Please consult with the author of this algorithm for information on terms and restrictions on use. ### 5.9.2 WavPack data for a file begins immediately after the end of the local header data. This data is the output from WavPack compression routines. Within the ZIP file, the use of WavPack compression is indicated by setting the compression method field to a value of 97 in both the local header and the central directory header. The Version needed to extract and version made by fields use the same values as are used for data compressed using the Deflate algorithm. ### 5.9.3 An implementation note for storing digital sample data when using WavPack compression within ZIP files is that all of the bytes of the sample data SHOULD be compressed. This includes any unused bits up to the byte boundary. An example is a 2 byte sample that uses only 12 bits for the sample data with 4 unused bits. If only 12 bits are passed as the sample size to the WavPack routines, the 4 unused bits will be set to 0 on extraction regardless of their original state. 
To avoid this, the full 16 bits of the sample data size SHOULD be provided. ## 5.10 PPMd - Method 98 ### 5.10.1 PPMd is a data compression algorithm developed by Dmitry Shkarin which includes a carryless rangecoder developed by Dmitry Subbotin. This algorithm is based on predictive phrase matching on multiple order contexts. Information and source code for this algorithm can be found on the internet. Consult with the author of this algorithm for information on terms or restrictions on use. ### 5.10.2 Support for PPMd within the ZIP format currently is provided only for version I, revision 1 of the algorithm. Storage requirements for using this algorithm are as follows: ### 5.10.3 Parameters needed to control the algorithm are stored in the two bytes immediately preceding the compressed data. These bytes are used to store the following fields: Model order - sets the maximum model order, default is 8, possible values are from 2 to 16 inclusive Sub-allocator size - sets the size of sub-allocator in MB, default is 50, possible values are from 1MB to 256MB inclusive Model restoration method - sets the method used to restart context model at memory insufficiency, values are: 0 - restarts model from scratch - default 1 - cut off model - decreases performance by as much as 2x 2 - freeze context tree - not recommended ### 5.10.4 An example for packing these fields into the 2 byte storage field is illustrated below. These values are stored in Intel low-byte/high-byte order. wPPMd = (Model order - 1) + ((Sub-allocator size - 1) << 4) + (Model restoration method << 12) ## 5.11 AE-x Encryption marker - Method 99 ## 5.12 JPEG variant - Method 96 ## 5.13 PKWARE Data Compression Library Imploding - Method 10 ## 5.14 Reserved - Method 11 ## 5.15 Reserved - Method 13 ## 5.16 Reserved - Method 15 ## 5.17 IBM z/OS CMPSC Compression - Method 16 Method 16 utilizes the IBM hardware compression facility available on most IBM mainframes. 
Hardware compression can significantly increase the speed of data compression. This method uses a variant of the LZ78 algorithm. CMPSC hardware compression is performed using the COMPRESSION CALL instruction. ZIP archives can be created using this method only on mainframes supporting the CP instruction. Extraction MAY occur on any platform supporting this compression algorithm. Use of this algorithm requires creation of a compression dictionary and an expansion dictionary. The expansion dictionary MUST be placed into the ZIP archive for use on the system where extraction will occur. Additional information on this compression algorithm and dictionaries can be found in the IBM provided document titled IBM ESA/390 Data Compression (SA22-7208-01). Storage requirements for using CMPSC compression are as follows. The format for the compressed data stream placed into the ZIP archive following the Local Header is: [dictionary header] [expansion dictionary] [CMPSC compressed data] If encryption is used to encrypt a file compressed with CMPSC, these sections MUST be encrypted as a single entity. The format of the dictionary header is: Value Size Description ----- ---- ----------- Version 1 byte 1 Flags/Symsize 1 byte Processing flags and symbol size DictionaryLen 4 bytes Length of the expansion dictionary Explanation of processing flags and symbol size: The high 4 bits are used to store the processing flags. The low 4 bits represent the size of a symbol, in bits (values range from 9-13). Flag values are defined below. 0x80 - expansion dictionary 0x40 - expansion dictionary is compressed using Deflate 0x20 - Reserved 0x10 - Reserved ## 5.18 Reserved - Method 17 ## 5.19 IBM TERSE - Method 18 ## 5.20 IBM LZ77 z Architecture - Method 19 ## 6.0 Traditional PKWARE Encryption ### 6.0.1 The following information discusses the decryption steps required to support traditional PKWARE encryption. 
This form of encryption is considered weak by today's standards and its use is recommended only for situations with low security needs or for compatibility with older .ZIP applications. ## 6.1 Traditional PKWARE Decryption ### 6.1.1 PKWARE is grateful to Mr. Roger Schlafly for his expert contribution towards the development of PKWARE's traditional encryption. ### 6.1.2 PKZIP encrypts the compressed data stream. Encrypted files MUST be decrypted before they can be extracted to their original form. ### 6.1.3 Each encrypted file has an extra 12 bytes stored at the start of the data area defining the encryption header for that file. The encryption header is originally set to random values, and then itself encrypted, using three, 32-bit keys. The key values are initialized using the supplied encryption password. After each byte is encrypted, the keys are then updated using pseudo-random number generation techniques in combination with the same CRC-32 algorithm used in PKZIP and described elsewhere in this document. ### 6.1.4 The following are the basic steps required to decrypt a file: 1) Initialize the three 32-bit keys with the password. 2) Read and decrypt the 12-byte encryption header, further initializing the encryption keys. 3) Read and decrypt the compressed data stream using the encryption keys. ### 6.1.5 Initializing the encryption keys Key(0) <- 305419896 Key(1) <- 591751049 Key(2) <- 878082192 loop for i <- 0 to length(password)-1 update_keys(password(i)) end loop Where update_keys() is defined as: update_keys(char): Key(0) <- crc32(key(0),char) Key(1) <- Key(1) + (Key(0) & 000000ffH) Key(1) <- Key(1) * 134775813 + 1 Key(2) <- crc32(key(2),key(1) >> 24) end update_keys Where crc32(old_crc,char) is a routine that given a CRC value and a character, returns an updated CRC value after applying the CRC-32 algorithm described elsewhere in this document. 
### 6.1.6 Decrypting the encryption header The purpose of this step is to further initialize the encryption keys, based on random data, to render a plaintext attack on the data ineffective. Read the 12-byte encryption header into Buffer, in locations Buffer(0) thru Buffer(11). loop for i <- 0 to 11 C <- buffer(i) ^ decrypt_byte() update_keys(C) buffer(i) <- C end loop Where decrypt_byte() is defined as: unsigned char decrypt_byte() local unsigned short temp temp <- Key(2) | 2 decrypt_byte <- (temp * (temp ^ 1)) >> 8 end decrypt_byte After the header is decrypted, the last 1 or 2 bytes in Buffer SHOULD be the high-order word/byte of the CRC for the file being decrypted, stored in Intel low-byte/high-byte order. Versions of PKZIP prior to 2.0 used a 2 byte CRC check; a 1 byte CRC check is used on versions after 2.0. This can be used to test if the password supplied is correct or not. ### 6.1.7 Decrypting the compressed data stream The compressed data stream can be decrypted as follows: loop until done read a character into C Temp <- C ^ decrypt_byte() update_keys(temp) output Temp end loop ## 7.0 Strong Encryption Specification ### 7.0.1 Portions of the Strong Encryption technology defined in this specification are covered under patents and pending patent applications. Refer to the section in this document entitled "Incorporating PKWARE Proprietary Technology into Your Product" for more information. ## 7.1 Strong Encryption Overview ### 7.1.1 Version 5.x of this specification introduced support for strong encryption algorithms. These algorithms can be used with either a password or an X.509v3 digital certificate to encrypt each file. This format specification supports either password or certificate based encryption to meet the security needs of today, to enable interoperability between users within both PKI and non-PKI environments, and to ensure interoperability between different computing platforms that are running a ZIP program. 
### 7.1.2 Password based encryption is the most common form of encryption people are familiar with. However, inherent weaknesses with passwords (e.g. susceptibility to dictionary/brute force attack) as well as password management and support issues make certificate based encryption a more secure and scalable option. Industry efforts and support are defining and moving towards more advanced security solutions built around X.509v3 digital certificates and Public Key Infrastructures(PKI) because of the greater scalability, administrative options, and more robust security over traditional password based encryption. ### 7.1.3 Most standard encryption algorithms are supported with this specification. Reference implementations for many of these algorithms are available from either commercial or open source distributors. Readily available cryptographic toolkits make implementation of the encryption features straight-forward. This document is not intended to provide a treatise on data encryption principles or theory. Its purpose is to document the data structures required for implementing interoperable data encryption within the .ZIP format. It is strongly recommended that you have a good understanding of data encryption before reading further. ### 7.1.4 The algorithms introduced in Version 5.0 of this specification include: RC2 40 bit, 64 bit, and 128 bit RC4 40 bit, 64 bit, and 128 bit DES 3DES 112 bit and 168 bit Version 5.1 adds support for the following: AES 128 bit, 192 bit, and 256 bit ### 7.1.5 Version 6.1 introduces encryption data changes to support interoperability with Smartcard and USB Token certificate storage methods which do not support the OAEP strengthening standard. ### 7.1.6 Version 6.2 introduces support for encrypting metadata by compressing and encrypting the central directory data structure to reduce information leakage. 
Information leakage can occur in legacy ZIP applications through exposure of information about a file even though that file is stored encrypted. The information exposed consists of file characteristics stored within the records and fields defined by this specification. This includes data such as a file's name, its original size, timestamp and CRC32 value. ### 7.1.7 Version 6.3 introduces support for encrypting data using the Blowfish and Twofish algorithms. These are symmetric block ciphers developed by Bruce Schneier. Blowfish supports using a variable length key from 32 to 448 bits. Block size is 64 bits. Implementations SHOULD use 16 rounds and the only mode supported within ZIP files is CBC. Twofish supports key sizes 128, 192 and 256 bits. Block size is 128 bits. Implementations SHOULD use 16 rounds and the only mode supported within ZIP files is CBC. Information and source code for both Blowfish and Twofish algorithms can be found on the internet. Consult with the author of these algorithms for information on terms or restrictions on use. ### 7.1.8 Central Directory Encryption provides greater protection against information leakage by encrypting the Central Directory structure and by masking key values that are replicated in the unencrypted Local Header. ZIP compatible programs that cannot interpret an encrypted Central Directory structure cannot rely on the data in the corresponding Local Header for decompression information. ### 7.1.9 Extra Field records that MAY contain information about a file that SHOULD not be exposed SHOULD NOT be stored in the Local Header and SHOULD only be written to the Central Directory where they can be encrypted. This design currently does not support streaming. Information in the End of Central Directory record, the Zip64 End of Central Directory Locator, and the Zip64 End of Central Directory records are not encrypted. 
Access to view data on files within a ZIP file with an encrypted Central Directory requires the appropriate password or private key for decryption prior to viewing any files, or any information about the files, in the archive. ### 7.1.10 Older ZIP compatible programs not familiar with the Central Directory Encryption feature will no longer be able to recognize the Central Directory and MAY assume the ZIP file is corrupt. Programs that attempt streaming access using Local Headers will see invalid information for each file. Central Directory Encryption need not be used for every ZIP file. Its use is recommended for greater security. ZIP files not using Central Directory Encryption SHOULD operate as in the past. ### 7.1.11 This strong encryption feature specification is intended to provide for scalable, cross-platform encryption needs ranging from simple password encryption to authenticated public/private key encryption. ### 7.1.12 Encryption provides data confidentiality and privacy. It is recommended that you combine X.509 digital signing with encryption to add authentication and non-repudiation. ## 7.2 Single Password Symmetric Encryption Method ### 7.2.1 The Single Password Symmetric Encryption Method using strong encryption algorithms operates similarly to the traditional PKWARE encryption defined in this format. Additional data structures are added to support the processing needs of the strong algorithms. The Strong Encryption data structures are: ### 7.2.2 General Purpose Bits - Bits 0 and 6 of the General Purpose bit flag in both local and central header records. Both bits set indicates strong encryption. Bit 13, when set indicates the Central Directory is encrypted and that selected fields in the Local Header are masked to hide their actual value. ### 7.2.3 Extra Field 0x0017 in central header only. Fields to consider in this record are: ### 7.2.3.1 Format - the data format identifier for this record. 
The only value allowed at this time is the integer value 2. ### 7.2.3.2 AlgId - integer identifier of the encryption algorithm from the following range 0x6601 - DES 0x6602 - RC2 (version needed to extract < 5.2) 0x6603 - 3DES 168 0x6609 - 3DES 112 0x660E - AES 128 0x660F - AES 192 0x6610 - AES 256 0x6702 - RC2 (version needed to extract >= 5.2) 0x6720 - Blowfish 0x6721 - Twofish 0x6801 - RC4 0xFFFF - Unknown algorithm ### 7.2.3.3 Bitlen - Explicit bit length of key 32 - 448 bits ### 7.2.3.4 Flags - Processing flags needed for decryption 0x0001 - Password is required to decrypt 0x0002 - Certificates only 0x0003 - Password or certificate required to decrypt Values > 0x0003 reserved for certificate processing ### 7.2.4 Decryption header record preceding compressed file data. -Decryption Header: Value Size Description ----- ---- ----------- IVSize 2 bytes Size of initialization vector (IV) IVData IVSize Initialization vector for this file Size 4 bytes Size of remaining decryption header data Format 2 bytes Format definition for this record AlgID 2 bytes Encryption algorithm identifier Bitlen 2 bytes Bit length of encryption key Flags 2 bytes Processing flags ErdSize 2 bytes Size of Encrypted Random Data ErdData ErdSize Encrypted Random Data Reserved1 4 bytes Reserved certificate processing data Reserved2 (var) Reserved for certificate processing data VSize 2 bytes Size of password validation data VData VSize-4 Password validation data VCRC32 4 bytes Standard ZIP CRC32 of password validation data ### 7.2.4.1 IVData - The size of the IV SHOULD match the algorithm block size. The IVData can be completely random data. If the size of the randomly generated data does not match the block size it SHOULD be complemented with zeros or truncated as necessary. If IVSize is 0, then IV = CRC32 + Uncompressed File Size (as a 64 bit little-endian, unsigned integer value). ### 7.2.4.2 Format - the data format identifier for this record. 
The only value allowed at this time is the integer value 3. ### 7.2.4.3 AlgId - integer identifier of the encryption algorithm from the following range 0x6601 - DES 0x6602 - RC2 (version needed to extract < 5.2) 0x6603 - 3DES 168 0x6609 - 3DES 112 0x660E - AES 128 0x660F - AES 192 0x6610 - AES 256 0x6702 - RC2 (version needed to extract >= 5.2) 0x6720 - Blowfish 0x6721 - Twofish 0x6801 - RC4 0xFFFF - Unknown algorithm ### 7.2.4.4 Bitlen - Explicit bit length of key 32 - 448 bits ### 7.2.4.5 Flags - Processing flags needed for decryption 0x0001 - Password is required to decrypt 0x0002 - Certificates only 0x0003 - Password or certificate required to decrypt Values > 0x0003 reserved for certificate processing ### 7.2.4.6 ErdData - Encrypted random data is used to store random data that is used to generate a file session key for encrypting each file. SHA1 is used to calculate hash data used to derive keys. File session keys are derived from a master session key generated from the user-supplied password. If the Flags field in the decryption header contains the value 0x4000, then the ErdData field MUST be decrypted using 3DES. If the value 0x4000 is not set, then the ErdData field MUST be decrypted using AlgId. ### 7.2.4.7 Reserved1 - Reserved for certificate processing, if value is zero, then Reserved2 data is absent. See the explanation under the Certificate Processing Method for details on this data structure. ### 7.2.4.8 Reserved2 - If present, the size of the Reserved2 data structure is located by skipping the first 4 bytes of this field and using the next 2 bytes as the remaining size. See the explanation under the Certificate Processing Method for details on this data structure. ### 7.2.4.9 VSize - This size value will always include the 4 bytes of the VCRC32 data and will be greater than 4 bytes. ### 7.2.4.10 VData - Random data for password validation. This data is VSize in length and VSize MUST be a multiple of the encryption block size. 
VCRC32 is a checksum value of VData. VData and VCRC32 are stored encrypted and start the stream of encrypted data for a file. ### 7.2.5 Useful Tips ### 7.2.5.1 Strong Encryption is always applied to a file after compression. The block oriented algorithms all operate in Cypher Block Chaining (CBC) mode. The block size used for AES encryption is 16. All other block algorithms use a block size of 8. Two IDs are defined for RC2 to account for a discrepancy found in the implementation of the RC2 algorithm in the cryptographic library on Windows XP SP1 and all earlier versions of Windows. It is recommended that zero length files not be encrypted, however programs SHOULD be prepared to extract them if they are found within a ZIP file. ### 7.2.5.2 A pseudo-code representation of the encryption process is as follows: Password = GetUserPassword() MasterSessionKey = DeriveKey(SHA1(Password)) RD = CryptographicStrengthRandomData() For Each File IV = CryptographicStrengthRandomData() VData = CryptographicStrengthRandomData() VCRC32 = CRC32(VData) FileSessionKey = DeriveKey(SHA1(IV + RD)) ErdData = Encrypt(RD,MasterSessionKey,IV) Encrypt(VData + VCRC32 + FileData, FileSessionKey,IV) Done ### 7.2.5.3 The function names and parameter requirements will depend on the choice of the cryptographic toolkit selected. Almost any toolkit supporting the reference implementations for each algorithm can be used. The RSA BSAFE(r), OpenSSL, and Microsoft CryptoAPI libraries are all known to work well. 7.3 Single Password - Central Directory Encryption -------------------------------------------------- ### 7.3.1 Central Directory Encryption is achieved within the .ZIP format by encrypting the Central Directory structure. This encapsulates the metadata most often used for processing .ZIP files. Additional metadata is stored for redundancy in the Local Header for each file. The process of concealing metadata by encrypting the Central Directory does not protect the data within the Local Header. 
To avoid information leakage from the exposed metadata in the Local Header, the fields containing information about a file are masked. ### 7.3.2 Local Header Masking replaces the true content of the fields for a file in the Local Header with false information. When masked, the Local Header is not suitable for streaming access and the options for data recovery of damaged archives is reduced. Extra Data fields that MAY contain confidential data SHOULD NOT be stored within the Local Header. The value set into the Version needed to extract field SHOULD be the correct value needed to extract the file without regard to Central Directory Encryption. The fields within the Local Header targeted for masking when the Central Directory is encrypted are: Field Name Mask Value ------------------ --------------------------- compression method 0 last mod file time 0 last mod file date 0 crc-32 0 compressed size 0 uncompressed size 0 file name (variable size) Base 16 value from the range 1 - 0xFFFFFFFFFFFFFFFF represented as a string whose size will be set into the file name length field The Base 16 value assigned as a masked file name is simply a sequentially incremented value for each file starting with 1 for the first file. Modifications to a ZIP file MAY cause different values to be stored for each file. For compatibility, the file name field in the Local Header SHOULD NOT be left blank. As of Version 6.2 of this specification, the Compression Method and Compressed Size fields are not yet masked. Fields having a value of 0xFFFF or 0xFFFFFFFF for the ZIP64 format SHOULD NOT be masked. ### 7.3.3 Encrypting the Central Directory Encryption of the Central Directory does not include encryption of the Central Directory Signature data, the Zip64 End of Central Directory record, the Zip64 End of Central Directory Locator, or the End of Central Directory record. The ZIP file comment data is never encrypted. Before encrypting the Central Directory, it MAY optionally be compressed. 
Compression is not required, but for storage efficiency it is assumed this structure will be compressed before encrypting. Similarly, this specification supports compressing the Central Directory without requiring that it also be encrypted. Early implementations of this feature will assume the encryption method applied to files matches the encryption applied to the Central Directory. Encryption of the Central Directory is done in a manner similar to that of file encryption. The encrypted data is preceded by a decryption header. The decryption header is known as the Archive Decryption Header. The fields of this record are identical to the decryption header preceding each encrypted file. The location of the Archive Decryption Header is determined by the value in the Start of the Central Directory field in the Zip64 End of Central Directory record. When the Central Directory is encrypted, the Zip64 End of Central Directory record will always be present. The layout of the Zip64 End of Central Directory record for all versions starting with 6.2 of this specification will follow the Version 2 format. The Version 2 format is as follows: The leading fixed size fields within the Version 1 format for this record remain unchanged. The record signature for both Version 1 and Version 2 will be 0x06064b50. Immediately following the last byte of the field known as the Offset of Start of Central Directory With Respect to the Starting Disk Number will begin the new fields defining Version 2 of this record. ### 7.3.4 New fields for Version 2 Note: all fields stored in Intel low-byte/high-byte order. 
Value Size Description ----- ---- ----------- Compression Method 2 bytes Method used to compress the Central Directory Compressed Size 8 bytes Size of the compressed data Original Size 8 bytes Original uncompressed size AlgId 2 bytes Encryption algorithm ID BitLen 2 bytes Encryption key length Flags 2 bytes Encryption flags HashID 2 bytes Hash algorithm identifier Hash Length 2 bytes Length of hash data Hash Data (variable) Hash data The Compression Method accepts the same range of values as the corresponding field in the Central Header. The Compressed Size and Original Size values will not include the data of the Central Directory Signature which is compressed or encrypted. The AlgId, BitLen, and Flags fields accept the same range of values as the corresponding fields within the 0x0017 record. Hash ID identifies the algorithm used to hash the Central Directory data. This data does not have to be hashed, in which case the values for both the HashID and Hash Length will be 0. Possible values for HashID are: Value Algorithm ------ --------- 0x0000 none 0x0001 CRC32 0x8003 MD5 0x8004 SHA1 0x8007 RIPEMD160 0x800C SHA256 0x800D SHA384 0x800E SHA512 ### 7.3.5 When the Central Directory data is signed, the same hash algorithm used to hash the Central Directory for signing SHOULD be used. This is recommended for processing efficiency, however, it is permissible for any of the above algorithms to be used independent of the signing process. The Hash Data will contain the hash data for the Central Directory. The length of this data will vary depending on the algorithm used. The Version Needed to Extract SHOULD be set to 62. The value for the Total Number of Entries on the Current Disk will be 0. These records will no longer support random access when encrypting the Central Directory. 
### 7.3.6 When the Central Directory is compressed and/or encrypted, the End of Central Directory record will store the value 0xFFFFFFFF as the value for the Total Number of Entries in the Central Directory. The value stored in the Total Number of Entries in the Central Directory on this Disk field will be 0. The actual values will be stored in the equivalent fields of the Zip64 End of Central Directory record. ### 7.3.7 Decrypting and decompressing the Central Directory is accomplished in the same manner as decrypting and decompressing a file. 7.4 Certificate Processing Method --------------------------------- The Certificate Processing Method for ZIP file encryption defines the following additional data fields: ### 7.4.1 Certificate Flag Values Additional processing flags that can be present in the Flags field of both the 0x0017 field of the central directory Extra Field and the Decryption header record preceding compressed file data are: 0x0007 - reserved for future use 0x000F - reserved for future use 0x0100 - Indicates non-OAEP key wrapping was used. If this field is set, the version needed to extract MUST be at least 61. This means OAEP key wrapping is not used when generating a Master Session Key using ErdData. 0x4000 - ErdData MUST be decrypted using 3DES-168, otherwise use the same algorithm used for encrypting the file contents. 0x8000 - reserved for future use ### 7.4.2 CertData - Extra Field 0x0017 record certificate data structure The data structure used to store certificate data within the section of the Extra Field defined by the CertData field of the 0x0017 record are as shown: Value Size Description ----- ---- ----------- RCount 4 bytes Number of recipients. HashAlg 2 bytes Hash algorithm identifier HSize 2 bytes Hash size SRList (var) Simple list of recipients' hashed public keys RCount This defines the number of intended recipients whose public keys were used for encryption. This identifies the number of elements in the SRList. 
HashAlg This defines the hash algorithm used to calculate the public key hash of each public key used for encryption. This field currently supports only the following value for SHA-1 0x8004 - SHA1 HSize This defines the size of a hashed public key. SRList This is a variable length list of the hashed public keys for each intended recipient. Each element in this list is HSize. The total size of SRList is determined using RCount * HSize. ### 7.4.3 Reserved1 - Certificate Decryption Header Reserved1 Data Value Size Description ----- ---- ----------- RCount 4 bytes Number of recipients. RCount This defines the number of intended recipients whose public keys were used for encryption. This defines the number of elements in the REList field defined below. ### 7.4.4 Reserved2 - Certificate Decryption Header Reserved2 Data Structures Value Size Description ----- ---- ----------- HashAlg 2 bytes Hash algorithm identifier HSize 2 bytes Hash size REList (var) List of recipient data elements HashAlg This defines the hash algorithm used to calculate the public key hash of each public key used for encryption. This field currently supports only the following value for SHA-1 0x8004 - SHA1 HSize This defines the size of a hashed public key defined in REHData. REList This is a variable length list of recipient data. Each element in this list consists of a Recipient Element data structure as follows: Recipient Element (REList) Data Structure: Value Size Description ----- ---- ----------- RESize 2 bytes Size of REHData + REKData REHData HSize Hash of recipient's public key REKData (var) Simple key blob RESize This defines the size of an individual REList element. This value is the combined size of the REHData field + REKData field. REHData is defined by HSize. REKData is variable and can be calculated for each REList element using RESize and HSize. REHData Hashed public key for this recipient. REKData Simple Key Blob. 
The format of this data structure is identical to that defined in the Microsoft CryptoAPI and generated using the CryptExportKey() function. The version of the Simple Key Blob supported at this time is 0x02 as defined by Microsoft. ## 7.5 Certificate Processing - Central Directory Encryption ### 7.5.1 Central Directory Encryption using Digital Certificates will operate in a manner similar to that of Single Password Central Directory Encryption. This record will only be present when there is data to place into it. Currently, data is placed into this record when digital certificates are used for either encrypting or signing the files within a ZIP file. When only password encryption is used with no certificate encryption or digital signing, this record is not currently needed. When present, this record will appear before the start of the actual Central Directory data structure and will be located immediately after the Archive Decryption Header if the Central Directory is encrypted. ### 7.5.2 The Archive Extra Data record will be used to store the following information. Additional data MAY be added in future versions. Extra Data Fields: 0x0014 - PKCS#7 Store for X.509 Certificates 0x0016 - X.509 Certificate ID and Signature for central directory 0x0019 - PKCS#7 Encryption Recipient Certificate List The 0x0014 and 0x0016 Extra Data records that otherwise would be located in the first record of the Central Directory for digital certificate processing. When encrypting or compressing the Central Directory, the 0x0014 and 0x0016 records MUST be located in the Archive Extra Data record and they SHOULD NOT remain in the first Central Directory record. The Archive Extra Data record will also be used to store the 0x0019 data. ### 7.5.3 When present, the size of the Archive Extra Data record will be included in the size of the Central Directory. The data of the Archive Extra Data record will also be compressed and encrypted along with the Central Directory data structure. 
## 7.6 Certificate Processing Differences ### 7.6.1 The Certificate Processing Method of encryption differs from the Single Password Symmetric Encryption Method as follows. Instead of using a user-defined password to generate a master session key, cryptographically random data is used. The key material is then wrapped using standard key-wrapping techniques. This key material is wrapped using the public key of each recipient that will need to decrypt the file using their corresponding private key. ### 7.6.2 This specification currently assumes digital certificates will follow the X.509 V3 format for 1024 bit and higher RSA format digital certificates. Implementation of this Certificate Processing Method requires supporting logic for key access and management. This logic is outside the scope of this specification. ## 7.7 OAEP Processing with Certificate-based Encryption ### 7.7.1 OAEP stands for Optimal Asymmetric Encryption Padding. It is a strengthening technique used for small encoded items such as decryption keys. This is commonly applied in cryptographic key-wrapping techniques and is supported by PKCS #1. Versions 5.0 and 6.0 of this specification were designed to support OAEP key-wrapping for certificate-based decryption keys for additional security. ### 7.7.2 Support for private keys stored on Smartcards or Tokens introduced a conflict with this OAEP logic. Most card and token products do not support the additional strengthening applied to OAEP key-wrapped data. In order to resolve this conflict, versions 6.1 and above of this specification will no longer support OAEP when encrypting using digital certificates. ### 7.7.3 Versions of PKZIP available during initial development of the certificate processing method set a value of 61 into the version needed to extract field for a file. This indicates that non-OAEP key wrapping is used. This affects certificate encryption only, and password encryption functions SHOULD NOT be affected by this value. 
This means values of 61 MAY be found on files encrypted with certificates only, or on files encrypted with both password encryption and certificate encryption. Files encrypted with both methods can safely be decrypted using the password methods documented. ## 7.8 Additional Encryption/Decryption Data Records ### 7.8.1 Additional information MAY be stored within a ZIP file in support of the strong password and certificate encryption methods defined above. These include, but are not limited to the following record types. 0x0021 Policy Decryption Key Record 0x0022 Smartcrypt Key Provider Record 0x0023 Smartcrypt Policy Key Data Record ## 8.0 Splitting and Spanning ZIP files 8.1 Spanned ZIP files ### 8.1.1 Spanning is the process of segmenting a ZIP file across multiple removable media. This support has typically only been provided for DOS formatted floppy diskettes. 8.2 Split ZIP files ### 8.2.1 File splitting is a newer derivation of spanning. Splitting follows the same segmentation process as spanning, however, it does not require writing each segment to a unique removable medium and instead supports placing all pieces onto local or non-removable locations such as file systems, local drives, folders, etc. 8.3 File Naming Differences ### 8.3.1 A key difference between spanned and split ZIP files is that all pieces of a spanned ZIP file have the same name. Since each piece is written to a separate volume, no name collisions occur and each segment can reuse the original .ZIP file name given to the archive. ### 8.3.2 Sequence ordering for DOS spanned archives uses the DOS volume label to determine segment numbers. Volume labels for each segment are written using the form PKBACK#xxx, where xxx is the segment number written as a decimal value from 001 - nnn. ### 8.3.3 Split ZIP files are typically written to the same location and are subject to name collisions if the spanned name format is used since each segment will reside on the same drive. 
To avoid name collisions, split archives are named as follows. Segment 1 = filename.z01 Segment n-1 = filename.z(n-1) Segment n = filename.zip ### 8.3.4 The .ZIP extension is used on the last segment to support quickly reading the central directory. The segment number n SHOULD be a decimal value. 8.4 Spanned Self-extracting ZIP Files ### 8.4.1 Spanned ZIP files MAY be PKSFX Self-extracting ZIP files. PKSFX files MAY also be split, however, in this case the first segment MUST be named filename.exe. The first segment of a split PKSFX archive MUST be large enough to include the entire executable program. 8.5 Capacities and Markers ### 8.5.1 Capacities for split archives are as follows: Maximum number of segments = 4,294,967,295 - 1 Maximum .ZIP segment size = 4,294,967,295 bytes Minimum segment size = 64K Maximum PKSFX segment size = 2,147,483,647 bytes ### 8.5.2 Segment sizes MAY be different however by convention, all segment sizes SHOULD be the same with the exception of the last, which MAY be smaller. Local and central directory header records MUST NOT be split across a segment boundary. When writing a header record, if the number of bytes remaining within a segment is less than the size of the header record, end the current segment and write the header at the start of the next segment. The central directory MAY span segment boundaries, but no single record in the central directory SHOULD be split across segments. ### 8.5.3 Spanned/Split archives created using PKZIP for Windows (V2.50 or greater), PKZIP Command Line (V2.50 or greater), or PKZIP Explorer will include a special spanning signature as the first 4 bytes of the first segment of the archive. This signature (0x08074b50) will be followed immediately by the local header signature for the first file in the archive. ### 8.5.4 A special spanning marker MAY also appear in spanned/split archives if the spanning or splitting process starts but only requires one segment. 
In this case the 0x08074b50 signature will be replaced with the temporary spanning marker signature of 0x30304b50. Split archives can only be uncompressed by other versions of PKZIP that know how to create a split archive. ### 8.5.5 The signature value 0x08074b50 is also used by some ZIP implementations as a marker for the Data Descriptor record. Conflict in this alternate assignment can be avoided by ensuring the position of the signature within the ZIP file to determine the use for which it is intended. ## 9.0 Change Process 9.1 In order for the .ZIP file format to remain a viable technology, this specification SHOULD be considered as open for periodic review and revision. Although this format was originally designed with a certain level of extensibility, not all changes in technology (present or future) were or will be necessarily considered in its design. 9.2 If your application requires new definitions to the extensible sections in this format, or if you would like to submit new data structures or new capabilities, please forward your request to zipformat@pkware.com. All submissions will be reviewed by the ZIP File Specification Committee for possible inclusion into future versions of this specification. 9.3 Periodic revisions to this specification will be published as DRAFT or as FINAL status to ensure interoperability. We encourage comments and feedback that MAY help improve clarity or content. ## 10.0 Incorporating PKWARE Proprietary Technology into Your Product 10.1 The Use or Implementation in a product of APPNOTE technological components pertaining to either strong encryption or patching requires a separate, executed license agreement from PKWARE. Please contact PKWARE at zipformat@pkware.com or +1-414-289-9788 with regard to acquiring such a license. 10.2 Additional information regarding PKWARE proprietary technology is available at http://www.pkware.com/appnote. 
## 11.0 Acknowledgements In addition to the above mentioned contributors to PKZIP and PKUNZIP, PKWARE would like to extend special thanks to Robert Mahoney for suggesting the extension .ZIP for this software. ## 12.0 References Fiala, Edward R., and Greene, Daniel H., "Data compression with finite windows", Communications of the ACM, Volume 32, Number 4, April 1989, pages 490-505. Held, Gilbert, "Data Compression, Techniques and Applications, Hardware and Software Considerations", John Wiley & Sons, 1987. Huffman, D.A., "A method for the construction of minimum-redundancy codes", Proceedings of the IRE, Volume 40, Number 9, September 1952, pages 1098-1101. Nelson, Mark, "LZW Data Compression", Dr. Dobbs Journal, Volume 14, Number 10, October 1989, pages 29-37. Nelson, Mark, "The Data Compression Book", M&T Books, 1991. Storer, James A., "Data Compression, Methods and Theory", Computer Science Press, 1988 Welch, Terry, "A Technique for High-Performance Data Compression", IEEE Computer, Volume 17, Number 6, June 1984, pages 8-19. Ziv, J. and Lempel, A., "A universal algorithm for sequential data compression", Communications of the ACM, Volume 30, Number 6, June 1987, pages 520-540. Ziv, J. and Lempel, A., "Compression of individual sequences via variable-rate coding", IEEE Transactions on Information Theory, Volume 24, Number 5, September 1978, pages 530-536. APPENDIX A - AS/400 Extra Field (0x0065) Attribute Definitions -------------------------------------------------------------- A.1 Field Definition Structure: a. field length including length 2 bytes Big Endian b. field code 2 bytes c. data x bytes A.2 Field Code Description 4001 Source type i.e. 
CLP etc 4002 The text description of the library 4003 The text description of the file 4004 The text description of the member 4005 x'F0' or 0 is PF-DTA, x'F1' or 1 is PF_SRC 4007 Database Type Code 1 byte 4008 Database file and fields definition 4009 GZIP file type 2 bytes 400B IFS code page 2 bytes 400C IFS Time of last file status change 4 bytes 400D IFS Access Time 4 bytes 400E IFS Modification time 4 bytes 005C Length of the records in the file 2 bytes 0068 GZIP two words 8 bytes APPENDIX B - z/OS Extra Field (0x0065) Attribute Definitions ------------------------------------------------------------ B.1 Field Definition Structure: a. field length including length 2 bytes Big Endian b. field code 2 bytes c. data x bytes B.2 Field Code Description 0001 File Type 2 bytes 0002 NonVSAM Record Format 1 byte 0003 Reserved 0004 NonVSAM Block Size 2 bytes Big Endian 0005 Primary Space Allocation 3 bytes Big Endian 0006 Secondary Space Allocation 3 bytes Big Endian 0007 Space Allocation Type1 byte flag 0008 Modification Date Retired with PKZIP 5.0 + 0009 Expiration Date Retired with PKZIP 5.0 + 000A PDS Directory Block Allocation 3 bytes Big Endian binary value 000B NonVSAM Volume List variable 000C UNIT Reference Retired with PKZIP 5.0 + 000D DF/SMS Management Class 8 bytes EBCDIC Text Value 000E DF/SMS Storage Class 8 bytes EBCDIC Text Value 000F DF/SMS Data Class 8 bytes EBCDIC Text Value 0010 PDS/PDSE Member Info. 
30 bytes 0011 VSAM sub-filetype 2 bytes 0012 VSAM LRECL 13 bytes EBCDIC "(num_avg num_max)" 0013 VSAM Cluster Name Retired with PKZIP 5.0 + 0014 VSAM KSDS Key Information 13 bytes EBCDIC "(num_length num_position)" 0015 VSAM Average LRECL 5 bytes EBCDIC num_value padded with blanks 0016 VSAM Maximum LRECL 5 bytes EBCDIC num_value padded with blanks 0017 VSAM KSDS Key Length 5 bytes EBCDIC num_value padded with blanks 0018 VSAM KSDS Key Position 5 bytes EBCDIC num_value padded with blanks 0019 VSAM Data Name 1-44 bytes EBCDIC text string 001A VSAM KSDS Index Name 1-44 bytes EBCDIC text string 001B VSAM Catalog Name 1-44 bytes EBCDIC text string 001C VSAM Data Space Type 9 bytes EBCDIC text string 001D VSAM Data Space Primary 9 bytes EBCDIC num_value left-justified 001E VSAM Data Space Secondary 9 bytes EBCDIC num_value left-justified 001F VSAM Data Volume List variable EBCDIC text list of 6-character Volume IDs 0020 VSAM Data Buffer Space 8 bytes EBCDIC num_value left-justified 0021 VSAM Data CISIZE 5 bytes EBCDIC num_value left-justified 0022 VSAM Erase Flag 1 byte flag 0023 VSAM Free CI % 3 bytes EBCDIC num_value left-justified 0024 VSAM Free CA % 3 bytes EBCDIC num_value left-justified 0025 VSAM Index Volume List variable EBCDIC text list of 6-character Volume IDs 0026 VSAM Ordered Flag 1 byte flag 0027 VSAM REUSE Flag 1 byte flag 0028 VSAM SPANNED Flag 1 byte flag 0029 VSAM Recovery Flag 1 byte flag 002A VSAM WRITECHK Flag 1 byte flag 002B VSAM Cluster/Data SHROPTS 3 bytes EBCDIC "n,y" 002C VSAM Index SHROPTS 3 bytes EBCDIC "n,y" 002D VSAM Index Space Type 9 bytes EBCDIC text string 002E VSAM Index Space Primary 9 bytes EBCDIC num_value left-justified 002F VSAM Index Space Secondary 9 bytes EBCDIC num_value left-justified 0030 VSAM Index CISIZE 5 bytes EBCDIC num_value left-justified 0031 VSAM Index IMBED 1 byte flag 0032 VSAM Index Ordered Flag 1 byte flag 0033 VSAM REPLICATE Flag 1 byte flag 0034 VSAM Index REUSE Flag 1 byte flag 0035 VSAM Index WRITECHK Flag 
1 byte flag Retired with PKZIP 5.0 + 0036 VSAM Owner 8 bytes EBCDIC text string 0037 VSAM Index Owner 8 bytes EBCDIC text string 0038 Reserved 0039 Reserved 003A Reserved 003B Reserved 003C Reserved 003D Reserved 003E Reserved 003F Reserved 0040 Reserved 0041 Reserved 0042 Reserved 0043 Reserved 0044 Reserved 0045 Reserved 0046 Reserved 0047 Reserved 0048 Reserved 0049 Reserved 004A Reserved 004B Reserved 004C Reserved 004D Reserved 004E Reserved 004F Reserved 0050 Reserved 0051 Reserved 0052 Reserved 0053 Reserved 0054 Reserved 0055 Reserved 0056 Reserved 0057 Reserved 0058 PDS/PDSE Member TTR Info. 6 bytes Big Endian 0059 PDS 1st LMOD Text TTR 3 bytes Big Endian 005A PDS LMOD EP Rec # 4 bytes Big Endian 005B Reserved 005C Max Length of records 2 bytes Big Endian 005D PDSE Flag 1 byte flag 005E Reserved 005F Reserved 0060 Reserved 0061 Reserved 0062 Reserved 0063 Reserved 0064 Reserved 0065 Last Date Referenced 4 bytes Packed Hex "yyyymmdd" 0066 Date Created 4 bytes Packed Hex "yyyymmdd" 0068 GZIP two words 8 bytes 0071 Extended NOTE Location 12 bytes Big Endian 0072 Archive device UNIT 6 bytes EBCDIC 0073 Archive 1st Volume 6 bytes EBCDIC 0074 Archive 1st VOL File Seq# 2 bytes Binary 0075 Native I/O Flags 2 bytes 0081 Unix File Type 1 byte enumerated 0082 Unix File Format 1 byte enumerated 0083 Unix File Character Set Tag Info 4 bytes 0090 ZIP Environmental Processing Info 4 bytes 0091 EAV EATTR Flags 1 byte 0092 DSNTYPE Flags 1 byte 0093 Total Space Allocation (Cyls) 4 bytes Big Endian 009D NONVSAM DSORG 2 bytes 009E Program Virtual Object Info 3 bytes 009F Encapsulated file Info 9 bytes 400C Unix File Creation Time 4 bytes 400D Unix File Access Time 4 bytes 400E Unix File Modification time 4 bytes 4101 IBMCMPSC Compression Info variable 4102 IBMCMPSC Compression Size 8 bytes Big Endian APPENDIX C - Zip64 Extensible Data Sector Mappings --------------------------------------------------- -Z390 Extra Field: The following is the general layout of the attributes 
for the ZIP 64 "extra" block for extended tape operations. Note: some fields stored in Big Endian format. All text is in EBCDIC format unless otherwise specified. Value Size Description ----- ---- ----------- (Z390) 0x0065 2 bytes Tag for this "extra" block type Size 4 bytes Size for the following data block Tag 4 bytes EBCDIC "Z390" Length71 2 bytes Big Endian Subcode71 2 bytes Enote type code FMEPos 1 byte Length72 2 bytes Big Endian Subcode72 2 bytes Unit type code Unit 1 byte Unit Length73 2 bytes Big Endian Subcode73 2 bytes Volume1 type code FirstVol 1 byte Volume Length74 2 bytes Big Endian Subcode74 2 bytes FirstVol file sequence FileSeq 2 bytes Sequence APPENDIX D - Language Encoding (EFS) ------------------------------------ D.1 The ZIP format has historically supported only the original IBM PC character encoding set, commonly referred to as IBM Code Page 437. This limits storing file name characters to only those within the original MS-DOS range of values and does not properly support file names in other character encodings, or languages. To address this limitation, this specification will support the following change. D.2 If general purpose bit 11 is unset, the file name and comment SHOULD conform to the original ZIP character encoding. If general purpose bit 11 is set, the filename and comment MUST support The Unicode Standard, Version 4.1.0 or greater using the character encoding form defined by the UTF-8 storage specification. The Unicode Standard is published by The Unicode Consortium (www.unicode.org). UTF-8 encoded data stored within ZIP files is expected to not include a byte order mark (BOM). D.3 Applications MAY choose to supplement this file name storage through the use of the 0x0008 Extra Field. Storage for this optional field is currently undefined, however it will be used to allow storing extended information on source or target encoding that MAY further assist applications with file name, or file content encoding tasks.
Please contact PKWARE with any requirements on how this field SHOULD be used. D.4 The 0x0008 Extra Field storage MAY be used with either setting for general purpose bit 11. Examples of the intended usage for this field is to store whether "modified-UTF-8" (JAVA) is used, or UTF-8-MAC. Similarly, other commonly used character encoding (code page) designations can be indicated through this field. Formalized values for use of the 0x0008 record remain undefined at this time. The definition for the layout of the 0x0008 field will be published when available. Use of the 0x0008 Extra Field provides for storing data within a ZIP file in an encoding other than IBM Code Page 437 or UTF-8. D.5 General purpose bit 11 will not imply any encoding of file content or password. Values defining character encoding for file content or password MUST be stored within the 0x0008 Extended Language Encoding Extra Field. D.6 Ed Gordon of the Info-ZIP group has defined a pair of "extra field" records that can be used to store UTF-8 file name and file comment fields. These records can be used for cases when the general purpose bit 11 method for storing UTF-8 data in the standard file name and comment fields is not desirable. A common case for this alternate method is if backward compatibility with older programs is required. D.7 Definitions for the record structure of these fields are included above in the section on 3rd party mappings for "extra field" records. These records are identified by Header ID's 0x6375 (Info-ZIP Unicode Comment Extra Field) and 0x7075 (Info-ZIP Unicode Path Extra Field). D.8 The choice of which storage method to use when writing a ZIP file is left to the implementation. Developers SHOULD expect that a ZIP file MAY contain either method and SHOULD provide support for reading data in either format. 
Use of general purpose bit 11 reduces storage requirements for file name data by not requiring additional "extra field" data for each file, but can result in older ZIP programs not being able to extract files. Use of the 0x6375 and 0x7075 records will result in a ZIP file that SHOULD always be readable by older ZIP programs, but requires more storage per file to write file name and/or file comment fields. APPENDIX E - AE-x encryption marker ----------------------------------- E.1 AE-x defines an alternate password-based encryption method used in ZIP files that is based on a file encryption utility developed by Dr. Brian Gladman. Information on Dr. Gladman's method is available at http://www.gladman.me.uk/cryptography_technology/fileencrypt/ E.2 AE-x uses AES with CTR (counter mode) and HMAC-SHA1. It defines encryption using key sizes of 128 bits or 256 bits. It does not restrict support for decrypting 192 bits. E.3 This method uses the standard ZIP encryption bit (bit 0) of the general purpose bit flag (section 4.4.4) to indicate a file is encrypted. E.4 The compression method field (section 4.4.5) is set to 99 to indicate a file has been encrypted using this method. E.5 The actual compression method is stored in an extra field structure identified by a Header ID of 0x9901. Information on this record structure can be found at http://www.winzip.com/aes_info.htm. E.6 Two versions are defined for the 0x9901 structure. E.6.1 Version 1 stores the file CRC value in the CRC-32 field (section 4.4.7). E.6.2 Version 2 stores a value of 0 in the CRC-32 field. 
async_zip-0.0.16/examples/actix_multipart.rs000064400000000000000000000053131046102023000173060ustar 00000000000000// Copyright (c) 2022 FL33TW00D (https://github.com/FL33TW00D) // Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE #[cfg(features = "deflate")] mod inner { use async_zip::write::ZipFileWriter; use async_zip::{Compression, ZipEntryBuilder}; use std::path::Path; use actix_multipart::Multipart; use actix_web::{web, App, HttpServer, Responder, ResponseError, Result}; use derive_more::{Display, Error}; use futures::StreamExt; use futures_lite::io::AsyncWriteExt; use tokio::fs::File; use uuid::Uuid; const TMP_DIR: &str = "./tmp/"; #[derive(Debug, Display, Error)] #[display(fmt = "An error occurred during ZIP creation which was logged to stderr.")] struct CreationError; impl ResponseError for CreationError {} async fn do_main() -> std::io::Result<()> { let tmp_path = Path::new(TMP_DIR); if !tmp_path.exists() { tokio::fs::create_dir(tmp_path).await?; } let factory = || App::new().route("/", web::post().to(handler)); HttpServer::new(factory).bind(("127.0.0.1", 8080))?.run().await } async fn handler(multipart: Multipart) -> Result { match create_archive(multipart).await { Ok(name) => Ok(format!("Successfully created archive: {}", name)), Err(err) => { eprintln!("[ERROR] {:?}", err); Err(CreationError) } } } async fn create_archive(mut body: Multipart) -> Result { let archive_name = format!("tmp/{}", Uuid::new_v4()); let mut archive = File::create(archive_name.clone()).await?; let mut writer = ZipFileWriter::new(&mut archive); while let Some(item) = body.next().await { let mut field = item?; let filename = match field.content_disposition().get_filename() { Some(filename) => sanitize_filename::sanitize(filename), None => Uuid::new_v4().to_string(), }; let builder = ZipEntryBuilder::new(filename, Compression::Deflate); let mut entry_writer = 
writer.write_entry_stream(builder).await.unwrap(); while let Some(chunk) = field.next().await { entry_writer.write_all_buf(&mut chunk?).await?; } entry_writer.close().await.unwrap(); } writer.close().await.unwrap(); archive.shutdown().await.unwrap(); Ok(archive_name) } } #[actix_web::main] async fn main() -> std::io::Result<()> { #[cfg(features = "deflate")] { inner::do_main().await?; } Ok(()) } async_zip-0.0.16/examples/cli_compress.rs000064400000000000000000000100631046102023000165550ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) #[tokio::main] async fn main() { #[cfg(features = "deflate")] if let Err(err) = inner::run().await { eprintln!("Error: {}", err); eprintln!("Usage: cli_compress "); std::process::exit(1); } } #[cfg(features = "deflate")] mod inner { use async_zip::base::write::ZipFileWriter; use async_zip::{Compression, ZipEntryBuilder}; use std::path::{Path, PathBuf}; use anyhow::{anyhow, bail, Result}; use futures_lite::io::AsyncReadExt; use tokio::fs::File; async fn run() -> Result<()> { let mut args = std::env::args().skip(1); let input_str = args.next().ok_or(anyhow!("No input file or directory specified."))?; let input_path = Path::new(&input_str); let output_str = args.next().ok_or(anyhow!("No output file specified."))?; let output_path = Path::new(&output_str); let input_pathbuf = input_path.canonicalize().map_err(|_| anyhow!("Unable to canonicalise input path."))?; let input_path = input_pathbuf.as_path(); if output_path.exists() { bail!("The output file specified already exists."); } if !input_path.exists() { bail!("The input file or directory specified doesn't exist."); } let mut output_writer = ZipFileWriter::new(File::create(output_path).await?); if input_path.is_dir() { handle_directory(input_path, &mut output_writer).await?; } else { handle_singular(input_path, &mut output_writer).await?; } output_writer.close().await?; 
println!("Successfully written ZIP file '{}'.", output_path.display()); Ok(()) } async fn handle_singular(input_path: &Path, writer: &mut ZipFileWriter) -> Result<()> { let filename = input_path.file_name().ok_or(anyhow!("Input path terminates in '...'."))?; let filename = filename.to_str().ok_or(anyhow!("Input path not valid UTF-8."))?; write_entry(filename, input_path, writer).await } async fn handle_directory(input_path: &Path, writer: &mut ZipFileWriter) -> Result<()> { let entries = walk_dir(input_path.into()).await?; let input_dir_str = input_path.as_os_str().to_str().ok_or(anyhow!("Input path not valid UTF-8."))?; for entry_path_buf in entries { let entry_path = entry_path_buf.as_path(); let entry_str = entry_path.as_os_str().to_str().ok_or(anyhow!("Directory file path not valid UTF-8."))?; if !entry_str.starts_with(input_dir_str) { bail!("Directory file path does not start with base input directory path."); } let entry_str = &entry_str[input_dir_str.len() + 1..]; write_entry(entry_str, entry_path, writer).await?; } Ok(()) } async fn write_entry(filename: &str, input_path: &Path, writer: &mut ZipFileWriter) -> Result<()> { let mut input_file = File::open(input_path).await?; let input_file_size = input_file.metadata().await?.len() as usize; let mut buffer = Vec::with_capacity(input_file_size); input_file.read_to_end(&mut buffer).await?; let builder = ZipEntryBuilder::new(filename.into(), Compression::Deflate); writer.write_entry_whole(builder, &buffer).await?; Ok(()) } async fn walk_dir(dir: PathBuf) -> Result> { let mut dirs = vec![dir]; let mut files = vec![]; while !dirs.is_empty() { let mut dir_iter = tokio::fs::read_dir(dirs.remove(0)).await?; while let Some(entry) = dir_iter.next_entry().await? 
{ let entry_path_buf = entry.path(); if entry_path_buf.is_dir() { dirs.push(entry_path_buf); } else { files.push(entry_path_buf); } } } Ok(files) } } async_zip-0.0.16/examples/file_extraction.rs000064400000000000000000000067341046102023000172640ustar 00000000000000//! Demonstrates how to safely extract everything from a ZIP file. //! //! Extracting zip files from untrusted sources without proper sanitization //! could be exploited by directory traversal attacks. //! //! //! This example tries to minimize that risk by following the implementation from //! Python's Standard Library. //! //! //! use std::{ env::current_dir, path::{Path, PathBuf}, }; use async_zip::base::read::seek::ZipFileReader; use tokio::fs::{create_dir_all, File, OpenOptions}; use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt}; #[tokio::main] async fn main() { let archive = File::open("example.zip").await.expect("Failed to open zip file"); let out_dir = current_dir().expect("Failed to get current working directory"); unzip_file(archive, &out_dir).await; } /// Returns a relative path without reserved names, redundant separators, ".", or "..". fn sanitize_file_path(path: &str) -> PathBuf { // Replaces backwards slashes path.replace('\\', "/") // Sanitizes each component .split('/') .map(sanitize_filename::sanitize) .collect() } /// Extracts everything from the ZIP archive to the output directory async fn unzip_file(archive: File, out_dir: &Path) { let archive = archive.compat(); let mut reader = ZipFileReader::new(archive).await.expect("Failed to read zip file"); for index in 0..reader.file().entries().len() { let entry = reader.file().entries().get(index).unwrap(); let path = out_dir.join(sanitize_file_path(entry.filename().as_str().unwrap())); // If the filename of the entry ends with '/', it is treated as a directory. // This is implemented by previous versions of this crate and the Python Standard Library. 
// https://docs.rs/async_zip/0.0.8/src/async_zip/read/mod.rs.html#63-65 // https://github.com/python/cpython/blob/820ef62833bd2d84a141adedd9a05998595d6b6d/Lib/zipfile.py#L528 let entry_is_dir = entry.dir().unwrap(); let mut entry_reader = reader.reader_without_entry(index).await.expect("Failed to read ZipEntry"); if entry_is_dir { // The directory may have been created if iteration is out of order. if !path.exists() { create_dir_all(&path).await.expect("Failed to create extracted directory"); } } else { // Creates parent directories. They may not exist if iteration is out of order // or the archive does not contain directory entries. let parent = path.parent().expect("A file entry should have parent directories"); if !parent.is_dir() { create_dir_all(parent).await.expect("Failed to create parent directories"); } let writer = OpenOptions::new() .write(true) .create_new(true) .open(&path) .await .expect("Failed to create extracted file"); futures_lite::io::copy(&mut entry_reader, &mut writer.compat_write()) .await .expect("Failed to copy to extracted file"); // Closes the file and manipulates its metadata here if you wish to preserve its metadata from the archive. } } } async_zip-0.0.16/rustfmt.toml000064400000000000000000000000541046102023000143070ustar 00000000000000max_width = 120 use_small_heuristics = "Max"async_zip-0.0.16/src/base/mod.rs000064400000000000000000000003521046102023000145350ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A base runtime-agnostic implementation using `futures`'s IO types. 
pub mod read; pub mod write; async_zip-0.0.16/src/base/read/io/combined_record.rs000064400000000000000000000055301046102023000204210ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // Copyright (c) 2023 Cognite AS // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::spec::header::{EndOfCentralDirectoryHeader, Zip64EndOfCentralDirectoryRecord}; /// Combines all the fields in EOCDR and Zip64EOCDR into one struct. #[derive(Debug)] pub struct CombinedCentralDirectoryRecord { pub version_made_by: Option, pub version_needed_to_extract: Option, pub disk_number: u32, pub disk_number_start_of_cd: u32, pub num_entries_in_directory_on_disk: u64, pub num_entries_in_directory: u64, pub directory_size: u64, pub offset_of_start_of_directory: u64, pub file_comment_length: u16, } impl CombinedCentralDirectoryRecord { /// Combine an EOCDR with an optional Zip64EOCDR. /// /// Fields that are set to their max value in the EOCDR will be overwritten by the contents of /// the corresponding Zip64EOCDR field. 
pub fn combine(eocdr: EndOfCentralDirectoryHeader, zip64eocdr: Zip64EndOfCentralDirectoryRecord) -> Self { let mut combined = Self::from(&eocdr); if eocdr.disk_num == u16::MAX { combined.disk_number = zip64eocdr.disk_number; } if eocdr.start_cent_dir_disk == u16::MAX { combined.disk_number_start_of_cd = zip64eocdr.disk_number_start_of_cd; } if eocdr.num_of_entries_disk == u16::MAX { combined.num_entries_in_directory_on_disk = zip64eocdr.num_entries_in_directory_on_disk; } if eocdr.num_of_entries == u16::MAX { combined.num_entries_in_directory = zip64eocdr.num_entries_in_directory; } if eocdr.size_cent_dir == u32::MAX { combined.directory_size = zip64eocdr.directory_size; } if eocdr.cent_dir_offset == u32::MAX { combined.offset_of_start_of_directory = zip64eocdr.offset_of_start_of_directory; } combined.version_made_by = Some(zip64eocdr.version_made_by); combined.version_needed_to_extract = Some(zip64eocdr.version_needed_to_extract); combined } } // An implementation for the case of no zip64EOCDR. 
impl From<&EndOfCentralDirectoryHeader> for CombinedCentralDirectoryRecord { fn from(header: &EndOfCentralDirectoryHeader) -> Self { Self { version_made_by: None, version_needed_to_extract: None, disk_number: header.disk_num as u32, disk_number_start_of_cd: header.start_cent_dir_disk as u32, num_entries_in_directory_on_disk: header.num_of_entries_disk as u64, num_entries_in_directory: header.num_of_entries as u64, directory_size: header.size_cent_dir as u64, offset_of_start_of_directory: header.cent_dir_offset as u64, file_comment_length: header.file_comm_length, } } } async_zip-0.0.16/src/base/read/io/compressed.rs000064400000000000000000000077301046102023000174530ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::spec::Compression; use std::pin::Pin; use std::task::{Context, Poll}; #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] use async_compression::futures::bufread; use futures_lite::io::{AsyncBufRead, AsyncRead}; use pin_project::pin_project; /// A wrapping reader which holds concrete types for all respective compression method readers. #[pin_project(project = CompressedReaderProj)] pub(crate) enum CompressedReader { Stored(#[pin] R), #[cfg(feature = "deflate")] Deflate(#[pin] bufread::DeflateDecoder), #[cfg(feature = "deflate64")] Deflate64(#[pin] bufread::Deflate64Decoder), #[cfg(feature = "bzip2")] Bz(#[pin] bufread::BzDecoder), #[cfg(feature = "lzma")] Lzma(#[pin] bufread::LzmaDecoder), #[cfg(feature = "zstd")] Zstd(#[pin] bufread::ZstdDecoder), #[cfg(feature = "xz")] Xz(#[pin] bufread::XzDecoder), } impl CompressedReader where R: AsyncBufRead + Unpin, { /// Constructs a new wrapping reader from a generic [`AsyncBufRead`] implementer. 
pub(crate) fn new(reader: R, compression: Compression) -> Self { match compression { Compression::Stored => CompressedReader::Stored(reader), #[cfg(feature = "deflate")] Compression::Deflate => CompressedReader::Deflate(bufread::DeflateDecoder::new(reader)), #[cfg(feature = "deflate64")] Compression::Deflate64 => CompressedReader::Deflate64(bufread::Deflate64Decoder::new(reader)), #[cfg(feature = "bzip2")] Compression::Bz => CompressedReader::Bz(bufread::BzDecoder::new(reader)), #[cfg(feature = "lzma")] Compression::Lzma => CompressedReader::Lzma(bufread::LzmaDecoder::new(reader)), #[cfg(feature = "zstd")] Compression::Zstd => CompressedReader::Zstd(bufread::ZstdDecoder::new(reader)), #[cfg(feature = "xz")] Compression::Xz => CompressedReader::Xz(bufread::XzDecoder::new(reader)), } } /// Consumes this reader and returns the inner value. pub(crate) fn into_inner(self) -> R { match self { CompressedReader::Stored(inner) => inner, #[cfg(feature = "deflate")] CompressedReader::Deflate(inner) => inner.into_inner(), #[cfg(feature = "deflate64")] CompressedReader::Deflate64(inner) => inner.into_inner(), #[cfg(feature = "bzip2")] CompressedReader::Bz(inner) => inner.into_inner(), #[cfg(feature = "lzma")] CompressedReader::Lzma(inner) => inner.into_inner(), #[cfg(feature = "zstd")] CompressedReader::Zstd(inner) => inner.into_inner(), #[cfg(feature = "xz")] CompressedReader::Xz(inner) => inner.into_inner(), } } } impl AsyncRead for CompressedReader where R: AsyncBufRead + Unpin, { fn poll_read(self: Pin<&mut Self>, c: &mut Context<'_>, b: &mut [u8]) -> Poll> { match self.project() { CompressedReaderProj::Stored(inner) => inner.poll_read(c, b), #[cfg(feature = "deflate")] CompressedReaderProj::Deflate(inner) => inner.poll_read(c, b), #[cfg(feature = "deflate64")] CompressedReaderProj::Deflate64(inner) => inner.poll_read(c, b), #[cfg(feature = "bzip2")] CompressedReaderProj::Bz(inner) => inner.poll_read(c, b), #[cfg(feature = "lzma")] CompressedReaderProj::Lzma(inner) => 
inner.poll_read(c, b), #[cfg(feature = "zstd")] CompressedReaderProj::Zstd(inner) => inner.poll_read(c, b), #[cfg(feature = "xz")] CompressedReaderProj::Xz(inner) => inner.poll_read(c, b), } } } async_zip-0.0.16/src/base/read/io/entry.rs000064400000000000000000000104611046102023000164430ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::read::io::{compressed::CompressedReader, hashed::HashedReader, owned::OwnedReader}; use crate::entry::ZipEntry; use crate::error::{Result, ZipError}; use crate::spec::Compression; use std::pin::Pin; use std::task::{Context, Poll}; use futures_lite::io::{AsyncRead, AsyncReadExt, BufReader, Take}; use pin_project::pin_project; /// A type which encodes that [`ZipEntryReader`] has associated entry data. pub struct WithEntry<'a>(OwnedEntry<'a>); /// A type which encodes that [`ZipEntryReader`] has no associated entry data. pub struct WithoutEntry; /// A ZIP entry reader which may implement decompression. #[pin_project] pub struct ZipEntryReader<'a, R, E> { #[pin] reader: HashedReader>>>, entry: E, } impl<'a, R> ZipEntryReader<'a, R, WithoutEntry> where R: AsyncRead + Unpin, { /// Constructs a new entry reader from its required parameters (incl. an owned R). pub(crate) fn new_with_owned(reader: BufReader, compression: Compression, size: u64) -> Self { let reader = HashedReader::new(CompressedReader::new(OwnedReader::Owned(reader).take(size), compression)); Self { reader, entry: WithoutEntry } } /// Constructs a new entry reader from its required parameters (incl. a mutable borrow of an R). 
pub(crate) fn new_with_borrow(reader: BufReader<&'a mut R>, compression: Compression, size: u64) -> Self { let reader = HashedReader::new(CompressedReader::new(OwnedReader::Borrow(reader).take(size), compression)); Self { reader, entry: WithoutEntry } } pub(crate) fn into_with_entry(self, entry: &'a ZipEntry) -> ZipEntryReader<'a, R, WithEntry<'a>> { ZipEntryReader { reader: self.reader, entry: WithEntry(OwnedEntry::Borrow(entry)) } } pub(crate) fn into_with_entry_owned(self, entry: ZipEntry) -> ZipEntryReader<'a, R, WithEntry<'a>> { ZipEntryReader { reader: self.reader, entry: WithEntry(OwnedEntry::Owned(entry)) } } } impl<'a, R, E> AsyncRead for ZipEntryReader<'a, R, E> where R: AsyncRead + Unpin, { fn poll_read(self: Pin<&mut Self>, c: &mut Context<'_>, b: &mut [u8]) -> Poll> { self.project().reader.poll_read(c, b) } } impl<'a, R, E> ZipEntryReader<'a, R, E> where R: AsyncRead + Unpin, { /// Computes and returns the CRC32 hash of bytes read by this reader so far. /// /// This hash should only be computed once EOF has been reached. pub fn compute_hash(&mut self) -> u32 { self.reader.swap_and_compute_hash() } /// Consumes this reader and returns the inner value. pub(crate) fn into_inner(self) -> R { self.reader.into_inner().into_inner().into_inner().owned_into_inner() } } impl ZipEntryReader<'_, R, WithEntry<'_>> where R: AsyncRead + Unpin, { /// Returns an immutable reference to the associated entry data. pub fn entry(&self) -> &'_ ZipEntry { self.entry.0.entry() } /// Reads all bytes until EOF has been reached, appending them to buf, and verifies the CRC32 values. /// /// This is a helper function synonymous to [`AsyncReadExt::read_to_end()`]. pub async fn read_to_end_checked(&mut self, buf: &mut Vec) -> Result { let read = self.read_to_end(buf).await?; if self.compute_hash() == self.entry.0.entry().crc32() { Ok(read) } else { Err(ZipError::CRC32CheckError) } } /// Reads all bytes until EOF has been reached, placing them into buf, and verifies the CRC32 values. 
/// /// This is a helper function synonymous to [`AsyncReadExt::read_to_string()`]. pub async fn read_to_string_checked(&mut self, buf: &mut String) -> Result { let read = self.read_to_string(buf).await?; if self.compute_hash() == self.entry.0.entry().crc32() { Ok(read) } else { Err(ZipError::CRC32CheckError) } } } enum OwnedEntry<'a> { Owned(ZipEntry), Borrow(&'a ZipEntry), } impl<'a> OwnedEntry<'a> { pub fn entry(&self) -> &'_ ZipEntry { match self { OwnedEntry::Owned(entry) => entry, OwnedEntry::Borrow(entry) => entry, } } } async_zip-0.0.16/src/base/read/io/hashed.rs000064400000000000000000000033521046102023000165370ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::read::io::poll_result_ok; use std::pin::Pin; use std::task::{ready, Context, Poll}; use crc32fast::Hasher; use futures_lite::io::AsyncRead; use pin_project::pin_project; /// A wrapping reader which computes the CRC32 hash of data read via [`AsyncRead`]. #[pin_project] pub(crate) struct HashedReader { #[pin] pub(crate) reader: R, pub(crate) hasher: Hasher, } impl HashedReader where R: AsyncRead + Unpin, { /// Constructs a new wrapping reader from a generic [`AsyncRead`] implementer. pub(crate) fn new(reader: R) -> Self { Self { reader, hasher: Hasher::default() } } /// Swaps the internal hasher and returns the computed CRC32 hash. /// /// The internal hasher is taken and replaced with a newly-constructed one. As a result, this method should only be /// called once EOF has been reached and it's known that no more data will be read, else the computed hash(s) won't /// accurately represent the data read in. pub(crate) fn swap_and_compute_hash(&mut self) -> u32 { std::mem::take(&mut self.hasher).finalize() } /// Consumes this reader and returns the inner value. 
pub(crate) fn into_inner(self) -> R { self.reader } } impl AsyncRead for HashedReader where R: AsyncRead + Unpin, { fn poll_read(self: Pin<&mut Self>, c: &mut Context<'_>, b: &mut [u8]) -> Poll> { let project = self.project(); let written = poll_result_ok!(ready!(project.reader.poll_read(c, b))); project.hasher.update(&b[..written]); Poll::Ready(Ok(written)) } } async_zip-0.0.16/src/base/read/io/locator.rs000064400000000000000000000114271046102023000167500ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! //! //! As with other ZIP libraries, we face the predicament that the end of central directory record may contain a //! variable-length file comment. As a result, we cannot just make the assumption that the start of this record is //! 18 bytes (the length of the EOCDR) offset from the end of the data - we must locate it ourselves. //! //! The `zip-rs` crate handles this by reading in reverse from the end of the data. This involves seeking backwards //! by a single byte each iteration and reading 4 bytes into a u32. Whether this is performant/acceptable within a //! a non-async context, I'm unsure, but it isn't desirable within an async context. Especially since we cannot just //! place a [`BufReader`] infront of the upstream reader (as its internal buffer is invalidated on each seek). //! //! Reading in reverse is still desirable as the use of file comments is limited and they're unlikely to be large. //! //! The below method is one that compromises on these two contention points. Please submit an issue or PR if you know //! of a better algorithm for this (and have tested/verified its performance). 
#[cfg(doc)] use futures_lite::io::BufReader; use crate::error::{Result as ZipResult, ZipError}; use crate::spec::consts::{EOCDR_LENGTH, EOCDR_SIGNATURE, SIGNATURE_LENGTH}; use futures_lite::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, SeekFrom}; /// The buffer size used when locating the EOCDR, equal to 2KiB. const BUFFER_SIZE: usize = 2048; /// The upper bound of where the EOCDR signature cannot be located. const EOCDR_UPPER_BOUND: u64 = EOCDR_LENGTH as u64; /// The lower bound of where the EOCDR signature cannot be located. const EOCDR_LOWER_BOUND: u64 = EOCDR_UPPER_BOUND + SIGNATURE_LENGTH as u64 + u16::MAX as u64; /// Locate the `end of central directory record` offset, if one exists. /// The returned offset excludes the signature (4 bytes) /// /// This method involves buffered reading in reverse and reverse linear searching along those buffers for the EOCDR /// signature. As a result of this buffered approach, we reduce seeks when compared to `zip-rs`'s method by a factor /// of the buffer size. We also then don't have to do individual u32 reads against the upstream reader. /// /// Whilst I haven't done any in-depth benchmarks, when reading a ZIP file with the maximum length comment, this method /// saw a reduction in location time by a factor of 500 when compared with the `zip-rs` method. pub async fn eocdr(mut reader: R) -> ZipResult where R: AsyncRead + AsyncSeek + Unpin, { let length = reader.seek(SeekFrom::End(0)).await?; let signature = &EOCDR_SIGNATURE.to_le_bytes(); let mut buffer: [u8; BUFFER_SIZE] = [0; BUFFER_SIZE]; let mut position = length.saturating_sub((EOCDR_LENGTH + BUFFER_SIZE) as u64); reader.seek(SeekFrom::Start(position)).await?; loop { let read = reader.read(&mut buffer).await?; if let Some(match_index) = reverse_search_buffer(&buffer[..read], signature) { return Ok(position + (match_index + 1) as u64); } // If we hit the start of the data or the lower bound, we're unable to locate the EOCDR. 
if position == 0 || position <= length.saturating_sub(EOCDR_LOWER_BOUND) { return Err(ZipError::UnableToLocateEOCDR); } // To handle the case where the EOCDR signature crosses buffer boundaries, we simply overlap reads by the // signature length. This significantly reduces the complexity of handling partial matches with very little // overhead. position = position.saturating_sub((BUFFER_SIZE - SIGNATURE_LENGTH) as u64); reader.seek(SeekFrom::Start(position)).await?; } } /// A naive reverse linear search along the buffer for the specified signature bytes. /// /// This is already surprisingly performant. For instance, using memchr::memchr() to match for the first byte of the /// signature, and then manual byte comparisons for the remaining signature bytes was actually slower by a factor of /// 2.25. This method was explored as tokio's `read_until()` implementation uses memchr::memchr(). pub(crate) fn reverse_search_buffer(buffer: &[u8], signature: &[u8]) -> Option { 'outer: for index in (0..buffer.len()).rev() { for (signature_index, signature_byte) in signature.iter().rev().enumerate() { if let Some(next_index) = index.checked_sub(signature_index) { if buffer[next_index] != *signature_byte { continue 'outer; } } else { break 'outer; } } return Some(index); } None } async_zip-0.0.16/src/base/read/io/mod.rs000064400000000000000000000030141046102023000160550ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod combined_record; pub(crate) mod compressed; pub(crate) mod entry; pub(crate) mod hashed; pub(crate) mod locator; pub(crate) mod owned; pub use combined_record::CombinedCentralDirectoryRecord; use crate::string::{StringEncoding, ZipString}; use futures_lite::io::{AsyncRead, AsyncReadExt}; /// Read and return a dynamic length string from a reader which impls AsyncRead. 
pub(crate) async fn read_string(reader: R, length: usize, encoding: StringEncoding) -> std::io::Result where R: AsyncRead + Unpin, { Ok(ZipString::new(read_bytes(reader, length).await?, encoding)) } /// Read and return a dynamic length vector of bytes from a reader which impls AsyncRead. pub(crate) async fn read_bytes(reader: R, length: usize) -> std::io::Result> where R: AsyncRead + Unpin, { let mut buffer = Vec::with_capacity(length); reader.take(length as u64).read_to_end(&mut buffer).await?; Ok(buffer) } /// A macro that returns the inner value of an Ok or early-returns in the case of an Err. /// /// This is almost identical to the ? operator but handles the situation when a Result is used in combination with /// Poll (eg. tokio's IO traits such as AsyncRead). macro_rules! poll_result_ok { ($poll:expr) => { match $poll { Ok(inner) => inner, Err(err) => return Poll::Ready(Err(err)), } }; } use poll_result_ok; async_zip-0.0.16/src/base/read/io/owned.rs000064400000000000000000000037521046102023000164230ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use std::pin::Pin; use std::task::{Context, Poll}; use futures_lite::io::{AsyncBufRead, AsyncRead, BufReader}; use pin_project::pin_project; /// A wrapping reader which holds an owned R or a mutable borrow to R. /// /// This is used to represent whether the supplied reader can be acted on concurrently or not (with an owned value /// suggesting that R implements some method of synchronisation & cloning). #[pin_project(project = OwnedReaderProj)] pub(crate) enum OwnedReader<'a, R> { Owned(#[pin] BufReader), Borrow(#[pin] BufReader<&'a mut R>), } impl<'a, R> OwnedReader<'a, R> where R: AsyncRead + Unpin, { /// Consumes an owned reader and returns the inner value. 
pub(crate) fn owned_into_inner(self) -> R { match self { OwnedReader::Owned(inner) => inner.into_inner(), OwnedReader::Borrow(_) => panic!("not OwnedReader::Owned value"), } } } impl<'a, R> AsyncBufRead for OwnedReader<'a, R> where R: AsyncRead + Unpin, { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.project() { OwnedReaderProj::Owned(inner) => inner.poll_fill_buf(cx), OwnedReaderProj::Borrow(inner) => inner.poll_fill_buf(cx), } } fn consume(self: Pin<&mut Self>, amt: usize) { match self.project() { OwnedReaderProj::Owned(inner) => inner.consume(amt), OwnedReaderProj::Borrow(inner) => inner.consume(amt), } } } impl<'a, R> AsyncRead for OwnedReader<'a, R> where R: AsyncRead + Unpin, { fn poll_read(self: Pin<&mut Self>, c: &mut Context<'_>, b: &mut [u8]) -> Poll> { match self.project() { OwnedReaderProj::Owned(inner) => inner.poll_read(c, b), OwnedReaderProj::Borrow(inner) => inner.poll_read(c, b), } } } async_zip-0.0.16/src/base/read/mem.rs000064400000000000000000000120251046102023000154470ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A concurrent ZIP reader which acts over an owned vector of bytes. //! //! Concurrency is achieved as a result of: //! - Wrapping the provided vector of bytes within an [`Arc`] to allow shared ownership. //! - Wrapping this [`Arc`] around a [`Cursor`] when reading (as the [`Arc`] can deref and coerce into a `&[u8]`). //! //! ### Usage //! Unlike the [`seek`] module, we no longer hold a mutable reference to any inner reader which in turn, allows the //! construction of concurrent [`ZipEntryReader`]s. Though, note that each individual [`ZipEntryReader`] cannot be sent //! between thread boundaries due to the masked lifetime requirement. Therefore, the overarching [`ZipFileReader`] //! should be cloned and moved into those contexts when needed. //! //! ### Concurrent Example //! ```no_run //! 
# use async_zip::base::read::mem::ZipFileReader; //! # use async_zip::error::Result; //! # use futures_lite::io::AsyncReadExt; //! # //! async fn run() -> Result<()> { //! let reader = ZipFileReader::new(Vec::new()).await?; //! let result = tokio::join!(read(&reader, 0), read(&reader, 1)); //! //! let data_0 = result.0?; //! let data_1 = result.1?; //! //! // Use data within current scope. //! //! Ok(()) //! } //! //! async fn read(reader: &ZipFileReader, index: usize) -> Result> { //! let mut entry = reader.reader_without_entry(index).await?; //! let mut data = Vec::new(); //! entry.read_to_end(&mut data).await?; //! Ok(data) //! } //! ``` //! //! ### Parallel Example //! ```no_run //! # use async_zip::base::read::mem::ZipFileReader; //! # use async_zip::error::Result; //! # use futures_lite::io::AsyncReadExt; //! # //! async fn run() -> Result<()> { //! let reader = ZipFileReader::new(Vec::new()).await?; //! //! let handle_0 = tokio::spawn(read(reader.clone(), 0)); //! let handle_1 = tokio::spawn(read(reader.clone(), 1)); //! //! let data_0 = handle_0.await.expect("thread panicked")?; //! let data_1 = handle_1.await.expect("thread panicked")?; //! //! // Use data within current scope. //! //! Ok(()) //! } //! //! async fn read(reader: ZipFileReader, index: usize) -> Result> { //! let mut entry = reader.reader_without_entry(index).await?; //! let mut data = Vec::new(); //! entry.read_to_end(&mut data).await?; //! Ok(data) //! } //! ``` #[cfg(doc)] use crate::base::read::seek; use crate::base::read::io::entry::ZipEntryReader; use crate::error::{Result, ZipError}; use crate::file::ZipFile; use std::sync::Arc; use futures_lite::io::{BufReader, Cursor}; use super::io::entry::{WithEntry, WithoutEntry}; struct Inner { data: Vec, file: ZipFile, } // A concurrent ZIP reader which acts over an owned vector of bytes. #[derive(Clone)] pub struct ZipFileReader { inner: Arc, } impl ZipFileReader { /// Constructs a new ZIP reader from an owned vector of bytes. 
pub async fn new(data: Vec) -> Result { let file = crate::base::read::file(Cursor::new(&data)).await?; Ok(ZipFileReader::from_raw_parts(data, file)) } /// Constructs a ZIP reader from an owned vector of bytes and ZIP file information derived from those bytes. /// /// Providing a [`ZipFile`] that wasn't derived from those bytes may lead to inaccurate parsing. pub fn from_raw_parts(data: Vec, file: ZipFile) -> ZipFileReader { ZipFileReader { inner: Arc::new(Inner { data, file }) } } /// Returns this ZIP file's information. pub fn file(&self) -> &ZipFile { &self.inner.file } /// Returns the raw bytes provided to the reader during construction. pub fn data(&self) -> &[u8] { &self.inner.data } /// Returns a new entry reader if the provided index is valid. pub async fn reader_without_entry(&self, index: usize) -> Result, WithoutEntry>> { let stored_entry = self.inner.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut cursor = BufReader::new(Cursor::new(&self.inner.data[..])); stored_entry.seek_to_data_offset(&mut cursor).await?; Ok(ZipEntryReader::new_with_owned( cursor, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), )) } /// Returns a new entry reader if the provided index is valid. pub async fn reader_with_entry(&self, index: usize) -> Result, WithEntry<'_>>> { let stored_entry = self.inner.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut cursor = BufReader::new(Cursor::new(&self.inner.data[..])); stored_entry.seek_to_data_offset(&mut cursor).await?; let reader = ZipEntryReader::new_with_owned( cursor, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), ); Ok(reader.into_with_entry(stored_entry)) } } async_zip-0.0.16/src/base/read/mod.rs000064400000000000000000000312061046102023000154520ustar 00000000000000// Copyright (c) 2022-2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! 
A module which supports reading ZIP files. pub mod mem; pub mod seek; pub mod stream; pub(crate) mod io; use crate::ZipString; // Re-exported as part of the public API. pub use crate::base::read::io::entry::WithEntry; pub use crate::base::read::io::entry::WithoutEntry; pub use crate::base::read::io::entry::ZipEntryReader; use crate::date::ZipDateTime; use crate::entry::{StoredZipEntry, ZipEntry}; use crate::error::{Result, ZipError}; use crate::file::ZipFile; use crate::spec::attribute::AttributeCompatibility; use crate::spec::consts::LFH_LENGTH; use crate::spec::consts::{CDH_SIGNATURE, LFH_SIGNATURE, NON_ZIP64_MAX_SIZE, SIGNATURE_LENGTH, ZIP64_EOCDL_LENGTH}; use crate::spec::header::InfoZipUnicodeCommentExtraField; use crate::spec::header::InfoZipUnicodePathExtraField; use crate::spec::header::{ CentralDirectoryRecord, EndOfCentralDirectoryHeader, ExtraField, LocalFileHeader, Zip64EndOfCentralDirectoryLocator, Zip64EndOfCentralDirectoryRecord, Zip64ExtendedInformationExtraField, }; use crate::spec::Compression; use crate::string::StringEncoding; use crate::base::read::io::CombinedCentralDirectoryRecord; use crate::spec::parse::parse_extra_fields; use futures_lite::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, BufReader, SeekFrom}; /// The max buffer size used when parsing the central directory, equal to 20MiB. const MAX_CD_BUFFER_SIZE: usize = 20 * 1024 * 1024; pub(crate) async fn file(mut reader: R) -> Result where R: AsyncRead + AsyncSeek + Unpin, { // First find and parse the EOCDR. let eocdr_offset = crate::base::read::io::locator::eocdr(&mut reader).await?; reader.seek(SeekFrom::Start(eocdr_offset)).await?; let eocdr = EndOfCentralDirectoryHeader::from_reader(&mut reader).await?; let comment = io::read_string(&mut reader, eocdr.file_comm_length.into(), crate::StringEncoding::Utf8).await?; // Check the 20 bytes before the EOCDR for the Zip64 EOCDL, plus an extra 4 bytes because the offset // does not include the signature. 
If the ECODL exists we are dealing with a Zip64 file. let (eocdr, zip64) = match eocdr_offset.checked_sub(ZIP64_EOCDL_LENGTH + SIGNATURE_LENGTH as u64) { None => (CombinedCentralDirectoryRecord::from(&eocdr), false), Some(offset) => { reader.seek(SeekFrom::Start(offset)).await?; let zip64_locator = Zip64EndOfCentralDirectoryLocator::try_from_reader(&mut reader).await?; match zip64_locator { Some(locator) => { reader.seek(SeekFrom::Start(locator.relative_offset + SIGNATURE_LENGTH as u64)).await?; let zip64_eocdr = Zip64EndOfCentralDirectoryRecord::from_reader(&mut reader).await?; (CombinedCentralDirectoryRecord::combine(eocdr, zip64_eocdr), true) } None => (CombinedCentralDirectoryRecord::from(&eocdr), false), } } }; // Outdated feature so unlikely to ever make it into this crate. if eocdr.disk_number != eocdr.disk_number_start_of_cd || eocdr.num_entries_in_directory != eocdr.num_entries_in_directory_on_disk { return Err(ZipError::FeatureNotSupported("Spanned/split files")); } // Find and parse the central directory. reader.seek(SeekFrom::Start(eocdr.offset_of_start_of_directory)).await?; // To avoid lots of small reads to `reader` when parsing the central directory, we use a BufReader that can read the whole central directory at once. // Because `eocdr.offset_of_start_of_directory` is a u64, we use MAX_CD_BUFFER_SIZE to prevent very large buffer sizes. 
let buf = BufReader::with_capacity(std::cmp::min(eocdr.offset_of_start_of_directory as _, MAX_CD_BUFFER_SIZE), reader); let entries = crate::base::read::cd(buf, eocdr.num_entries_in_directory, zip64).await?; Ok(ZipFile { entries, comment, zip64 }) } pub(crate) async fn cd(mut reader: R, num_of_entries: u64, zip64: bool) -> Result> where R: AsyncRead + Unpin, { let num_of_entries = num_of_entries.try_into().map_err(|_| ZipError::TargetZip64NotSupported)?; let mut entries = Vec::with_capacity(num_of_entries); for _ in 0..num_of_entries { let entry = cd_record(&mut reader, zip64).await?; entries.push(entry); } Ok(entries) } pub(crate) fn get_zip64_extra_field(extra_fields: &[ExtraField]) -> Option<&Zip64ExtendedInformationExtraField> { for field in extra_fields { if let ExtraField::Zip64ExtendedInformation(zip64field) = field { return Some(zip64field); } } None } pub(crate) fn get_zip64_extra_field_mut( extra_fields: &mut [ExtraField], ) -> Option<&mut Zip64ExtendedInformationExtraField> { for field in extra_fields { if let ExtraField::Zip64ExtendedInformation(zip64field) = field { return Some(zip64field); } } None } fn get_combined_sizes( uncompressed_size: u32, compressed_size: u32, extra_field: &Option<&Zip64ExtendedInformationExtraField>, ) -> Result<(u64, u64)> { let mut uncompressed_size = uncompressed_size as u64; let mut compressed_size = compressed_size as u64; if let Some(extra_field) = extra_field { if let Some(s) = extra_field.uncompressed_size { uncompressed_size = s; } if let Some(s) = extra_field.compressed_size { compressed_size = s; } } Ok((uncompressed_size, compressed_size)) } pub(crate) async fn cd_record(mut reader: R, _zip64: bool) -> Result where R: AsyncRead + Unpin, { crate::utils::assert_signature(&mut reader, CDH_SIGNATURE).await?; let header = CentralDirectoryRecord::from_reader(&mut reader).await?; let header_size = (SIGNATURE_LENGTH + LFH_LENGTH) as u64; let trailing_size = header.file_name_length as u64 + header.extra_field_length as 
u64; let filename_basic = io::read_bytes(&mut reader, header.file_name_length.into()).await?; let compression = Compression::try_from(header.compression)?; let extra_field = io::read_bytes(&mut reader, header.extra_field_length.into()).await?; let extra_fields = parse_extra_fields(extra_field, header.uncompressed_size, header.compressed_size)?; let comment_basic = io::read_bytes(reader, header.file_comment_length.into()).await?; let zip64_extra_field = get_zip64_extra_field(&extra_fields); let (uncompressed_size, compressed_size) = get_combined_sizes(header.uncompressed_size, header.compressed_size, &zip64_extra_field)?; let mut file_offset = header.lh_offset as u64; if let Some(zip64_extra_field) = zip64_extra_field { if file_offset == NON_ZIP64_MAX_SIZE as u64 { if let Some(offset) = zip64_extra_field.relative_header_offset { file_offset = offset; } } } let filename = detect_filename(filename_basic, header.flags.filename_unicode, extra_fields.as_ref()); let comment = detect_comment(comment_basic, header.flags.filename_unicode, extra_fields.as_ref()); let entry = ZipEntry { filename, compression, #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] compression_level: async_compression::Level::Default, attribute_compatibility: AttributeCompatibility::Unix, // FIXME: Default to Unix for the moment crc32: header.crc, uncompressed_size, compressed_size, last_modification_date: ZipDateTime { date: header.mod_date, time: header.mod_time }, internal_file_attribute: header.inter_attr, external_file_attribute: header.exter_attr, extra_fields, comment, }; // general_purpose_flag: header.flags, Ok(StoredZipEntry { entry, file_offset, header_size: header_size + trailing_size }) } pub(crate) async fn lfh(mut reader: R) -> Result> where R: AsyncRead + Unpin, { let signature = { let mut buffer = [0; 4]; reader.read_exact(&mut buffer).await?; u32::from_le_bytes(buffer) }; match signature { actual if 
actual == LFH_SIGNATURE => (), actual if actual == CDH_SIGNATURE => return Ok(None), actual => return Err(ZipError::UnexpectedHeaderError(actual, LFH_SIGNATURE)), }; let header = LocalFileHeader::from_reader(&mut reader).await?; let filename_basic = io::read_bytes(&mut reader, header.file_name_length.into()).await?; let compression = Compression::try_from(header.compression)?; let extra_field = io::read_bytes(&mut reader, header.extra_field_length.into()).await?; let extra_fields = parse_extra_fields(extra_field, header.uncompressed_size, header.compressed_size)?; let zip64_extra_field = get_zip64_extra_field(&extra_fields); let (uncompressed_size, compressed_size) = get_combined_sizes(header.uncompressed_size, header.compressed_size, &zip64_extra_field)?; if header.flags.data_descriptor { return Err(ZipError::FeatureNotSupported( "stream reading entries with data descriptors (planned to be reintroduced)", )); } if header.flags.encrypted { return Err(ZipError::FeatureNotSupported("encryption")); } let filename = detect_filename(filename_basic, header.flags.filename_unicode, extra_fields.as_ref()); let entry = ZipEntry { filename, compression, #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] compression_level: async_compression::Level::Default, attribute_compatibility: AttributeCompatibility::Unix, // FIXME: Default to Unix for the moment crc32: header.crc, uncompressed_size, compressed_size, last_modification_date: ZipDateTime { date: header.mod_date, time: header.mod_time }, internal_file_attribute: 0, external_file_attribute: 0, extra_fields, comment: String::new().into(), }; Ok(Some(entry)) } fn detect_comment(basic: Vec, basic_is_utf8: bool, extra_fields: &[ExtraField]) -> ZipString { if basic_is_utf8 { ZipString::new(basic, StringEncoding::Utf8) } else { let unicode_extra = extra_fields.iter().find_map(|field| match field { 
ExtraField::InfoZipUnicodeComment(InfoZipUnicodeCommentExtraField::V1 { crc32, unicode }) => { if *crc32 == crc32fast::hash(&basic) { Some(std::string::String::from_utf8(unicode.clone())) } else { None } } _ => None, }); if let Some(Ok(s)) = unicode_extra { ZipString::new_with_alternative(s, basic) } else { // Do not treat as UTF-8 if UTF-8 flags are not set, // some string in MBCS may be valid UTF-8 in form, but they are not in truth. if basic.is_ascii() { // SAFETY: // a valid ASCII string is always a valid UTF-8 string unsafe { std::string::String::from_utf8_unchecked(basic).into() } } else { ZipString::new(basic, StringEncoding::Raw) } } } } fn detect_filename(basic: Vec, basic_is_utf8: bool, extra_fields: &[ExtraField]) -> ZipString { if basic_is_utf8 { ZipString::new(basic, StringEncoding::Utf8) } else { let unicode_extra = extra_fields.iter().find_map(|field| match field { ExtraField::InfoZipUnicodePath(InfoZipUnicodePathExtraField::V1 { crc32, unicode }) => { if *crc32 == crc32fast::hash(&basic) { Some(std::string::String::from_utf8(unicode.clone())) } else { None } } _ => None, }); if let Some(Ok(s)) = unicode_extra { ZipString::new_with_alternative(s, basic) } else { // Do not treat as UTF-8 if UTF-8 flags are not set, // some string in MBCS may be valid UTF-8 in form, but they are not in truth. if basic.is_ascii() { // SAFETY: // a valid ASCII string is always a valid UTF-8 string unsafe { std::string::String::from_utf8_unchecked(basic).into() } } else { ZipString::new(basic, StringEncoding::Raw) } } } } async_zip-0.0.16/src/base/read/seek.rs000064400000000000000000000112331046102023000156200ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A ZIP reader which acts over a seekable source. //! //! ### Example //! ```no_run //! # use async_zip::base::read::seek::ZipFileReader; //! # use async_zip::error::Result; //! 
# use futures_lite::io::AsyncReadExt; //! # use tokio::fs::File; //! # use tokio_util::compat::TokioAsyncReadCompatExt; //! # //! async fn run() -> Result<()> { //! let mut data = File::open("./foo.zip").await?; //! let mut reader = ZipFileReader::new(data.compat()).await?; //! //! let mut data = Vec::new(); //! let mut entry = reader.reader_without_entry(0).await?; //! entry.read_to_end(&mut data).await?; //! //! // Use data within current scope. //! //! Ok(()) //! } //! ``` use crate::base::read::io::entry::ZipEntryReader; use crate::error::{Result, ZipError}; use crate::file::ZipFile; #[cfg(feature = "tokio")] use crate::tokio::read::seek::ZipFileReader as TokioZipFileReader; use futures_lite::io::{AsyncRead, AsyncSeek, BufReader}; #[cfg(feature = "tokio")] use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; use super::io::entry::{WithEntry, WithoutEntry}; /// A ZIP reader which acts over a seekable source. #[derive(Clone)] pub struct ZipFileReader { reader: R, file: ZipFile, } impl ZipFileReader where R: AsyncRead + AsyncSeek + Unpin, { /// Constructs a new ZIP reader from a seekable source. pub async fn new(mut reader: R) -> Result> { let file = crate::base::read::file(&mut reader).await?; Ok(ZipFileReader::from_raw_parts(reader, file)) } /// Constructs a ZIP reader from a seekable source and ZIP file information derived from that source. /// /// Providing a [`ZipFile`] that wasn't derived from that source may lead to inaccurate parsing. pub fn from_raw_parts(reader: R, file: ZipFile) -> ZipFileReader { ZipFileReader { reader, file } } /// Returns this ZIP file's information. pub fn file(&self) -> &ZipFile { &self.file } /// Returns a mutable reference to the inner seekable source. /// /// Swapping the source (eg. via std::mem operations) may lead to inaccurate parsing. pub fn inner_mut(&mut self) -> &mut R { &mut self.reader } /// Returns the inner seekable source by consuming self. 
pub fn into_inner(self) -> R { self.reader } /// Returns a new entry reader if the provided index is valid. pub async fn reader_without_entry(&mut self, index: usize) -> Result> { let stored_entry = self.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut reader = BufReader::new(&mut self.reader); stored_entry.seek_to_data_offset(&mut reader).await?; Ok(ZipEntryReader::new_with_borrow( reader, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), )) } /// Returns a new entry reader if the provided index is valid. pub async fn reader_with_entry(&mut self, index: usize) -> Result>> { let stored_entry = self.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut reader = BufReader::new(&mut self.reader); stored_entry.seek_to_data_offset(&mut reader).await?; let reader = ZipEntryReader::new_with_borrow( reader, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), ); Ok(reader.into_with_entry(stored_entry)) } /// Returns a new entry reader if the provided index is valid. /// Consumes self pub async fn into_entry<'a>(self, index: usize) -> Result> where R: 'a, { let stored_entry = self.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut reader = BufReader::new(self.reader); stored_entry.seek_to_data_offset(&mut reader).await?; Ok(ZipEntryReader::new_with_owned( reader, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), )) } } #[cfg(feature = "tokio")] impl ZipFileReader> where R: tokio::io::AsyncRead + tokio::io::AsyncSeek + Unpin, { /// Constructs a new tokio-specific ZIP reader from a seekable source. 
pub async fn with_tokio(reader: R) -> Result> { let mut reader = reader.compat(); let file = crate::base::read::file(&mut reader).await?; Ok(ZipFileReader::from_raw_parts(reader, file)) } } async_zip-0.0.16/src/base/read/stream.rs000064400000000000000000000135351046102023000161730ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A ZIP reader which acts over a non-seekable source. //! //! # API Design //! As opposed to other readers provided by this crate, it's important that the data of an entry is fully read before //! the proceeding entry is read. This is as a result of not being able to seek forwards or backwards, so we must end //! up at the start of the next entry. //! //! **We encode this invariant within Rust's type system so that it can be enforced at compile time.** //! //! This requires that any transition methods between these encoded types consume the reader and provide a new owned //! reader back. This is certainly something to keep in mind when working with this reader, but idiomatic code can //! still be produced nevertheless. //! //! # Considerations //! As the central directory of a ZIP archive is stored at the end of it, a non-seekable reader doesn't have access //! to it. We have to rely on information provided within the local file header which may not be accurate or complete. //! This results in: //! - The inability to read internally stored ZIP archives when using the Stored compression method. //! - No file comment being available (defaults to an empty string). //! - No internal or external file attributes being available (defaults to 0). //! - The extra field data potentially being inconsistent with what's stored in the central directory. //! - None of the following being available when the entry was written with a data descriptor (defaults to 0): //! - CRC //! - compressed size //! - uncompressed size //! //! # Example //! ```no_run //! 
# use futures_lite::io::Cursor; //! # use async_zip::error::Result; //! # use async_zip::base::read::stream::ZipFileReader; //! # //! # async fn run() -> Result<()> { //! let mut zip = ZipFileReader::new(Cursor::new([0; 0])); //! //! // Print the name of every file in a ZIP archive. //! while let Some(entry) = zip.next_with_entry().await? { //! println!("File: {}", entry.reader().entry().filename().as_str().unwrap()); //! zip = entry.skip().await?; //! } //! # //! # Ok(()) //! # } //! ``` use crate::base::read::io::entry::ZipEntryReader; use crate::error::Result; use crate::error::ZipError; #[cfg(feature = "tokio")] use crate::tokio::read::stream::Ready as TokioReady; use futures_lite::io::AsyncReadExt; use futures_lite::io::Take; use futures_lite::io::{AsyncRead, BufReader}; #[cfg(feature = "tokio")] use tokio_util::compat::TokioAsyncReadCompatExt; use super::io::entry::WithEntry; use super::io::entry::WithoutEntry; /// A type which encodes that [`ZipFileReader`] is ready to open a new entry. pub struct Ready(R); /// A type which encodes that [`ZipFileReader`] is currently reading an entry. pub struct Reading<'a, R, E>(ZipEntryReader<'a, R, E>); /// A ZIP reader which acts over a non-seekable source. /// /// See the [module-level docs](.) for more information. #[derive(Clone)] pub struct ZipFileReader(S); impl<'a, R> ZipFileReader> where R: AsyncRead + Unpin + 'a, { /// Constructs a new ZIP reader from a non-seekable source. pub fn new(reader: R) -> Self { Self(Ready(reader)) } /// Opens the next entry for reading if the central directory hasn’t yet been reached. pub async fn next_without_entry(mut self) -> Result, WithoutEntry>>>> { let entry = match crate::base::read::lfh(&mut self.0 .0).await? 
{ Some(entry) => entry, None => return Ok(None), }; let reader = BufReader::new(self.0 .0.take(entry.compressed_size)); let reader = ZipEntryReader::new_with_owned(reader, entry.compression, entry.compressed_size); Ok(Some(ZipFileReader(Reading(reader)))) } /// Opens the next entry for reading if the central directory hasn’t yet been reached. pub async fn next_with_entry(mut self) -> Result, WithEntry<'a>>>>> { let entry = match crate::base::read::lfh(&mut self.0 .0).await? { Some(entry) => entry, None => return Ok(None), }; let reader = BufReader::new(self.0 .0.take(entry.compressed_size)); let reader = ZipEntryReader::new_with_owned(reader, entry.compression, entry.compressed_size); Ok(Some(ZipFileReader(Reading(reader.into_with_entry_owned(entry))))) } /// Consumes the `ZipFileReader` returning the original `reader` pub async fn into_inner(self) -> R { self.0 .0 } } #[cfg(feature = "tokio")] impl ZipFileReader> where R: tokio::io::AsyncRead + Unpin, { /// Constructs a new tokio-specific ZIP reader from a non-seekable source. pub fn with_tokio(reader: R) -> ZipFileReader> { Self(Ready(reader.compat())) } } impl<'a, R, E> ZipFileReader, E>> where R: AsyncRead + Unpin, { /// Returns an immutable reference to the inner entry reader. pub fn reader(&self) -> &ZipEntryReader<'a, Take, E> { &self.0 .0 } /// Returns a mutable reference to the inner entry reader. pub fn reader_mut(&mut self) -> &mut ZipEntryReader<'a, Take, E> { &mut self.0 .0 } /// Converts the reader back into the Ready state if EOF has been reached. pub async fn done(mut self) -> Result>> { if self.0 .0.read(&mut [0; 1]).await? != 0 { return Err(ZipError::EOFNotReached); } Ok(ZipFileReader(Ready(self.0 .0.into_inner().into_inner()))) } /// Reads until EOF and converts the reader back into the Ready state. pub async fn skip(mut self) -> Result>> { while self.0 .0.read(&mut [0; 2048]).await? 
!= 0 {} Ok(ZipFileReader(Ready(self.0 .0.into_inner().into_inner()))) } } async_zip-0.0.16/src/base/write/compressed_writer.rs000064400000000000000000000147241046102023000206600ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::write::io::offset::AsyncOffsetWriter; use crate::spec::Compression; use std::io::Error; use std::pin::Pin; use std::task::{Context, Poll}; #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] use async_compression::futures::write; use futures_lite::io::AsyncWrite; pub enum CompressedAsyncWriter<'b, W: AsyncWrite + Unpin> { Stored(ShutdownIgnoredWriter<&'b mut AsyncOffsetWriter>), #[cfg(feature = "deflate")] Deflate(write::DeflateEncoder>>), #[cfg(feature = "bzip2")] Bz(write::BzEncoder>>), #[cfg(feature = "lzma")] Lzma(write::LzmaEncoder>>), #[cfg(feature = "zstd")] Zstd(write::ZstdEncoder>>), #[cfg(feature = "xz")] Xz(write::XzEncoder>>), } impl<'b, W: AsyncWrite + Unpin> CompressedAsyncWriter<'b, W> { pub fn from_raw(writer: &'b mut AsyncOffsetWriter, compression: Compression) -> Self { match compression { Compression::Stored => CompressedAsyncWriter::Stored(ShutdownIgnoredWriter(writer)), #[cfg(feature = "deflate")] Compression::Deflate => { CompressedAsyncWriter::Deflate(write::DeflateEncoder::new(ShutdownIgnoredWriter(writer))) } #[cfg(feature = "deflate64")] Compression::Deflate64 => panic!("writing deflate64 is not supported"), #[cfg(feature = "bzip2")] Compression::Bz => CompressedAsyncWriter::Bz(write::BzEncoder::new(ShutdownIgnoredWriter(writer))), #[cfg(feature = "lzma")] Compression::Lzma => CompressedAsyncWriter::Lzma(write::LzmaEncoder::new(ShutdownIgnoredWriter(writer))), #[cfg(feature = "zstd")] Compression::Zstd => CompressedAsyncWriter::Zstd(write::ZstdEncoder::new(ShutdownIgnoredWriter(writer))), #[cfg(feature = "xz")] Compression::Xz => 
CompressedAsyncWriter::Xz(write::XzEncoder::new(ShutdownIgnoredWriter(writer))), } } pub fn into_inner(self) -> &'b mut AsyncOffsetWriter { match self { CompressedAsyncWriter::Stored(inner) => inner.into_inner(), #[cfg(feature = "deflate")] CompressedAsyncWriter::Deflate(inner) => inner.into_inner().into_inner(), #[cfg(feature = "bzip2")] CompressedAsyncWriter::Bz(inner) => inner.into_inner().into_inner(), #[cfg(feature = "lzma")] CompressedAsyncWriter::Lzma(inner) => inner.into_inner().into_inner(), #[cfg(feature = "zstd")] CompressedAsyncWriter::Zstd(inner) => inner.into_inner().into_inner(), #[cfg(feature = "xz")] CompressedAsyncWriter::Xz(inner) => inner.into_inner().into_inner(), } } } impl<'b, W: AsyncWrite + Unpin> AsyncWrite for CompressedAsyncWriter<'b, W> { fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { match *self { CompressedAsyncWriter::Stored(ref mut inner) => Pin::new(inner).poll_write(cx, buf), #[cfg(feature = "deflate")] CompressedAsyncWriter::Deflate(ref mut inner) => Pin::new(inner).poll_write(cx, buf), #[cfg(feature = "bzip2")] CompressedAsyncWriter::Bz(ref mut inner) => Pin::new(inner).poll_write(cx, buf), #[cfg(feature = "lzma")] CompressedAsyncWriter::Lzma(ref mut inner) => Pin::new(inner).poll_write(cx, buf), #[cfg(feature = "zstd")] CompressedAsyncWriter::Zstd(ref mut inner) => Pin::new(inner).poll_write(cx, buf), #[cfg(feature = "xz")] CompressedAsyncWriter::Xz(ref mut inner) => Pin::new(inner).poll_write(cx, buf), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { match *self { CompressedAsyncWriter::Stored(ref mut inner) => Pin::new(inner).poll_flush(cx), #[cfg(feature = "deflate")] CompressedAsyncWriter::Deflate(ref mut inner) => Pin::new(inner).poll_flush(cx), #[cfg(feature = "bzip2")] CompressedAsyncWriter::Bz(ref mut inner) => Pin::new(inner).poll_flush(cx), #[cfg(feature = "lzma")] CompressedAsyncWriter::Lzma(ref mut inner) => Pin::new(inner).poll_flush(cx), #[cfg(feature = 
"zstd")] CompressedAsyncWriter::Zstd(ref mut inner) => Pin::new(inner).poll_flush(cx), #[cfg(feature = "xz")] CompressedAsyncWriter::Xz(ref mut inner) => Pin::new(inner).poll_flush(cx), } } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { match *self { CompressedAsyncWriter::Stored(ref mut inner) => Pin::new(inner).poll_close(cx), #[cfg(feature = "deflate")] CompressedAsyncWriter::Deflate(ref mut inner) => Pin::new(inner).poll_close(cx), #[cfg(feature = "bzip2")] CompressedAsyncWriter::Bz(ref mut inner) => Pin::new(inner).poll_close(cx), #[cfg(feature = "lzma")] CompressedAsyncWriter::Lzma(ref mut inner) => Pin::new(inner).poll_close(cx), #[cfg(feature = "zstd")] CompressedAsyncWriter::Zstd(ref mut inner) => Pin::new(inner).poll_close(cx), #[cfg(feature = "xz")] CompressedAsyncWriter::Xz(ref mut inner) => Pin::new(inner).poll_close(cx), } } } pub struct ShutdownIgnoredWriter(W); impl ShutdownIgnoredWriter { pub fn into_inner(self) -> W { self.0 } } impl AsyncWrite for ShutdownIgnoredWriter { fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { Pin::new(&mut self.0).poll_write(cx, buf) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { Pin::new(&mut self.0).poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { Poll::Ready(Ok(())) } } async_zip-0.0.16/src/base/write/entry_stream.rs000064400000000000000000000277141046102023000176370ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::write::compressed_writer::CompressedAsyncWriter; use crate::base::write::get_or_put_info_zip_unicode_comment_extra_field_mut; use crate::base::write::get_or_put_info_zip_unicode_path_extra_field_mut; use crate::base::write::io::offset::AsyncOffsetWriter; use crate::base::write::CentralDirectoryEntry; use crate::base::write::ZipFileWriter; use crate::entry::ZipEntry; use 
crate::error::{Result, Zip64ErrorCase, ZipError}; use crate::spec::extra_field::ExtraFieldAsBytes; use crate::spec::header::InfoZipUnicodeCommentExtraField; use crate::spec::header::InfoZipUnicodePathExtraField; use crate::spec::header::{ CentralDirectoryRecord, ExtraField, GeneralPurposeFlag, HeaderId, LocalFileHeader, Zip64ExtendedInformationExtraField, }; use crate::string::StringEncoding; use std::io::Error; use std::pin::Pin; use std::task::{Context, Poll}; use crate::base::read::get_zip64_extra_field_mut; use crate::spec::consts::{NON_ZIP64_MAX_NUM_FILES, NON_ZIP64_MAX_SIZE}; use crc32fast::Hasher; use futures_lite::io::{AsyncWrite, AsyncWriteExt}; /// An entry writer which supports the streaming of data (ie. the writing of unknown size or data at runtime). /// /// # Note /// - This writer cannot be manually constructed; instead, use [`ZipFileWriter::write_entry_stream()`]. /// - [`EntryStreamWriter::close()`] must be called before a stream writer goes out of scope. /// - Utilities for working with [`AsyncWrite`] values are provided by [`AsyncWriteExt`]. pub struct EntryStreamWriter<'b, W: AsyncWrite + Unpin> { writer: AsyncOffsetWriter>, cd_entries: &'b mut Vec, entry: ZipEntry, hasher: Hasher, lfh: LocalFileHeader, lfh_offset: u64, data_offset: u64, force_no_zip64: bool, /// To write back to the original writer if zip64 is required. 
is_zip64: &'b mut bool, } impl<'b, W: AsyncWrite + Unpin> EntryStreamWriter<'b, W> { pub(crate) async fn from_raw( writer: &'b mut ZipFileWriter, mut entry: ZipEntry, ) -> Result> { let lfh_offset = writer.writer.offset(); let lfh = EntryStreamWriter::write_lfh(writer, &mut entry).await?; let data_offset = writer.writer.offset(); let force_no_zip64 = writer.force_no_zip64; let cd_entries = &mut writer.cd_entries; let is_zip64 = &mut writer.is_zip64; let writer = AsyncOffsetWriter::new(CompressedAsyncWriter::from_raw(&mut writer.writer, entry.compression())); Ok(EntryStreamWriter { writer, cd_entries, entry, lfh, lfh_offset, data_offset, hasher: Hasher::new(), force_no_zip64, is_zip64, }) } async fn write_lfh(writer: &'b mut ZipFileWriter, entry: &mut ZipEntry) -> Result { // Always emit a zip64 extended field, even if we don't need it, because we *might* need it. // If we are forcing no zip, we will have to error later if the file is too large. let (lfh_compressed, lfh_uncompressed) = if !writer.force_no_zip64 { if !writer.is_zip64 { writer.is_zip64 = true; } entry.extra_fields.push(ExtraField::Zip64ExtendedInformation(Zip64ExtendedInformationExtraField { header_id: HeaderId::ZIP64_EXTENDED_INFORMATION_EXTRA_FIELD, uncompressed_size: Some(entry.uncompressed_size), compressed_size: Some(entry.compressed_size), relative_header_offset: None, disk_start_number: None, })); (NON_ZIP64_MAX_SIZE, NON_ZIP64_MAX_SIZE) } else { if entry.compressed_size > NON_ZIP64_MAX_SIZE as u64 || entry.uncompressed_size > NON_ZIP64_MAX_SIZE as u64 { return Err(ZipError::Zip64Needed(Zip64ErrorCase::LargeFile)); } (entry.compressed_size as u32, entry.uncompressed_size as u32) }; let utf8_without_alternative = entry.filename().is_utf8_without_alternative() && entry.comment().is_utf8_without_alternative(); if !utf8_without_alternative { if matches!(entry.filename().encoding(), StringEncoding::Utf8) { let u_file_name = entry.filename().as_bytes().to_vec(); if !u_file_name.is_empty() { let 
basic_crc32 = crc32fast::hash(entry.filename().alternative().unwrap_or_else(|| entry.filename().as_bytes())); let upath_field = get_or_put_info_zip_unicode_path_extra_field_mut(entry.extra_fields.as_mut()); if let InfoZipUnicodePathExtraField::V1 { crc32, unicode } = upath_field { *crc32 = basic_crc32; *unicode = u_file_name; } } } if matches!(entry.comment().encoding(), StringEncoding::Utf8) { let u_comment = entry.comment().as_bytes().to_vec(); if !u_comment.is_empty() { let basic_crc32 = crc32fast::hash(entry.comment().alternative().unwrap_or_else(|| entry.comment().as_bytes())); let ucom_field = get_or_put_info_zip_unicode_comment_extra_field_mut(entry.extra_fields.as_mut()); if let InfoZipUnicodeCommentExtraField::V1 { crc32, unicode } = ucom_field { *crc32 = basic_crc32; *unicode = u_comment; } } } } let filename_basic = entry.filename().alternative().unwrap_or_else(|| entry.filename().as_bytes()); let lfh = LocalFileHeader { compressed_size: lfh_compressed, uncompressed_size: lfh_uncompressed, compression: entry.compression().into(), crc: entry.crc32, extra_field_length: entry .extra_fields() .count_bytes() .try_into() .map_err(|_| ZipError::ExtraFieldTooLarge)?, file_name_length: filename_basic.len().try_into().map_err(|_| ZipError::FileNameTooLarge)?, mod_time: entry.last_modification_date().time, mod_date: entry.last_modification_date().date, version: crate::spec::version::as_needed_to_extract(entry), flags: GeneralPurposeFlag { data_descriptor: true, encrypted: false, filename_unicode: utf8_without_alternative, }, }; writer.writer.write_all(&crate::spec::consts::LFH_SIGNATURE.to_le_bytes()).await?; writer.writer.write_all(&lfh.as_slice()).await?; writer.writer.write_all(filename_basic).await?; writer.writer.write_all(&entry.extra_fields().as_bytes()).await?; Ok(lfh) } /// Consumes this entry writer and completes all closing tasks. /// /// This includes: /// - Finalising the CRC32 hash value for the written data. 
/// - Calculating the compressed and uncompressed byte sizes. /// - Constructing a central directory header. /// - Pushing that central directory header to the [`ZipFileWriter`]'s store. /// /// Failure to call this function before going out of scope would result in a corrupted ZIP file. pub async fn close(mut self) -> Result<()> { self.writer.close().await?; let crc = self.hasher.finalize(); let uncompressed_size = self.writer.offset(); let inner_writer = self.writer.into_inner().into_inner(); let compressed_size = inner_writer.offset() - self.data_offset; let (cdr_compressed_size, cdr_uncompressed_size, lh_offset) = if self.force_no_zip64 { if uncompressed_size > NON_ZIP64_MAX_SIZE as u64 || compressed_size > NON_ZIP64_MAX_SIZE as u64 || self.lfh_offset > NON_ZIP64_MAX_SIZE as u64 { return Err(ZipError::Zip64Needed(Zip64ErrorCase::LargeFile)); } (uncompressed_size as u32, compressed_size as u32, self.lfh_offset as u32) } else { // When streaming an entry, we are always using a zip64 field. match get_zip64_extra_field_mut(&mut self.entry.extra_fields) { // This case shouldn't be necessary but is included for completeness. 
None => { self.entry.extra_fields.push(ExtraField::Zip64ExtendedInformation( Zip64ExtendedInformationExtraField { header_id: HeaderId::ZIP64_EXTENDED_INFORMATION_EXTRA_FIELD, uncompressed_size: Some(uncompressed_size), compressed_size: Some(compressed_size), relative_header_offset: Some(self.lfh_offset), disk_start_number: None, }, )); } Some(zip64) => { zip64.uncompressed_size = Some(uncompressed_size); zip64.compressed_size = Some(compressed_size); zip64.relative_header_offset = Some(self.lfh_offset); } } self.lfh.extra_field_length = self.entry.extra_fields().count_bytes().try_into().map_err(|_| ZipError::ExtraFieldTooLarge)?; (NON_ZIP64_MAX_SIZE, NON_ZIP64_MAX_SIZE, NON_ZIP64_MAX_SIZE) }; inner_writer.write_all(&crate::spec::consts::DATA_DESCRIPTOR_SIGNATURE.to_le_bytes()).await?; inner_writer.write_all(&crc.to_le_bytes()).await?; inner_writer.write_all(&cdr_compressed_size.to_le_bytes()).await?; inner_writer.write_all(&cdr_uncompressed_size.to_le_bytes()).await?; let comment_basic = self.entry.comment().alternative().unwrap_or_else(|| self.entry.comment().as_bytes()); let cdh = CentralDirectoryRecord { compressed_size: cdr_compressed_size, uncompressed_size: cdr_uncompressed_size, crc, v_made_by: crate::spec::version::as_made_by(), v_needed: self.lfh.version, compression: self.lfh.compression, extra_field_length: self.lfh.extra_field_length, file_name_length: self.lfh.file_name_length, file_comment_length: comment_basic.len().try_into().map_err(|_| ZipError::CommentTooLarge)?, mod_time: self.lfh.mod_time, mod_date: self.lfh.mod_date, flags: self.lfh.flags, disk_start: 0, inter_attr: self.entry.internal_file_attribute(), exter_attr: self.entry.external_file_attribute(), lh_offset, }; self.cd_entries.push(CentralDirectoryEntry { header: cdh, entry: self.entry }); // Ensure that we can fit this many files in this archive if forcing no zip64 if self.cd_entries.len() > NON_ZIP64_MAX_NUM_FILES as usize { if self.force_no_zip64 { return 
Err(ZipError::Zip64Needed(Zip64ErrorCase::TooManyFiles)); } if !*self.is_zip64 { *self.is_zip64 = true; } } Ok(()) } } impl<'a, W: AsyncWrite + Unpin> AsyncWrite for EntryStreamWriter<'a, W> { fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { let poll = Pin::new(&mut self.writer).poll_write(cx, buf); if let Poll::Ready(Ok(written)) = poll { self.hasher.update(&buf[0..written]); } poll } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { Pin::new(&mut self.writer).poll_flush(cx) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { Pin::new(&mut self.writer).poll_close(cx) } } async_zip-0.0.16/src/base/write/entry_whole.rs000064400000000000000000000264411046102023000174560ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::write::get_or_put_info_zip_unicode_comment_extra_field_mut; use crate::base::write::get_or_put_info_zip_unicode_path_extra_field_mut; use crate::base::write::{CentralDirectoryEntry, ZipFileWriter}; use crate::entry::ZipEntry; use crate::error::{Result, Zip64ErrorCase, ZipError}; use crate::spec::extra_field::Zip64ExtendedInformationExtraFieldBuilder; use crate::spec::header::{InfoZipUnicodeCommentExtraField, InfoZipUnicodePathExtraField}; use crate::spec::{ extra_field::ExtraFieldAsBytes, header::{CentralDirectoryRecord, ExtraField, GeneralPurposeFlag, LocalFileHeader}, Compression, }; use crate::StringEncoding; #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] use futures_lite::io::Cursor; use crate::spec::consts::{NON_ZIP64_MAX_NUM_FILES, NON_ZIP64_MAX_SIZE}; #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] use async_compression::futures::write; use futures_lite::io::{AsyncWrite, AsyncWriteExt}; pub struct EntryWholeWriter<'b, 'c, W: AsyncWrite + Unpin> { 
writer: &'b mut ZipFileWriter, entry: ZipEntry, data: &'c [u8], } impl<'b, 'c, W: AsyncWrite + Unpin> EntryWholeWriter<'b, 'c, W> { pub fn from_raw(writer: &'b mut ZipFileWriter, entry: ZipEntry, data: &'c [u8]) -> Self { Self { writer, entry, data } } pub async fn write(mut self) -> Result<()> { let mut _compressed_data: Option> = None; let compressed_data = match self.entry.compression() { Compression::Stored => self.data, #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] _ => { _compressed_data = Some(compress(self.entry.compression(), self.data, self.entry.compression_level).await); _compressed_data.as_ref().unwrap() } }; let mut zip64_extra_field_builder = None; let (lfh_uncompressed_size, lfh_compressed_size) = if self.data.len() as u64 > NON_ZIP64_MAX_SIZE as u64 || compressed_data.len() as u64 > NON_ZIP64_MAX_SIZE as u64 { if self.writer.force_no_zip64 { return Err(ZipError::Zip64Needed(Zip64ErrorCase::LargeFile)); } if !self.writer.is_zip64 { self.writer.is_zip64 = true; } zip64_extra_field_builder = Some( Zip64ExtendedInformationExtraFieldBuilder::new() .sizes(compressed_data.len() as u64, self.data.len() as u64), ); (NON_ZIP64_MAX_SIZE, NON_ZIP64_MAX_SIZE) } else { (self.data.len() as u32, compressed_data.len() as u32) }; let lh_offset = if self.writer.writer.offset() > NON_ZIP64_MAX_SIZE as u64 { if self.writer.force_no_zip64 { return Err(ZipError::Zip64Needed(Zip64ErrorCase::LargeFile)); } if !self.writer.is_zip64 { self.writer.is_zip64 = true; } if let Some(zip64_extra_field) = zip64_extra_field_builder { zip64_extra_field_builder = Some(zip64_extra_field.relative_header_offset(self.writer.writer.offset())); } else { zip64_extra_field_builder = Some( Zip64ExtendedInformationExtraFieldBuilder::new() .relative_header_offset(self.writer.writer.offset()), ); } NON_ZIP64_MAX_SIZE } else { self.writer.writer.offset() as u32 }; if let Some(builder) = zip64_extra_field_builder { if 
!builder.eof_only() { self.entry.extra_fields.push(ExtraField::Zip64ExtendedInformation(builder.build()?)); zip64_extra_field_builder = None; } else { zip64_extra_field_builder = Some(builder); } } let utf8_without_alternative = self.entry.filename().is_utf8_without_alternative() && self.entry.comment().is_utf8_without_alternative(); if !utf8_without_alternative { if matches!(self.entry.filename().encoding(), StringEncoding::Utf8) { let u_file_name = self.entry.filename().as_bytes().to_vec(); if !u_file_name.is_empty() { let basic_crc32 = crc32fast::hash( self.entry.filename().alternative().unwrap_or_else(|| self.entry.filename().as_bytes()), ); let upath_field = get_or_put_info_zip_unicode_path_extra_field_mut(self.entry.extra_fields.as_mut()); if let InfoZipUnicodePathExtraField::V1 { crc32, unicode } = upath_field { *crc32 = basic_crc32; *unicode = u_file_name; } } } if matches!(self.entry.comment().encoding(), StringEncoding::Utf8) { let u_comment = self.entry.comment().as_bytes().to_vec(); if !u_comment.is_empty() { let basic_crc32 = crc32fast::hash( self.entry.comment().alternative().unwrap_or_else(|| self.entry.comment().as_bytes()), ); let ucom_field = get_or_put_info_zip_unicode_comment_extra_field_mut(self.entry.extra_fields.as_mut()); if let InfoZipUnicodeCommentExtraField::V1 { crc32, unicode } = ucom_field { *crc32 = basic_crc32; *unicode = u_comment; } } } } let filename_basic = self.entry.filename().alternative().unwrap_or_else(|| self.entry.filename().as_bytes()); let comment_basic = self.entry.comment().alternative().unwrap_or_else(|| self.entry.comment().as_bytes()); let lf_header = LocalFileHeader { compressed_size: lfh_compressed_size, uncompressed_size: lfh_uncompressed_size, compression: self.entry.compression().into(), crc: crc32fast::hash(self.data), extra_field_length: self .entry .extra_fields() .count_bytes() .try_into() .map_err(|_| ZipError::ExtraFieldTooLarge)?, file_name_length: filename_basic.len().try_into().map_err(|_| 
ZipError::FileNameTooLarge)?, mod_time: self.entry.last_modification_date().time, mod_date: self.entry.last_modification_date().date, version: crate::spec::version::as_needed_to_extract(&self.entry), flags: GeneralPurposeFlag { data_descriptor: false, encrypted: false, filename_unicode: utf8_without_alternative, }, }; let mut header = CentralDirectoryRecord { v_made_by: crate::spec::version::as_made_by(), v_needed: lf_header.version, compressed_size: lf_header.compressed_size, uncompressed_size: lf_header.uncompressed_size, compression: lf_header.compression, crc: lf_header.crc, extra_field_length: lf_header.extra_field_length, file_name_length: lf_header.file_name_length, file_comment_length: comment_basic.len().try_into().map_err(|_| ZipError::CommentTooLarge)?, mod_time: lf_header.mod_time, mod_date: lf_header.mod_date, flags: lf_header.flags, disk_start: 0, inter_attr: self.entry.internal_file_attribute(), exter_attr: self.entry.external_file_attribute(), lh_offset, }; self.writer.writer.write_all(&crate::spec::consts::LFH_SIGNATURE.to_le_bytes()).await?; self.writer.writer.write_all(&lf_header.as_slice()).await?; self.writer.writer.write_all(filename_basic).await?; self.writer.writer.write_all(&self.entry.extra_fields().as_bytes()).await?; self.writer.writer.write_all(compressed_data).await?; if let Some(builder) = zip64_extra_field_builder { self.entry.extra_fields.push(ExtraField::Zip64ExtendedInformation(builder.build()?)); header.extra_field_length = self.entry.extra_fields().count_bytes().try_into().map_err(|_| ZipError::ExtraFieldTooLarge)?; } self.writer.cd_entries.push(CentralDirectoryEntry { header, entry: self.entry }); // Ensure that we can fit this many files in this archive if forcing no zip64 if self.writer.cd_entries.len() > NON_ZIP64_MAX_NUM_FILES as usize { if self.writer.force_no_zip64 { return Err(ZipError::Zip64Needed(Zip64ErrorCase::TooManyFiles)); } if !self.writer.is_zip64 { self.writer.is_zip64 = true; } } Ok(()) } } #[cfg(any( feature 
= "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] async fn compress(compression: Compression, data: &[u8], level: async_compression::Level) -> Vec { // TODO: Reduce reallocations of Vec by making a lower-bound estimate of the length reduction and // pre-initialising the Vec to that length. Then truncate() to the actual number of bytes written. match compression { #[cfg(feature = "deflate")] Compression::Deflate => { let mut writer = write::DeflateEncoder::with_quality(Cursor::new(Vec::new()), level); writer.write_all(data).await.unwrap(); writer.close().await.unwrap(); writer.into_inner().into_inner() } #[cfg(feature = "deflate64")] Compression::Deflate64 => panic!("compressing deflate64 is not supported"), #[cfg(feature = "bzip2")] Compression::Bz => { let mut writer = write::BzEncoder::with_quality(Cursor::new(Vec::new()), level); writer.write_all(data).await.unwrap(); writer.close().await.unwrap(); writer.into_inner().into_inner() } #[cfg(feature = "lzma")] Compression::Lzma => { let mut writer = write::LzmaEncoder::with_quality(Cursor::new(Vec::new()), level); writer.write_all(data).await.unwrap(); writer.close().await.unwrap(); writer.into_inner().into_inner() } #[cfg(feature = "xz")] Compression::Xz => { let mut writer = write::XzEncoder::with_quality(Cursor::new(Vec::new()), level); writer.write_all(data).await.unwrap(); writer.close().await.unwrap(); writer.into_inner().into_inner() } #[cfg(feature = "zstd")] Compression::Zstd => { let mut writer = write::ZstdEncoder::with_quality(Cursor::new(Vec::new()), level); writer.write_all(data).await.unwrap(); writer.close().await.unwrap(); writer.into_inner().into_inner() } _ => unreachable!(), } } async_zip-0.0.16/src/base/write/io/mod.rs000064400000000000000000000002341046102023000162750ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod 
offset; async_zip-0.0.16/src/base/write/io/offset.rs000064400000000000000000000035651046102023000170160ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use std::io::{Error, IoSlice}; use std::pin::Pin; use std::task::{Context, Poll}; use futures_lite::io::AsyncWrite; use pin_project::pin_project; /// A wrapper around an [`AsyncWrite`] implementation which tracks the current byte offset. #[pin_project(project = OffsetWriterProj)] pub struct AsyncOffsetWriter { #[pin] inner: W, offset: u64, } impl AsyncOffsetWriter where W: AsyncWrite + Unpin, { /// Constructs a new wrapper from an inner [`AsyncWrite`] writer. pub fn new(inner: W) -> Self { Self { inner, offset: 0 } } /// Returns the current byte offset. pub fn offset(&self) -> u64 { self.offset } /// Consumes this wrapper and returns the inner [`AsyncWrite`] writer. pub fn into_inner(self) -> W { self.inner } pub fn inner_mut(&mut self) -> &mut W { &mut self.inner } } impl AsyncWrite for AsyncOffsetWriter where W: AsyncWrite + Unpin, { fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { let this = self.project(); let poll = this.inner.poll_write(cx, buf); if let Poll::Ready(Ok(inner)) = &poll { *this.offset += *inner as u64; } poll } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { self.project().inner.poll_flush(cx) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { self.project().inner.poll_close(cx) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[IoSlice<'_>], ) -> Poll> { self.project().inner.poll_write_vectored(cx, bufs) } } async_zip-0.0.16/src/base/write/mod.rs000064400000000000000000000250731046102023000156760ustar 00000000000000// Copyright (c) 2021-2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A module which supports writing ZIP files. //! //! 
# Example //! ### Whole data (u8 slice) //! ```no_run //! # #[cfg(feature = "deflate")] //! # { //! # use async_zip::{Compression, ZipEntryBuilder, base::write::ZipFileWriter}; //! # use async_zip::error::ZipError; //! # //! # async fn run() -> Result<(), ZipError> { //! let mut writer = ZipFileWriter::new(Vec::::new()); //! //! let data = b"This is an example file."; //! let opts = ZipEntryBuilder::new(String::from("foo.txt").into(), Compression::Deflate); //! //! writer.write_entry_whole(opts, data).await?; //! writer.close().await?; //! # Ok(()) //! # } //! # } //! ``` //! ### Stream data (unknown size & data) //! ```no_run //! # #[cfg(feature = "deflate")] //! # { //! # use async_zip::{Compression, ZipEntryBuilder, base::write::ZipFileWriter}; //! # use std::io::Cursor; //! # use async_zip::error::ZipError; //! # use futures_lite::io::AsyncWriteExt; //! # use tokio_util::compat::TokioAsyncWriteCompatExt; //! # //! # async fn run() -> Result<(), ZipError> { //! let mut writer = ZipFileWriter::new(Vec::::new()); //! //! let data = b"This is an example file."; //! let opts = ZipEntryBuilder::new(String::from("bar.txt").into(), Compression::Deflate); //! //! let mut entry_writer = writer.write_entry_stream(opts).await?; //! entry_writer.write_all(data).await.unwrap(); //! //! entry_writer.close().await?; //! writer.close().await?; //! # Ok(()) //! # } //! # } //! 
``` pub(crate) mod compressed_writer; pub(crate) mod entry_stream; pub(crate) mod entry_whole; pub(crate) mod io; pub use entry_stream::EntryStreamWriter; #[cfg(feature = "tokio")] use tokio_util::compat::{Compat, TokioAsyncWriteCompatExt}; use crate::entry::ZipEntry; use crate::error::Result; use crate::spec::extra_field::ExtraFieldAsBytes; use crate::spec::header::{ CentralDirectoryRecord, EndOfCentralDirectoryHeader, ExtraField, InfoZipUnicodeCommentExtraField, InfoZipUnicodePathExtraField, Zip64EndOfCentralDirectoryLocator, Zip64EndOfCentralDirectoryRecord, }; #[cfg(feature = "tokio")] use crate::tokio::write::ZipFileWriter as TokioZipFileWriter; use entry_whole::EntryWholeWriter; use io::offset::AsyncOffsetWriter; use crate::spec::consts::{NON_ZIP64_MAX_NUM_FILES, NON_ZIP64_MAX_SIZE}; use futures_lite::io::{AsyncWrite, AsyncWriteExt}; pub(crate) struct CentralDirectoryEntry { pub header: CentralDirectoryRecord, pub entry: ZipEntry, } /// A ZIP file writer which acts over AsyncWrite implementers. /// /// # Note /// - [`ZipFileWriter::close()`] must be called before a stream writer goes out of scope. pub struct ZipFileWriter { pub(crate) writer: AsyncOffsetWriter, pub(crate) cd_entries: Vec, /// If true, will error if a Zip64 struct must be written. force_no_zip64: bool, /// Whether to write Zip64 end of directory structs. pub(crate) is_zip64: bool, comment_opt: Option, } impl ZipFileWriter { /// Construct a new ZIP file writer from a mutable reference to a writer. pub fn new(writer: W) -> Self { Self { writer: AsyncOffsetWriter::new(writer), cd_entries: Vec::new(), comment_opt: None, is_zip64: false, force_no_zip64: false, } } /// Force the ZIP writer to operate in non-ZIP64 mode. /// If any files would need ZIP64, an error will be raised. pub fn force_no_zip64(mut self) -> Self { self.force_no_zip64 = true; self } /// Force the ZIP writer to emit Zip64 structs at the end of the archive. /// Zip64 extended fields will only be written if needed. 
pub fn force_zip64(mut self) -> Self { self.is_zip64 = true; self } /// Write a new ZIP entry of known size and data. pub async fn write_entry_whole>(&mut self, entry: E, data: &[u8]) -> Result<()> { EntryWholeWriter::from_raw(self, entry.into(), data).write().await } /// Write an entry of unknown size and data via streaming (ie. using a data descriptor). /// The generated Local File Header will be invalid, with no compressed size, uncompressed size, /// and a null CRC. This might cause problems with the destination reader. pub async fn write_entry_stream>(&mut self, entry: E) -> Result> { EntryStreamWriter::from_raw(self, entry.into()).await } /// Set the ZIP file comment. pub fn comment(&mut self, comment: String) { self.comment_opt = Some(comment); } /// Returns a mutable reference to the inner writer. /// /// Care should be taken when using this inner writer as doing so may invalidate internal state of this writer. pub fn inner_mut(&mut self) -> &mut W { self.writer.inner_mut() } /// Consumes this ZIP writer and completes all closing tasks. /// /// This includes: /// - Writing all central directory headers. /// - Writing the end of central directory header. /// - Writing the file comment. /// /// Failure to call this function before going out of scope would result in a corrupted ZIP file. 
pub async fn close(mut self) -> Result { let cd_offset = self.writer.offset(); for entry in &self.cd_entries { let filename_basic = entry.entry.filename().alternative().unwrap_or_else(|| entry.entry.filename().as_bytes()); let comment_basic = entry.entry.comment().alternative().unwrap_or_else(|| entry.entry.comment().as_bytes()); self.writer.write_all(&crate::spec::consts::CDH_SIGNATURE.to_le_bytes()).await?; self.writer.write_all(&entry.header.as_slice()).await?; self.writer.write_all(filename_basic).await?; self.writer.write_all(&entry.entry.extra_fields().as_bytes()).await?; self.writer.write_all(comment_basic).await?; } let central_directory_size = self.writer.offset() - cd_offset; let central_directory_size_u32 = if central_directory_size > NON_ZIP64_MAX_SIZE as u64 { NON_ZIP64_MAX_SIZE } else { central_directory_size as u32 }; let num_entries_in_directory = self.cd_entries.len() as u64; let num_entries_in_directory_u16 = if num_entries_in_directory > NON_ZIP64_MAX_NUM_FILES as u64 { NON_ZIP64_MAX_NUM_FILES } else { num_entries_in_directory as u16 }; let cd_offset_u32 = if cd_offset > NON_ZIP64_MAX_SIZE as u64 { if self.force_no_zip64 { return Err(crate::error::ZipError::Zip64Needed(crate::error::Zip64ErrorCase::LargeFile)); } else { self.is_zip64 = true; } NON_ZIP64_MAX_SIZE } else { cd_offset as u32 }; // Add the zip64 EOCDR and EOCDL if we are in zip64 mode. 
if self.is_zip64 { let eocdr_offset = self.writer.offset(); let eocdr = Zip64EndOfCentralDirectoryRecord { size_of_zip64_end_of_cd_record: 44, version_made_by: crate::spec::version::as_made_by(), version_needed_to_extract: 46, disk_number: 0, disk_number_start_of_cd: 0, num_entries_in_directory_on_disk: num_entries_in_directory, num_entries_in_directory, directory_size: central_directory_size, offset_of_start_of_directory: cd_offset, }; self.writer.write_all(&crate::spec::consts::ZIP64_EOCDR_SIGNATURE.to_le_bytes()).await?; self.writer.write_all(&eocdr.as_bytes()).await?; let eocdl = Zip64EndOfCentralDirectoryLocator { number_of_disk_with_start_of_zip64_end_of_central_directory: 0, relative_offset: eocdr_offset, total_number_of_disks: 1, }; self.writer.write_all(&crate::spec::consts::ZIP64_EOCDL_SIGNATURE.to_le_bytes()).await?; self.writer.write_all(&eocdl.as_bytes()).await?; } let header = EndOfCentralDirectoryHeader { disk_num: 0, start_cent_dir_disk: 0, num_of_entries_disk: num_entries_in_directory_u16, num_of_entries: num_entries_in_directory_u16, size_cent_dir: central_directory_size_u32, cent_dir_offset: cd_offset_u32, file_comm_length: self.comment_opt.as_ref().map(|v| v.len() as u16).unwrap_or_default(), }; self.writer.write_all(&crate::spec::consts::EOCDR_SIGNATURE.to_le_bytes()).await?; self.writer.write_all(&header.as_slice()).await?; if let Some(comment) = self.comment_opt { self.writer.write_all(comment.as_bytes()).await?; } Ok(self.writer.into_inner()) } } #[cfg(feature = "tokio")] impl ZipFileWriter> where W: tokio::io::AsyncWrite + Unpin, { /// Construct a new ZIP file writer from a mutable reference to a writer. 
pub fn with_tokio(writer: W) -> TokioZipFileWriter { Self { writer: AsyncOffsetWriter::new(writer.compat_write()), cd_entries: Vec::new(), comment_opt: None, is_zip64: false, force_no_zip64: false, } } } pub(crate) fn get_or_put_info_zip_unicode_path_extra_field_mut( extra_fields: &mut Vec, ) -> &mut InfoZipUnicodePathExtraField { if !extra_fields.iter().any(|field| matches!(field, ExtraField::InfoZipUnicodePath(_))) { extra_fields .push(ExtraField::InfoZipUnicodePath(InfoZipUnicodePathExtraField::V1 { crc32: 0, unicode: vec![] })); } for field in extra_fields.iter_mut() { if let ExtraField::InfoZipUnicodePath(extra_field) = field { return extra_field; } } panic!("InfoZipUnicodePathExtraField not found after insertion") } pub(crate) fn get_or_put_info_zip_unicode_comment_extra_field_mut( extra_fields: &mut Vec, ) -> &mut InfoZipUnicodeCommentExtraField { if !extra_fields.iter().any(|field| matches!(field, ExtraField::InfoZipUnicodeComment(_))) { extra_fields .push(ExtraField::InfoZipUnicodeComment(InfoZipUnicodeCommentExtraField::V1 { crc32: 0, unicode: vec![] })); } for field in extra_fields.iter_mut() { if let ExtraField::InfoZipUnicodeComment(extra_field) = field { return extra_field; } } panic!("InfoZipUnicodeCommentExtraField not found after insertion") } async_zip-0.0.16/src/date.rs000064400000000000000000000051411046102023000137620ustar 00000000000000// Copyright (c) 2021-2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) #[cfg(feature = "chrono")] use chrono::{DateTime, Datelike, LocalResult, TimeZone, Timelike, Utc}; // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#446 // https://learn.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-dosdatetimetovarianttime /// A date and time stored as per the MS-DOS representation used by ZIP files. 
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Hash)] pub struct ZipDateTime { pub(crate) date: u16, pub(crate) time: u16, } impl ZipDateTime { /// Returns the year of this date & time. pub fn year(&self) -> i32 { (((self.date & 0xFE00) >> 9) + 1980).into() } /// Returns the month of this date & time. pub fn month(&self) -> u32 { ((self.date & 0x1E0) >> 5).into() } /// Returns the day of this date & time. pub fn day(&self) -> u32 { (self.date & 0x1F).into() } /// Returns the hour of this date & time. pub fn hour(&self) -> u32 { ((self.time & 0xF800) >> 11).into() } /// Returns the minute of this date & time. pub fn minute(&self) -> u32 { ((self.time & 0x7E0) >> 5).into() } /// Returns the second of this date & time. /// /// Note that MS-DOS has a maximum granularity of two seconds. pub fn second(&self) -> u32 { ((self.time & 0x1F) << 1).into() } /// Constructs chrono's [`DateTime`] representation of this date & time. /// /// Note that this requires the `chrono` feature. #[cfg(feature = "chrono")] pub fn as_chrono(&self) -> LocalResult> { Utc.with_ymd_and_hms(self.year(), self.month(), self.day(), self.hour(), self.minute(), self.second()) } /// Constructs this date & time from chrono's [`DateTime`] representation. /// /// Note that this requires the `chrono` feature. 
#[cfg(feature = "chrono")] pub fn from_chrono(dt: &DateTime) -> Self { let year: u16 = (((dt.date_naive().year() - 1980) << 9) & 0xFE00).try_into().unwrap(); let month: u16 = ((dt.date_naive().month() << 5) & 0x1E0).try_into().unwrap(); let day: u16 = (dt.date_naive().day() & 0x1F).try_into().unwrap(); let hour: u16 = ((dt.time().hour() << 11) & 0xF800).try_into().unwrap(); let min: u16 = ((dt.time().minute() << 5) & 0x7E0).try_into().unwrap(); let second: u16 = ((dt.time().second() >> 1) & 0x1F).try_into().unwrap(); ZipDateTime { date: year | month | day, time: hour | min | second } } } async_zip-0.0.16/src/entry/builder.rs000064400000000000000000000076621046102023000156460ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::entry::ZipEntry; use crate::spec::{attribute::AttributeCompatibility, header::ExtraField, Compression}; use crate::{date::ZipDateTime, string::ZipString}; /// A builder for [`ZipEntry`]. pub struct ZipEntryBuilder(pub(crate) ZipEntry); impl From for ZipEntryBuilder { fn from(entry: ZipEntry) -> Self { Self(entry) } } impl ZipEntryBuilder { /// Constructs a new builder which defines the raw underlying data of a ZIP entry. /// /// A filename and compression method are needed to construct the builder as minimal parameters. pub fn new(filename: ZipString, compression: Compression) -> Self { Self(ZipEntry::new(filename, compression)) } /// Sets the entry's filename. pub fn filename(mut self, filename: ZipString) -> Self { self.0.filename = filename; self } /// Sets the entry's compression method. pub fn compression(mut self, compression: Compression) -> Self { self.0.compression = compression; self } /// Set a size hint for the file, to be written into the local file header. /// Unlikely to be useful except for the case of streaming files to be Store'd. 
/// This size hint does not affect the central directory, nor does it affect whole files. pub fn size, M: Into>(mut self, compressed_size: N, uncompressed_size: M) -> Self { self.0.compressed_size = compressed_size.into(); self.0.uncompressed_size = uncompressed_size.into(); self } /// Set the deflate compression option. /// /// If the compression type isn't deflate, this option has no effect. #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] pub fn deflate_option(mut self, option: crate::DeflateOption) -> Self { self.0.compression_level = option.into_level(); self } /// Sets the entry's attribute host compatibility. pub fn attribute_compatibility(mut self, compatibility: AttributeCompatibility) -> Self { self.0.attribute_compatibility = compatibility; self } /// Sets the entry's last modification date. pub fn last_modification_date(mut self, date: ZipDateTime) -> Self { self.0.last_modification_date = date; self } /// Sets the entry's internal file attribute. pub fn internal_file_attribute(mut self, attribute: u16) -> Self { self.0.internal_file_attribute = attribute; self } /// Sets the entry's external file attribute. pub fn external_file_attribute(mut self, attribute: u32) -> Self { self.0.external_file_attribute = attribute; self } /// Sets the entry's extra field data. pub fn extra_fields(mut self, field: Vec) -> Self { self.0.extra_fields = field; self } /// Sets the entry's file comment. pub fn comment(mut self, comment: ZipString) -> Self { self.0.comment = comment; self } /// Sets the entry's Unix permissions mode. /// /// If the attribute host compatibility isn't set to Unix, this will have no effect. pub fn unix_permissions(mut self, mode: u16) -> Self { if matches!(self.0.attribute_compatibility, AttributeCompatibility::Unix) { self.0.external_file_attribute = (self.0.external_file_attribute & 0xFFFF) | (mode as u32) << 16; } self } /// Consumes this builder and returns a final [`ZipEntry`]. 
/// /// This is equivalent to: /// ``` /// # use async_zip::{ZipEntry, ZipEntryBuilder, Compression}; /// # /// # let builder = ZipEntryBuilder::new(String::from("foo.bar").into(), Compression::Stored); /// let entry: ZipEntry = builder.into(); /// ``` pub fn build(self) -> ZipEntry { self.into() } } async_zip-0.0.16/src/entry/mod.rs000064400000000000000000000156761046102023000150030ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub mod builder; use std::ops::Deref; use futures_lite::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, SeekFrom}; use crate::entry::builder::ZipEntryBuilder; use crate::error::{Result, ZipError}; use crate::spec::{ attribute::AttributeCompatibility, consts::LFH_SIGNATURE, header::{ExtraField, LocalFileHeader}, Compression, }; use crate::{string::ZipString, ZipDateTime}; /// An immutable store of data about a ZIP entry. /// /// This type cannot be directly constructed so instead, the [`ZipEntryBuilder`] must be used. Internally this builder /// stores a [`ZipEntry`] so conversions between these two types via the [`From`] implementations will be /// non-allocating. 
#[derive(Clone, Debug)] pub struct ZipEntry { pub(crate) filename: ZipString, pub(crate) compression: Compression, #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] pub(crate) compression_level: async_compression::Level, pub(crate) crc32: u32, pub(crate) uncompressed_size: u64, pub(crate) compressed_size: u64, pub(crate) attribute_compatibility: AttributeCompatibility, pub(crate) last_modification_date: ZipDateTime, pub(crate) internal_file_attribute: u16, pub(crate) external_file_attribute: u32, pub(crate) extra_fields: Vec, pub(crate) comment: ZipString, } impl From for ZipEntry { fn from(builder: ZipEntryBuilder) -> Self { builder.0 } } impl ZipEntry { pub(crate) fn new(filename: ZipString, compression: Compression) -> Self { ZipEntry { filename, compression, #[cfg(any( feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] compression_level: async_compression::Level::Default, crc32: 0, uncompressed_size: 0, compressed_size: 0, attribute_compatibility: AttributeCompatibility::Unix, last_modification_date: ZipDateTime::default(), internal_file_attribute: 0, external_file_attribute: 0, extra_fields: Vec::new(), comment: String::new().into(), } } /// Returns the entry's filename. /// /// ## Note /// This will return the raw filename stored during ZIP creation. If calling this method on entries retrieved from /// untrusted ZIP files, the filename should be sanitised before being used as a path to prevent [directory /// traversal attacks](https://en.wikipedia.org/wiki/Directory_traversal_attack). pub fn filename(&self) -> &ZipString { &self.filename } /// Returns the entry's compression method. pub fn compression(&self) -> Compression { self.compression } /// Returns the entry's CRC32 value. pub fn crc32(&self) -> u32 { self.crc32 } /// Returns the entry's uncompressed size. 
pub fn uncompressed_size(&self) -> u64 { self.uncompressed_size } /// Returns the entry's compressed size. pub fn compressed_size(&self) -> u64 { self.compressed_size } /// Returns the entry's attribute's host compatibility. pub fn attribute_compatibility(&self) -> AttributeCompatibility { self.attribute_compatibility } /// Returns the entry's last modification time & date. pub fn last_modification_date(&self) -> &ZipDateTime { &self.last_modification_date } /// Returns the entry's internal file attribute. pub fn internal_file_attribute(&self) -> u16 { self.internal_file_attribute } /// Returns the entry's external file attribute pub fn external_file_attribute(&self) -> u32 { self.external_file_attribute } /// Returns the entry's extra field data. pub fn extra_fields(&self) -> &[ExtraField] { &self.extra_fields } /// Returns the entry's file comment. pub fn comment(&self) -> &ZipString { &self.comment } /// Returns the entry's integer-based UNIX permissions. /// /// # Note /// This will return None if the attribute host compatibility is not listed as Unix. pub fn unix_permissions(&self) -> Option { if !matches!(self.attribute_compatibility, AttributeCompatibility::Unix) { return None; } Some(((self.external_file_attribute) >> 16) as u16) } /// Returns whether or not the entry represents a directory. pub fn dir(&self) -> Result { Ok(self.filename.as_str()?.ends_with('/')) } } /// An immutable store of data about how a ZIP entry is stored within a specific archive. /// /// Besides storing archive independent information like the size and timestamp it can also be used to query /// information about how the entry is stored in an archive. #[derive(Clone)] pub struct StoredZipEntry { pub(crate) entry: ZipEntry, // pub(crate) general_purpose_flag: GeneralPurposeFlag, pub(crate) file_offset: u64, pub(crate) header_size: u64, } impl StoredZipEntry { /// Returns the offset in bytes to where the header of the entry starts. 
pub fn header_offset(&self) -> u64 { self.file_offset } /// Returns the combined size in bytes of the header, the filename, and any extra fields. /// /// Note: This uses the extra field length stored in the central directory, which may differ from that stored in /// the local file header. See specification: pub fn header_size(&self) -> u64 { self.header_size } /// Seek to the offset in bytes where the data of the entry starts. pub(crate) async fn seek_to_data_offset(&self, mut reader: &mut R) -> Result<()> { // Seek to the header reader.seek(SeekFrom::Start(self.file_offset)).await?; // Check the signature let signature = { let mut buffer = [0; 4]; reader.read_exact(&mut buffer).await?; u32::from_le_bytes(buffer) }; match signature { LFH_SIGNATURE => (), actual => return Err(ZipError::UnexpectedHeaderError(actual, LFH_SIGNATURE)), }; // Skip the local file header and trailing data let header = LocalFileHeader::from_reader(&mut reader).await?; let trailing_size = (header.file_name_length as i64) + (header.extra_field_length as i64); reader.seek(SeekFrom::Current(trailing_size)).await?; Ok(()) } } impl Deref for StoredZipEntry { type Target = ZipEntry; fn deref(&self) -> &Self::Target { &self.entry } } async_zip-0.0.16/src/error.rs000064400000000000000000000051621046102023000142010ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A module which holds relevant error reporting structures/types. use std::fmt::{Display, Formatter}; use thiserror::Error; /// A Result type alias over ZipError to minimise repetition. 
pub type Result = std::result::Result; #[derive(Debug, PartialEq, Eq)] pub enum Zip64ErrorCase { TooManyFiles, LargeFile, } impl Display for Zip64ErrorCase { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { Self::TooManyFiles => write!(f, "More than 65536 files in archive"), Self::LargeFile => write!(f, "File is larger than 4 GiB"), } } } /// An enum of possible errors and their descriptions. #[non_exhaustive] #[derive(Debug, Error)] pub enum ZipError { #[error("feature not supported: '{0}'")] FeatureNotSupported(&'static str), #[error("compression not supported: {0}")] CompressionNotSupported(u16), #[error("host attribute compatibility not supported: {0}")] AttributeCompatibilityNotSupported(u16), #[error("attempted to read a ZIP64 file whilst on a 32-bit target")] TargetZip64NotSupported, #[error("attempted to write a ZIP file with force_no_zip64 when ZIP64 is needed: {0}")] Zip64Needed(Zip64ErrorCase), #[error("end of file has not been reached")] EOFNotReached, #[error("extra fields exceeded maximum size")] ExtraFieldTooLarge, #[error("comment exceeded maximum size")] CommentTooLarge, #[error("filename exceeded maximum size")] FileNameTooLarge, #[error("attempted to convert non-UTF8 bytes to a string/str")] StringNotUtf8, #[error("unable to locate the end of central directory record")] UnableToLocateEOCDR, #[error("extra field size was indicated to be {0} but only {1} bytes remain")] InvalidExtraFieldHeader(u16, usize), #[error("zip64 extended information field was incomplete")] Zip64ExtendedFieldIncomplete, #[error("an upstream reader returned an error: {0}")] UpstreamReadError(#[from] std::io::Error), #[error("a computed CRC32 value did not match the expected value")] CRC32CheckError, #[error("entry index was out of bounds")] EntryIndexOutOfBounds, #[error("Encountered an unexpected header (actual: {0:#x}, expected: {1:#x}).")] UnexpectedHeaderError(u32, u32), #[error("Info-ZIP Unicode Comment Extra Field was incomplete")] 
InfoZipUnicodeCommentFieldIncomplete, #[error("Info-ZIP Unicode Path Extra Field was incomplete")] InfoZipUnicodePathFieldIncomplete, } async_zip-0.0.16/src/file/builder.rs000064400000000000000000000021211046102023000154050ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::{file::ZipFile, string::ZipString}; /// A builder for [`ZipFile`]. pub struct ZipFileBuilder(pub(crate) ZipFile); impl From for ZipFileBuilder { fn from(file: ZipFile) -> Self { Self(file) } } impl Default for ZipFileBuilder { fn default() -> Self { ZipFileBuilder(ZipFile { entries: Vec::new(), zip64: false, comment: String::new().into() }) } } impl ZipFileBuilder { pub fn new() -> Self { Self::default() } /// Sets the file's comment. pub fn comment(mut self, comment: ZipString) -> Self { self.0.comment = comment; self } /// Consumes this builder and returns a final [`ZipFile`]. /// /// This is equivalent to: /// ``` /// # use async_zip::{ZipFile, ZipFileBuilder}; /// # /// # let builder = ZipFileBuilder::new(); /// let file: ZipFile = builder.into(); /// ``` pub fn build(self) -> ZipFile { self.into() } } async_zip-0.0.16/src/file/mod.rs000064400000000000000000000016471046102023000145520ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod builder; use crate::{entry::StoredZipEntry, string::ZipString}; use builder::ZipFileBuilder; /// An immutable store of data about a ZIP file. #[derive(Clone)] pub struct ZipFile { pub(crate) entries: Vec, pub(crate) zip64: bool, pub(crate) comment: ZipString, } impl From for ZipFile { fn from(builder: ZipFileBuilder) -> Self { builder.0 } } impl ZipFile { /// Returns a list of this ZIP file's entries. pub fn entries(&self) -> &[StoredZipEntry] { &self.entries } /// Returns this ZIP file's trailing comment. 
pub fn comment(&self) -> &ZipString { &self.comment } /// Returns whether or not this ZIP file is zip64 pub fn zip64(&self) -> bool { self.zip64 } } async_zip-0.0.16/src/lib.rs000064400000000000000000000043641046102023000136210ustar 00000000000000// Copyright (c) 2021-2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) // Document all features on docs.rs #![cfg_attr(docsrs, feature(doc_cfg))] //! An asynchronous ZIP archive reading/writing crate. //! //! ## Features //! - A base implementation atop `futures`'s IO traits. //! - An extended implementation atop `tokio`'s IO traits. //! - Support for Stored, Deflate, bzip2, LZMA, zstd, and xz compression methods. //! - Various different reading approaches (seek, stream, filesystem, in-memory buffer). //! - Support for writing complete data (u8 slices) or stream writing using data descriptors. //! - Initial support for ZIP64 reading and writing. //! - Aims for reasonable [specification](https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md) compliance. //! //! ## Installation //! //! ```toml //! [dependencies] //! async_zip = { version = "0.0.16", features = ["full"] } //! ``` //! //! ### Feature Flags //! - `full` - Enables all below features. //! - `full-wasm` - Enables all below features that are compatible with WASM. //! - `chrono` - Enables support for parsing dates via `chrono`. //! - `tokio` - Enables support for the `tokio` implementation module. //! - `tokio-fs` - Enables support for the `tokio::fs` reading module. //! - `deflate` - Enables support for the Deflate compression method. //! - `bzip2` - Enables support for the bzip2 compression method. //! - `lzma` - Enables support for the LZMA compression method. //! - `zstd` - Enables support for the zstd compression method. //! - `xz` - Enables support for the xz compression method. //! //! 
[Read more.](https://github.com/Majored/rs-async-zip) pub mod base; pub mod error; #[cfg(feature = "tokio")] pub mod tokio; pub(crate) mod date; pub(crate) mod entry; pub(crate) mod file; pub(crate) mod spec; pub(crate) mod string; pub(crate) mod utils; #[cfg(test)] pub(crate) mod tests; pub use crate::spec::attribute::AttributeCompatibility; pub use crate::spec::compression::{Compression, DeflateOption}; pub use crate::entry::{builder::ZipEntryBuilder, StoredZipEntry, ZipEntry}; pub use crate::file::{builder::ZipFileBuilder, ZipFile}; pub use crate::date::ZipDateTime; pub use crate::string::{StringEncoding, ZipString}; async_zip-0.0.16/src/spec/attribute.rs000064400000000000000000000026551046102023000160110ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::error::{Result, ZipError}; /// An attribute host compatibility supported by this crate. #[non_exhaustive] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum AttributeCompatibility { Unix, } impl TryFrom for AttributeCompatibility { type Error = ZipError; // Convert a u16 stored with little endianness into a supported attribute host compatibility. // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4422 fn try_from(value: u16) -> Result { match value { 3 => Ok(AttributeCompatibility::Unix), _ => Err(ZipError::AttributeCompatibilityNotSupported(value)), } } } impl From<&AttributeCompatibility> for u16 { // Convert a supported attribute host compatibility into its relevant u16 stored with little endianness. // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4422 fn from(compatibility: &AttributeCompatibility) -> Self { match compatibility { AttributeCompatibility::Unix => 3, } } } impl From for u16 { // Convert a supported attribute host compatibility into its relevant u16 stored with little endianness. 
fn from(compatibility: AttributeCompatibility) -> Self { (&compatibility).into() } } async_zip-0.0.16/src/spec/compression.rs000064400000000000000000000066471046102023000163540ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::error::{Result, ZipError}; #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] use async_compression::Level; /// A compression method supported by this crate. #[non_exhaustive] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Compression { Stored, #[cfg(feature = "deflate")] Deflate, #[cfg(feature = "deflate64")] Deflate64, #[cfg(feature = "bzip2")] Bz, #[cfg(feature = "lzma")] Lzma, #[cfg(feature = "zstd")] Zstd, #[cfg(feature = "xz")] Xz, } impl TryFrom for Compression { type Error = ZipError; // Convert a u16 stored with little endianness into a supported compression method. // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#445 fn try_from(value: u16) -> Result { match value { 0 => Ok(Compression::Stored), #[cfg(feature = "deflate")] 8 => Ok(Compression::Deflate), #[cfg(feature = "deflate64")] 9 => Ok(Compression::Deflate64), #[cfg(feature = "bzip2")] 12 => Ok(Compression::Bz), #[cfg(feature = "lzma")] 14 => Ok(Compression::Lzma), #[cfg(feature = "zstd")] 93 => Ok(Compression::Zstd), #[cfg(feature = "xz")] 95 => Ok(Compression::Xz), _ => Err(ZipError::CompressionNotSupported(value)), } } } impl From<&Compression> for u16 { // Convert a supported compression method into its relevant u16 stored with little endianness. 
// https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#445 fn from(compression: &Compression) -> u16 { match compression { Compression::Stored => 0, #[cfg(feature = "deflate")] Compression::Deflate => 8, #[cfg(feature = "deflate64")] Compression::Deflate64 => 9, #[cfg(feature = "bzip2")] Compression::Bz => 12, #[cfg(feature = "lzma")] Compression::Lzma => 14, #[cfg(feature = "zstd")] Compression::Zstd => 93, #[cfg(feature = "xz")] Compression::Xz => 95, } } } impl From for u16 { fn from(compression: Compression) -> u16 { (&compression).into() } } /// Level of compression data should be compressed with for deflate. #[derive(Debug, Clone, Copy)] pub enum DeflateOption { // Normal (-en) compression option was used. Normal, // Maximum (-exx/-ex) compression option was used. Maximum, // Fast (-ef) compression option was used. Fast, // Super Fast (-es) compression option was used. Super, /// Other implementation defined level. Other(i32), } #[cfg(any(feature = "deflate", feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz"))] impl DeflateOption { pub(crate) fn into_level(self) -> Level { // FIXME: There's no clear documentation on what these specific levels defined in the ZIP specification relate // to. We want to be compatible with any other library, and not specific to `async_compression`'s levels. 
if let Self::Other(l) = self { Level::Precise(l) } else { Level::Default } } } async_zip-0.0.16/src/spec/consts.rs000064400000000000000000000033761046102023000153200ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub const SIGNATURE_LENGTH: usize = 4; // Local file header constants // // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#437 pub const LFH_SIGNATURE: u32 = 0x4034b50; #[allow(dead_code)] pub const LFH_LENGTH: usize = 26; // Central directory header constants // // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4312 pub const CDH_SIGNATURE: u32 = 0x2014b50; #[allow(dead_code)] pub const CDH_LENGTH: usize = 42; // End of central directory record constants // // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4316 pub const EOCDR_SIGNATURE: u32 = 0x6054b50; /// The minimum length of the EOCDR, excluding the signature. pub const EOCDR_LENGTH: usize = 18; /// The signature for the zip64 end of central directory record. /// Ref: https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4314 pub const ZIP64_EOCDR_SIGNATURE: u32 = 0x06064b50; /// The signature for the zip64 end of central directory locator. /// Ref: https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4315 pub const ZIP64_EOCDL_SIGNATURE: u32 = 0x07064b50; /// The length of the ZIP64 EOCDL, including the signature. /// The EOCDL has a fixed size, thankfully. pub const ZIP64_EOCDL_LENGTH: u64 = 20; /// The contents of a header field when one must reference the zip64 version instead. pub const NON_ZIP64_MAX_SIZE: u32 = 0xFFFFFFFF; /// The maximum number of files or disks in a ZIP file before it requires ZIP64. 
pub const NON_ZIP64_MAX_NUM_FILES: u16 = 0xFFFF; // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#439 pub const DATA_DESCRIPTOR_SIGNATURE: u32 = 0x8074b50; async_zip-0.0.16/src/spec/extra_field.rs000064400000000000000000000260461046102023000162740ustar 00000000000000// Copyright Cognite AS, 2023 use crate::error::{Result as ZipResult, ZipError}; use crate::spec::header::{ ExtraField, HeaderId, InfoZipUnicodeCommentExtraField, InfoZipUnicodePathExtraField, UnknownExtraField, Zip64ExtendedInformationExtraField, }; use super::consts::NON_ZIP64_MAX_SIZE; pub(crate) trait ExtraFieldAsBytes { fn as_bytes(&self) -> Vec; fn count_bytes(&self) -> usize; } impl ExtraFieldAsBytes for &[ExtraField] { fn as_bytes(&self) -> Vec { let mut buffer = Vec::new(); for field in self.iter() { buffer.append(&mut field.as_bytes()); } buffer } fn count_bytes(&self) -> usize { self.iter().map(|field| field.count_bytes()).sum() } } impl ExtraFieldAsBytes for ExtraField { fn as_bytes(&self) -> Vec { match self { ExtraField::Zip64ExtendedInformation(field) => field.as_bytes(), ExtraField::InfoZipUnicodeComment(field) => field.as_bytes(), ExtraField::InfoZipUnicodePath(field) => field.as_bytes(), ExtraField::Unknown(field) => field.as_bytes(), } } fn count_bytes(&self) -> usize { match self { ExtraField::Zip64ExtendedInformation(field) => field.count_bytes(), ExtraField::InfoZipUnicodeComment(field) => field.count_bytes(), ExtraField::InfoZipUnicodePath(field) => field.count_bytes(), ExtraField::Unknown(field) => field.count_bytes(), } } } impl ExtraFieldAsBytes for UnknownExtraField { fn as_bytes(&self) -> Vec { let mut bytes = Vec::new(); let header_id: u16 = self.header_id.into(); bytes.append(&mut header_id.to_le_bytes().to_vec()); bytes.append(&mut self.data_size.to_le_bytes().to_vec()); bytes.append(&mut self.content.clone()); bytes } fn count_bytes(&self) -> usize { 4 + self.content.len() } } impl ExtraFieldAsBytes for Zip64ExtendedInformationExtraField { fn 
as_bytes(&self) -> Vec { let mut bytes = Vec::new(); let header_id: u16 = self.header_id.into(); bytes.append(&mut header_id.to_le_bytes().to_vec()); bytes.append(&mut (self.content_size() as u16).to_le_bytes().to_vec()); if let Some(uncompressed_size) = &self.uncompressed_size { bytes.append(&mut uncompressed_size.to_le_bytes().to_vec()); } if let Some(compressed_size) = &self.compressed_size { bytes.append(&mut compressed_size.to_le_bytes().to_vec()); } if let Some(relative_header_offset) = &self.relative_header_offset { bytes.append(&mut relative_header_offset.to_le_bytes().to_vec()); } if let Some(disk_start_number) = &self.disk_start_number { bytes.append(&mut disk_start_number.to_le_bytes().to_vec()); } bytes } fn count_bytes(&self) -> usize { 4 + self.content_size() } } impl ExtraFieldAsBytes for InfoZipUnicodeCommentExtraField { fn as_bytes(&self) -> Vec { let mut bytes = Vec::new(); let header_id: u16 = HeaderId::INFO_ZIP_UNICODE_COMMENT_EXTRA_FIELD.into(); bytes.append(&mut header_id.to_le_bytes().to_vec()); match self { InfoZipUnicodeCommentExtraField::V1 { crc32, unicode } => { let data_size: u16 = (5 + unicode.len()).try_into().unwrap(); bytes.append(&mut data_size.to_le_bytes().to_vec()); bytes.push(1); bytes.append(&mut crc32.to_le_bytes().to_vec()); bytes.append(&mut unicode.clone()); } InfoZipUnicodeCommentExtraField::Unknown { version, data } => { let data_size: u16 = (1 + data.len()).try_into().unwrap(); bytes.append(&mut data_size.to_le_bytes().to_vec()); bytes.push(*version); bytes.append(&mut data.clone()); } } bytes } fn count_bytes(&self) -> usize { match self { InfoZipUnicodeCommentExtraField::V1 { unicode, .. } => 9 + unicode.len(), InfoZipUnicodeCommentExtraField::Unknown { data, .. 
} => 5 + data.len(), } } } impl ExtraFieldAsBytes for InfoZipUnicodePathExtraField { fn as_bytes(&self) -> Vec { let mut bytes = Vec::new(); let header_id: u16 = HeaderId::INFO_ZIP_UNICODE_PATH_EXTRA_FIELD.into(); bytes.append(&mut header_id.to_le_bytes().to_vec()); match self { InfoZipUnicodePathExtraField::V1 { crc32, unicode } => { let data_size: u16 = (5 + unicode.len()).try_into().unwrap(); bytes.append(&mut data_size.to_le_bytes().to_vec()); bytes.push(1); bytes.append(&mut crc32.to_le_bytes().to_vec()); bytes.append(&mut unicode.clone()); } InfoZipUnicodePathExtraField::Unknown { version, data } => { let data_size: u16 = (1 + data.len()).try_into().unwrap(); bytes.append(&mut data_size.to_le_bytes().to_vec()); bytes.push(*version); bytes.append(&mut data.clone()); } } bytes } fn count_bytes(&self) -> usize { match self { InfoZipUnicodePathExtraField::V1 { unicode, .. } => 9 + unicode.len(), InfoZipUnicodePathExtraField::Unknown { data, .. } => 5 + data.len(), } } } /// Parse a zip64 extra field from bytes. /// The content of "data" should exclude the header. 
fn zip64_extended_information_field_from_bytes( header_id: HeaderId, data: &[u8], uncompressed_size: u32, compressed_size: u32, ) -> ZipResult { // slice.take is nightly-only so we'll just use an index to track the current position let mut current_idx = 0; let uncompressed_size = if uncompressed_size == NON_ZIP64_MAX_SIZE && data.len() >= current_idx + 8 { let val = Some(u64::from_le_bytes(data[current_idx..current_idx + 8].try_into().unwrap())); current_idx += 8; val } else { None }; let compressed_size = if compressed_size == NON_ZIP64_MAX_SIZE && data.len() >= current_idx + 8 { let val = Some(u64::from_le_bytes(data[current_idx..current_idx + 8].try_into().unwrap())); current_idx += 8; val } else { None }; let relative_header_offset = if data.len() >= current_idx + 8 { let val = Some(u64::from_le_bytes(data[current_idx..current_idx + 8].try_into().unwrap())); current_idx += 8; val } else { None }; #[allow(unused_assignments)] let disk_start_number = if data.len() >= current_idx + 4 { let val = Some(u32::from_le_bytes(data[current_idx..current_idx + 4].try_into().unwrap())); current_idx += 4; val } else { None }; Ok(Zip64ExtendedInformationExtraField { header_id, uncompressed_size, compressed_size, relative_header_offset, disk_start_number, }) } fn info_zip_unicode_comment_extra_field_from_bytes( _header_id: HeaderId, data_size: u16, data: &[u8], ) -> ZipResult { if data.is_empty() { return Err(ZipError::InfoZipUnicodeCommentFieldIncomplete); } let version = data[0]; match version { 1 => { if data.len() < 5 { return Err(ZipError::InfoZipUnicodeCommentFieldIncomplete); } let crc32 = u32::from_le_bytes(data[1..5].try_into().unwrap()); let unicode = data[5..(data_size as usize)].to_vec(); Ok(InfoZipUnicodeCommentExtraField::V1 { crc32, unicode }) } _ => Ok(InfoZipUnicodeCommentExtraField::Unknown { version, data: data[1..(data_size as usize)].to_vec() }), } } fn info_zip_unicode_path_extra_field_from_bytes( _header_id: HeaderId, data_size: u16, data: &[u8], ) -> 
ZipResult { if data.is_empty() { return Err(ZipError::InfoZipUnicodePathFieldIncomplete); } let version = data[0]; match version { 1 => { if data.len() < 5 { return Err(ZipError::InfoZipUnicodePathFieldIncomplete); } let crc32 = u32::from_le_bytes(data[1..5].try_into().unwrap()); let unicode = data[5..(data_size as usize)].to_vec(); Ok(InfoZipUnicodePathExtraField::V1 { crc32, unicode }) } _ => Ok(InfoZipUnicodePathExtraField::Unknown { version, data: data[1..(data_size as usize)].to_vec() }), } } pub(crate) fn extra_field_from_bytes( header_id: HeaderId, data_size: u16, data: &[u8], uncompressed_size: u32, compressed_size: u32, ) -> ZipResult { match header_id { HeaderId::ZIP64_EXTENDED_INFORMATION_EXTRA_FIELD => Ok(ExtraField::Zip64ExtendedInformation( zip64_extended_information_field_from_bytes(header_id, data, uncompressed_size, compressed_size)?, )), HeaderId::INFO_ZIP_UNICODE_COMMENT_EXTRA_FIELD => Ok(ExtraField::InfoZipUnicodeComment( info_zip_unicode_comment_extra_field_from_bytes(header_id, data_size, data)?, )), HeaderId::INFO_ZIP_UNICODE_PATH_EXTRA_FIELD => Ok(ExtraField::InfoZipUnicodePath( info_zip_unicode_path_extra_field_from_bytes(header_id, data_size, data)?, )), _ => Ok(ExtraField::Unknown(UnknownExtraField { header_id, data_size, content: data.to_vec() })), } } pub struct Zip64ExtendedInformationExtraFieldBuilder { field: Zip64ExtendedInformationExtraField, } impl Zip64ExtendedInformationExtraFieldBuilder { pub fn new() -> Self { Self { field: Zip64ExtendedInformationExtraField { header_id: HeaderId::ZIP64_EXTENDED_INFORMATION_EXTRA_FIELD, uncompressed_size: None, compressed_size: None, relative_header_offset: None, disk_start_number: None, }, } } pub fn sizes(mut self, compressed_size: u64, uncompressed_size: u64) -> Self { self.field.compressed_size = Some(compressed_size); self.field.uncompressed_size = Some(uncompressed_size); self } pub fn relative_header_offset(mut self, relative_header_offset: u64) -> Self { 
self.field.relative_header_offset = Some(relative_header_offset); self } #[allow(dead_code)] pub fn disk_start_number(mut self, disk_start_number: u32) -> Self { self.field.disk_start_number = Some(disk_start_number); self } pub fn eof_only(&self) -> bool { (self.field.uncompressed_size.is_none() && self.field.compressed_size.is_none()) && (self.field.relative_header_offset.is_some() || self.field.disk_start_number.is_some()) } pub fn build(self) -> ZipResult { let field = self.field; if field.content_size() == 0 { return Err(ZipError::Zip64ExtendedFieldIncomplete); } Ok(field) } } async_zip-0.0.16/src/spec/header.rs000064400000000000000000000125731046102023000152360ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#437 pub struct LocalFileHeader { pub version: u16, pub flags: GeneralPurposeFlag, pub compression: u16, pub mod_time: u16, pub mod_date: u16, pub crc: u32, pub compressed_size: u32, pub uncompressed_size: u32, pub file_name_length: u16, pub extra_field_length: u16, } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#444 #[derive(Copy, Clone)] pub struct GeneralPurposeFlag { pub encrypted: bool, pub data_descriptor: bool, pub filename_unicode: bool, } /// 2 byte header ids /// Ref https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#452 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct HeaderId(pub u16); impl HeaderId { pub const ZIP64_EXTENDED_INFORMATION_EXTRA_FIELD: HeaderId = HeaderId(0x0001); pub const INFO_ZIP_UNICODE_COMMENT_EXTRA_FIELD: HeaderId = HeaderId(0x6375); pub const INFO_ZIP_UNICODE_PATH_EXTRA_FIELD: HeaderId = HeaderId(0x7075); } impl From for HeaderId { fn from(value: u16) -> Self { HeaderId(value) } } impl From for u16 { fn from(value: HeaderId) -> Self { value.0 } } /// Represents each extra field. 
/// Not strictly part of the spec, but is the most useful way to represent the data. #[derive(Clone, Debug)] #[non_exhaustive] pub enum ExtraField { Zip64ExtendedInformation(Zip64ExtendedInformationExtraField), InfoZipUnicodeComment(InfoZipUnicodeCommentExtraField), InfoZipUnicodePath(InfoZipUnicodePathExtraField), Unknown(UnknownExtraField), } /// An extended information header for Zip64. /// This field is used both for local file headers and central directory records. /// https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#453 #[derive(Clone, Debug)] pub struct Zip64ExtendedInformationExtraField { pub header_id: HeaderId, pub uncompressed_size: Option, pub compressed_size: Option, // While not specified in the spec, these two fields are often left out in practice. pub relative_header_offset: Option, pub disk_start_number: Option, } impl Zip64ExtendedInformationExtraField { pub(crate) fn content_size(&self) -> usize { self.uncompressed_size.map(|_| 8).unwrap_or_default() + self.compressed_size.map(|_| 8).unwrap_or_default() + self.relative_header_offset.map(|_| 8).unwrap_or_default() + self.disk_start_number.map(|_| 8).unwrap_or_default() } } /// Stores the UTF-8 version of the file comment as stored in the central directory header. /// https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#468 #[derive(Clone, Debug)] pub enum InfoZipUnicodeCommentExtraField { V1 { crc32: u32, unicode: Vec }, Unknown { version: u8, data: Vec }, } /// Stores the UTF-8 version of the file name field as stored in the local header and central directory header. /// https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#469 #[derive(Clone, Debug)] pub enum InfoZipUnicodePathExtraField { V1 { crc32: u32, unicode: Vec }, Unknown { version: u8, data: Vec }, } /// Represents any unparsed extra field. 
#[derive(Clone, Debug)] pub struct UnknownExtraField { pub header_id: HeaderId, pub data_size: u16, pub content: Vec, } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4312 pub struct CentralDirectoryRecord { pub v_made_by: u16, pub v_needed: u16, pub flags: GeneralPurposeFlag, pub compression: u16, pub mod_time: u16, pub mod_date: u16, pub crc: u32, pub compressed_size: u32, pub uncompressed_size: u32, pub file_name_length: u16, pub extra_field_length: u16, pub file_comment_length: u16, pub disk_start: u16, pub inter_attr: u16, pub exter_attr: u32, pub lh_offset: u32, } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4316 #[derive(Debug)] pub struct EndOfCentralDirectoryHeader { pub(crate) disk_num: u16, pub(crate) start_cent_dir_disk: u16, pub(crate) num_of_entries_disk: u16, pub(crate) num_of_entries: u16, pub(crate) size_cent_dir: u32, pub(crate) cent_dir_offset: u32, pub(crate) file_comm_length: u16, } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4314 #[derive(Debug, PartialEq)] pub struct Zip64EndOfCentralDirectoryRecord { /// The size of this Zip64EndOfCentralDirectoryRecord. /// This is specified because there is a variable-length extra zip64 information sector. /// However, we will gleefully ignore this sector because it is reserved for use by PKWare. 
pub size_of_zip64_end_of_cd_record: u64, pub version_made_by: u16, pub version_needed_to_extract: u16, pub disk_number: u32, pub disk_number_start_of_cd: u32, pub num_entries_in_directory_on_disk: u64, pub num_entries_in_directory: u64, pub directory_size: u64, pub offset_of_start_of_directory: u64, } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#4315 #[derive(Debug, PartialEq)] pub struct Zip64EndOfCentralDirectoryLocator { pub number_of_disk_with_start_of_zip64_end_of_central_directory: u32, pub relative_offset: u64, pub total_number_of_disks: u32, } async_zip-0.0.16/src/spec/mod.rs000064400000000000000000000005261046102023000145600ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod attribute; pub(crate) mod compression; pub(crate) mod consts; pub(crate) mod extra_field; pub(crate) mod header; pub(crate) mod parse; pub(crate) mod version; pub use compression::Compression; async_zip-0.0.16/src/spec/parse.rs000064400000000000000000000351051046102023000151140ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::error::{Result, ZipError}; use crate::spec::header::{ CentralDirectoryRecord, EndOfCentralDirectoryHeader, ExtraField, GeneralPurposeFlag, HeaderId, LocalFileHeader, Zip64EndOfCentralDirectoryLocator, Zip64EndOfCentralDirectoryRecord, }; use futures_lite::io::{AsyncRead, AsyncReadExt}; impl LocalFileHeader { pub fn as_slice(&self) -> [u8; 26] { let mut array = [0; 26]; let mut cursor = 0; array_push!(array, cursor, self.version.to_le_bytes()); array_push!(array, cursor, self.flags.as_slice()); array_push!(array, cursor, self.compression.to_le_bytes()); array_push!(array, cursor, self.mod_time.to_le_bytes()); array_push!(array, cursor, self.mod_date.to_le_bytes()); array_push!(array, cursor, self.crc.to_le_bytes()); 
array_push!(array, cursor, self.compressed_size.to_le_bytes()); array_push!(array, cursor, self.uncompressed_size.to_le_bytes()); array_push!(array, cursor, self.file_name_length.to_le_bytes()); array_push!(array, cursor, self.extra_field_length.to_le_bytes()); array } } impl GeneralPurposeFlag { pub fn as_slice(&self) -> [u8; 2] { let encrypted: u16 = match self.encrypted { false => 0x0, true => 0b1, }; let data_descriptor: u16 = match self.data_descriptor { false => 0x0, true => 0x8, }; let filename_unicode: u16 = match self.filename_unicode { false => 0x0, true => 0x800, }; (encrypted | data_descriptor | filename_unicode).to_le_bytes() } } impl CentralDirectoryRecord { pub fn as_slice(&self) -> [u8; 42] { let mut array = [0; 42]; let mut cursor = 0; array_push!(array, cursor, self.v_made_by.to_le_bytes()); array_push!(array, cursor, self.v_needed.to_le_bytes()); array_push!(array, cursor, self.flags.as_slice()); array_push!(array, cursor, self.compression.to_le_bytes()); array_push!(array, cursor, self.mod_time.to_le_bytes()); array_push!(array, cursor, self.mod_date.to_le_bytes()); array_push!(array, cursor, self.crc.to_le_bytes()); array_push!(array, cursor, self.compressed_size.to_le_bytes()); array_push!(array, cursor, self.uncompressed_size.to_le_bytes()); array_push!(array, cursor, self.file_name_length.to_le_bytes()); array_push!(array, cursor, self.extra_field_length.to_le_bytes()); array_push!(array, cursor, self.file_comment_length.to_le_bytes()); array_push!(array, cursor, self.disk_start.to_le_bytes()); array_push!(array, cursor, self.inter_attr.to_le_bytes()); array_push!(array, cursor, self.exter_attr.to_le_bytes()); array_push!(array, cursor, self.lh_offset.to_le_bytes()); array } } impl EndOfCentralDirectoryHeader { pub fn as_slice(&self) -> [u8; 18] { let mut array = [0; 18]; let mut cursor = 0; array_push!(array, cursor, self.disk_num.to_le_bytes()); array_push!(array, cursor, self.start_cent_dir_disk.to_le_bytes()); array_push!(array, cursor, 
self.num_of_entries_disk.to_le_bytes()); array_push!(array, cursor, self.num_of_entries.to_le_bytes()); array_push!(array, cursor, self.size_cent_dir.to_le_bytes()); array_push!(array, cursor, self.cent_dir_offset.to_le_bytes()); array_push!(array, cursor, self.file_comm_length.to_le_bytes()); array } } impl From<[u8; 26]> for LocalFileHeader { fn from(value: [u8; 26]) -> LocalFileHeader { LocalFileHeader { version: u16::from_le_bytes(value[0..2].try_into().unwrap()), flags: GeneralPurposeFlag::from(u16::from_le_bytes(value[2..4].try_into().unwrap())), compression: u16::from_le_bytes(value[4..6].try_into().unwrap()), mod_time: u16::from_le_bytes(value[6..8].try_into().unwrap()), mod_date: u16::from_le_bytes(value[8..10].try_into().unwrap()), crc: u32::from_le_bytes(value[10..14].try_into().unwrap()), compressed_size: u32::from_le_bytes(value[14..18].try_into().unwrap()), uncompressed_size: u32::from_le_bytes(value[18..22].try_into().unwrap()), file_name_length: u16::from_le_bytes(value[22..24].try_into().unwrap()), extra_field_length: u16::from_le_bytes(value[24..26].try_into().unwrap()), } } } impl From for GeneralPurposeFlag { fn from(value: u16) -> GeneralPurposeFlag { let encrypted = !matches!(value & 0x1, 0); let data_descriptor = !matches!((value & 0x8) >> 3, 0); let filename_unicode = !matches!((value & 0x800) >> 11, 0); GeneralPurposeFlag { encrypted, data_descriptor, filename_unicode } } } impl From<[u8; 42]> for CentralDirectoryRecord { fn from(value: [u8; 42]) -> CentralDirectoryRecord { CentralDirectoryRecord { v_made_by: u16::from_le_bytes(value[0..2].try_into().unwrap()), v_needed: u16::from_le_bytes(value[2..4].try_into().unwrap()), flags: GeneralPurposeFlag::from(u16::from_le_bytes(value[4..6].try_into().unwrap())), compression: u16::from_le_bytes(value[6..8].try_into().unwrap()), mod_time: u16::from_le_bytes(value[8..10].try_into().unwrap()), mod_date: u16::from_le_bytes(value[10..12].try_into().unwrap()), crc: 
u32::from_le_bytes(value[12..16].try_into().unwrap()), compressed_size: u32::from_le_bytes(value[16..20].try_into().unwrap()), uncompressed_size: u32::from_le_bytes(value[20..24].try_into().unwrap()), file_name_length: u16::from_le_bytes(value[24..26].try_into().unwrap()), extra_field_length: u16::from_le_bytes(value[26..28].try_into().unwrap()), file_comment_length: u16::from_le_bytes(value[28..30].try_into().unwrap()), disk_start: u16::from_le_bytes(value[30..32].try_into().unwrap()), inter_attr: u16::from_le_bytes(value[32..34].try_into().unwrap()), exter_attr: u32::from_le_bytes(value[34..38].try_into().unwrap()), lh_offset: u32::from_le_bytes(value[38..42].try_into().unwrap()), } } } impl From<[u8; 18]> for EndOfCentralDirectoryHeader { fn from(value: [u8; 18]) -> EndOfCentralDirectoryHeader { EndOfCentralDirectoryHeader { disk_num: u16::from_le_bytes(value[0..2].try_into().unwrap()), start_cent_dir_disk: u16::from_le_bytes(value[2..4].try_into().unwrap()), num_of_entries_disk: u16::from_le_bytes(value[4..6].try_into().unwrap()), num_of_entries: u16::from_le_bytes(value[6..8].try_into().unwrap()), size_cent_dir: u32::from_le_bytes(value[8..12].try_into().unwrap()), cent_dir_offset: u32::from_le_bytes(value[12..16].try_into().unwrap()), file_comm_length: u16::from_le_bytes(value[16..18].try_into().unwrap()), } } } impl From<[u8; 52]> for Zip64EndOfCentralDirectoryRecord { fn from(value: [u8; 52]) -> Self { Self { size_of_zip64_end_of_cd_record: u64::from_le_bytes(value[0..8].try_into().unwrap()), version_made_by: u16::from_le_bytes(value[8..10].try_into().unwrap()), version_needed_to_extract: u16::from_le_bytes(value[10..12].try_into().unwrap()), disk_number: u32::from_le_bytes(value[12..16].try_into().unwrap()), disk_number_start_of_cd: u32::from_le_bytes(value[16..20].try_into().unwrap()), num_entries_in_directory_on_disk: u64::from_le_bytes(value[20..28].try_into().unwrap()), num_entries_in_directory: u64::from_le_bytes(value[28..36].try_into().unwrap()), 
directory_size: u64::from_le_bytes(value[36..44].try_into().unwrap()), offset_of_start_of_directory: u64::from_le_bytes(value[44..52].try_into().unwrap()), } } } impl From<[u8; 16]> for Zip64EndOfCentralDirectoryLocator { fn from(value: [u8; 16]) -> Self { Self { number_of_disk_with_start_of_zip64_end_of_central_directory: u32::from_le_bytes( value[0..4].try_into().unwrap(), ), relative_offset: u64::from_le_bytes(value[4..12].try_into().unwrap()), total_number_of_disks: u32::from_le_bytes(value[12..16].try_into().unwrap()), } } } impl LocalFileHeader { pub async fn from_reader(reader: &mut R) -> Result { let mut buffer: [u8; 26] = [0; 26]; reader.read_exact(&mut buffer).await?; Ok(LocalFileHeader::from(buffer)) } } impl EndOfCentralDirectoryHeader { pub async fn from_reader(reader: &mut R) -> Result { let mut buffer: [u8; 18] = [0; 18]; reader.read_exact(&mut buffer).await?; Ok(EndOfCentralDirectoryHeader::from(buffer)) } } impl CentralDirectoryRecord { pub async fn from_reader(reader: &mut R) -> Result { let mut buffer: [u8; 42] = [0; 42]; reader.read_exact(&mut buffer).await?; Ok(CentralDirectoryRecord::from(buffer)) } } impl Zip64EndOfCentralDirectoryRecord { pub async fn from_reader(reader: &mut R) -> Result { let mut buffer: [u8; 52] = [0; 52]; reader.read_exact(&mut buffer).await?; Ok(Self::from(buffer)) } pub fn as_bytes(&self) -> [u8; 52] { let mut array = [0; 52]; let mut cursor = 0; array_push!(array, cursor, self.size_of_zip64_end_of_cd_record.to_le_bytes()); array_push!(array, cursor, self.version_made_by.to_le_bytes()); array_push!(array, cursor, self.version_needed_to_extract.to_le_bytes()); array_push!(array, cursor, self.disk_number.to_le_bytes()); array_push!(array, cursor, self.disk_number_start_of_cd.to_le_bytes()); array_push!(array, cursor, self.num_entries_in_directory_on_disk.to_le_bytes()); array_push!(array, cursor, self.num_entries_in_directory.to_le_bytes()); array_push!(array, cursor, self.directory_size.to_le_bytes()); 
array_push!(array, cursor, self.offset_of_start_of_directory.to_le_bytes()); array } } impl Zip64EndOfCentralDirectoryLocator { /// Read 4 bytes from the reader and check whether its signature matches that of the EOCDL. /// If it does, return Some(EOCDL), otherwise return None. pub async fn try_from_reader( reader: &mut R, ) -> Result> { let signature = { let mut buffer = [0; 4]; reader.read_exact(&mut buffer).await?; u32::from_le_bytes(buffer) }; if signature != ZIP64_EOCDL_SIGNATURE { return Ok(None); } let mut buffer: [u8; 16] = [0; 16]; reader.read_exact(&mut buffer).await?; Ok(Some(Self::from(buffer))) } pub fn as_bytes(&self) -> [u8; 16] { let mut array = [0; 16]; let mut cursor = 0; array_push!(array, cursor, self.number_of_disk_with_start_of_zip64_end_of_central_directory.to_le_bytes()); array_push!(array, cursor, self.relative_offset.to_le_bytes()); array_push!(array, cursor, self.total_number_of_disks.to_le_bytes()); array } } /// Parse the extra fields. pub fn parse_extra_fields(data: Vec, uncompressed_size: u32, compressed_size: u32) -> Result> { let mut cursor = 0; let mut extra_fields = Vec::new(); while cursor + 4 < data.len() { let header_id: HeaderId = u16::from_le_bytes(data[cursor..cursor + 2].try_into().unwrap()).into(); let field_size = u16::from_le_bytes(data[cursor + 2..cursor + 4].try_into().unwrap()); if cursor + 4 + field_size as usize > data.len() { return Err(ZipError::InvalidExtraFieldHeader(field_size, data.len() - cursor - 8 - field_size as usize)); } let data = &data[cursor + 4..cursor + 4 + field_size as usize]; extra_fields.push(extra_field_from_bytes(header_id, field_size, data, uncompressed_size, compressed_size)?); cursor += 4 + field_size as usize; } Ok(extra_fields) } /// Replace elements of an array at a given cursor index for use with a zero-initialised array. macro_rules! 
array_push { ($arr:ident, $cursor:ident, $value:expr) => {{ for entry in $value { $arr[$cursor] = entry; $cursor += 1; } }}; } use crate::spec::consts::ZIP64_EOCDL_SIGNATURE; use crate::spec::extra_field::extra_field_from_bytes; pub(crate) use array_push; #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_zip64_eocdr() { let eocdr: [u8; 56] = [ 0x50, 0x4B, 0x06, 0x06, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x03, 0x2D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; let without_signature: [u8; 52] = eocdr[4..56].try_into().unwrap(); let zip64eocdr = Zip64EndOfCentralDirectoryRecord::from(without_signature); assert_eq!( zip64eocdr, Zip64EndOfCentralDirectoryRecord { size_of_zip64_end_of_cd_record: 44, version_made_by: 798, version_needed_to_extract: 45, disk_number: 0, disk_number_start_of_cd: 0, num_entries_in_directory_on_disk: 1, num_entries_in_directory: 1, directory_size: 47, offset_of_start_of_directory: 64, } ) } #[tokio::test] async fn test_parse_zip64_eocdl() { let eocdl: [u8; 20] = [ 0x50, 0x4B, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, ]; let mut cursor = futures_lite::io::Cursor::new(eocdl); let zip64eocdl = Zip64EndOfCentralDirectoryLocator::try_from_reader(&mut cursor).await.unwrap().unwrap(); assert_eq!( zip64eocdl, Zip64EndOfCentralDirectoryLocator { number_of_disk_with_start_of_zip64_end_of_central_directory: 0, relative_offset: 111, total_number_of_disks: 1, } ) } } async_zip-0.0.16/src/spec/version.rs000064400000000000000000000022201046102023000154570ustar 00000000000000// Copyright (c) 2021 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::entry::ZipEntry; #[cfg(any( feature = "deflate", 
feature = "bzip2", feature = "zstd", feature = "lzma", feature = "xz", feature = "deflate64" ))] use crate::spec::Compression; pub(crate) const SPEC_VERSION_MADE_BY: u16 = 63; // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#443 pub fn as_needed_to_extract(entry: &ZipEntry) -> u16 { let mut version = match entry.compression() { #[cfg(feature = "deflate")] Compression::Deflate => 20, #[cfg(feature = "deflate64")] Compression::Deflate64 => 21, #[cfg(feature = "bzip2")] Compression::Bz => 46, #[cfg(feature = "lzma")] Compression::Lzma => 63, _ => 10, }; if let Ok(true) = entry.dir() { version = std::cmp::max(version, 20); } version } // https://github.com/Majored/rs-async-zip/blob/main/SPECIFICATION.md#442 pub fn as_made_by() -> u16 { // Default to UNIX mapping for the moment. 3 << 8 | SPEC_VERSION_MADE_BY } async_zip-0.0.16/src/string.rs000064400000000000000000000075321046102023000143610ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::error::{Result, ZipError}; /// A string encoding supported by this crate. #[derive(Debug, Clone, Copy)] pub enum StringEncoding { Utf8, Raw, } /// A string wrapper for handling different encodings. #[derive(Debug, Clone)] pub struct ZipString { encoding: StringEncoding, raw: Vec, alternative: Option>, } impl ZipString { /// Constructs a new encoded string from its raw bytes and its encoding type. /// /// # Note /// If the provided encoding is [`StringEncoding::Utf8`] but the raw bytes are not valid UTF-8 (ie. a call to /// `std::str::from_utf8()` fails), the encoding is defaulted back to [`StringEncoding::Raw`]. 
pub fn new(raw: Vec, mut encoding: StringEncoding) -> Self { if let StringEncoding::Utf8 = encoding { if std::str::from_utf8(&raw).is_err() { encoding = StringEncoding::Raw; } } Self { encoding, raw, alternative: None } } /// Constructs a new encoded string from utf-8 data, with an alternative in native MBCS encoding. pub fn new_with_alternative(utf8: String, alternative: Vec) -> Self { Self { encoding: StringEncoding::Utf8, raw: utf8.into_bytes(), alternative: Some(alternative) } } /// Returns the raw bytes for this string. pub fn as_bytes(&self) -> &[u8] { &self.raw } /// Returns the encoding type for this string. pub fn encoding(&self) -> StringEncoding { self.encoding } /// Returns the alternative bytes (in native MBCS encoding) for this string. pub fn alternative(&self) -> Option<&[u8]> { self.alternative.as_deref() } /// Returns the raw bytes converted into a string slice. /// /// # Note /// A call to this method will only succeed if the encoding type is [`StringEncoding::Utf8`]. pub fn as_str(&self) -> Result<&str> { if !matches!(self.encoding, StringEncoding::Utf8) { return Err(ZipError::StringNotUtf8); } // SAFETY: // "The bytes passed in must be valid UTF-8.' // // This function will error if self.encoding is not StringEncoding::Utf8. // // self.encoding is only ever StringEncoding::Utf8 if this variant was provided to the constructor AND the // call to `std::str::from_utf8()` within the constructor succeeded. Mutable access to the inner vector is // never given and no method implemented on this type mutates the inner vector. Ok(unsafe { std::str::from_utf8_unchecked(&self.raw) }) } /// Returns the raw bytes converted to an owned string. /// /// # Note /// A call to this method will only succeed if the encoding type is [`StringEncoding::Utf8`]. pub fn into_string(self) -> Result { if !matches!(self.encoding, StringEncoding::Utf8) { return Err(ZipError::StringNotUtf8); } // SAFETY: See above. 
Ok(unsafe { String::from_utf8_unchecked(self.raw) }) } /// Returns the alternative bytes (in native MBCS encoding) converted to the owned. pub fn into_alternative(self) -> Option> { self.alternative } /// Returns whether this string is encoded as utf-8 without an alternative. pub fn is_utf8_without_alternative(&self) -> bool { matches!(self.encoding, StringEncoding::Utf8) && self.alternative.is_none() } } impl From for ZipString { fn from(value: String) -> Self { Self { encoding: StringEncoding::Utf8, raw: value.into_bytes(), alternative: None } } } impl From<&str> for ZipString { fn from(value: &str) -> Self { Self { encoding: StringEncoding::Utf8, raw: value.as_bytes().to_vec(), alternative: None } } } async_zip-0.0.16/src/tests/combined/mod.rs000064400000000000000000000002041046102023000165410ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) async_zip-0.0.16/src/tests/mod.rs000064400000000000000000000007651046102023000147750ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod combined; pub(crate) mod read; pub(crate) mod spec; pub(crate) mod write; use std::sync::Once; static ENV_LOGGER: Once = Once::new(); /// Initialize the env logger for any tests that require it. /// Safe to call multiple times. 
fn init_logger() { ENV_LOGGER.call_once(|| env_logger::Builder::from_default_env().format_module_path(true).init()); } async_zip-0.0.16/src/tests/read/compression/bzip2.data000064400000000000000000000020551046102023000207770ustar 00000000000000BZh61AY&SY3n@1 "h0  "(Hr7async_zip-0.0.16/src/tests/read/compression/deflate.data000064400000000000000000000020111046102023000213450ustar 00000000000000KWHJ,async_zip-0.0.16/src/tests/read/compression/lzma.data000064400000000000000000000020361046102023000207130ustar 00000000000000]3@b1 %/툀async_zip-0.0.16/src/tests/read/compression/mod.rs000064400000000000000000000031711046102023000202430ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::read::io::compressed::CompressedReader; use crate::spec::Compression; compressed_test_helper!(stored_test, Compression::Stored, "foo bar", "foo bar"); #[cfg(feature = "deflate")] compressed_test_helper!(deflate_test, Compression::Deflate, "foo bar", include_bytes!("deflate.data")); #[cfg(feature = "bzip2")] compressed_test_helper!(bz_test, Compression::Bz, "foo bar", include_bytes!("bzip2.data")); #[cfg(feature = "lzma")] compressed_test_helper!(lzma_test, Compression::Lzma, "foo bar", include_bytes!("lzma.data")); #[cfg(feature = "zstd")] compressed_test_helper!(zstd_test, Compression::Zstd, "foo bar", include_bytes!("zstd.data")); #[cfg(feature = "xz")] compressed_test_helper!(xz_test, Compression::Xz, "foo bar", include_bytes!("xz.data")); /// A helper macro for generating a CompressedReader test using a specific compression method. macro_rules! 
compressed_test_helper { ($name:ident, $typ:expr, $data_raw:expr, $data:expr) => { #[cfg(test)] #[tokio::test] async fn $name() { use futures_lite::io::{AsyncReadExt, Cursor}; let data = $data; let data_raw = $data_raw; let cursor = Cursor::new(data); let mut reader = CompressedReader::new(cursor, $typ); let mut read_data = String::new(); reader.read_to_string(&mut read_data).await.expect("read into CompressedReader failed"); assert_eq!(read_data, data_raw); } }; } use compressed_test_helper; async_zip-0.0.16/src/tests/read/compression/xz.data000064400000000000000000000021001046102023000204010ustar 000000000000007zXZִF!t/foo bar"P .s}YZasync_zip-0.0.16/src/tests/read/compression/zstd.data000064400000000000000000000020201046102023000207250ustar 00000000000000(/X9foo barasync_zip-0.0.16/src/tests/read/locator/empty-buffer-boundary.zip000064400000000000000000000040241046102023000231700ustar 00000000000000PKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAasync_zip-0.0.16/src/tests/read/locator/empty-with-max-comment.zip000064400000000000000000002000251046102023000232730ustar 
00000000000000PKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAasync_zip-0.0.16/src/tests/read/locator/empty.zip000064400000000000000000000000261046102023000200760ustar 00000000000000PKasync_zip-0.0.16/src/tests/read/locator/mod.rs000064400000000000000000000036001046102023000173420ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) #[test] fn search_one_byte_test() { let buffer: &[u8] = &[0x0, 0x0, 0x0, 0x0, 
0x0, 0x0]; let signature: &[u8] = &[0x1]; let matched = crate::base::read::io::locator::reverse_search_buffer(buffer, signature); assert!(matched.is_none()); let buffer: &[u8] = &[0x2, 0x1, 0x0, 0x0, 0x0, 0x0]; let signature: &[u8] = &[0x1]; let matched = crate::base::read::io::locator::reverse_search_buffer(buffer, signature); assert!(matched.is_some()); assert_eq!(1, matched.unwrap()); } #[test] fn search_two_byte_test() { let buffer: &[u8] = &[0x2, 0x1, 0x0, 0x0, 0x0, 0x0]; let signature: &[u8] = &[0x2, 0x1]; let matched = crate::base::read::io::locator::reverse_search_buffer(buffer, signature); assert!(matched.is_some()); assert_eq!(1, matched.unwrap()); } #[tokio::test] async fn locator_empty_test() { use futures_lite::io::Cursor; let data = &include_bytes!("empty.zip"); let mut cursor = Cursor::new(data); let eocdr = crate::base::read::io::locator::eocdr(&mut cursor).await; assert!(eocdr.is_ok()); assert_eq!(eocdr.unwrap(), 4); } #[tokio::test] async fn locator_empty_max_comment_test() { use futures_lite::io::Cursor; let data = &include_bytes!("empty-with-max-comment.zip"); let mut cursor = Cursor::new(data); let eocdr = crate::base::read::io::locator::eocdr(&mut cursor).await; assert!(eocdr.is_ok()); assert_eq!(eocdr.unwrap(), 4); } #[tokio::test] async fn locator_buffer_boundary_test() { use futures_lite::io::Cursor; let data = &include_bytes!("empty-buffer-boundary.zip"); let mut cursor = Cursor::new(data); let eocdr = crate::base::read::io::locator::eocdr(&mut cursor).await; assert!(eocdr.is_ok()); assert_eq!(eocdr.unwrap(), 4); } async_zip-0.0.16/src/tests/read/mod.rs000064400000000000000000000003171046102023000157010ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod compression; pub(crate) mod locator; pub(crate) mod zip64; async_zip-0.0.16/src/tests/read/zip64/mod.rs000064400000000000000000000066461046102023000166700ustar 00000000000000// 
Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // Copyright (c) 2023 Cognite AS // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use futures_lite::io::AsyncReadExt; use crate::tests::init_logger; const ZIP64_ZIP_CONTENTS: &str = "Hello World!\n"; /// Tests opening and reading a zip64 archive. /// It contains one file named "-" with a zip 64 extended field header. #[tokio::test] async fn test_read_zip64_archive_mem() { use crate::base::read::mem::ZipFileReader; init_logger(); let data = include_bytes!("zip64.zip").to_vec(); let reader = ZipFileReader::new(data).await.unwrap(); let mut entry_reader = reader.reader_without_entry(0).await.unwrap(); let mut read_data = String::new(); entry_reader.read_to_string(&mut read_data).await.expect("read failed"); assert_eq!( read_data.chars().count(), ZIP64_ZIP_CONTENTS.chars().count(), "{read_data:?} != {ZIP64_ZIP_CONTENTS:?}" ); assert_eq!(read_data, ZIP64_ZIP_CONTENTS); } /// Like test_read_zip64_archive_mem() but for the streaming version #[tokio::test] async fn test_read_zip64_archive_stream() { use crate::base::read::stream::ZipFileReader; init_logger(); let data = include_bytes!("zip64.zip").to_vec(); let reader = ZipFileReader::new(data.as_slice()); let mut entry_reader = reader.next_without_entry().await.unwrap().unwrap(); let mut read_data = String::new(); entry_reader.reader_mut().read_to_string(&mut read_data).await.expect("read failed"); assert_eq!( read_data.chars().count(), ZIP64_ZIP_CONTENTS.chars().count(), "{read_data:?} != {ZIP64_ZIP_CONTENTS:?}" ); assert_eq!(read_data, ZIP64_ZIP_CONTENTS); } /// Generate an example file only if it doesn't exist already. /// The file is placed adjacent to this rs file. 
#[cfg(feature = "tokio")] fn generate_zip64many_zip() -> std::path::PathBuf { use std::io::Write; use zip::write::FileOptions; let mut path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); path.push("src/tests/read/zip64/zip64many.zip"); // Only recreate the zip if it doesnt already exist. if path.exists() { return path; } let zip_file = std::fs::File::create(&path).unwrap(); let mut zip = zip::ZipWriter::new(zip_file); let options = FileOptions::default().compression_method(zip::CompressionMethod::Stored); for i in 0..2_u32.pow(16) + 1 { zip.start_file(format!("{i}.txt"), options).unwrap(); zip.write_all(b"\n").unwrap(); } zip.finish().unwrap(); path } /// Test reading a generated zip64 archive that contains more than 2^16 entries. #[cfg(feature = "tokio-fs")] #[tokio::test] async fn test_read_zip64_archive_many_entries() { use crate::tokio::read::fs::ZipFileReader; init_logger(); let path = generate_zip64many_zip(); let reader = ZipFileReader::new(path).await.unwrap(); // Verify that each entry exists and is has the contents "\n" for i in 0..2_u32.pow(16) + 1 { let entry = reader.file().entries().get(i as usize).unwrap(); eprintln!("{:?}", entry.filename().as_bytes()); assert_eq!(entry.filename.as_str().unwrap(), format!("{i}.txt")); let mut entry = reader.reader_without_entry(i as usize).await.unwrap(); let mut contents = String::new(); entry.read_to_string(&mut contents).await.unwrap(); assert_eq!(contents, "\n"); } } async_zip-0.0.16/src/tests/read/zip64/zip64.zip000064400000000000000000000003211046102023000172230ustar 00000000000000PK-m+V}- Hello World! 
PK-m+V} -PK,-/@PKoPK/@async_zip-0.0.16/src/tests/spec/date.rs000064400000000000000000000010011046102023000160450ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) #[cfg(feature = "chrono")] use chrono::{TimeZone, Utc}; #[test] #[cfg(feature = "chrono")] fn date_conversion_test() { let original_dt = Utc.timestamp_opt(1666544102, 0).unwrap(); let zip_dt = crate::ZipDateTime::from_chrono(&original_dt); let result_dt = zip_dt.as_chrono().single().expect("expected single unique result"); assert_eq!(result_dt, original_dt); } async_zip-0.0.16/src/tests/spec/mod.rs000064400000000000000000000002321046102023000157140ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) pub(crate) mod date; async_zip-0.0.16/src/tests/write/mod.rs000064400000000000000000000016221046102023000161200ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use futures_lite::io::AsyncWrite; use std::io::Error; use std::pin::Pin; use std::task::{Context, Poll}; pub(crate) mod offset; mod zip64; /// /dev/null for AsyncWrite. /// Useful for tests that involve writing, but not reading, large amounts of data. pub(crate) struct AsyncSink; // AsyncSink is always ready to receive bytes and throw them away. 
impl AsyncWrite for AsyncSink { fn poll_write(self: Pin<&mut Self>, _: &mut Context<'_>, buf: &[u8]) -> Poll> { Poll::Ready(Ok(buf.len())) } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } } async_zip-0.0.16/src/tests/write/offset/mod.rs000064400000000000000000000013561046102023000174120ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::base::write::io::offset::AsyncOffsetWriter; #[tokio::test] async fn basic() { use futures_lite::io::AsyncWriteExt; use futures_lite::io::Cursor; let mut writer = AsyncOffsetWriter::new(Cursor::new(Vec::new())); assert_eq!(writer.offset(), 0); writer.write_all(b"Foo. Bar. Foo. Bar.").await.expect("failed to write data"); assert_eq!(writer.offset(), 19); writer.write_all(b"Foo. Foo.").await.expect("failed to write data"); assert_eq!(writer.offset(), 28); writer.write_all(b"Bar. Bar.").await.expect("failed to write data"); assert_eq!(writer.offset(), 37); } async_zip-0.0.16/src/tests/write/zip64/mod.rs000064400000000000000000000226451046102023000171040ustar 00000000000000// Copyright Cognite AS, 2023 use crate::base::write::ZipFileWriter; use crate::error::{Zip64ErrorCase, ZipError}; use crate::spec::consts::NON_ZIP64_MAX_SIZE; use crate::tests::init_logger; use crate::tests::write::AsyncSink; use crate::{Compression, ZipEntryBuilder}; use std::io::Read; use crate::spec::header::ExtraField; use futures_lite::io::AsyncWriteExt; // Useful constants for writing a large file. const BATCH_SIZE: usize = 100_000; const NUM_BATCHES: usize = NON_ZIP64_MAX_SIZE as usize / BATCH_SIZE + 1; const BATCHED_FILE_SIZE: usize = NUM_BATCHES * BATCH_SIZE; /// Test writing a small zip64 file. /// No zip64 extra fields will be emitted for EntryWhole. 
/// Z64 end of directory record & locator should be emitted #[tokio::test] async fn test_write_zip64_file() { init_logger(); let mut buffer = Vec::new(); let mut writer = ZipFileWriter::new(&mut buffer).force_zip64(); let entry = ZipEntryBuilder::new("file1".to_string().into(), Compression::Stored); writer.write_entry_whole(entry, &[0, 0, 0, 0]).await.unwrap(); let entry = ZipEntryBuilder::new("file2".to_string().into(), Compression::Stored); let mut entry_writer = writer.write_entry_stream(entry).await.unwrap(); entry_writer.write_all(&[0, 0, 0, 0]).await.unwrap(); entry_writer.close().await.unwrap(); writer.close().await.unwrap(); let cursor = std::io::Cursor::new(buffer); let mut zip = zip::read::ZipArchive::new(cursor).unwrap(); let mut file1 = zip.by_name("file1").unwrap(); assert_eq!(file1.extra_data(), &[] as &[u8]); let mut buffer = Vec::new(); file1.read_to_end(&mut buffer).unwrap(); assert_eq!(buffer.as_slice(), &[0, 0, 0, 0]); drop(file1); let mut file2 = zip.by_name("file2").unwrap(); let mut buffer = Vec::new(); file2.read_to_end(&mut buffer).unwrap(); assert_eq!(buffer.as_slice(), &[0, 0, 0, 0]); } /// Test writing a large zip64 file. This test will use upwards of 4GB of memory. #[tokio::test] async fn test_write_large_zip64_file() { init_logger(); // Allocate space with some extra for metadata records let mut buffer = Vec::with_capacity(BATCHED_FILE_SIZE + 100_000); let mut writer = ZipFileWriter::new(&mut buffer); // Stream-written zip files are dubiously spec-conformant. We need to specify a valid file size // in order for rs-zip (and unzip) to correctly read these files. 
let entry = ZipEntryBuilder::new("file".to_string().into(), Compression::Stored) .size(BATCHED_FILE_SIZE as u64, BATCHED_FILE_SIZE as u64); let mut entry_writer = writer.write_entry_stream(entry).await.unwrap(); for _ in 0..NUM_BATCHES { entry_writer.write_all(&[0; BATCH_SIZE]).await.unwrap(); } entry_writer.close().await.unwrap(); assert!(writer.is_zip64); let cd_entry = writer.cd_entries.last().unwrap(); match &cd_entry.entry.extra_fields.last().unwrap() { ExtraField::Zip64ExtendedInformation(zip64) => { assert_eq!(zip64.compressed_size.unwrap(), BATCHED_FILE_SIZE as u64); assert_eq!(zip64.uncompressed_size.unwrap(), BATCHED_FILE_SIZE as u64); } e => panic!("Expected a Zip64 extended field, got {:?}", e), } assert_eq!(cd_entry.header.uncompressed_size, NON_ZIP64_MAX_SIZE); assert_eq!(cd_entry.header.compressed_size, NON_ZIP64_MAX_SIZE); writer.close().await.unwrap(); let cursor = std::io::Cursor::new(buffer); let mut archive = zip::read::ZipArchive::new(cursor).unwrap(); let mut file = archive.by_name("file").unwrap(); assert_eq!(file.compression(), zip::CompressionMethod::Stored); assert_eq!(file.size(), BATCHED_FILE_SIZE as u64); let mut buffer = [0; 100_000]; let mut bytes_total = 0; loop { let read_bytes = file.read(&mut buffer).unwrap(); if read_bytes == 0 { break; } bytes_total += read_bytes; } assert_eq!(bytes_total, BATCHED_FILE_SIZE); } /// Test writing a file, and reading it with async-zip #[tokio::test] async fn test_write_large_zip64_file_self_read() { use futures_lite::io::AsyncReadExt; init_logger(); // Allocate space with some extra for metadata records let mut buffer = Vec::with_capacity(BATCHED_FILE_SIZE + 100_000); let mut writer = ZipFileWriter::new(&mut buffer); let entry = ZipEntryBuilder::new("file".into(), Compression::Stored); let mut entry_writer = writer.write_entry_stream(entry).await.unwrap(); for _ in 0..NUM_BATCHES { entry_writer.write_all(&[0; BATCH_SIZE]).await.unwrap(); } entry_writer.close().await.unwrap(); 
writer.close().await.unwrap(); let reader = crate::base::read::mem::ZipFileReader::new(buffer).await.unwrap(); assert!(reader.file().zip64); assert_eq!(reader.file().entries[0].entry.filename().as_str().unwrap(), "file"); assert_eq!(reader.file().entries[0].entry.compressed_size, BATCHED_FILE_SIZE as u64); let mut entry = reader.reader_without_entry(0).await.unwrap(); let mut buffer = [0; 100_000]; let mut bytes_total = 0; loop { let read_bytes = entry.read(&mut buffer).await.unwrap(); if read_bytes == 0 { break; } bytes_total += read_bytes; } assert_eq!(bytes_total, BATCHED_FILE_SIZE); } /// Test writing a zip64 file with more than u16::MAX files. #[tokio::test] async fn test_write_zip64_file_many_entries() { init_logger(); // The generated file will likely be ~3MB in size. let mut buffer = Vec::with_capacity(3_500_000); let mut writer = ZipFileWriter::new(&mut buffer); for i in 0..=u16::MAX as u32 + 1 { let entry = ZipEntryBuilder::new(i.to_string().into(), Compression::Stored); writer.write_entry_whole(entry, &[]).await.unwrap(); } assert!(writer.is_zip64); writer.close().await.unwrap(); let cursor = std::io::Cursor::new(buffer); let mut zip = zip::read::ZipArchive::new(cursor).unwrap(); assert_eq!(zip.len(), u16::MAX as usize + 2); for i in 0..=u16::MAX as u32 + 1 { let mut file = zip.by_name(&i.to_string()).unwrap(); let mut buf = Vec::new(); file.read_to_end(&mut buf).unwrap(); } } /// Tests that EntryWholeWriter switches to Zip64 mode when writing too many files for a non-Zip64. #[tokio::test] async fn test_zip64_when_many_files_whole() { let mut sink = AsyncSink; let mut writer = ZipFileWriter::new(&mut sink); for i in 0..=u16::MAX as u32 + 1 { let entry = ZipEntryBuilder::new(format!("{i}").into(), Compression::Stored); writer.write_entry_whole(entry, &[]).await.unwrap() } assert!(writer.is_zip64); writer.close().await.unwrap(); } /// Tests that EntryStreamWriter switches to Zip64 mode when writing too many files for a non-Zip64. 
#[tokio::test] async fn test_zip64_when_many_files_stream() { let mut sink = AsyncSink; let mut writer = ZipFileWriter::new(&mut sink); for i in 0..=u16::MAX as u32 + 1 { let entry = ZipEntryBuilder::new(format!("{i}").into(), Compression::Stored); let entrywriter = writer.write_entry_stream(entry).await.unwrap(); entrywriter.close().await.unwrap(); } assert!(writer.is_zip64); writer.close().await.unwrap(); } /// Tests that when force_no_zip64 is true, EntryWholeWriter errors when trying to write more than /// u16::MAX files to a single archive. #[tokio::test] async fn test_force_no_zip64_errors_with_too_many_files_whole() { let mut sink = AsyncSink; let mut writer = ZipFileWriter::new(&mut sink).force_no_zip64(); for i in 0..u16::MAX { let entry = ZipEntryBuilder::new(format!("{i}").into(), Compression::Stored); writer.write_entry_whole(entry, &[]).await.unwrap() } let entry = ZipEntryBuilder::new("65537".to_string().into(), Compression::Stored); let result = writer.write_entry_whole(entry, &[]).await; assert!(matches!(result, Err(ZipError::Zip64Needed(Zip64ErrorCase::TooManyFiles)))); } /// Tests that when force_no_zip64 is true, EntryStreamWriter errors when trying to write more than /// u16::MAX files to a single archive. 
#[tokio::test] async fn test_force_no_zip64_errors_with_too_many_files_stream() { let mut sink = AsyncSink; let mut writer = ZipFileWriter::new(&mut sink).force_no_zip64(); for i in 0..u16::MAX { let entry = ZipEntryBuilder::new(format!("{i}").into(), Compression::Stored); let entrywriter = writer.write_entry_stream(entry).await.unwrap(); entrywriter.close().await.unwrap(); } let entry = ZipEntryBuilder::new("65537".to_string().into(), Compression::Stored); let entrywriter = writer.write_entry_stream(entry).await.unwrap(); let result = entrywriter.close().await; assert!(matches!(result, Err(ZipError::Zip64Needed(Zip64ErrorCase::TooManyFiles)))); } /// Tests that when force_no_zip64 is true, EntryStreamWriter errors when trying to write /// a file larger than ~4 GiB to an archive. #[tokio::test] async fn test_force_no_zip64_errors_with_too_large_file_stream() { let mut sink = AsyncSink; let mut writer = ZipFileWriter::new(&mut sink).force_no_zip64(); let entry = ZipEntryBuilder::new("-".to_string().into(), Compression::Stored); let mut entrywriter = writer.write_entry_stream(entry).await.unwrap(); // Writing 4GB, 1kb at a time for _ in 0..NUM_BATCHES { entrywriter.write_all(&[0; BATCH_SIZE]).await.unwrap(); } let result = entrywriter.close().await; assert!(matches!(result, Err(ZipError::Zip64Needed(Zip64ErrorCase::LargeFile)))); } async_zip-0.0.16/src/tokio/mod.rs000064400000000000000000000033141046102023000147510ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A set of [`tokio`]-specific type aliases and features. //! //! # Usage //! With the `tokio` feature enabled, types from the [`base`] implementation will implement additional constructors //! for use with [`tokio`]. These constructors internally implement conversion between the required async IO traits. //! They are defined as: //! - [`base::read::seek::ZipFileReader::with_tokio()`] //! 
- [`base::read::stream::ZipFileReader::with_tokio()`] //! - [`base::write::ZipFileWriter::with_tokio()`] //! //! As a result of Rust's type inference, we are able to reuse the [`base`] implementation's types with considerable //! ease. There only exists one caveat with their use; the types returned by these constructors contain a wrapping //! compatibility type provided by an external crate. These compatibility types cannot be named unless you also pull in //! the [`tokio_util`] dependency manually. This is why we've provided type aliases within this module so that they can //! be named without needing to pull in a separate dependency. #[cfg(doc)] use crate::base; #[cfg(doc)] use tokio; #[cfg(doc)] use tokio_util; pub mod read; pub mod write { //! A module which supports writing ZIP files. #[cfg(doc)] use crate::base; use tokio_util::compat::Compat; /// A [`tokio`]-specific type alias for [`base::write::ZipFileWriter`]; pub type ZipFileWriter = crate::base::write::ZipFileWriter>; /// A [`tokio`]-specific type alias for [`base::write::EntryStreamWriter`]; pub type EntryStreamWriter<'a, W> = crate::base::write::EntryStreamWriter<'a, Compat>; } async_zip-0.0.16/src/tokio/read/fs.rs000064400000000000000000000122771046102023000155250ustar 00000000000000// Copyright (c) 2022 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A concurrent ZIP reader which acts over a file system path. //! //! Concurrency is achieved as a result of: //! - Wrapping the provided path within an [`Arc`] to allow shared ownership. //! - Constructing a new [`File`] from the path when reading. //! //! ### Usage //! Unlike the [`seek`] module, we no longer hold a mutable reference to any inner reader which in turn, allows the //! construction of concurrent [`ZipEntryReader`]s. Though, note that each individual [`ZipEntryReader`] cannot be sent //! between thread boundaries due to the masked lifetime requirement. 
Therefore, the overarching [`ZipFileReader`] //! should be cloned and moved into those contexts when needed. //! //! ### Concurrent Example //! ```no_run //! # use async_zip::tokio::read::fs::ZipFileReader; //! # use async_zip::error::Result; //! # use futures_lite::io::AsyncReadExt; //! # //! async fn run() -> Result<()> { //! let reader = ZipFileReader::new("./foo.zip").await?; //! let result = tokio::join!(read(&reader, 0), read(&reader, 1)); //! //! let data_0 = result.0?; //! let data_1 = result.1?; //! //! // Use data within current scope. //! //! Ok(()) //! } //! //! async fn read(reader: &ZipFileReader, index: usize) -> Result> { //! let mut entry = reader.reader_without_entry(index).await?; //! let mut data = Vec::new(); //! entry.read_to_end(&mut data).await?; //! Ok(data) //! } //! ``` //! //! ### Parallel Example //! ```no_run //! # use async_zip::tokio::read::fs::ZipFileReader; //! # use async_zip::error::Result; //! # use futures_lite::io::AsyncReadExt; //! # //! async fn run() -> Result<()> { //! let reader = ZipFileReader::new("./foo.zip").await?; //! //! let handle_0 = tokio::spawn(read(reader.clone(), 0)); //! let handle_1 = tokio::spawn(read(reader.clone(), 1)); //! //! let data_0 = handle_0.await.expect("thread panicked")?; //! let data_1 = handle_1.await.expect("thread panicked")?; //! //! // Use data within current scope. //! //! Ok(()) //! } //! //! async fn read(reader: ZipFileReader, index: usize) -> Result> { //! let mut entry = reader.reader_without_entry(index).await?; //! let mut data = Vec::new(); //! entry.read_to_end(&mut data).await?; //! Ok(data) //! } //! 
``` #[cfg(doc)] use crate::base::read::seek; use crate::base::read::io::entry::{WithEntry, WithoutEntry, ZipEntryReader}; use crate::error::{Result, ZipError}; use crate::file::ZipFile; use std::path::{Path, PathBuf}; use std::sync::Arc; use futures_lite::io::BufReader; use tokio::fs::File; use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; struct Inner { path: PathBuf, file: ZipFile, } /// A concurrent ZIP reader which acts over a file system path. #[derive(Clone)] pub struct ZipFileReader { inner: Arc, } impl ZipFileReader { /// Constructs a new ZIP reader from a file system path. pub async fn new

(path: P) -> Result where P: AsRef, { let file = crate::base::read::file(File::open(&path).await?.compat()).await?; Ok(ZipFileReader::from_raw_parts(path, file)) } /// Constructs a ZIP reader from a file system path and ZIP file information derived from that path. /// /// Providing a [`ZipFile`] that wasn't derived from that path may lead to inaccurate parsing. pub fn from_raw_parts

(path: P, file: ZipFile) -> ZipFileReader where P: AsRef, { ZipFileReader { inner: Arc::new(Inner { path: path.as_ref().to_owned(), file }) } } /// Returns this ZIP file's information. pub fn file(&self) -> &ZipFile { &self.inner.file } /// Returns the file system path provided to the reader during construction. pub fn path(&self) -> &Path { &self.inner.path } /// Returns a new entry reader if the provided index is valid. pub async fn reader_without_entry( &self, index: usize, ) -> Result, WithoutEntry>> { let stored_entry = self.inner.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut fs_file = BufReader::new(File::open(&self.inner.path).await?.compat()); stored_entry.seek_to_data_offset(&mut fs_file).await?; Ok(ZipEntryReader::new_with_owned( fs_file, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), )) } /// Returns a new entry reader if the provided index is valid. pub async fn reader_with_entry(&self, index: usize) -> Result, WithEntry<'_>>> { let stored_entry = self.inner.file.entries.get(index).ok_or(ZipError::EntryIndexOutOfBounds)?; let mut fs_file = BufReader::new(File::open(&self.inner.path).await?.compat()); stored_entry.seek_to_data_offset(&mut fs_file).await?; let reader = ZipEntryReader::new_with_owned( fs_file, stored_entry.entry.compression(), stored_entry.entry.compressed_size(), ); Ok(reader.into_with_entry(stored_entry)) } } async_zip-0.0.16/src/tokio/read/mod.rs000064400000000000000000000024331046102023000156650ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) //! A module which supports reading ZIP files. 
use tokio_util::compat::Compat; #[cfg(feature = "tokio-fs")] pub mod fs; #[cfg(doc)] use crate::base; #[cfg(doc)] use tokio; /// A [`tokio`]-specific type alias for [`base::read::ZipEntryReader`]; pub type ZipEntryReader<'a, R, E> = crate::base::read::ZipEntryReader<'a, Compat, E>; pub mod seek { //! A ZIP reader which acts over a seekable source. use tokio_util::compat::Compat; #[cfg(doc)] use crate::base; #[cfg(doc)] use tokio; /// A [`tokio`]-specific type alias for [`base::read::seek::ZipFileReader`]; pub type ZipFileReader = crate::base::read::seek::ZipFileReader>; } pub mod stream { //! A ZIP reader which acts over a non-seekable source. #[cfg(doc)] use crate::base; #[cfg(doc)] use tokio; use tokio_util::compat::Compat; /// A [`tokio`]-specific type alias for [`base::read::stream::Reading`]; pub type Reading<'a, R, E> = crate::base::read::stream::Reading<'a, Compat, E>; /// A [`tokio`]-specific type alias for [`base::read::stream::Ready`]; pub type Ready = crate::base::read::stream::Ready>; } async_zip-0.0.16/src/utils.rs000064400000000000000000000013421046102023000142040ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use crate::error::{Result, ZipError}; use futures_lite::io::{AsyncRead, AsyncReadExt}; // Assert that the next four-byte signature read by a reader which impls AsyncRead matches the expected signature. 
pub(crate) async fn assert_signature(reader: &mut R, expected: u32) -> Result<()> { let signature = { let mut buffer = [0; 4]; reader.read_exact(&mut buffer).await?; u32::from_le_bytes(buffer) }; match signature { actual if actual == expected => Ok(()), actual => Err(ZipError::UnexpectedHeaderError(actual, expected)), } } async_zip-0.0.16/tests/common/mod.rs000064400000000000000000000076001046102023000154710ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use async_zip::base::read::mem; use async_zip::base::read::seek; use async_zip::base::write::ZipFileWriter; use async_zip::Compression; use async_zip::ZipEntryBuilder; use futures_lite::io::AsyncWriteExt; use tokio::fs::File; use tokio_util::compat::TokioAsyncReadCompatExt; const FOLDER_PREFIX: &str = "tests/test_inputs"; const FILE_LIST: &[&str] = &[ "sample_data/alpha/back_to_front.txt", "sample_data/alpha/front_to_back.txt", "sample_data/numeric/forward.txt", "sample_data/numeric/reverse.txt", ]; pub async fn compress_to_mem(compress: Compression) -> Vec { let mut bytes = Vec::with_capacity(10_000); let mut writer = ZipFileWriter::new(&mut bytes); for fname in FILE_LIST { let content = tokio::fs::read(format!("{FOLDER_PREFIX}/{fname}")).await.unwrap(); let opts = ZipEntryBuilder::new(fname.to_string().into(), compress); let mut entry_writer = writer.write_entry_stream(opts).await.unwrap(); entry_writer.write_all(&content).await.unwrap(); entry_writer.close().await.unwrap(); } writer.close().await.unwrap(); bytes } #[cfg(feature = "tokio-fs")] pub async fn check_decompress_fs(fname: &str) { use async_zip::tokio::read::fs; let zip = fs::ZipFileReader::new(fname).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); for (idx, entry) in zip_entries.into_iter().enumerate() { // TODO: resolve unwrap usage if entry.dir().unwrap() { continue; } // TODO: resolve unwrap usage let fname = 
entry.filename().as_str().unwrap(); let mut output = String::new(); let mut reader = zip.reader_with_entry(idx).await.unwrap(); let _ = reader.read_to_string_checked(&mut output).await.unwrap(); let fs_file = format!("{FOLDER_PREFIX}/{fname}"); let expected = tokio::fs::read_to_string(fs_file).await.unwrap(); assert_eq!(output, expected, "for {fname}, expect zip data to match file data"); } } pub async fn check_decompress_seek(fname: &str) { let file = File::open(fname).await.unwrap(); let mut file_compat = file.compat(); let mut zip = seek::ZipFileReader::new(&mut file_compat).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); for (idx, entry) in zip_entries.into_iter().enumerate() { // TODO: resolve unwrap usage if entry.dir().unwrap() { continue; } // TODO: resolve unwrap usage let fname = entry.filename().as_str().unwrap(); let mut output = String::new(); let mut reader = zip.reader_with_entry(idx).await.unwrap(); let _ = reader.read_to_string_checked(&mut output).await.unwrap(); let fs_file = format!("tests/test_inputs/{fname}"); let expected = tokio::fs::read_to_string(fs_file).await.unwrap(); assert_eq!(output, expected, "for {fname}, expect zip data to match file data"); } } pub async fn check_decompress_mem(zip_data: Vec) { let zip = mem::ZipFileReader::new(zip_data).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); for (idx, entry) in zip_entries.into_iter().enumerate() { // TODO: resolve unwrap usage if entry.dir().unwrap() { continue; } // TODO: resolve unwrap usage let fname = entry.filename().as_str().unwrap(); let mut output = String::new(); let mut reader = zip.reader_with_entry(idx).await.unwrap(); let _ = reader.read_to_string_checked(&mut output).await.unwrap(); let fs_file = format!("{FOLDER_PREFIX}/{fname}"); let expected = tokio::fs::read_to_string(fs_file).await.unwrap(); assert_eq!(output, expected, "for {fname}, expect zip data to match file data"); } } 
async_zip-0.0.16/tests/compress_test.rs000064400000000000000000000055521046102023000163200ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use async_zip::{Compression, ZipEntryBuilder, ZipString}; use futures_lite::AsyncWriteExt; mod common; #[cfg(feature = "zstd")] #[tokio::test] async fn zip_zstd_in_out() { let zip_data = common::compress_to_mem(Compression::Zstd).await; common::check_decompress_mem(zip_data).await } #[cfg(feature = "deflate")] #[tokio::test] async fn zip_decompress_in_out() { let zip_data = common::compress_to_mem(Compression::Deflate).await; common::check_decompress_mem(zip_data).await } #[tokio::test] async fn zip_store_in_out() { let zip_data = common::compress_to_mem(Compression::Stored).await; common::check_decompress_mem(zip_data).await } #[tokio::test] async fn zip_utf8_extra_in_out_stream() { let mut zip_bytes = Vec::with_capacity(10_000); { // writing let content = "Test".as_bytes(); let mut writer = async_zip::base::write::ZipFileWriter::new(&mut zip_bytes); let filename = ZipString::new_with_alternative("\u{4E2D}\u{6587}.txt".to_string(), b"\xD6\xD0\xCe\xC4.txt".to_vec()); let opts = ZipEntryBuilder::new(filename, Compression::Stored); let mut entry_writer = writer.write_entry_stream(opts).await.unwrap(); entry_writer.write_all(content).await.unwrap(); entry_writer.close().await.unwrap(); writer.close().await.unwrap(); } { // reading let zip = async_zip::base::read::mem::ZipFileReader::new(zip_bytes).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); assert_eq!(zip_entries.len(), 1); assert_eq!(zip_entries[0].filename().as_str().unwrap(), "\u{4E2D}\u{6587}.txt"); assert_eq!(zip_entries[0].filename().alternative(), Some(b"\xD6\xD0\xCe\xC4.txt".as_ref())); } } #[tokio::test] async fn zip_utf8_extra_in_out_whole() { let mut zip_bytes = Vec::with_capacity(10_000); { // writing let content = "Test".as_bytes(); let 
mut writer = async_zip::base::write::ZipFileWriter::new(&mut zip_bytes); let filename = ZipString::new_with_alternative("\u{4E2D}\u{6587}.txt".to_string(), b"\xD6\xD0\xCe\xC4.txt".to_vec()); let opts = ZipEntryBuilder::new(filename, Compression::Stored); writer.write_entry_whole(opts, content).await.unwrap(); writer.close().await.unwrap(); } { // reading let zip = async_zip::base::read::mem::ZipFileReader::new(zip_bytes).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); assert_eq!(zip_entries.len(), 1); assert_eq!(zip_entries[0].filename().as_str().unwrap(), "\u{4E2D}\u{6587}.txt"); assert_eq!(zip_entries[0].filename().alternative(), Some(b"\xD6\xD0\xCe\xC4.txt".as_ref())); } } async_zip-0.0.16/tests/decompress_test.rs000064400000000000000000000054151046102023000166270ustar 00000000000000// Copyright (c) 2023 Harry [Majored] [hello@majored.pw] // MIT License (https://github.com/Majored/rs-async-zip/blob/main/LICENSE) use tokio_util::compat::TokioAsyncReadCompatExt; mod common; const ZSTD_ZIP_FILE: &str = "tests/test_inputs/sample_data.zstd.zip"; const DEFLATE_ZIP_FILE: &str = "tests/test_inputs/sample_data.deflate.zip"; const STORE_ZIP_FILE: &str = "tests/test_inputs/sample_data.store.zip"; const UTF8_EXTRA_ZIP_FILE: &str = "tests/test_inputs/sample_data_utf8_extra.zip"; #[cfg(feature = "zstd")] #[tokio::test] async fn decompress_zstd_zip_seek() { common::check_decompress_seek(ZSTD_ZIP_FILE).await } #[cfg(feature = "deflate")] #[tokio::test] async fn decompress_deflate_zip_seek() { common::check_decompress_seek(DEFLATE_ZIP_FILE).await } #[tokio::test] async fn check_empty_zip_seek() { let mut data: Vec = Vec::new(); async_zip::base::write::ZipFileWriter::new(futures::io::Cursor::new(&mut data)).close().await.unwrap(); async_zip::base::read::seek::ZipFileReader::new(futures::io::Cursor::new(&data)).await.unwrap(); } #[tokio::test] async fn decompress_store_zip_seek() { common::check_decompress_seek(STORE_ZIP_FILE).await } #[cfg(feature = 
"zstd")] #[tokio::test] async fn decompress_zstd_zip_mem() { let content = tokio::fs::read(ZSTD_ZIP_FILE).await.unwrap(); common::check_decompress_mem(content).await } #[cfg(feature = "deflate")] #[tokio::test] async fn decompress_deflate_zip_mem() { let content = tokio::fs::read(DEFLATE_ZIP_FILE).await.unwrap(); common::check_decompress_mem(content).await } #[tokio::test] async fn decompress_store_zip_mem() { let content = tokio::fs::read(STORE_ZIP_FILE).await.unwrap(); common::check_decompress_mem(content).await } #[cfg(feature = "zstd")] #[cfg(feature = "tokio-fs")] #[tokio::test] async fn decompress_zstd_zip_fs() { common::check_decompress_fs(ZSTD_ZIP_FILE).await } #[cfg(feature = "deflate")] #[cfg(feature = "tokio-fs")] #[tokio::test] async fn decompress_deflate_zip_fs() { common::check_decompress_fs(DEFLATE_ZIP_FILE).await } #[cfg(feature = "tokio-fs")] #[tokio::test] async fn decompress_store_zip_fs() { common::check_decompress_fs(STORE_ZIP_FILE).await } #[tokio::test] async fn decompress_zip_with_utf8_extra() { let file = tokio::fs::File::open(UTF8_EXTRA_ZIP_FILE).await.unwrap(); let mut file_compat = file.compat(); let zip = async_zip::base::read::seek::ZipFileReader::new(&mut file_compat).await.unwrap(); let zip_entries: Vec<_> = zip.file().entries().to_vec(); assert_eq!(zip_entries.len(), 1); assert_eq!(zip_entries[0].header_size(), 93); assert_eq!(zip_entries[0].filename().as_str().unwrap(), "\u{4E2D}\u{6587}.txt"); assert_eq!(zip_entries[0].filename().alternative(), Some(b"\xD6\xD0\xCe\xC4.txt".as_ref())); } async_zip-0.0.16/tests/test_inputs/sample_data/alpha/back_to_front.txt000064400000000000000000000006401046102023000243440ustar 00000000000000Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a 
Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a async_zip-0.0.16/tests/test_inputs/sample_data/alpha/front_to_back.txt000064400000000000000000000006401046102023000243440ustar 00000000000000A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z async_zip-0.0.16/tests/test_inputs/sample_data/numeric/forward.txt000064400000000000000000000001271046102023000235530ustar 000000000000001,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32 async_zip-0.0.16/tests/test_inputs/sample_data/numeric/reverse.txt000064400000000000000000000001271046102023000235620ustar 0000000000000032,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1 async_zip-0.0.16/tests/test_inputs/sample_data.deflate.zip000064400000000000000000000022661046102023000220410ustar 00000000000000PK0V sample_data/PK0Vsample_data/numeric/PK'0V (5Wsample_data/numeric/forward.txt >@ofr'.p,xb#HBlr%M0OsУBI r!,d"!n\80(.͢ PK0Vsample_data/alpha/PKl0V2>W#sample_data/alpha/front_to_back.txtd ]O2 -,%*RU+5kuWSemck(t9] \ ݌܍=LW#xsample_data/alpha/front_to_back.txtPK?=0VFmUW#sample_data/alpha/back_to_front.txtPKasync_zip-0.0.16/tests/test_inputs/sample_data.store.zip000064400000000000000000000036041046102023000215660ustar 00000000000000PK0V sample_data/PK0Vsample_data/numeric/PK '0V (WWsample_data/numeric/forward.txt1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32 PK 10V[WWsample_data/numeric/reverse.txt32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1 PK0Vsample_data/alpha/PK 
l0V2>#sample_data/alpha/front_to_back.txtA,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z A,a,B,b,C,c,D,d,E,e,F,f,G,g,H,h,I,I,J,j,K,k,L,l,M,m,N,n,O,o,P,p,Q,q,R,r,S,s,T,t,U,u,V,v,W,w,X,x,Y,y,Z,z PK =0VFmU#sample_data/alpha/back_to_front.txtZ,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a Z,z,Y,y,X,x,W,w,V,v,U,u,T,t,S,s,R,r,Q,q,P,p,O,o,N,n,M,m,L,l,K,k,J,j,I,I,H,h,G,g,F,f,E,e,D,d,C,c,B,b,A,a PK?0V Asample_data/PK?0VA*sample_data/numeric/PK? '0V (WW\sample_data/numeric/forward.txtPK? 10V[WWsample_data/numeric/reverse.txtPK?0VAsample_data/alpha/PK? l0V2>#sample_data/alpha/front_to_back.txtPK? 
=0VFmU#sample_data/alpha/back_to_front.txtPKvasync_zip-0.0.16/tests/test_inputs/sample_data.zstd.zip000064400000000000000000000023551046102023000214200ustar 00000000000000PK]0V sample_data/(/ PK]0V sample_data/numeric/(/ PK]'0V (<Wsample_data/numeric/forward.txt(/Xr 7f lYcgZ hQCGRCa$ǹ PK]10V[<Wsample_data/numeric/reverse.txt(/Xr 7f2( NG-ngǒ-Ê PK]0V sample_data/alpha/(/ PK]l0V2>b#sample_data/alpha/front_to_back.txt(/X0wk8:`-@(VAEDFD@JU*$㩣ic)#⨢hb("᧡ga'!ঠf`&bv/J-ev+yhͪ(PK]=0VFmUb#sample_data/alpha/back_to_front.txt(/X0wk8:`-@(VAEDFD@JUU:Vj-\fjn rvz~"&*.26:>Bhͪ(PK?]0V Asample_data/PK?]0V A3sample_data/numeric/PK?]'0V (<Wnsample_data/numeric/forward.txtPK?]10V[<Wsample_data/numeric/reverse.txtPK?]0V A`sample_data/alpha/PK?]l0V2>b#sample_data/alpha/front_to_back.txtPK?]=0VFmUb#<sample_data/alpha/back_to_front.txtPKasync_zip-0.0.16/tests/test_inputs/sample_data_utf8_extra.zip000064400000000000000000000003001046102023000225720ustar 00000000000000PKr W.txtup2中文.txtPKr W7 .txt YYYup2中文.txtPKm9