liblzma-0.3.4/.cargo/config.toml000064400000000000000000000001301046102023000145670ustar 00000000000000[target."wasm32-wasi"] runner = "wasmtime" [target."wasm32-wasip1"] runner = "wasmtime" liblzma-0.3.4/.cargo_vcs_info.json0000644000000001360000000000100124720ustar { "git": { "sha1": "a3002b523b1794ca5c676f0354e6cda8d1d0f4f3" }, "path_in_vcs": "" }liblzma-0.3.4/.gitignore000064400000000000000000000076711046102023000132650ustar 00000000000000# Created by https://www.toptal.com/developers/gitignore/api/windows,linux,macos,visualstudiocode,intellij+all,rust # Edit at https://www.toptal.com/developers/gitignore?templates=windows,linux,macos,visualstudiocode,intellij+all,rust ### Intellij+all ### # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff .idea/**/workspace.xml .idea/**/tasks.xml .idea/**/usage.statistics.xml .idea/**/dictionaries .idea/**/shelf # AWS User-specific .idea/**/aws.xml # Generated files .idea/**/contentModel.xml # Sensitive or high-churn files .idea/**/dataSources/ .idea/**/dataSources.ids .idea/**/dataSources.local.xml .idea/**/sqlDataSources.xml .idea/**/dynamic.xml .idea/**/uiDesigner.xml .idea/**/dbnavigator.xml # Gradle .idea/**/gradle.xml .idea/**/libraries # Gradle and Maven with auto-import # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
# .idea/artifacts # .idea/compiler.xml # .idea/jarRepositories.xml # .idea/modules.xml # .idea/*.iml # .idea/modules # *.iml # *.ipr # CMake cmake-build-*/ # Mongo Explorer plugin .idea/**/mongoSettings.xml # File-based project format *.iws # IntelliJ out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # SonarLint plugin .idea/sonarlint/ # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties # Editor-based Rest Client .idea/httpRequests # Android studio 3.1+ serialized cache file .idea/caches/build_file_checksums.ser ### Intellij+all Patch ### # Ignore everything but code style settings and run configurations # that are supposed to be shared within teams. .idea/* !.idea/codeStyles !.idea/runConfigurations ### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted file .fuse_hidden* # KDE directory preferences .directory # Linux trash folder which might appear on any partition or disk .Trash-* # .nfs files are created when an open file is removed but is still being accessed .nfs* ### macOS ### # General .DS_Store .AppleDouble .LSOverride # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk ### macOS Patch ### # iCloud generated files *.icloud ### Rust ### # Generated by Cargo # will have compiled files and executables debug/ target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files 
generated by rustfmt **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information *.pdb ### VisualStudioCode ### .vscode/* !.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json !.vscode/*.code-snippets # Local History for Visual Studio Code .history/ # Built Visual Studio Code Extensions *.vsix ### VisualStudioCode Patch ### # Ignore all local history of files .history .ionide ### Windows ### # Windows thumbnail cache files Thumbs.db Thumbs.db:encryptable ehthumbs.db ehthumbs_vista.db # Dump file *.stackdump # Folder config file [Dd]esktop.ini # Recycle Bin used on file shares $RECYCLE.BIN/ # Windows Installer files *.cab *.msi *.msix *.msm *.msp # Windows shortcuts *.lnk # End of https://www.toptal.com/developers/gitignore/api/windows,linux,macos,visualstudiocode,intellij+all,rust liblzma-0.3.4/.gitmodules000064400000000000000000000001641046102023000134400ustar 00000000000000[submodule "liblzma-sys/xz"] path = liblzma-sys/xz url = https://github.com/tukaani-project/xz.git branch = v5.6 liblzma-0.3.4/Cargo.toml0000644000000043210000000000100104700ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "liblzma" version = "0.3.4" authors = [ "Alex Crichton ", "Portable-Network-Archive Developers", ] build = false exclude = [".github/"] autobins = false autoexamples = false autotests = false autobenches = false description = """ Rust bindings to liblzma providing Read/Write streams as well as low-level in-memory encoding/decoding. forked from xz2. 
""" homepage = "https://github.com/portable-network-archive/liblzma-rs" readme = "README.md" keywords = [ "lzma", "xz", "encoding", "wasm", ] categories = [ "compression", "api-bindings", ] license = "MIT OR Apache-2.0" repository = "https://github.com/portable-network-archive/liblzma-rs" [package.metadata.docs.rs] features = [ "tokio-io", "futures", "parallel", ] [lib] name = "liblzma" path = "src/lib.rs" [[test]] name = "drop-incomplete" path = "tests/drop-incomplete.rs" [[test]] name = "tokio" path = "tests/tokio.rs" [[test]] name = "xz" path = "tests/xz.rs" [dependencies.futures] version = "0.1.26" optional = true [dependencies.liblzma-sys] version = "0.3.7" default-features = false [dependencies.num_cpus] version = "1.16.0" optional = true [dependencies.tokio-io] version = "0.1.12" optional = true [dev-dependencies.quickcheck] version = "1.0.1" [dev-dependencies.rand] version = "0.8.0" [features] bindgen = ["liblzma-sys/bindgen"] default = ["bindgen"] parallel = [ "liblzma-sys/parallel", "num_cpus", ] static = ["liblzma-sys/static"] tokio = [ "tokio-io", "futures", ] wasm = ["liblzma-sys/wasm"] [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.tokio-core] version = "0.1.17" [target.'cfg(target_arch = "wasm32")'.dev-dependencies.getrandom] version = "0.2" features = ["js"] liblzma-0.3.4/Cargo.toml.orig000064400000000000000000000026111046102023000141510ustar 00000000000000[package] name = "liblzma" version = "0.3.4" authors = ["Alex Crichton ", "Portable-Network-Archive Developers"] license = "MIT OR Apache-2.0" readme = "README.md" keywords = ["lzma", "xz", "encoding", "wasm"] repository = "https://github.com/portable-network-archive/liblzma-rs" homepage = "https://github.com/portable-network-archive/liblzma-rs" description = """ Rust bindings to liblzma providing Read/Write streams as well as low-level in-memory encoding/decoding. forked from xz2. 
""" categories = ["compression", "api-bindings"] edition = "2021" exclude = [".github/"] [workspace] members = ["systest"] [dependencies] liblzma-sys = { path = "liblzma-sys", version = "0.3.7", default-features = false } tokio-io = { version = "0.1.12", optional = true } futures = { version = "0.1.26", optional = true } num_cpus = { version = "1.16.0", optional = true } [dev-dependencies] rand = "0.8.0" quickcheck = "1.0.1" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] getrandom = { version = "0.2", features = ["js"] } [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio-core = "0.1.17" [features] default = ["bindgen"] tokio = ["tokio-io", "futures"] static = ["liblzma-sys/static"] parallel = ["liblzma-sys/parallel", "num_cpus"] bindgen = ["liblzma-sys/bindgen"] wasm = ["liblzma-sys/wasm"] [package.metadata.docs.rs] features = ["tokio-io", "futures", "parallel"] liblzma-0.3.4/LICENSE-APACHE000064400000000000000000000251371046102023000132160ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. liblzma-0.3.4/LICENSE-MIT000064400000000000000000000021161046102023000127160ustar 00000000000000Copyright (c) 2016-2023 Alex Crichton and Portable-Network-Archive Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
liblzma-0.3.4/README.md000064400000000000000000000030211046102023000125350ustar 00000000000000# liblzma [![CI](https://github.com/Portable-Network-Archive/liblzma-rs/actions/workflows/main.yml/badge.svg)](https://github.com/Portable-Network-Archive/liblzma-rs/actions/workflows/main.yml) [![Crates.io][crates-badge]][crates-url] [crates-badge]: https://img.shields.io/crates/v/liblzma.svg [crates-url]: https://crates.io/crates/liblzma [Documentation](https://docs.rs/liblzma) Bindings to the liblzma implementation in Rust, also provides types to read/write xz streams. **This crate is forked from [xz2](https://crates.io/crates/xz2) and `liblzma = "0.1.x"` is fully compatible with `xz2 = "0.1.7"`,** so you can migrate simply. ## Migrate from xz2 ```diff # Cargo.toml [dependencies] -xz2 = "0.1.7" +liblzma = "0.1.7" ``` ```diff // *.rs -use xz2; +use liblzma; ``` ## Version 0.2.x breaking changes - XZ upgraded to 5.4 - Multithreading is disabled by default. This feature is available by enabling the `parallel` feature - Support compile to webassembly ## Version 0.3.x breaking changes - XZ upgraded to 5.6 ## License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in liblzma by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. liblzma-0.3.4/THANKS000064400000000000000000000005641046102023000122020ustar 00000000000000Thanks ====== Some people have helped more, some less, but nevertheless everyone's help has been important. :-) In alphabetical order: - Ayush Singh - Ivan Krivosheev - Luke Street Also thanks to all the people who have participated in the liblzma-rs project. 
I have probably forgot to add some names to the above list. Sorry about that and thanks for your help. liblzma-0.3.4/src/bufread.rs000064400000000000000000000262011046102023000140300ustar 00000000000000//! I/O streams for wrapping `BufRead` types as encoders/decoders use std::io; use std::io::prelude::*; #[cfg(feature = "tokio")] use futures::Poll; #[cfg(feature = "tokio")] use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "parallel")] use crate::stream::MtStreamBuilder; use crate::stream::{Action, Check, Status, Stream}; /// A xz encoder, or compressor. /// /// This structure implements a `BufRead` interface and will read uncompressed /// data from an underlying stream and emit a stream of compressed data. pub struct XzEncoder { obj: R, data: Stream, } /// A xz decoder, or decompressor. /// /// This structure implements a `BufRead` interface and takes a stream of /// compressed data as input, providing the decompressed data when read from. pub struct XzDecoder { obj: R, data: Stream, } impl XzEncoder { /// Creates a new encoder which will read uncompressed data from the given /// stream and emit the compressed stream. /// /// The `level` argument here is typically 0-9 with 6 being a good default. #[inline] pub fn new(r: R, level: u32) -> XzEncoder { let stream = Stream::new_easy_encoder(level, Check::Crc64).unwrap(); XzEncoder::new_stream(r, stream) } /// Creates a new parallel encoder which will read uncompressed data from the given /// stream and emit the compressed stream. /// /// The `level` argument here is typically 0-9 with 6 being a good default. #[cfg(feature = "parallel")] pub fn new_parallel(r: R, level: u32) -> XzEncoder { let stream = MtStreamBuilder::new() .preset(level) .check(Check::Crc64) .threads(num_cpus::get() as u32) .encoder() .unwrap(); Self::new_stream(r, stream) } /// Creates a new encoder with a custom `Stream`. /// /// The `Stream` can be pre-configured for multithreaded encoding, different /// compression options/tuning, etc. 
#[inline] pub fn new_stream(r: R, stream: Stream) -> XzEncoder { XzEncoder { obj: r, data: stream, } } } impl XzEncoder { /// Acquires a reference to the underlying stream #[inline] pub fn get_ref(&self) -> &R { &self.obj } /// Acquires a mutable reference to the underlying stream /// /// Note that mutation of the stream may result in surprising results if /// this encoder is continued to be used. #[inline] pub fn get_mut(&mut self) -> &mut R { &mut self.obj } /// Consumes this encoder, returning the underlying reader. #[inline] pub fn into_inner(self) -> R { self.obj } /// Returns the number of bytes produced by the compressor /// (e.g. the number of bytes read from this stream) /// /// Note that, due to buffering, this only bears any relation to /// total_in() when the compressor chooses to flush its data /// (unfortunately, this won't happen in general at the end of the /// stream, because the compressor doesn't know if there's more data /// to come). At that point, `total_out() / total_in()` would be /// the compression ratio. #[inline] pub fn total_out(&self) -> u64 { self.data.total_out() } /// Returns the number of bytes consumed by the compressor /// (e.g. 
the number of bytes read from the underlying stream) #[inline] pub fn total_in(&self) -> u64 { self.data.total_in() } } impl Read for XzEncoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { loop { let (read, consumed, eof, ret); { let input = self.obj.fill_buf()?; eof = input.is_empty(); let before_out = self.data.total_out(); let before_in = self.data.total_in(); let action = if eof { Action::Finish } else { Action::Run }; ret = self.data.process(input, buf, action); read = (self.data.total_out() - before_out) as usize; consumed = (self.data.total_in() - before_in) as usize; }; self.obj.consume(consumed); ret.unwrap(); // If we haven't ready any data and we haven't hit EOF yet, then we // need to keep asking for more data because if we return that 0 // bytes of data have been read then it will be interpreted as EOF. if read == 0 && !eof && !buf.is_empty() { continue; } return Ok(read); } } } #[cfg(feature = "tokio")] impl AsyncRead for XzEncoder {} impl Write for XzEncoder { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } #[inline] fn flush(&mut self) -> io::Result<()> { self.get_mut().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzEncoder { #[inline] fn shutdown(&mut self) -> Poll<(), io::Error> { self.get_mut().shutdown() } } impl XzDecoder { /// Creates a new decoder which will decompress data read from the given /// stream. #[inline] pub fn new(r: R) -> XzDecoder { let stream = Stream::new_stream_decoder(u64::MAX, 0).unwrap(); XzDecoder::new_stream(r, stream) } /// Creates a new parallel decoder which will decompress data read from the given /// stream. #[cfg(feature = "parallel")] pub fn new_parallel(r: R) -> Self { let stream = MtStreamBuilder::new() .memlimit_stop(u64::MAX) .threads(num_cpus::get() as u32) .decoder() .unwrap(); Self::new_stream(r, stream) } /// Creates a new decoder which will decompress data read from the given /// input. 
All the concatenated xz streams from input will be consumed. #[inline] pub fn new_multi_decoder(r: R) -> XzDecoder { let stream = Stream::new_auto_decoder(u64::MAX, liblzma_sys::LZMA_CONCATENATED).unwrap(); XzDecoder::new_stream(r, stream) } /// Creates a new decoder with a custom `Stream`. /// /// The `Stream` can be pre-configured for various checks, different /// decompression options/tuning, etc. #[inline] pub fn new_stream(r: R, stream: Stream) -> XzDecoder { XzDecoder { obj: r, data: stream, } } } impl XzDecoder { /// Acquires a reference to the underlying stream #[inline] pub fn get_ref(&self) -> &R { &self.obj } /// Acquires a mutable reference to the underlying stream /// /// Note that mutation of the stream may result in surprising results if /// this encoder is continued to be used. #[inline] pub fn get_mut(&mut self) -> &mut R { &mut self.obj } /// Consumes this decoder, returning the underlying reader. #[inline] pub fn into_inner(self) -> R { self.obj } /// Returns the number of bytes that the decompressor has consumed. /// /// Note that this will likely be smaller than what the decompressor /// actually read from the underlying stream due to buffering. #[inline] pub fn total_in(&self) -> u64 { self.data.total_in() } /// Returns the number of bytes that the decompressor has produced. 
#[inline] pub fn total_out(&self) -> u64 { self.data.total_out() } } impl Read for XzDecoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { loop { let (read, consumed, eof, ret); { let input = self.obj.fill_buf()?; eof = input.is_empty(); let before_out = self.data.total_out(); let before_in = self.data.total_in(); ret = self .data .process(input, buf, if eof { Action::Finish } else { Action::Run }); read = (self.data.total_out() - before_out) as usize; consumed = (self.data.total_in() - before_in) as usize; } self.obj.consume(consumed); let status = ret?; if read > 0 || eof || buf.is_empty() { if read == 0 && status != Status::StreamEnd && !buf.is_empty() { return Err(io::Error::new( io::ErrorKind::UnexpectedEof, "premature eof", )); } return Ok(read); } if consumed == 0 { return Err(io::Error::new( io::ErrorKind::InvalidData, "corrupt xz stream", )); } } } } #[cfg(feature = "tokio")] impl AsyncRead for XzDecoder {} impl Write for XzDecoder { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } #[inline] fn flush(&mut self) -> io::Result<()> { self.get_mut().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzDecoder { #[inline] fn shutdown(&mut self) -> Poll<(), io::Error> { self.get_mut().shutdown() } } #[cfg(test)] mod tests { use super::*; #[test] fn compressed_and_trailing_data() { // Make a vector with compressed data... 
let mut to_compress: Vec = Vec::new(); const COMPRESSED_ORIG_SIZE: usize = 1024; for num in 0..COMPRESSED_ORIG_SIZE { to_compress.push(num as u8) } let mut encoder = XzEncoder::new(&to_compress[..], 6); let mut decoder_input = Vec::new(); encoder.read_to_end(&mut decoder_input).unwrap(); assert_eq!(encoder.total_in(), to_compress.len() as u64); assert_eq!(encoder.total_out(), decoder_input.len() as u64); // ...plus additional unrelated trailing data const ADDITIONAL_SIZE: usize = 123; let mut additional_data = Vec::new(); for num in 0..ADDITIONAL_SIZE { additional_data.push(((25 + num) % 256) as u8) } decoder_input.extend(&additional_data); // Decoder must be able to read the compressed xz stream, and keep the trailing data. let mut decoder_reader = &decoder_input[..]; { let mut decoder = XzDecoder::new(&mut decoder_reader); let mut decompressed_data = vec![0u8; to_compress.len()]; assert_eq!( decoder.read(&mut decompressed_data).unwrap(), COMPRESSED_ORIG_SIZE ); assert_eq!(decompressed_data, &to_compress[..]); assert_eq!( decoder.total_in(), (decoder_input.len() - ADDITIONAL_SIZE) as u64 ); assert_eq!(decoder.total_out(), decompressed_data.len() as u64); } let mut remaining_data = Vec::new(); let nb_read = decoder_reader.read_to_end(&mut remaining_data).unwrap(); assert_eq!(nb_read, ADDITIONAL_SIZE); assert_eq!(remaining_data, &additional_data[..]); } } liblzma-0.3.4/src/lib.rs000064400000000000000000000147141046102023000131740ustar 00000000000000//! LZMA/XZ encoding and decoding streams //! //! This library is a binding to liblzma currently to provide LZMA and xz //! encoding/decoding streams. I/O streams are provided in the `read`, `write`, //! and `bufread` modules (same types, different bounds). Raw in-memory //! compression/decompression is provided via the `stream` module and contains //! many of the raw APIs in liblzma. //! //! # Examples //! //! ``` //! use liblzma::read::{XzDecoder, XzEncoder}; //! use std::io::prelude::*; //! //! 
// Round trip some bytes from a byte source, into a compressor, into a //! // decompressor, and finally into a vector. //! let data = "Hello, World!".as_bytes(); //! let compressor = XzEncoder::new(data, 9); //! let mut decompressor = XzDecoder::new(compressor); //! //! let mut contents = String::new(); //! decompressor.read_to_string(&mut contents).unwrap(); //! assert_eq!(contents, "Hello, World!"); //! ``` //! # Static linking //! //! You can enable static-linking using the `static` feature, so that the XZ //! library is not required at runtime: //! //! ```toml //! liblzma = { version = "0.3", features = ["static"] } //! ``` //! //! # Multithreading //! //! This crate optionally can support multithreading using the `parallel` //! feature of this crate: //! //! ```toml //! liblzma = { version = "0.3", features = ["parallel"] } //! ``` //! //! # Async I/O //! //! This crate optionally can support async I/O streams with the Tokio stack via //! the `tokio` feature of this crate: //! //! ```toml //! liblzma = { version = "0.3", features = ["tokio"] } //! ``` //! //! All methods are internally capable of working with streams that may return //! `ErrorKind::WouldBlock` when they're not ready to perform the particular //! operation. //! //! Note that care needs to be taken when using these objects, however. The //! Tokio runtime, in particular, requires that data is fully flushed before //! dropping streams. For compatibility with blocking streams all streams are //! flushed/written when they are dropped, and this is not always a suitable //! time to perform I/O. If I/O streams are flushed before drop, however, then //! these operations will be a noop. #![deny(missing_docs)] use std::io::{self, prelude::*}; pub mod stream; pub mod bufread; pub mod read; pub mod write; /// Decompress from the given source as if using a [read::XzDecoder]. /// /// Result will be in the xz format. 
pub fn decode_all(source: R) -> io::Result> { let mut vec = Vec::new(); let mut r = read::XzDecoder::new(source); r.read_to_end(&mut vec)?; Ok(vec) } /// Compress from the given source as if using a [read::XzEncoder]. /// /// The input data must be in the xz format. pub fn encode_all(source: R, level: u32) -> io::Result> { let mut vec = Vec::new(); let mut r = read::XzEncoder::new(source, level); r.read_to_end(&mut vec)?; Ok(vec) } /// Compress all data from the given source as if using a [read::XzEncoder]. /// /// Compressed data will be appended to `destination`. pub fn copy_encode(source: R, mut destination: W, level: u32) -> io::Result<()> { io::copy(&mut read::XzEncoder::new(source, level), &mut destination)?; Ok(()) } /// Decompress all data from the given source as if using a [read::XzDecoder]. /// /// Decompressed data will be appended to `destination`. pub fn copy_decode(source: R, mut destination: W) -> io::Result<()> { io::copy(&mut read::XzDecoder::new(source), &mut destination)?; Ok(()) } /// Find the size in bytes of uncompressed data from xz file. 
#[cfg(feature = "bindgen")] pub fn uncompressed_size(mut source: R) -> io::Result { use std::mem::MaybeUninit; let mut footer = [0u8; liblzma_sys::LZMA_STREAM_HEADER_SIZE as usize]; source.seek(io::SeekFrom::End( 0 - (liblzma_sys::LZMA_STREAM_HEADER_SIZE as i64), ))?; source.read_exact(&mut footer)?; let lzma_stream_flags = unsafe { let mut lzma_stream_flags = MaybeUninit::uninit(); let ret = liblzma_sys::lzma_stream_footer_decode(lzma_stream_flags.as_mut_ptr(), footer.as_ptr()); if ret != liblzma_sys::lzma_ret_LZMA_OK { return Err(io::Error::new( io::ErrorKind::Other, "Failed to parse lzma footer", )); } lzma_stream_flags.assume_init() }; let index_plus_footer = liblzma_sys::LZMA_STREAM_HEADER_SIZE as usize + lzma_stream_flags.backward_size as usize; source.seek(io::SeekFrom::End(0 - index_plus_footer as i64))?; let buf = source .bytes() .take(index_plus_footer) .collect::>>()?; let uncompressed_size = unsafe { let mut i: MaybeUninit<*mut liblzma_sys::lzma_index> = MaybeUninit::uninit(); let mut memlimit = u64::MAX; let mut in_pos = 0usize; let ret = liblzma_sys::lzma_index_buffer_decode( i.as_mut_ptr(), &mut memlimit, std::ptr::null(), buf.as_ptr(), &mut in_pos, buf.len(), ); if ret != liblzma_sys::lzma_ret_LZMA_OK { return Err(io::Error::new( io::ErrorKind::Other, "Failed to parse lzma footer", )); } let i = i.assume_init(); let uncompressed_size = liblzma_sys::lzma_index_uncompressed_size(i); liblzma_sys::lzma_index_end(i, std::ptr::null()); uncompressed_size }; Ok(uncompressed_size) } #[cfg(test)] mod tests { use super::*; use quickcheck::quickcheck; #[test] fn all() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let e = encode_all(&v[..], 6).unwrap(); let d = decode_all(&e[..]).unwrap(); v == d } } #[test] fn copy() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let mut e = Vec::new(); copy_encode(&v[..], &mut e, 6).unwrap(); let mut d = Vec::new(); copy_decode(&e[..], &mut d).unwrap(); v == d } } #[test] #[cfg(feature = "bindgen")] 
fn size() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let mut e = Vec::new(); copy_encode(&v[..], &mut e, 6).unwrap(); let s = super::uncompressed_size(std::io::Cursor::new(e)).unwrap(); (s as usize) == v.len() } } } liblzma-0.3.4/src/read.rs000064400000000000000000000322121046102023000133320ustar 00000000000000//! Reader-based compression/decompression streams use std::io::prelude::*; use std::io::{self, BufReader}; #[cfg(feature = "tokio")] use futures::Poll; #[cfg(feature = "tokio")] use tokio_io::{AsyncRead, AsyncWrite}; use crate::bufread; use crate::stream::Stream; /// A compression stream which wraps an uncompressed stream of data. Compressed /// data will be read from the stream. pub struct XzEncoder { inner: bufread::XzEncoder>, } /// A decompression stream which wraps a compressed stream of data. Decompressed /// data will be read from the stream. pub struct XzDecoder { inner: bufread::XzDecoder>, } impl XzEncoder { /// Create a new compression stream which will compress at the given level /// to read compress output to the give output stream. /// /// The `level` argument here is typically 0-9 with 6 being a good default. #[inline] pub fn new(r: R, level: u32) -> XzEncoder { XzEncoder { inner: bufread::XzEncoder::new(BufReader::new(r), level), } } /// Create a new parallel compression stream which will compress at the given level /// to read compress output to the give output stream. /// /// The `level` argument here is typically 0-9 with 6 being a good default. #[cfg(feature = "parallel")] pub fn new_parallel(r: R, level: u32) -> XzEncoder { XzEncoder { inner: bufread::XzEncoder::new_parallel(BufReader::new(r), level), } } /// Creates a new encoder with a custom `Stream`. /// /// The `Stream` can be pre-configured for multithreaded encoding, different /// compression options/tuning, etc. 
#[inline] pub fn new_stream(r: R, stream: Stream) -> XzEncoder { XzEncoder { inner: bufread::XzEncoder::new_stream(BufReader::new(r), stream), } } /// Acquires a reference to the underlying stream #[inline] pub fn get_ref(&self) -> &R { self.inner.get_ref().get_ref() } /// Acquires a mutable reference to the underlying stream /// /// Note that mutation of the stream may result in surprising results if /// this encoder is continued to be used. #[inline] pub fn get_mut(&mut self) -> &mut R { self.inner.get_mut().get_mut() } /// Unwrap the underlying writer, finishing the compression stream. #[inline] pub fn into_inner(self) -> R { self.inner.into_inner().into_inner() } /// Returns the number of bytes produced by the compressor /// (e.g. the number of bytes read from this stream) /// /// Note that, due to buffering, this only bears any relation to /// total_in() when the compressor chooses to flush its data /// (unfortunately, this won't happen this won't happen in general /// at the end of the stream, because the compressor doesn't know /// if there's more data to come). At that point, /// `total_out() / total_in()` would be the compression ratio. #[inline] pub fn total_out(&self) -> u64 { self.inner.total_out() } /// Returns the number of bytes consumed by the compressor /// (e.g. 
the number of bytes read from the underlying stream) #[inline] pub fn total_in(&self) -> u64 { self.inner.total_in() } } impl Read for XzEncoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { self.inner.read(buf) } } #[cfg(feature = "tokio")] impl AsyncRead for XzEncoder {} impl Write for XzEncoder { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } #[inline] fn flush(&mut self) -> io::Result<()> { self.get_mut().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzEncoder { #[inline] fn shutdown(&mut self) -> Poll<(), io::Error> { self.get_mut().shutdown() } } impl XzDecoder { /// Create a new decompression stream, which will read compressed /// data from the given input stream, and decompress one xz stream. /// It may also consume input data that follows the xz stream. /// Use [`xz::bufread::XzDecoder`] instead to process a mix of xz and non-xz data. #[inline] pub fn new(r: R) -> XzDecoder { XzDecoder { inner: bufread::XzDecoder::new(BufReader::new(r)), } } /// Create a new parallel decompression stream, which will read compressed /// data from the given input stream, and decompress one xz stream. /// It may also consume input data that follows the xz stream. /// Use [`xz::bufread::XzDecoder`] instead to process a mix of xz and non-xz data. #[cfg(feature = "parallel")] #[inline] pub fn new_parallel(r: R) -> XzDecoder { XzDecoder { inner: bufread::XzDecoder::new_parallel(BufReader::new(r)), } } /// Create a new decompression stream, which will read compressed /// data from the given input and decompress all the xz stream it contains. #[inline] pub fn new_multi_decoder(r: R) -> XzDecoder { XzDecoder { inner: bufread::XzDecoder::new_multi_decoder(BufReader::new(r)), } } /// Creates a new decoder with a custom `Stream`. /// /// The `Stream` can be pre-configured for various checks, different /// decompression options/tuning, etc. 
#[inline] pub fn new_stream(r: R, stream: Stream) -> XzDecoder { XzDecoder { inner: bufread::XzDecoder::new_stream(BufReader::new(r), stream), } } /// Acquires a reference to the underlying stream #[inline] pub fn get_ref(&self) -> &R { self.inner.get_ref().get_ref() } /// Acquires a mutable reference to the underlying stream /// /// Note that mutation of the stream may result in surprising results if /// this encoder is continued to be used. #[inline] pub fn get_mut(&mut self) -> &mut R { self.inner.get_mut().get_mut() } /// Unwrap the underlying writer, finishing the compression stream. #[inline] pub fn into_inner(self) -> R { self.inner.into_inner().into_inner() } /// Returns the number of bytes produced by the decompressor /// (e.g. the number of bytes read from this stream) /// /// Note that, due to buffering, this only bears any relation to /// total_in() when the decompressor reaches a sync point /// (e.g. where the original compressed stream was flushed). /// At that point, `total_in() / total_out()` is the compression ratio. #[inline] pub fn total_out(&self) -> u64 { self.inner.total_out() } /// Returns the number of bytes consumed by the decompressor /// (e.g. 
the number of bytes read from the underlying stream) #[inline] pub fn total_in(&self) -> u64 { self.inner.total_in() } } impl Read for XzDecoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { self.inner.read(buf) } } #[cfg(feature = "tokio")] impl AsyncRead for XzDecoder {} impl Write for XzDecoder { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } #[inline] fn flush(&mut self) -> io::Result<()> { self.get_mut().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzDecoder { #[inline] fn shutdown(&mut self) -> Poll<(), io::Error> { self.get_mut().shutdown() } } #[cfg(test)] mod tests { use super::*; use crate::stream::LzmaOptions; use quickcheck::quickcheck; use rand::{thread_rng, Rng}; use std::iter; #[test] fn smoke() { let m: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8]; let mut c = XzEncoder::new(m, 6); let mut data = vec![]; c.read_to_end(&mut data).unwrap(); let mut d = XzDecoder::new(&data[..]); let mut data2 = Vec::new(); d.read_to_end(&mut data2).unwrap(); assert_eq!(data2, m); } #[test] fn smoke2() { let m: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8]; let c = XzEncoder::new(m, 6); let mut d = XzDecoder::new(c); let mut data = vec![]; d.read_to_end(&mut data).unwrap(); assert_eq!(data, [1, 2, 3, 4, 5, 6, 7, 8]); } #[test] fn smoke3() { let m = vec![3u8; 128 * 1024 + 1]; let c = XzEncoder::new(&m[..], 6); let mut d = XzDecoder::new(c); let mut data = vec![]; d.read_to_end(&mut data).unwrap(); assert_eq!(data, &m[..]); } #[test] fn self_terminating() { let m = vec![3u8; 128 * 1024 + 1]; let mut c = XzEncoder::new(&m[..], 6); let mut result = Vec::new(); c.read_to_end(&mut result).unwrap(); let mut rng = thread_rng(); let v = iter::repeat_with(|| rng.gen::()) .take(1024) .collect::>(); for _ in 0..200 { result.extend(v.iter().map(|x| *x)); } let mut d = XzDecoder::new(&result[..]); let mut data = Vec::with_capacity(m.len()); unsafe { data.set_len(m.len()); } assert_eq!(d.read(&mut data).unwrap(), m.len()); 
assert_eq!(data, &m[..]); } #[test] fn zero_length_read_at_eof() { let m = Vec::new(); let mut c = XzEncoder::new(&m[..], 6); let mut result = Vec::new(); c.read_to_end(&mut result).unwrap(); let mut d = XzDecoder::new(&result[..]); let mut data = Vec::new(); assert_eq!(d.read(&mut data).unwrap(), 0); } #[test] fn zero_length_read_with_data() { let m = vec![3u8; 128 * 1024 + 1]; let mut c = XzEncoder::new(&m[..], 6); let mut result = Vec::new(); c.read_to_end(&mut result).unwrap(); let mut d = XzDecoder::new(&result[..]); let mut data = Vec::new(); assert_eq!(d.read(&mut data).unwrap(), 0); } #[test] fn qc_lzma1() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let options = LzmaOptions::new_preset(6).unwrap(); let stream = Stream::new_lzma_encoder(&options).unwrap(); let r = XzEncoder::new_stream(&v[..], stream); let stream = Stream::new_lzma_decoder(u64::MAX).unwrap(); let mut r = XzDecoder::new_stream(r, stream); let mut v2 = Vec::new(); r.read_to_end(&mut v2).unwrap(); v == v2 } } #[test] fn qc() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let r = XzEncoder::new(&v[..], 6); let mut r = XzDecoder::new(r); let mut v2 = Vec::new(); r.read_to_end(&mut v2).unwrap(); v == v2 } } #[cfg(feature = "parallel")] #[test] fn qc_parallel_encode() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let r = XzEncoder::new_parallel(&v[..], 6); let mut r = XzDecoder::new(r); let mut v2 = Vec::new(); r.read_to_end(&mut v2).unwrap(); v == v2 } } #[cfg(feature = "parallel")] #[test] fn qc_parallel_decode() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let r = XzEncoder::new(&v[..], 6); let mut r = XzDecoder::new_parallel(r); let mut v2 = Vec::new(); r.read_to_end(&mut v2).unwrap(); v == v2 } } #[test] fn two_streams() { let mut input_stream1: Vec = Vec::new(); let mut input_stream2: Vec = Vec::new(); let mut all_input: Vec = Vec::new(); // Generate input data. 
const STREAM1_SIZE: usize = 1024; for num in 0..STREAM1_SIZE { input_stream1.push(num as u8) } const STREAM2_SIZE: usize = 532; for num in 0..STREAM2_SIZE { input_stream2.push((num + 32) as u8) } all_input.extend(&input_stream1); all_input.extend(&input_stream2); // Make a vector with compressed data let mut decoder_input = Vec::new(); { let mut encoder = XzEncoder::new(&input_stream1[..], 6); encoder.read_to_end(&mut decoder_input).unwrap(); } { let mut encoder = XzEncoder::new(&input_stream2[..], 6); encoder.read_to_end(&mut decoder_input).unwrap(); } // Decoder must be able to read the 2 concatenated xz streams and get the same data as input. let mut decoder_reader = &decoder_input[..]; { // using `XzDecoder::new` here would fail because only 1 xz stream would be processed. let mut decoder = XzDecoder::new_multi_decoder(&mut decoder_reader); let mut decompressed_data = vec![0u8; all_input.len()]; assert_eq!( decoder.read(&mut decompressed_data).unwrap(), all_input.len() ); assert_eq!(decompressed_data, &all_input[..]); } } } liblzma-0.3.4/src/stream.rs000064400000000000000000001320001046102023000137060ustar 00000000000000//! Raw in-memory LZMA streams. //! //! The `Stream` type exported by this module is the primary type which performs //! encoding/decoding of LZMA streams. Each `Stream` is either an encoder or //! decoder and processes data in a streaming fashion. use std::collections::LinkedList; use std::error; use std::fmt; use std::io; use std::mem; use std::slice; /// Representation of an in-memory LZMA encoding or decoding stream. /// /// Wraps the raw underlying `lzma_stream` type and provides the ability to /// create streams which can either decode or encode various LZMA-based formats. pub struct Stream { raw: liblzma_sys::lzma_stream, } unsafe impl Send for Stream {} unsafe impl Sync for Stream {} /// Options that can be used to configure how LZMA encoding happens. /// /// This builder is consumed by a number of other methods. 
pub struct LzmaOptions { raw: liblzma_sys::lzma_options_lzma, } /// Builder to create a multithreaded stream encoder. #[cfg(feature = "parallel")] pub struct MtStreamBuilder { raw: liblzma_sys::lzma_mt, filters: Option, } /// A custom chain of filters to configure an encoding stream. pub struct Filters { inner: Vec, lzma_opts: LinkedList, } /// The `action` argument for `process`, /// /// After the first use of SyncFlush, FullFlush, FullBarrier, or Finish, the /// same `action' must is used until `process` returns `Status::StreamEnd`. /// Also, the amount of input must not be modified by the application until /// `process` returns `Status::StreamEnd`. Changing the `action' or modifying /// the amount of input will make `process` return `Error::Program`. #[derive(Copy, Clone)] pub enum Action { /// Continue processing /// /// When encoding, encode as much input as possible. Some internal buffering /// will probably be done (depends on the filter chain in use), which causes /// latency: the input used won't usually be decodeable from the output of /// the same `process` call. /// /// When decoding, decode as much input as possible and produce as much /// output as possible. Run = liblzma_sys::LZMA_RUN as isize, /// Make all the input available at output /// /// Normally the encoder introduces some latency. `SyncFlush` forces all the /// buffered data to be available at output without resetting the internal /// state of the encoder. This way it is possible to use compressed stream /// for example for communication over network. /// /// Only some filters support `SyncFlush`. Trying to use `SyncFlush` with /// filters that don't support it will make `process` return /// `Error::Options`. For example, LZMA1 doesn't support `SyncFlush` but /// LZMA2 does. /// /// Using `SyncFlush` very often can dramatically reduce the compression /// ratio. 
With some filters (for example, LZMA2), fine-tuning the /// compression options may help mitigate this problem significantly (for /// example, match finder with LZMA2). /// /// Decoders don't support `SyncFlush`. SyncFlush = liblzma_sys::LZMA_SYNC_FLUSH as isize, /// Finish encoding of the current block. /// /// All the input data going to the current block must have been given to /// the encoder. Call `process` with `FullFlush` until it returns /// `Status::StreamEnd`. Then continue normally with `Run` or finish the /// Stream with `Finish`. /// /// This action is currently supported only by stream encoder and easy /// encoder (which uses stream encoder). If there is no unfinished block, no /// empty block is created. FullFlush = liblzma_sys::LZMA_FULL_FLUSH as isize, /// Finish encoding of the current block. /// /// This is like `FullFlush` except that this doesn't necessarily wait until /// all the input has been made available via the output buffer. That is, /// `process` might return `Status::StreamEnd` as soon as all the input has /// been consumed. /// /// `FullBarrier` is useful with a threaded encoder if one wants to split /// the .xz Stream into blocks at specific offsets but doesn't care if the /// output isn't flushed immediately. Using `FullBarrier` allows keeping the /// threads busy while `FullFlush` would make `process` wait until all the /// threads have finished until more data could be passed to the encoder. /// /// With a `Stream` initialized with the single-threaded /// `new_stream_encoder` or `new_easy_encoder`, `FullBarrier` is an alias /// for `FullFlush`. FullBarrier = liblzma_sys::LZMA_FULL_BARRIER as isize, /// Finish the current operation /// /// All the input data must have been given to the encoder (the last bytes /// can still be pending in next_in). Call `process` with `Finish` until it /// returns `Status::StreamEnd`. Once `Finish` has been used, the amount of /// input must no longer be changed by the application. 
/// /// When decoding, using `Finish` is optional unless the concatenated flag /// was used when the decoder was initialized. When concatenated was not /// used, the only effect of `Finish` is that the amount of input must not /// be changed just like in the encoder. Finish = liblzma_sys::LZMA_FINISH as isize, } /// Return value of a `process` operation. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Status { /// Operation completed successfully. Ok, /// End of stream was reached. /// /// When encoding, this means that a sync/full flush or `Finish` was /// completed. When decoding, this indicates that all data was decoded /// successfully. StreamEnd, /// If the TELL_ANY_CHECK flags is specified when constructing a decoder, /// this informs that the `check` method will now return the underlying /// integrity check algorithm. GetCheck, /// An error has not been encountered, but no progress is possible. /// /// Processing can be continued normally by providing more input and/or more /// output space, if possible. /// /// Typically the first call to `process` that can do no progress returns /// `Ok` instead of `MemNeeded`. Only the second consecutive call doing no /// progress will return `MemNeeded`. MemNeeded, } /// Possible error codes that can be returned from a processing operation. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Error { /// The underlying data was corrupt. Data, /// Invalid or unsupported options were specified. Options, /// File format wasn't recognized. Format, /// Memory usage limit was reached. /// /// The memory limit can be increased with `set_memlimit` MemLimit, /// Memory couldn't be allocated. Mem, /// A programming error was encountered. Program, /// The `TELL_NO_CHECK` flag was specified and no integrity check was /// available for this stream. NoCheck, /// The `TELL_UNSUPPORTED_CHECK` flag was specified and no integrity check /// isn't implemented in this build of liblzma for this stream. 
UnsupportedCheck, } /// Possible integrity checks that can be part of a .xz stream. #[allow(missing_docs)] // self-explanatory mostly #[derive(Copy, Clone)] pub enum Check { None = liblzma_sys::LZMA_CHECK_NONE as isize, Crc32 = liblzma_sys::LZMA_CHECK_CRC32 as isize, Crc64 = liblzma_sys::LZMA_CHECK_CRC64 as isize, Sha256 = liblzma_sys::LZMA_CHECK_SHA256 as isize, } /// Compression modes /// /// This selects the function used to analyze the data produced by the match /// finder. #[derive(Copy, Clone)] pub enum Mode { /// Fast compression. /// /// Fast mode is usually at its best when combined with a hash chain match /// finder. Fast = liblzma_sys::LZMA_MODE_FAST as isize, /// Normal compression. /// /// This is usually notably slower than fast mode. Use this together with /// binary tree match finders to expose the full potential of the LZMA1 or /// LZMA2 encoder. Normal = liblzma_sys::LZMA_MODE_NORMAL as isize, } /// Match finders /// /// Match finder has major effect on both speed and compression ratio. Usually /// hash chains are faster than binary trees. /// /// If you will use `SyncFlush` often, the hash chains may be a better choice, /// because binary trees get much higher compression ratio penalty with /// `SyncFlush`. /// /// The memory usage formulas are only rough estimates, which are closest to /// reality when dict_size is a power of two. The formulas are more complex in /// reality, and can also change a little between liblzma versions. 
#[derive(Copy, Clone)] pub enum MatchFinder { /// Hash Chain with 2- and 3-byte hashing HashChain3 = liblzma_sys::LZMA_MF_HC3 as isize, /// Hash Chain with 2-, 3-, and 4-byte hashing HashChain4 = liblzma_sys::LZMA_MF_HC4 as isize, /// Binary Tree with 2-byte hashing BinaryTree2 = liblzma_sys::LZMA_MF_BT2 as isize, /// Binary Tree with 2- and 3-byte hashing BinaryTree3 = liblzma_sys::LZMA_MF_BT3 as isize, /// Binary Tree with 2-, 3-, and 4-byte hashing BinaryTree4 = liblzma_sys::LZMA_MF_BT4 as isize, } /// A flag passed when initializing a decoder, causes `process` to return /// `Status::GetCheck` as soon as the integrity check is known. pub const TELL_ANY_CHECK: u32 = liblzma_sys::LZMA_TELL_ANY_CHECK; /// A flag passed when initializing a decoder, causes `process` to return /// `Error::NoCheck` if the stream being decoded has no integrity check. pub const TELL_NO_CHECK: u32 = liblzma_sys::LZMA_TELL_NO_CHECK; /// A flag passed when initializing a decoder, causes `process` to return /// `Error::UnsupportedCheck` if the stream being decoded has an integrity check /// that cannot be verified by this build of liblzma. pub const TELL_UNSUPPORTED_CHECK: u32 = liblzma_sys::LZMA_TELL_UNSUPPORTED_CHECK; /// A flag passed when initializing a decoder, causes the decoder to ignore any /// integrity checks listed. pub const IGNORE_CHECK: u32 = liblzma_sys::LZMA_TELL_UNSUPPORTED_CHECK; /// A flag passed when initializing a decoder, indicates that the stream may be /// multiple concatenated xz files. pub const CONCATENATED: u32 = liblzma_sys::LZMA_CONCATENATED; impl Stream { #[inline] unsafe fn zeroed() -> Self { Self { raw: unsafe { mem::zeroed() }, } } /// Initialize .xz stream encoder using a preset number /// /// This is intended to be used by most for encoding data. The `preset` /// argument is a number 0-9 indicating the compression level to use, and /// normally 6 is a reasonable default. 
/// /// The `check` argument is the integrity check to insert at the end of the /// stream. The default of `Crc64` is typically appropriate. #[inline] pub fn new_easy_encoder(preset: u32, check: Check) -> Result { let mut init = unsafe { Stream::zeroed() }; cvt(unsafe { liblzma_sys::lzma_easy_encoder(&mut init.raw, preset, check as liblzma_sys::lzma_check) })?; Ok(init) } /// Initialize .lzma encoder (legacy file format) /// /// The .lzma format is sometimes called the LZMA_Alone format, which is the /// reason for the name of this function. The .lzma format supports only the /// LZMA1 filter. There is no support for integrity checks like CRC32. /// /// Use this function if and only if you need to create files readable by /// legacy LZMA tools such as LZMA Utils 4.32.x. Moving to the .xz format /// (the `new_easy_encoder` function) is strongly recommended. /// /// The valid action values for `process` are `Run` and `Finish`. No kind /// of flushing is supported, because the file format doesn't make it /// possible. #[inline] pub fn new_lzma_encoder(options: &LzmaOptions) -> Result { let mut init = unsafe { Stream::zeroed() }; cvt(unsafe { liblzma_sys::lzma_alone_encoder(&mut init.raw, &options.raw) })?; Ok(init) } /// Initialize .xz Stream encoder using a custom filter chain /// /// This function is similar to `new_easy_encoder` but a custom filter chain /// is specified. #[inline] pub fn new_stream_encoder(filters: &Filters, check: Check) -> Result { let mut init = unsafe { Stream::zeroed() }; cvt(unsafe { liblzma_sys::lzma_stream_encoder( &mut init.raw, filters.inner.as_ptr(), check as liblzma_sys::lzma_check, ) })?; Ok(init) } /// Initialize a .xz stream decoder. /// /// The maximum memory usage can be specified along with flags such as /// `TELL_ANY_CHECK`, `TELL_NO_CHECK`, `TELL_UNSUPPORTED_CHECK`, /// `TELL_IGNORE_CHECK`, or `CONCATENATED`. 
#[inline] pub fn new_stream_decoder(memlimit: u64, flags: u32) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_stream_decoder(&mut init.raw, memlimit, flags) })?; Ok(init) } /// Initialize a .lzma stream decoder. /// /// The maximum memory usage can also be specified. #[inline] pub fn new_lzma_decoder(memlimit: u64) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_alone_decoder(&mut init.raw, memlimit) })?; Ok(init) } /// Initialize a decoder which will choose a stream/lzma formats depending /// on the input stream. #[inline] pub fn new_auto_decoder(memlimit: u64, flags: u32) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_auto_decoder(&mut init.raw, memlimit, flags) })?; Ok(init) } /// Initialize a .lz stream decoder. #[inline] pub fn new_lzip_decoder(memlimit: u64, flags: u32) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_lzip_decoder(&mut init.raw, memlimit, flags) })?; Ok(init) } /// Initialize a decoder stream using a custom filter chain. #[inline] pub fn new_raw_decoder(filters: &Filters) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_raw_decoder(&mut init.raw, filters.inner.as_ptr()) })?; Ok(init) } /// Initialize an encoder stream using a custom filter chain. #[inline] pub fn new_raw_encoder(filters: &Filters) -> Result { let mut init = unsafe { Self::zeroed() }; cvt(unsafe { liblzma_sys::lzma_raw_encoder(&mut init.raw, filters.inner.as_ptr()) })?; Ok(init) } /// Processes some data from input into an output buffer. /// /// This will perform the appropriate encoding or decoding operation /// depending on the kind of underlying stream. Documentation for the /// various `action` arguments can be found on the respective variants. 
#[inline] pub fn process( &mut self, input: &[u8], output: &mut [u8], action: Action, ) -> Result { self.raw.next_in = input.as_ptr(); self.raw.avail_in = input.len(); self.raw.next_out = output.as_mut_ptr(); self.raw.avail_out = output.len(); let action = action as liblzma_sys::lzma_action; unsafe { cvt(liblzma_sys::lzma_code(&mut self.raw, action)) } } /// Performs the same data as `process`, but places output data in a `Vec`. /// /// This function will use the extra capacity of `output` as a destination /// for bytes to be placed. The length of `output` will automatically get /// updated after the operation has completed. #[inline] pub fn process_vec( &mut self, input: &[u8], output: &mut Vec, action: Action, ) -> Result { let cap = output.capacity(); let len = output.len(); unsafe { let before = self.total_out(); let ret = { let ptr = output.as_mut_ptr().add(len); let out = slice::from_raw_parts_mut(ptr, cap - len); self.process(input, out, action) }; output.set_len((self.total_out() - before) as usize + len); ret } } /// Returns the total amount of input bytes consumed by this stream. #[inline] pub fn total_in(&self) -> u64 { self.raw.total_in } /// Returns the total amount of bytes produced by this stream. #[inline] pub fn total_out(&self) -> u64 { self.raw.total_out } /// Get the current memory usage limit. /// /// This is only supported if the underlying stream supports a memlimit. #[inline] pub fn memlimit(&self) -> u64 { unsafe { liblzma_sys::lzma_memlimit_get(&self.raw) } } /// Set the current memory usage limit. /// /// This can return `Error::MemLimit` if the new limit is too small or /// `Error::Program` if this stream doesn't take a memory limit. #[inline] pub fn set_memlimit(&mut self, limit: u64) -> Result<(), Error> { cvt(unsafe { liblzma_sys::lzma_memlimit_set(&mut self.raw, limit) }).map(|_| ()) } } impl LzmaOptions { /// Creates a new blank set of options. 
#[inline] pub fn new() -> LzmaOptions { LzmaOptions { raw: unsafe { mem::zeroed() }, } } /// Creates a new blank set of options for encoding. /// /// The `preset` argument is the compression level to use, typically in the /// range of 0-9. #[inline] pub fn new_preset(preset: u32) -> Result { unsafe { let mut options = Self::new(); let ret = liblzma_sys::lzma_lzma_preset(&mut options.raw, preset); if ret != 0 { Err(Error::Program) } else { Ok(options) } } } /// Configures the dictionary size, in bytes /// /// Dictionary size indicates how many bytes of the recently processed /// uncompressed data is kept in memory. /// /// The minimum dictionary size is 4096 bytes and the default is 2^23, 8MB. #[inline] pub fn dict_size(&mut self, size: u32) -> &mut LzmaOptions { self.raw.dict_size = size; self } /// Configures the number of literal context bits. /// /// How many of the highest bits of the previous uncompressed eight-bit byte /// (also known as `literal') are taken into account when predicting the /// bits of the next literal. /// /// The maximum value to this is 4 and the default is 3. It is not currently /// supported if this plus `literal_position_bits` is greater than 4. #[inline] pub fn literal_context_bits(&mut self, bits: u32) -> &mut LzmaOptions { self.raw.lc = bits; self } /// Configures the number of literal position bits. /// /// This affects what kind of alignment in the uncompressed data is assumed /// when encoding literals. A literal is a single 8-bit byte. See /// `position_bits` for more information about alignment. /// /// The default for this is 0. #[inline] pub fn literal_position_bits(&mut self, bits: u32) -> &mut LzmaOptions { self.raw.lp = bits; self } /// Configures the number of position bits. /// /// Position bits affects what kind of alignment in the uncompressed data is /// assumed in general. The default of 2 means four-byte alignment (2^ pb /// =2^2=4), which is often a good choice when there's no better guess. 
/// /// When the alignment is known, setting pb accordingly may reduce the file /// size a little. E.g. with text files having one-byte alignment (US-ASCII, /// ISO-8859-*, UTF-8), setting pb=0 can improve compression slightly. For /// UTF-16 text, pb=1 is a good choice. If the alignment is an odd number /// like 3 bytes, pb=0 might be the best choice. /// /// Even though the assumed alignment can be adjusted with pb and lp, LZMA1 /// and LZMA2 still slightly favor 16-byte alignment. It might be worth /// taking into account when designing file formats that are likely to be /// often compressed with LZMA1 or LZMA2. #[inline] pub fn position_bits(&mut self, bits: u32) -> &mut LzmaOptions { self.raw.pb = bits; self } /// Configures the compression mode. #[inline] pub fn mode(&mut self, mode: Mode) -> &mut LzmaOptions { self.raw.mode = mode as liblzma_sys::lzma_mode; self } /// Configures the nice length of a match. /// /// This determines how many bytes the encoder compares from the match /// candidates when looking for the best match. Once a match of at least /// `nice_len` bytes long is found, the encoder stops looking for better /// candidates and encodes the match. (Naturally, if the found match is /// actually longer than `nice_len`, the actual length is encoded; it's not /// truncated to `nice_len`.) /// /// Bigger values usually increase the compression ratio and compression /// time. For most files, 32 to 128 is a good value, which gives very good /// compression ratio at good speed. /// /// The exact minimum value depends on the match finder. The maximum is 273, /// which is the maximum length of a match that LZMA1 and LZMA2 can encode. #[inline] pub fn nice_len(&mut self, len: u32) -> &mut LzmaOptions { self.raw.nice_len = len; self } /// Configures the match finder ID. #[inline] pub fn match_finder(&mut self, mf: MatchFinder) -> &mut LzmaOptions { self.raw.mf = mf as liblzma_sys::lzma_match_finder; self } /// Maximum search depth in the match finder. 
/// /// For every input byte, match finder searches through the hash chain or /// binary tree in a loop, each iteration going one step deeper in the chain /// or tree. The searching stops if /// /// - a match of at least `nice_len` bytes long is found; /// - all match candidates from the hash chain or binary tree have /// been checked; or /// - maximum search depth is reached. /// /// Maximum search depth is needed to prevent the match finder from wasting /// too much time in case there are lots of short match candidates. On the /// other hand, stopping the search before all candidates have been checked /// can reduce compression ratio. /// /// Setting depth to zero tells liblzma to use an automatic default value, /// that depends on the selected match finder and nice_len. The default is /// in the range [4, 200] or so (it may vary between liblzma versions). /// /// Using a bigger depth value than the default can increase compression /// ratio in some cases. There is no strict maximum value, but high values /// (thousands or millions) should be used with care: the encoder could /// remain fast enough with typical input, but malicious input could cause /// the match finder to slow down dramatically, possibly creating a denial /// of service attack. #[inline] pub fn depth(&mut self, depth: u32) -> &mut LzmaOptions { self.raw.depth = depth; self } } impl Check { /// Test if this check is supported in this build of liblzma. #[inline] pub fn is_supported(&self) -> bool { let ret = unsafe { liblzma_sys::lzma_check_is_supported(*self as liblzma_sys::lzma_check) }; ret != 0 } } impl MatchFinder { /// Test if this match finder is supported in this build of liblzma. #[inline] pub fn is_supported(&self) -> bool { let ret = unsafe { liblzma_sys::lzma_mf_is_supported(*self as liblzma_sys::lzma_match_finder) }; ret != 0 } } impl Filters { /// Creates a new filter chain with no filters. 
#[inline] pub fn new() -> Filters { Filters { inner: vec![liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_VLI_UNKNOWN, options: std::ptr::null_mut(), }], lzma_opts: LinkedList::new(), } } /// Add an LZMA1 filter. /// /// LZMA1 is the very same thing as what was called just LZMA in LZMA Utils, /// 7-Zip, and LZMA SDK. It's called LZMA1 here to prevent developers from /// accidentally using LZMA when they actually want LZMA2. /// /// LZMA1 shouldn't be used for new applications unless you _really_ know /// what you are doing. LZMA2 is almost always a better choice. #[inline] pub fn lzma1(&mut self, opts: &LzmaOptions) -> &mut Filters { self.lzma_opts.push_back(opts.raw); let ptr = self.lzma_opts.back().unwrap() as *const _ as *mut _; self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_LZMA1, options: ptr, }) } /// Add an LZMA1 filter with properties. #[inline] pub fn lzma1_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_LZMA1, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add an LZMA2 filter. /// /// Usually you want this instead of LZMA1. Compared to LZMA1, LZMA2 adds /// support for `SyncFlush`, uncompressed chunks (smaller expansion when /// trying to compress uncompressible data), possibility to change /// `literal_context_bits`/`literal_position_bits`/`position_bits` in the /// middle of encoding, and some other internal improvements. #[inline] pub fn lzma2(&mut self, opts: &LzmaOptions) -> &mut Filters { self.lzma_opts.push_back(opts.raw); let ptr = self.lzma_opts.back().unwrap() as *const _ as *mut _; self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_LZMA2, options: ptr, }) } /// Add an LZMA2 filter with properties. 
#[inline] pub fn lzma2_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_LZMA2, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a DELTA filter. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.delta(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn delta(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_DELTA, options: std::ptr::null_mut(), }) } /// Add a DELTA filter with properties. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.delta_properties(&[0x00]).unwrap(); /// ``` #[inline] pub fn delta_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_DELTA, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for x86 binaries. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.x86(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn x86(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_X86, options: std::ptr::null_mut(), }) } /// Add a filter for x86 binaries with properties. 
/// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.x86_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn x86_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_X86, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for PowerPC binaries. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.powerpc(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn powerpc(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_POWERPC, options: std::ptr::null_mut(), }) } /// Add a filter for PowerPC binaries with properties. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.powerpc_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn powerpc_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_POWERPC, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for IA-64 (itanium) binaries. 
    ///
    /// # Examples
    /// ```
    /// use liblzma::stream::{Filters, LzmaOptions};
    ///
    /// let dict_size = 0x40000;
    /// let mut opts = LzmaOptions::new_preset(6).unwrap();
    /// opts.dict_size(dict_size);
    /// let mut filters = Filters::new();
    /// filters.ia64();
    /// filters.lzma2(&opts);
    /// ```
    #[inline]
    pub fn ia64(&mut self) -> &mut Filters {
        // `options` is left null: no filter-specific options struct is
        // supplied, so liblzma uses its defaults for this BCJ filter.
        self.push(liblzma_sys::lzma_filter {
            id: liblzma_sys::LZMA_FILTER_IA64,
            options: std::ptr::null_mut(),
        })
    }

    /// Add a filter for IA-64 (itanium) binaries with properties.
    ///
    /// # Examples
    /// ```
    /// use liblzma::stream::{Filters, LzmaOptions};
    ///
    /// let mut filters = Filters::new();
    /// filters.ia64_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap();
    /// ```
    #[inline]
    pub fn ia64_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> {
        // Start with a null options pointer; `push_with_properties` decodes
        // the encoded `properties` bytes into an options struct for us.
        let filter = liblzma_sys::lzma_filter {
            id: liblzma_sys::LZMA_FILTER_IA64,
            options: std::ptr::null_mut(),
        };
        self.push_with_properties(filter, properties)
    }

    /// Add a filter for ARM binaries.
    ///
    /// # Examples
    /// ```
    /// use liblzma::stream::{Filters, LzmaOptions};
    ///
    /// let dict_size = 0x40000;
    /// let mut opts = LzmaOptions::new_preset(6).unwrap();
    /// opts.dict_size(dict_size);
    /// let mut filters = Filters::new();
    /// filters.arm();
    /// filters.lzma2(&opts);
    /// ```
    #[inline]
    pub fn arm(&mut self) -> &mut Filters {
        self.push(liblzma_sys::lzma_filter {
            id: liblzma_sys::LZMA_FILTER_ARM,
            options: std::ptr::null_mut(),
        })
    }

    /// Add a filter for ARM binaries with properties.
    ///
    /// # Examples
    /// ```
    /// use liblzma::stream::{Filters, LzmaOptions};
    ///
    /// let mut filters = Filters::new();
    /// filters.arm_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap();
    /// ```
    #[inline]
    pub fn arm_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> {
        let filter = liblzma_sys::lzma_filter {
            id: liblzma_sys::LZMA_FILTER_ARM,
            options: std::ptr::null_mut(),
        };
        self.push_with_properties(filter, properties)
    }

    /// Add a filter for ARM64 binaries.
/// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.arm64(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn arm64(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_ARM64, options: std::ptr::null_mut(), }) } /// Add a filter for ARM64 binaries with properties. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.arm64_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn arm64_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_ARM64, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for RISCV binaries. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.riscv(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn riscv(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_RISCV, options: std::ptr::null_mut(), }) } /// Add a filter for RISCV binaries with properties. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.riscv_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn riscv_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_RISCV, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for ARM-Thumb binaries. 
/// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.arm_thumb(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn arm_thumb(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_ARMTHUMB, options: std::ptr::null_mut(), }) } /// Add a filter for ARM-Thumb binaries with properties. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.arm_thumb_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn arm_thumb_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_ARMTHUMB, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } /// Add a filter for SPARC binaries. /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.sparc(); /// filters.lzma2(&opts); /// ``` #[inline] pub fn sparc(&mut self) -> &mut Filters { self.push(liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_SPARC, options: std::ptr::null_mut(), }) } /// Add a filter for SPARC binaries with properties. 
/// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let mut filters = Filters::new(); /// filters.sparc_properties(&[0x00, 0x00, 0x00, 0x00]).unwrap(); /// ``` #[inline] pub fn sparc_properties(&mut self, properties: &[u8]) -> Result<&mut Filters, Error> { let filter = liblzma_sys::lzma_filter { id: liblzma_sys::LZMA_FILTER_SPARC, options: std::ptr::null_mut(), }; self.push_with_properties(filter, properties) } #[inline] fn push(&mut self, filter: liblzma_sys::lzma_filter) -> &mut Filters { let pos = self.inner.len() - 1; self.inner.insert(pos, filter); self } #[inline] fn push_with_properties( &mut self, mut filter: liblzma_sys::lzma_filter, properties: &[u8], ) -> Result<&mut Filters, Error> { cvt(unsafe { liblzma_sys::lzma_properties_decode( &mut filter, std::ptr::null(), properties.as_ptr(), properties.len(), ) })?; let pos = self.inner.len() - 1; self.inner.insert(pos, filter); Ok(self) } /// recommend a Block size for multithreaded encoding /// /// # Examples /// ``` /// use liblzma::stream::{Filters, LzmaOptions}; /// /// let dict_size = 0x40000; /// let mut opts = LzmaOptions::new_preset(6).unwrap(); /// opts.dict_size(dict_size); /// let mut filters = Filters::new(); /// filters.lzma2(&opts); /// assert_eq!(filters.mt_block_size(), 1 << 20); /// ``` #[cfg(feature = "parallel")] #[inline] pub fn mt_block_size(&self) -> u64 { unsafe { liblzma_sys::lzma_mt_block_size(self.inner.as_ptr()) } } } #[cfg(feature = "parallel")] impl MtStreamBuilder { /// Creates a new blank builder to create a multithreaded encoding `Stream`. #[inline] pub fn new() -> Self { let mut init = Self { raw: unsafe { mem::zeroed() }, filters: None, }; init.raw.threads = 1; init } /// Configures the number of worker threads to use #[inline] pub fn threads(&mut self, threads: u32) -> &mut Self { self.raw.threads = threads; self } /// Configures the maximum uncompressed size of a block /// /// The encoder will start a new .xz block every `block_size` bytes. 
/// Using `FullFlush` or `FullBarrier` with `process` the caller may tell /// liblzma to start a new block earlier. /// /// With LZMA2, a recommended block size is 2-4 times the LZMA2 dictionary /// size. With very small dictionaries, it is recommended to use at least 1 /// MiB block size for good compression ratio, even if this is more than /// four times the dictionary size. Note that these are only recommendations /// for typical use cases; feel free to use other values. Just keep in mind /// that using a block size less than the LZMA2 dictionary size is waste of /// RAM. /// /// Set this to 0 to let liblzma choose the block size depending on the /// compression options. For LZMA2 it will be 3*`dict_size` or 1 MiB, /// whichever is more. /// /// For each thread, about 3 * `block_size` bytes of memory will be /// allocated. This may change in later liblzma versions. If so, the memory /// usage will probably be reduced, not increased. #[inline] pub fn block_size(&mut self, block_size: u64) -> &mut Self { self.raw.block_size = block_size; self } /// Timeout to allow `process` to return early /// /// Multithreading can make liblzma to consume input and produce output in a /// very bursty way: it may first read a lot of input to fill internal /// buffers, then no input or output occurs for a while. /// /// In single-threaded mode, `process` won't return until it has either /// consumed all the input or filled the output buffer. If this is done in /// multithreaded mode, it may cause a call `process` to take even tens of /// seconds, which isn't acceptable in all applications. /// /// To avoid very long blocking times in `process`, a timeout (in /// milliseconds) may be set here. If `process would block longer than /// this number of milliseconds, it will return with `Ok`. Reasonable /// values are 100 ms or more. The xz command line tool uses 300 ms. 
/// /// If long blocking times are fine for you, set timeout to a special /// value of 0, which will disable the timeout mechanism and will make /// `process` block until all the input is consumed or the output /// buffer has been filled. #[inline] pub fn timeout_ms(&mut self, timeout: u32) -> &mut Self { self.raw.timeout = timeout; self } /// Compression preset (level and possible flags) /// /// The preset is set just like with `Stream::new_easy_encoder`. The preset /// is ignored if filters below have been specified. #[inline] pub fn preset(&mut self, preset: u32) -> &mut Self { self.raw.preset = preset; self } /// Configure a custom filter chain #[inline] pub fn filters(&mut self, filters: Filters) -> &mut Self { self.raw.filters = filters.inner.as_ptr(); self.filters = Some(filters); self } /// Configures the integrity check type #[inline] pub fn check(&mut self, check: Check) -> &mut Self { self.raw.check = check as liblzma_sys::lzma_check; self } /// Memory usage limit to reduce the number of threads #[inline] pub fn memlimit_threading(&mut self, memlimit: u64) -> &mut Self { self.raw.memlimit_threading = memlimit; self } /// Memory usage limit that should never be exceeded #[inline] pub fn memlimit_stop(&mut self, memlimit: u64) -> &mut Self { self.raw.memlimit_stop = memlimit; self } /// Calculate approximate memory usage of multithreaded .xz encoder #[inline] pub fn memusage(&self) -> u64 { unsafe { liblzma_sys::lzma_stream_encoder_mt_memusage(&self.raw) } } /// Initialize multithreaded .xz stream encoder. #[inline] pub fn encoder(&self) -> Result { let mut init = unsafe { Stream::zeroed() }; cvt(unsafe { liblzma_sys::lzma_stream_encoder_mt(&mut init.raw, &self.raw) })?; Ok(init) } /// Initialize multithreaded .xz stream decoder. 
#[inline] pub fn decoder(&self) -> Result { let mut init = unsafe { Stream::zeroed() }; cvt(unsafe { liblzma_sys::lzma_stream_decoder_mt(&mut init.raw, &self.raw) })?; Ok(init) } } fn cvt(rc: liblzma_sys::lzma_ret) -> Result { match rc { liblzma_sys::LZMA_OK => Ok(Status::Ok), liblzma_sys::LZMA_STREAM_END => Ok(Status::StreamEnd), liblzma_sys::LZMA_NO_CHECK => Err(Error::NoCheck), liblzma_sys::LZMA_UNSUPPORTED_CHECK => Err(Error::UnsupportedCheck), liblzma_sys::LZMA_GET_CHECK => Ok(Status::GetCheck), liblzma_sys::LZMA_MEM_ERROR => Err(Error::Mem), liblzma_sys::LZMA_MEMLIMIT_ERROR => Err(Error::MemLimit), liblzma_sys::LZMA_FORMAT_ERROR => Err(Error::Format), liblzma_sys::LZMA_OPTIONS_ERROR => Err(Error::Options), liblzma_sys::LZMA_DATA_ERROR => Err(Error::Data), liblzma_sys::LZMA_BUF_ERROR => Ok(Status::MemNeeded), liblzma_sys::LZMA_PROG_ERROR => Err(Error::Program), c => panic!("unknown return code: {}", c), } } impl From for io::Error { #[inline] fn from(e: Error) -> io::Error { let kind = match e { Error::Data => io::ErrorKind::InvalidData, Error::Options => io::ErrorKind::InvalidInput, Error::Format => io::ErrorKind::InvalidData, Error::MemLimit => io::ErrorKind::Other, Error::Mem => io::ErrorKind::Other, Error::Program => io::ErrorKind::Other, Error::NoCheck => io::ErrorKind::InvalidInput, Error::UnsupportedCheck => io::ErrorKind::Other, }; io::Error::new(kind, e) } } impl error::Error for Error {} impl fmt::Display for Error { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Data => "lzma data error", Error::Options => "invalid options", Error::Format => "stream/file format not recognized", Error::MemLimit => "memory limit reached", Error::Mem => "can't allocate memory", Error::Program => "liblzma internal error", Error::NoCheck => "no integrity check was available", Error::UnsupportedCheck => "liblzma not built with check support", } .fmt(f) } } impl Drop for Stream { #[inline] fn drop(&mut self) { unsafe { 
liblzma_sys::lzma_end(&mut self.raw); } } } liblzma-0.3.4/src/write.rs000064400000000000000000000325071046102023000135600ustar 00000000000000//! Writer-based compression/decompression streams use std::io; use std::io::prelude::*; #[cfg(feature = "tokio")] use futures::Poll; #[cfg(feature = "tokio")] use tokio_io::{try_nb, AsyncRead, AsyncWrite}; #[cfg(feature = "parallel")] use crate::stream::MtStreamBuilder; use crate::stream::{Action, Check, Status, Stream}; /// A compression stream which will have uncompressed data written to it and /// will write compressed data to an output stream. pub struct XzEncoder { data: Stream, obj: Option, buf: Vec, } /// A compression stream which will have compressed data written to it and /// will write uncompressed data to an output stream. pub struct XzDecoder { data: Stream, obj: Option, buf: Vec, } impl XzEncoder { /// Create a new compression stream which will compress at the given level /// to write compress output to the give output stream. #[inline] pub fn new(obj: W, level: u32) -> XzEncoder { let stream = Stream::new_easy_encoder(level, Check::Crc64).unwrap(); XzEncoder::new_stream(obj, stream) } /// Create a new parallel compression stream which will compress at the given level /// to write compress output to the give output stream. #[cfg(feature = "parallel")] pub fn new_parallel(obj: W, level: u32) -> XzEncoder { let stream = MtStreamBuilder::new() .preset(level) .check(Check::Crc64) .threads(num_cpus::get() as u32) .encoder() .unwrap(); Self::new_stream(obj, stream) } /// Create a new encoder which will use the specified `Stream` to encode /// (compress) data into the provided `obj`. #[inline] pub fn new_stream(obj: W, stream: Stream) -> XzEncoder { XzEncoder { data: stream, obj: Some(obj), buf: Vec::with_capacity(32 * 1024), } } /// Acquires a reference to the underlying writer. #[inline] pub fn get_ref(&self) -> &W { self.obj.as_ref().unwrap() } /// Acquires a mutable reference to the underlying writer. 
/// /// Note that mutating the output/input state of the stream may corrupt this /// object, so care must be taken when using this method. #[inline] pub fn get_mut(&mut self) -> &mut W { self.obj.as_mut().unwrap() } fn dump(&mut self) -> io::Result<()> { while !self.buf.is_empty() { let n = self.obj.as_mut().unwrap().write(&self.buf)?; self.buf.drain(..n); } Ok(()) } /// Attempt to finish this output stream, writing out final chunks of data. /// /// Note that this function can only be used once data has finished being /// written to the output stream. After this function is called then further /// calls to `write` may result in a panic. /// /// # Panics /// /// Attempts to write data to this stream may result in a panic after this /// function is called. #[inline] pub fn try_finish(&mut self) -> io::Result<()> { loop { self.dump()?; let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?; if res == Status::StreamEnd { break; } } self.dump() } /// Consumes this encoder, flushing the output stream. /// /// This will flush the underlying data stream and then return the contained /// writer if the flush succeeded. /// /// Note that this function may not be suitable to call in a situation where /// the underlying stream is an asynchronous I/O stream. To finish a stream /// the `try_finish` (or `shutdown`) method should be used instead. To /// re-acquire ownership of a stream it is safe to call this method after /// `try_finish` or `shutdown` has returned `Ok`. #[inline] pub fn finish(mut self) -> io::Result { self.try_finish()?; Ok(self.obj.take().unwrap()) } /// Returns the number of bytes produced by the compressor /// /// Note that, due to buffering, this only bears any relation to /// `total_in()` after a call to `flush()`. At that point, /// `total_out() / total_in()` is the compression ratio. #[inline] pub fn total_out(&self) -> u64 { self.data.total_out() } /// Returns the number of bytes consumed by the compressor /// (e.g. 
the number of bytes written to this stream.) #[inline] pub fn total_in(&self) -> u64 { self.data.total_in() } } impl Write for XzEncoder { #[inline] fn write(&mut self, data: &[u8]) -> io::Result { loop { self.dump()?; let total_in = self.total_in(); self.data.process_vec(data, &mut self.buf, Action::Run)?; let written = (self.total_in() - total_in) as usize; if written > 0 || data.is_empty() { return Ok(written); } } } #[inline] fn flush(&mut self) -> io::Result<()> { loop { self.dump()?; let status = self .data .process_vec(&[], &mut self.buf, Action::FullFlush)?; if status == Status::StreamEnd { break; } } self.obj.as_mut().unwrap().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzEncoder { fn shutdown(&mut self) -> Poll<(), io::Error> { try_nb!(self.try_finish()); self.get_mut().shutdown() } } impl Read for XzEncoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { self.get_mut().read(buf) } } #[cfg(feature = "tokio")] impl AsyncRead for XzEncoder {} impl Drop for XzEncoder { #[inline] fn drop(&mut self) { if self.obj.is_some() { let _ = self.try_finish(); } } } impl XzDecoder { /// Creates a new decoding stream which will decode into `obj` one xz stream /// from the input written to it. #[inline] pub fn new(obj: W) -> XzDecoder { let stream = Stream::new_stream_decoder(u64::MAX, 0).unwrap(); XzDecoder::new_stream(obj, stream) } /// Creates a new parallel decoding stream which will decode into `obj` one xz stream /// from the input written to it. #[cfg(feature = "parallel")] pub fn new_parallel(obj: W) -> Self { let stream = MtStreamBuilder::new() .memlimit_stop(u64::MAX) .threads(num_cpus::get() as u32) .decoder() .unwrap(); Self::new_stream(obj, stream) } /// Creates a new decoding stream which will decode into `obj` all the xz streams /// from the input written to it. 
    #[inline]
    pub fn new_multi_decoder(obj: W) -> XzDecoder {
        // LZMA_CONCATENATED makes the decoder accept multiple concatenated
        // .xz streams instead of stopping after the first one.
        let stream = Stream::new_stream_decoder(u64::MAX, liblzma_sys::LZMA_CONCATENATED).unwrap();
        XzDecoder::new_stream(obj, stream)
    }

    /// Creates a new decoding stream which will decode all input written to it
    /// into `obj`.
    ///
    /// A custom `stream` can be specified to configure what format this decoder
    /// will recognize or configure other various decoding options.
    #[inline]
    pub fn new_stream(obj: W, stream: Stream) -> XzDecoder {
        XzDecoder {
            data: stream,
            obj: Some(obj),
            // 32 KiB scratch buffer for decoded output before it is flushed
            // to the inner writer.
            buf: Vec::with_capacity(32 * 1024),
        }
    }

    /// Acquires a reference to the underlying writer.
    #[inline]
    pub fn get_ref(&self) -> &W {
        self.obj.as_ref().unwrap()
    }

    /// Acquires a mutable reference to the underlying writer.
    ///
    /// Note that mutating the output/input state of the stream may corrupt this
    /// object, so care must be taken when using this method.
    #[inline]
    pub fn get_mut(&mut self) -> &mut W {
        self.obj.as_mut().unwrap()
    }

    // Flushes the internal scratch buffer into the inner writer and empties it.
    fn dump(&mut self) -> io::Result<()> {
        if !self.buf.is_empty() {
            self.obj.as_mut().unwrap().write_all(&self.buf)?;
            self.buf.clear();
        }
        Ok(())
    }

    // Drives the decoder with `Action::Finish` until the stream ends,
    // draining decoded output into the inner writer along the way.
    fn try_finish(&mut self) -> io::Result<()> {
        loop {
            self.dump()?;
            let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?;

            // When decoding a truncated file, XZ returns LZMA_BUF_ERROR and
            // decodes no new data, which corresponds to this crate's MemNeeded
            // status. Since we're finishing, we cannot provide more data so
            // this is an error.
            //
            // See the 02_decompress.c example in xz-utils.
            if self.buf.is_empty() && res == Status::MemNeeded {
                let msg = "xz compressed stream is truncated or otherwise corrupt";
                return Err(io::Error::new(io::ErrorKind::UnexpectedEof, msg));
            }

            if res == Status::StreamEnd {
                break;
            }
        }
        self.dump()
    }

    /// Unwrap the underlying writer, finishing the compression stream.
#[inline] pub fn finish(&mut self) -> io::Result { self.try_finish()?; Ok(self.obj.take().unwrap()) } /// Returns the number of bytes produced by the decompressor /// /// Note that, due to buffering, this only bears any relation to /// `total_in()` after a call to `flush()`. At that point, /// `total_in() / total_out()` is the compression ratio. #[inline] pub fn total_out(&self) -> u64 { self.data.total_out() } /// Returns the number of bytes consumed by the decompressor /// (e.g. the number of bytes written to this stream.) #[inline] pub fn total_in(&self) -> u64 { self.data.total_in() } } impl Write for XzDecoder { #[inline] fn write(&mut self, data: &[u8]) -> io::Result { loop { self.dump()?; let before = self.total_in(); let res = self.data.process_vec(data, &mut self.buf, Action::Run)?; let written = (self.total_in() - before) as usize; if written > 0 || data.is_empty() || res == Status::StreamEnd { return Ok(written); } } } #[inline] fn flush(&mut self) -> io::Result<()> { self.dump()?; self.obj.as_mut().unwrap().flush() } } #[cfg(feature = "tokio")] impl AsyncWrite for XzDecoder { fn shutdown(&mut self) -> Poll<(), io::Error> { try_nb!(self.try_finish()); self.get_mut().shutdown() } } impl Read for XzDecoder { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { self.get_mut().read(buf) } } #[cfg(feature = "tokio")] impl AsyncRead for XzDecoder {} impl Drop for XzDecoder { #[inline] fn drop(&mut self) { if self.obj.is_some() { let _ = self.try_finish(); } } } #[cfg(test)] mod tests { use super::*; use crate::stream::LzmaOptions; use quickcheck::quickcheck; use std::iter::repeat; #[test] fn smoke() { let d = XzDecoder::new(Vec::new()); let mut c = XzEncoder::new(d, 6); c.write_all(b"12834").unwrap(); let s = repeat("12345").take(100000).collect::(); c.write_all(s.as_bytes()).unwrap(); let data = c.finish().unwrap().finish().unwrap(); assert_eq!(&data[0..5], b"12834"); assert_eq!(data.len(), 500005); assert_eq!(format!("12834{}", s).as_bytes(), 
&*data); } #[test] fn write_empty() { let d = XzDecoder::new(Vec::new()); let mut c = XzEncoder::new(d, 6); c.write(b"").unwrap(); let data = c.finish().unwrap().finish().unwrap(); assert_eq!(&data[..], b""); } #[test] fn qc_lzma1() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let stream = Stream::new_lzma_decoder(u64::MAX).unwrap(); let w = XzDecoder::new_stream(Vec::new(), stream); let options = LzmaOptions::new_preset(6).unwrap(); let stream = Stream::new_lzma_encoder(&options).unwrap(); let mut w = XzEncoder::new_stream(w, stream); w.write_all(&v).unwrap(); v == w.finish().unwrap().finish().unwrap() } } #[test] fn qc() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let w = XzDecoder::new(Vec::new()); let mut w = XzEncoder::new(w, 6); w.write_all(&v).unwrap(); v == w.finish().unwrap().finish().unwrap() } } #[cfg(feature = "parallel")] #[test] fn qc_parallel_encode() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let w = XzDecoder::new(Vec::new()); let mut w = XzEncoder::new_parallel(w, 6); w.write_all(&v).unwrap(); v == w.finish().unwrap().finish().unwrap() } } #[cfg(feature = "parallel")] #[test] fn qc_parallel_decode() { quickcheck(test as fn(_) -> _); fn test(v: Vec) -> bool { let w = XzDecoder::new_parallel(Vec::new()); let mut w = XzEncoder::new(w, 6); w.write_all(&v).unwrap(); v == w.finish().unwrap().finish().unwrap() } } } liblzma-0.3.4/tests/drop-incomplete.rs000064400000000000000000000023401046102023000160720ustar 00000000000000use liblzma::write::XzDecoder; use std::io::prelude::*; // This is a XZ file generated by head -c10 /dev/urandom | xz -c const DATA: &'static [u8] = &[ 253, 55, 122, 88, 90, 0, 0, 4, 230, 214, 180, 70, 2, 0, 33, 1, 22, 0, 0, 0, 116, 47, 229, 163, 1, 0, 9, 7, 122, 65, 14, 253, 214, 121, 128, 230, 115, 0, 0, 0, 158, 47, 174, 196, 175, 10, 34, 254, 0, 1, 34, 10, 21, 26, 225, 103, 31, 182, 243, 125, 1, 0, 0, 0, 0, 4, 89, 90, ]; /// In this test, we drop a write::XzDecoder after supplying it a 
truncated input stream. /// /// The decoder should detect that it is impossible to decode more data and not /// go into an infinite loop waiting for more data. #[test] fn drop_writer_incomplete_input_no_loop() { let mut decoder = XzDecoder::new(Vec::new()); const PREFIX_LEN: usize = 50; decoder.write_all(&DATA[..PREFIX_LEN]).unwrap(); } /// Same as above, but verifying that we get an error if we manually call `finish`; #[test] fn finish_writer_incomplete_input_error() { let mut decoder = XzDecoder::new(Vec::new()); const PREFIX_LEN: usize = 50; decoder.write_all(&DATA[..PREFIX_LEN]).unwrap(); decoder .finish() .err() .expect("finish should error because of incomplete input"); } liblzma-0.3.4/tests/tokio.rs000064400000000000000000000066571046102023000141350ustar 00000000000000#![cfg(feature = "tokio")] use std::io::{Read, Write}; use std::net::{Shutdown, TcpListener}; use std::thread; use futures::Future; use liblzma::read; use liblzma::write; use rand::Rng; use tokio_core::net::TcpStream; use tokio_core::reactor::Core; use tokio_io::io::{copy, shutdown}; use tokio_io::AsyncRead; #[test] fn tcp_stream_echo_pattern() { const N: u8 = 16; const M: usize = 16 * 1024; let mut core = Core::new().unwrap(); let listener = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); let t = thread::spawn(move || { let a = listener.accept().unwrap().0; let b = a.try_clone().unwrap(); let t = thread::spawn(move || { let mut b = read::XzDecoder::new(b); let mut buf = [0; M]; for i in 0..N { b.read_exact(&mut buf).unwrap(); for byte in buf.iter() { assert_eq!(*byte, i); } } assert_eq!(b.read(&mut buf).unwrap(), 0); }); let mut a = write::XzEncoder::new(a, 6); for i in 0..N { let buf = [i; M]; a.write_all(&buf).unwrap(); } a.finish().unwrap().shutdown(Shutdown::Write).unwrap(); t.join().unwrap(); }); let handle = core.handle(); let stream = TcpStream::connect(&addr, &handle); let copy = stream .and_then(|s| { let (a, b) = s.split(); let a = 
read::XzDecoder::new(a); let b = write::XzEncoder::new(b, 6); copy(a, b) }) .then(|result| { let (amt, _a, b) = result.unwrap(); assert_eq!(amt, (N as u64) * (M as u64)); shutdown(b).map(|_| ()) }); core.run(copy).unwrap(); t.join().unwrap(); } #[test] fn echo_random() { let v = std::iter::repeat(()) .map(|_| rand::thread_rng().gen::()) .take(1024 * 1024) .collect::>(); let mut core = Core::new().unwrap(); let listener = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); let v2 = v.clone(); let t = thread::spawn(move || { let a = listener.accept().unwrap().0; let b = a.try_clone().unwrap(); let mut v3 = v2.clone(); let t = thread::spawn(move || { let mut b = read::XzDecoder::new(b); let mut buf = [0; 1024]; while v3.len() > 0 { let n = b.read(&mut buf).unwrap(); for (actual, expected) in buf[..n].iter().zip(&v3) { assert_eq!(*actual, *expected); } v3.drain(..n); } assert_eq!(b.read(&mut buf).unwrap(), 0); }); let mut a = write::XzEncoder::new(a, 6); a.write_all(&v2).unwrap(); a.finish().unwrap().shutdown(Shutdown::Write).unwrap(); t.join().unwrap(); }); let handle = core.handle(); let stream = TcpStream::connect(&addr, &handle); let copy = stream .and_then(|s| { let (a, b) = s.split(); let a = read::XzDecoder::new(a); let b = write::XzEncoder::new(b, 6); copy(a, b) }) .then(|result| { let (amt, _a, b) = result.unwrap(); assert_eq!(amt, v.len() as u64); shutdown(b).map(|_| ()) }); core.run(copy).unwrap(); t.join().unwrap(); } liblzma-0.3.4/tests/xz.rs000064400000000000000000000034431046102023000134370ustar 00000000000000use std::fs::File; use std::io::prelude::*; use std::path::Path; use liblzma::read; use liblzma::stream; use liblzma::write; #[test] fn standard_files() { for file in Path::new("liblzma-sys/xz/tests/files").read_dir().unwrap() { let file = file.unwrap(); if file.path().extension().and_then(|s| s.to_str()) != Some("xz") { continue; } let filename = file.file_name().into_string().unwrap(); // This appears to be 
implementation-defined how it's handled if filename.contains("unsupported-check") { continue; } println!("testing {:?}", file.path()); let mut contents = Vec::new(); File::open(&file.path()) .unwrap() .read_to_end(&mut contents) .unwrap(); if filename.starts_with("bad") || filename.starts_with("unsupported") { test_bad(&contents); } else { test_good(&contents); } } } fn test_good(data: &[u8]) { let mut ret = Vec::new(); read::XzDecoder::new_multi_decoder(data) .read_to_end(&mut ret) .unwrap(); let mut w = write::XzDecoder::new_multi_decoder(ret); w.write_all(data).unwrap(); w.finish().unwrap(); } fn test_bad(data: &[u8]) { let mut ret = Vec::new(); assert!(read::XzDecoder::new(data).read_to_end(&mut ret).is_err()); let mut w = write::XzDecoder::new(ret); assert!(w.write_all(data).is_err() || w.finish().is_err()); } fn assert_send_sync() {} #[test] fn impls_send_and_sync() { assert_send_sync::(); assert_send_sync::>(); assert_send_sync::>(); assert_send_sync::>(); assert_send_sync::>(); }