jpeg-decoder-0.1.15/.gitignore010064400007650000024000000000231302105003000143100ustar0000000000000000Cargo.lock target/ jpeg-decoder-0.1.15/.travis.yml010064400007650000024000000004551322634675200144740ustar0000000000000000language: rust rust: - 1.14.0 - stable - beta - nightly matrix: allow_failures: - rust: nightly env: - FEATURES="" - FEATURES="rayon" script: - cargo build --verbose --no-default-features --features "$FEATURES" - cargo test --verbose --no-default-features --features "$FEATURES" jpeg-decoder-0.1.15/appveyor.yml010064400007650000024000000007771303401252200147400ustar0000000000000000environment: matrix: - TARGET: x86_64-pc-windows-msvc - TARGET: i686-pc-windows-msvc - TARGET: i686-pc-windows-gnu install: - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin - SET PATH=%PATH%;C:\MinGW\bin - rustc -V - cargo -V build: false test_script: - cargo build --verbose - cargo test --verbose jpeg-decoder-0.1.15/Cargo.toml.orig010064400007650000024000000010111330726102000152170ustar0000000000000000[package] name = "jpeg-decoder" version = "0.1.15" authors = ["Ulf Nilsson "] description = "JPEG decoder" documentation = "https://docs.rs/jpeg-decoder" repository = "https://github.com/kaksmet/jpeg-decoder" readme = "README.md" keywords = ["jpeg", "jpg", "decoder", "image"] license = "MIT / Apache-2.0" exclude = ["tests/*"] [dependencies] byteorder = "1.0" rayon = { version = "1.0", optional = true } [dev-dependencies] docopt = "0.7" png = "0.5" walkdir = "1.0" [features] default = ["rayon"] jpeg-decoder-0.1.15/Cargo.toml0000644000000021440000000000000115060ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "jpeg-decoder" version = "0.1.15" authors = ["Ulf Nilsson "] exclude = ["tests/*"] description = "JPEG decoder" documentation = "https://docs.rs/jpeg-decoder" readme = "README.md" keywords = ["jpeg", "jpg", "decoder", "image"] license = "MIT / Apache-2.0" repository = "https://github.com/kaksmet/jpeg-decoder" [dependencies.byteorder] version = "1.0" [dependencies.rayon] version = "1.0" optional = true [dev-dependencies.docopt] version = "0.7" [dev-dependencies.png] version = "0.5" [dev-dependencies.walkdir] version = "1.0" [features] default = ["rayon"] jpeg-decoder-0.1.15/CHANGELOG.md010064400007650000024000000041601330726144300141620ustar0000000000000000# Change Log All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). ## v0.1.15 (2018-06-10) - Added support for WebAssembly and asm.js (thanks @CryZe!) - Bugfix for images with APP14 segments longer than 12 bytes. ## v0.1.14 (2018-02-15) - Updated `rayon` to 1.0. ## v0.1.13 (2017-06-14) - Updated `rayon` to 0.8. ## v0.1.12 (2017-04-07) - Fixed an integer overflow in `derive_huffman_codes`. - Updated `rayon` to 0.7. ## v0.1.11 (2017-01-09) - Fixed an integer overflow. - Updated `byteorder` to 1.0. 
## v0.1.10 (2016-12-23) - Updated `rayon` to 0.6 ## v0.1.9 (2016-12-12) - Added a generic integer upsampler, which brings support for some unusual subsampling schemes, e.g. 4:1:1 (thanks @iamrohit7!) - Made rayon optional through the `rayon` cargo feature (thanks @jackpot51!) ## v0.1.8 (2016-11-05) * Updated rayon to version 0.5. ## v0.1.7 (2016-10-04) - Added `UnsupportedFeature::NonIntegerSubsamplingRatio` error - Fixed a bug which could cause certain images to fail decoding - Fixed decoding of JPEGs which has a final RST marker in their entropy-coded data - Avoid allocating coefficients when calling `read_info()` on progressive JPEGs ## v0.1.6 (2016-07-12) - Added support for 16-bit quantization tables (even though the JPEG spec explicitly states "An 8-bit DCT-based process shall not use a 16-bit precision quantization table", but since libjpeg allows it there is little choice...) - Added support for decoding files with extraneous data (this violates the JPEG spec, but libjpeg allows it) - Fixed panic when decoding files without SOF - Fixed bug which caused files with certain APP marker segments to fail decoding ## v0.1.5 (2016-06-22) - Removed `euclid` and `num-rational` dependencies - Updated `rayon` to 0.4 ## v0.1.4 (2016-04-20) - Replaced `num` with `num-rational` ## v0.1.3 (2016-04-06) - Updated `byteorder` to 0.5 ## v0.1.2 (2016-03-08) - Fixed a bug which was causing some progressive JPEGs to fail decoding - Performance improvements ## v0.1.1 (2016-02-29) - Performance improvements ## v0.1.0 (2016-02-13) - Initial release jpeg-decoder-0.1.15/examples/decode.rs010064400007650000024000000051721322634675200157730ustar0000000000000000extern crate docopt; extern crate jpeg_decoder as jpeg; extern crate png; use docopt::Docopt; use png::HasParameters; use std::env; use std::fs::File; use std::io::BufReader; use std::process; const USAGE: &'static str = " Usage: decode [--output=] decode -h | --help Options: -h --help Show this screen. -o , --output= Output PNG file. 
"; fn main() { let args = &Docopt::new(USAGE) .and_then(|d| d.argv(env::args()).parse()) .unwrap_or_else(|e| e.exit()); let input = args.get_str(""); let output = args.get_str("-o"); let file = match File::open(input) { Ok(file) => file, Err(error) => { println!("The specified input could not be opened: {}", error); process::exit(1); }, }; let mut decoder = jpeg::Decoder::new(BufReader::new(file)); let mut data = match decoder.decode() { Ok(data) => data, Err(error) => { println!("The image could not be decoded: {}", error); println!("If other software can decode this image successfully then it's likely that this is a bug."); process::exit(1); } }; if !output.is_empty() { let output_file = File::create(output).unwrap(); let info = decoder.info().unwrap(); let mut encoder = png::Encoder::new(output_file, info.width as u32, info.height as u32); encoder.set(png::BitDepth::Eight); match info.pixel_format { jpeg::PixelFormat::L8 => encoder.set(png::ColorType::Grayscale), jpeg::PixelFormat::RGB24 => encoder.set(png::ColorType::RGB), jpeg::PixelFormat::CMYK32 => { data = cmyk_to_rgb(&mut data); encoder.set(png::ColorType::RGB) }, }; encoder.write_header().expect("writing png header failed").write_image_data(&data).expect("png encoding failed"); } } fn cmyk_to_rgb(input: &[u8]) -> Vec { let size = input.len() - input.len() / 4; let mut output = Vec::with_capacity(size); for pixel in input.chunks(4) { let c = pixel[0] as f32 / 255.0; let m = pixel[1] as f32 / 255.0; let y = pixel[2] as f32 / 255.0; let k = pixel[3] as f32 / 255.0; // CMYK -> CMY let c = c * (1.0 - k) + k; let m = m * (1.0 - k) + k; let y = y * (1.0 - k) + k; // CMY -> RGB let r = (1.0 - c) * 255.0; let g = (1.0 - m) * 255.0; let b = (1.0 - y) * 255.0; output.push(r as u8); output.push(g as u8); output.push(b as u8); } output } jpeg-decoder-0.1.15/LICENSE-APACHE010064400007650000024000000251371303512721000142730ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. jpeg-decoder-0.1.15/LICENSE-MIT010064400007650000024000000020741303512721000137760ustar0000000000000000MIT License Copyright (c) 2016 The jpeg-decoder Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. jpeg-decoder-0.1.15/README.md010064400007650000024000000016731322634675200136450ustar0000000000000000# jpeg-decoder [![Travis Build Status](https://travis-ci.org/kaksmet/jpeg-decoder.svg?branch=master)](https://travis-ci.org/kaksmet/jpeg-decoder) [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/k65rrkd0f8yb4o9w/branch/master?svg=true)](https://ci.appveyor.com/project/kaksmet/jpeg-decoder/branch/master) [![Crates.io](https://img.shields.io/crates/v/jpeg-decoder.svg)](https://crates.io/crates/jpeg-decoder) A Rust library for decoding JPEGs. [Documentation](https://docs.rs/jpeg-decoder) ## Example Cargo.toml: ```toml [dependencies] jpeg-decoder = "0.1" ``` main.rs: ```rust extern crate jpeg_decoder as jpeg; use std::fs::File; use std::io::BufReader; fn main() { let file = File::open("hello_world.jpg").expect("failed to open file"); let mut decoder = jpeg::Decoder::new(BufReader::new(file)); let pixels = decoder.decode().expect("failed to decode image"); let metadata = decoder.info().unwrap(); } ``` jpeg-decoder-0.1.15/src/decoder.rs010064400007650000024000001077211330724424500151230ustar0000000000000000use byteorder::ReadBytesExt; use error::{Error, Result, UnsupportedFeature}; use huffman::{fill_default_mjpeg_tables, HuffmanDecoder, HuffmanTable}; use marker::Marker; use parser::{AdobeColorTransform, AppData, CodingProcess, Component, Dimensions, EntropyCoding, FrameInfo, parse_app, parse_com, parse_dht, parse_dqt, parse_dri, parse_sof, parse_sos, ScanInfo}; use upsampler::Upsampler; use std::cmp; use std::io::Read; use std::mem; use std::ops::Range; use std::sync::Arc; use worker::{RowData, PlatformWorker, Worker}; pub const MAX_COMPONENTS: usize = 4; static UNZIGZAG: [u8; 64] = [ 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63, ]; /// An enumeration over combinations of color spaces and bit depths a pixel can have. #[derive(Clone, Copy, Debug, PartialEq)] pub enum PixelFormat { /// Luminance (grayscale), 8 bits L8, /// RGB, 8 bits per channel RGB24, /// CMYK, 8 bits per channel CMYK32, } /// Represents metadata of an image. #[derive(Clone, Copy, Debug, PartialEq)] pub struct ImageInfo { /// The width of the image, in pixels. pub width: u16, /// The height of the image, in pixels. pub height: u16, /// The pixel format of the image. pub pixel_format: PixelFormat, } /// JPEG decoder pub struct Decoder { reader: R, frame: Option, dc_huffman_tables: Vec>, ac_huffman_tables: Vec>, quantization_tables: [Option>; 4], restart_interval: u16, color_transform: Option, is_jfif: bool, is_mjpeg: bool, // Used for progressive JPEGs. coefficients: Vec>, // Bitmask of which coefficients has been completely decoded. coefficients_finished: [u64; MAX_COMPONENTS], } impl Decoder { /// Creates a new `Decoder` using the reader `reader`. 
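///
/// # Example
///
/// A minimal usage sketch mirroring the README example; `image.jpg` is a placeholder
/// path, not a file shipped with the crate:
///
/// ```no_run
/// extern crate jpeg_decoder as jpeg;
/// use std::fs::File;
/// use std::io::BufReader;
///
/// let file = File::open("image.jpg").expect("failed to open file");
/// let mut decoder = jpeg::Decoder::new(BufReader::new(file));
/// let pixels = decoder.decode().expect("failed to decode image");
/// let metadata = decoder.info().unwrap();
/// ```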
pub fn new(reader: R) -> Decoder { Decoder { reader: reader, frame: None, dc_huffman_tables: vec![None, None, None, None], ac_huffman_tables: vec![None, None, None, None], quantization_tables: [None, None, None, None], restart_interval: 0, color_transform: None, is_jfif: false, is_mjpeg: false, coefficients: Vec::new(), coefficients_finished: [0; MAX_COMPONENTS], } } /// Returns metadata about the image. /// /// The returned value will be `None` until a call to either `read_info` or `decode` has /// returned `Ok`. pub fn info(&self) -> Option { match self.frame { Some(ref frame) => { let pixel_format = match frame.components.len() { 1 => PixelFormat::L8, 3 => PixelFormat::RGB24, 4 => PixelFormat::CMYK32, _ => panic!(), }; Some(ImageInfo { width: frame.image_size.width, height: frame.image_size.height, pixel_format: pixel_format, }) }, None => None, } } /// Tries to read metadata from the image without decoding it. /// /// If successful, the metadata can be obtained using the `info` method. pub fn read_info(&mut self) -> Result<()> { self.decode_internal(true).map(|_| ()) } /// Decodes the image and returns the decoded pixels if successful. pub fn decode(&mut self) -> Result> { self.decode_internal(false) } fn decode_internal(&mut self, stop_after_metadata: bool) -> Result> { if stop_after_metadata && self.frame.is_some() { // The metadata has already been read. return Ok(Vec::new()); } else if self.frame.is_none() && (self.reader.read_u8()? != 0xFF || Marker::from_u8(try!(self.reader.read_u8())) != Some(Marker::SOI)) { return Err(Error::Format("first two bytes is not a SOI marker".to_owned())); } let mut previous_marker = Marker::SOI; let mut pending_marker = None; let mut worker = None; let mut scans_processed = 0; let mut planes = vec![Vec::new(); self.frame.as_ref().map_or(0, |frame| frame.components.len())]; loop { let marker = match pending_marker.take() { Some(m) => m, None => self.read_marker()?, }; match marker { // Frame header Marker::SOF(..) => { // Section 4.10 // "An image contains only one frame in the cases of sequential and // progressive coding processes; an image contains multiple frames for the // hierarchical mode." if self.frame.is_some() { return Err(Error::Unsupported(UnsupportedFeature::Hierarchical)); } let frame = parse_sof(&mut self.reader, marker)?; let component_count = frame.components.len(); if frame.is_differential { return Err(Error::Unsupported(UnsupportedFeature::Hierarchical)); } if frame.coding_process == CodingProcess::Lossless { return Err(Error::Unsupported(UnsupportedFeature::Lossless)); } if frame.entropy_coding == EntropyCoding::Arithmetic { return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding)); } if frame.precision != 8 { return Err(Error::Unsupported(UnsupportedFeature::SamplePrecision(frame.precision))); } if frame.image_size.height == 0 { return Err(Error::Unsupported(UnsupportedFeature::DNL)); } if component_count != 1 && component_count != 3 && component_count != 4 { return Err(Error::Unsupported(UnsupportedFeature::ComponentCount(component_count as u8))); } // Make sure we support the subsampling ratios used. 
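// Constructing an Upsampler here is purely a validation step: the result is
// discarded, and a fresh Upsampler is built later when the decoded image is assembled.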
let _ = Upsampler::new(&frame.components, frame.image_size.width, frame.image_size.height)?; self.frame = Some(frame); if stop_after_metadata { return Ok(Vec::new()); } planes = vec![Vec::new(); component_count]; }, // Scan header Marker::SOS => { if self.frame.is_none() { return Err(Error::Format("scan encountered before frame".to_owned())); } if worker.is_none() { worker = Some(PlatformWorker::new()?); } let frame = self.frame.clone().unwrap(); let scan = parse_sos(&mut self.reader, &frame)?; if frame.coding_process == CodingProcess::DctProgressive && self.coefficients.is_empty() { self.coefficients = frame.components.iter().map(|c| { let block_count = c.block_size.width as usize * c.block_size.height as usize; vec![0; block_count * 64] }).collect(); } if scan.successive_approximation_low == 0 { for &i in scan.component_indices.iter() { for j in scan.spectral_selection.clone() { self.coefficients_finished[i] |= 1 << j; } } } let is_final_scan = scan.component_indices.iter().all(|&i| self.coefficients_finished[i] == !0); let (marker, data) = self.decode_scan(&frame, &scan, worker.as_mut().unwrap(), is_final_scan)?; if let Some(data) = data { for (i, plane) in data.into_iter().enumerate().filter(|&(_, ref plane)| !plane.is_empty()) { planes[i] = plane; } } pending_marker = marker; scans_processed += 1; }, // Table-specification and miscellaneous markers // Quantization table-specification Marker::DQT => { let tables = parse_dqt(&mut self.reader)?; for (i, &table) in tables.into_iter().enumerate() { if let Some(table) = table { let mut unzigzagged_table = [0u16; 64]; for j in 0 .. 64 { unzigzagged_table[UNZIGZAG[j] as usize] = table[j]; } self.quantization_tables[i] = Some(Arc::new(unzigzagged_table)); } } }, // Huffman table-specification Marker::DHT => { let is_baseline = self.frame.as_ref().map(|frame| frame.is_baseline); let (dc_tables, ac_tables) = parse_dht(&mut self.reader, is_baseline)?; let current_dc_tables = mem::replace(&mut self.dc_huffman_tables, vec![]); self.dc_huffman_tables = dc_tables.into_iter() .zip(current_dc_tables.into_iter()) .map(|(a, b)| a.or(b)) .collect(); let current_ac_tables = mem::replace(&mut self.ac_huffman_tables, vec![]); self.ac_huffman_tables = ac_tables.into_iter() .zip(current_ac_tables.into_iter()) .map(|(a, b)| a.or(b)) .collect(); }, // Arithmetic conditioning table-specification Marker::DAC => return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding)), // Restart interval definition Marker::DRI => self.restart_interval = parse_dri(&mut self.reader)?, // Comment Marker::COM => { let _comment = parse_com(&mut self.reader)?; }, // Application data Marker::APP(..) => { if let Some(data) = parse_app(&mut self.reader, marker)? { match data { AppData::Adobe(color_transform) => self.color_transform = Some(color_transform), AppData::Jfif => { // From the JFIF spec: // "The APP0 marker is used to identify a JPEG FIF file. // The JPEG FIF APP0 marker is mandatory right after the SOI marker." // Some JPEGs in the wild does not follow this though, so we allow // JFIF headers anywhere APP0 markers are allowed. /* if previous_marker != Marker::SOI { return Err(Error::Format("the JFIF APP0 marker must come right after the SOI marker".to_owned())); } */ self.is_jfif = true; }, AppData::Avi1 => self.is_mjpeg = true, } } }, // Restart Marker::RST(..) => { // Some encoders emit a final RST marker after entropy-coded data, which // decode_scan does not take care of. So if we encounter one, we ignore it. 
if previous_marker != Marker::SOS { return Err(Error::Format("RST found outside of entropy-coded data".to_owned())); } }, // Define number of lines Marker::DNL => { // Section B.2.1 // "If a DNL segment (see B.2.5) is present, it shall immediately follow the first scan." if previous_marker != Marker::SOS || scans_processed != 1 { return Err(Error::Format("DNL is only allowed immediately after the first scan".to_owned())); } return Err(Error::Unsupported(UnsupportedFeature::DNL)); }, // Hierarchical mode markers Marker::DHP | Marker::EXP => return Err(Error::Unsupported(UnsupportedFeature::Hierarchical)), // End of image Marker::EOI => break, _ => return Err(Error::Format(format!("{:?} marker found where not allowed", marker))), } previous_marker = marker; } if planes.is_empty() || planes.iter().any(|plane| plane.is_empty()) { return Err(Error::Format("no data found".to_owned())); } let frame = self.frame.as_ref().unwrap(); compute_image(&frame.components, &planes, frame.image_size, self.is_jfif, self.color_transform) } fn read_marker(&mut self) -> Result { // This should be an error as the JPEG spec doesn't allow extraneous data between marker segments. // libjpeg allows this though and there are images in the wild utilising it, so we are // forced to support this behavior. // Sony Ericsson P990i is an example of a device which produce this sort of JPEGs. while self.reader.read_u8()? != 0xFF {} let mut byte = self.reader.read_u8()?; // Section B.1.1.2 // "Any marker may optionally be preceded by any number of fill bytes, which are bytes assigned code X’FF’." while byte == 0xFF { byte = self.reader.read_u8()?; } match byte { 0x00 => Err(Error::Format("FF 00 found where marker was expected".to_owned())), _ => Ok(Marker::from_u8(byte).unwrap()), } } fn decode_scan(&mut self, frame: &FrameInfo, scan: &ScanInfo, worker: &mut PlatformWorker, produce_data: bool) -> Result<(Option, Option>>)> { assert!(scan.component_indices.len() <= MAX_COMPONENTS); let components: Vec = scan.component_indices.iter() .map(|&i| frame.components[i].clone()) .collect(); // Verify that all required quantization tables has been set. if components.iter().any(|component| self.quantization_tables[component.quantization_table_index].is_none()) { return Err(Error::Format("use of unset quantization table".to_owned())); } if self.is_mjpeg { fill_default_mjpeg_tables(scan, &mut self.dc_huffman_tables, &mut self.ac_huffman_tables); } // Verify that all required huffman tables has been set. if scan.spectral_selection.start == 0 && scan.dc_table_indices.iter().any(|&i| self.dc_huffman_tables[i].is_none()) { return Err(Error::Format("scan makes use of unset dc huffman table".to_owned())); } if scan.spectral_selection.end > 1 && scan.ac_table_indices.iter().any(|&i| self.ac_huffman_tables[i].is_none()) { return Err(Error::Format("scan makes use of unset ac huffman table".to_owned())); } if produce_data { // Prepare the worker thread for the work to come. 
for (i, component) in components.iter().enumerate() { let row_data = RowData { index: i, component: component.clone(), quantization_table: self.quantization_tables[component.quantization_table_index].clone().unwrap(), }; worker.start(row_data)?; } } let blocks_per_mcu: Vec = components.iter() .map(|c| c.horizontal_sampling_factor as u16 * c.vertical_sampling_factor as u16) .collect(); let is_progressive = frame.coding_process == CodingProcess::DctProgressive; let is_interleaved = components.len() > 1; let mut dummy_block = [0i16; 64]; let mut huffman = HuffmanDecoder::new(); let mut dc_predictors = [0i16; MAX_COMPONENTS]; let mut mcus_left_until_restart = self.restart_interval; let mut expected_rst_num = 0; let mut eob_run = 0; let mut mcu_row_coefficients = Vec::with_capacity(components.len()); if produce_data && !is_progressive { for component in &components { let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64; mcu_row_coefficients.push(vec![0i16; coefficients_per_mcu_row]); } } for mcu_y in 0 .. frame.mcu_size.height { for mcu_x in 0 .. frame.mcu_size.width { for (i, component) in components.iter().enumerate() { for j in 0 .. blocks_per_mcu[i] { let (block_x, block_y) = if is_interleaved { // Section A.2.3 (mcu_x * component.horizontal_sampling_factor as u16 + j % component.horizontal_sampling_factor as u16, mcu_y * component.vertical_sampling_factor as u16 + j / component.horizontal_sampling_factor as u16) } else { // Section A.2.2 let blocks_per_row = component.block_size.width as usize; let block_num = (mcu_y as usize * frame.mcu_size.width as usize + mcu_x as usize) * blocks_per_mcu[i] as usize + j as usize; let x = (block_num % blocks_per_row) as u16; let y = (block_num / blocks_per_row) as u16; if x * 8 >= component.size.width || y * 8 >= component.size.height { continue; } (x, y) }; let block_offset = (block_y as usize * component.block_size.width as usize + block_x as usize) * 64; let mcu_row_offset = mcu_y as usize * component.block_size.width as usize * component.vertical_sampling_factor as usize * 64; let coefficients = if is_progressive { &mut self.coefficients[scan.component_indices[i]][block_offset .. block_offset + 64] } else if produce_data { &mut mcu_row_coefficients[i][block_offset - mcu_row_offset .. block_offset - mcu_row_offset + 64] } else { &mut dummy_block[..] }; if scan.successive_approximation_high == 0 { decode_block(&mut self.reader, coefficients, &mut huffman, self.dc_huffman_tables[scan.dc_table_indices[i]].as_ref(), self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(), scan.spectral_selection.clone(), scan.successive_approximation_low, &mut eob_run, &mut dc_predictors[i])?; } else { decode_block_successive_approximation(&mut self.reader, coefficients, &mut huffman, self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(), scan.spectral_selection.clone(), scan.successive_approximation_low, &mut eob_run)?; } } } if self.restart_interval > 0 { let is_last_mcu = mcu_x == frame.mcu_size.width - 1 && mcu_y == frame.mcu_size.height - 1; mcus_left_until_restart -= 1; if mcus_left_until_restart == 0 && !is_last_mcu { match huffman.take_marker(&mut self.reader)? 
{ Some(Marker::RST(n)) => { if n != expected_rst_num { return Err(Error::Format(format!("found RST{} where RST{} was expected", n, expected_rst_num))); } huffman.reset(); // Section F.2.1.3.1 dc_predictors = [0i16; MAX_COMPONENTS]; // Section G.1.2.2 eob_run = 0; expected_rst_num = (expected_rst_num + 1) % 8; mcus_left_until_restart = self.restart_interval; }, Some(marker) => return Err(Error::Format(format!("found marker {:?} inside scan where RST{} was expected", marker, expected_rst_num))), None => return Err(Error::Format(format!("no marker found where RST{} was expected", expected_rst_num))), } } } } if produce_data { // Send the coefficients from this MCU row to the worker thread for dequantization and idct. for (i, component) in components.iter().enumerate() { let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64; let row_coefficients = if is_progressive { let offset = mcu_y as usize * coefficients_per_mcu_row; self.coefficients[scan.component_indices[i]][offset .. offset + coefficients_per_mcu_row].to_vec() } else { mem::replace(&mut mcu_row_coefficients[i], vec![0i16; coefficients_per_mcu_row]) }; worker.append_row((i, row_coefficients))?; } } } let marker = huffman.take_marker(&mut self.reader)?; if produce_data { // Retrieve all the data from the worker thread. let mut data = vec![Vec::new(); frame.components.len()]; for (i, &component_index) in scan.component_indices.iter().enumerate() { data[component_index] = worker.get_result(i)?; } Ok((marker, Some(data))) } else { Ok((marker, None)) } } } fn decode_block(reader: &mut R, coefficients: &mut [i16], huffman: &mut HuffmanDecoder, dc_table: Option<&HuffmanTable>, ac_table: Option<&HuffmanTable>, spectral_selection: Range, successive_approximation_low: u8, eob_run: &mut u16, dc_predictor: &mut i16) -> Result<()> { debug_assert_eq!(coefficients.len(), 64); if spectral_selection.start == 0 { // Section F.2.2.1 // Figure F.12 let value = huffman.decode(reader, dc_table.unwrap())?; let diff = match value { 0 => 0, _ => { // Section F.1.2.1.1 // Table F.1 if value > 11 { return Err(Error::Format("invalid DC difference magnitude category".to_owned())); } huffman.receive_extend(reader, value)? }, }; // Malicious JPEG files can cause this add to overflow, therefore we use wrapping_add. // One example of such a file is tests/crashtest/images/dc-predictor-overflow.jpg *dc_predictor = dc_predictor.wrapping_add(diff); coefficients[0] = *dc_predictor << successive_approximation_low; } let mut index = cmp::max(spectral_selection.start, 1); if index < spectral_selection.end && *eob_run > 0 { *eob_run -= 1; return Ok(()); } // Section F.1.2.2.1 while index < spectral_selection.end { if let Some((value, run)) = huffman.decode_fast_ac(reader, ac_table.unwrap())? { index += run; if index >= spectral_selection.end { break; } coefficients[UNZIGZAG[index as usize] as usize] = value << successive_approximation_low; index += 1; } else { let byte = huffman.decode(reader, ac_table.unwrap())?; let r = byte >> 4; let s = byte & 0x0f; if s == 0 { match r { 15 => index += 16, // Run length of 16 zero coefficients. _ => { *eob_run = (1 << r) - 1; if r > 0 { *eob_run += huffman.get_bits(reader, r)?; } break; }, } } else { index += r; if index >= spectral_selection.end { break; } coefficients[UNZIGZAG[index as usize] as usize] = huffman.receive_extend(reader, s)? 
<< successive_approximation_low; index += 1; } } } Ok(()) } fn decode_block_successive_approximation(reader: &mut R, coefficients: &mut [i16], huffman: &mut HuffmanDecoder, ac_table: Option<&HuffmanTable>, spectral_selection: Range, successive_approximation_low: u8, eob_run: &mut u16) -> Result<()> { debug_assert_eq!(coefficients.len(), 64); let bit = 1 << successive_approximation_low; if spectral_selection.start == 0 { // Section G.1.2.1 if huffman.get_bits(reader, 1)? == 1 { coefficients[0] |= bit; } } else { // Section G.1.2.3 if *eob_run > 0 { *eob_run -= 1; refine_non_zeroes(reader, coefficients, huffman, spectral_selection, 64, bit)?; return Ok(()); } let mut index = spectral_selection.start; while index < spectral_selection.end { let byte = huffman.decode(reader, ac_table.unwrap())?; let r = byte >> 4; let s = byte & 0x0f; let mut zero_run_length = r; let mut value = 0; match s { 0 => { match r { 15 => { // Run length of 16 zero coefficients. // We don't need to do anything special here, zero_run_length is 15 // and then value (which is zero) gets written, resulting in 16 // zero coefficients. }, _ => { *eob_run = (1 << r) - 1; if r > 0 { *eob_run += huffman.get_bits(reader, r)?; } // Force end of block. zero_run_length = 64; }, } }, 1 => { if huffman.get_bits(reader, 1)? == 1 { value = bit; } else { value = -bit; } }, _ => return Err(Error::Format("unexpected huffman code".to_owned())), } let range = Range { start: index, end: spectral_selection.end, }; index = refine_non_zeroes(reader, coefficients, huffman, range, zero_run_length, bit)?; if value != 0 { coefficients[UNZIGZAG[index as usize] as usize] = value; } index += 1; } } Ok(()) } fn refine_non_zeroes(reader: &mut R, coefficients: &mut [i16], huffman: &mut HuffmanDecoder, range: Range, zrl: u8, bit: i16) -> Result { debug_assert_eq!(coefficients.len(), 64); let last = range.end - 1; let mut zero_run_length = zrl; for i in range { let index = UNZIGZAG[i as usize] as usize; if coefficients[index] == 0 { if zero_run_length == 0 { return Ok(i); } zero_run_length -= 1; } else if huffman.get_bits(reader, 1)? == 1 && coefficients[index] & bit == 0 { if coefficients[index] > 0 { coefficients[index] += bit; } else { coefficients[index] -= bit; } } } Ok(last) } fn compute_image(components: &[Component], data: &[Vec], output_size: Dimensions, is_jfif: bool, color_transform: Option) -> Result> { if data.iter().any(|data| data.is_empty()) { return Err(Error::Format("not all components has data".to_owned())); } if components.len() == 1 { let component = &components[0]; if component.size.width % 8 == 0 && component.size.height % 8 == 0 { return Ok(data[0].clone()) } let mut buffer = vec![0u8; component.size.width as usize * component.size.height as usize]; let line_stride = component.block_size.width as usize * 8; for y in 0 .. component.size.height as usize { for x in 0 .. 
component.size.width as usize { buffer[y * component.size.width as usize + x] = data[0][y * line_stride + x]; } } Ok(buffer) } else { compute_image_parallel(components, data, output_size, is_jfif, color_transform) } } #[cfg(feature="rayon")] fn compute_image_parallel(components: &[Component], data: &[Vec], output_size: Dimensions, is_jfif: bool, color_transform: Option) -> Result> { use rayon::prelude::*; let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?; let upsampler = Upsampler::new(components, output_size.width, output_size.height)?; let line_size = output_size.width as usize * components.len(); let mut image = vec![0u8; line_size * output_size.height as usize]; image.par_chunks_mut(line_size) .with_max_len(1) .enumerate() .for_each(|(row, line)| { upsampler.upsample_and_interleave_row(data, row, output_size.width as usize, line); color_convert_func(line, output_size.width as usize); }); Ok(image) } #[cfg(not(feature="rayon"))] fn compute_image_parallel(components: &[Component], data: &[Vec], output_size: Dimensions, is_jfif: bool, color_transform: Option) -> Result> { let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?; let upsampler = Upsampler::new(components, output_size.width, output_size.height)?; let line_size = output_size.width as usize * components.len(); let mut image = vec![0u8; line_size * output_size.height as usize]; for (row, line) in image.chunks_mut(line_size) .enumerate() { upsampler.upsample_and_interleave_row(data, row, output_size.width as usize, line); color_convert_func(line, output_size.width as usize); } Ok(image) } fn choose_color_convert_func(component_count: usize, _is_jfif: bool, color_transform: Option) -> Result { match component_count { 3 => { // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe // Unknown means the data is RGB, so we don't need to perform any color conversion on it. if color_transform == Some(AdobeColorTransform::Unknown) { Ok(color_convert_line_null) } else { Ok(color_convert_line_ycbcr) } }, 4 => { // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe match color_transform { Some(AdobeColorTransform::Unknown) => Ok(color_convert_line_cmyk), Some(_) => Ok(color_convert_line_ycck), None => Err(Error::Format("4 components without Adobe APP14 metadata to tell color space".to_owned())), } }, _ => panic!(), } } fn color_convert_line_null(_data: &mut [u8], _width: usize) { } fn color_convert_line_ycbcr(data: &mut [u8], width: usize) { for i in 0 .. width { let (r, g, b) = ycbcr_to_rgb(data[i * 3], data[i * 3 + 1], data[i * 3 + 2]); data[i * 3] = r; data[i * 3 + 1] = g; data[i * 3 + 2] = b; } } fn color_convert_line_ycck(data: &mut [u8], width: usize) { for i in 0 .. width { let (r, g, b) = ycbcr_to_rgb(data[i * 4], data[i * 4 + 1], data[i * 4 + 2]); let k = data[i * 4 + 3]; data[i * 4] = r; data[i * 4 + 1] = g; data[i * 4 + 2] = b; data[i * 4 + 3] = 255 - k; } } fn color_convert_line_cmyk(data: &mut [u8], width: usize) { for i in 0 .. 
width { data[i * 4] = 255 - data[i * 4]; data[i * 4 + 1] = 255 - data[i * 4 + 1]; data[i * 4 + 2] = 255 - data[i * 4 + 2]; data[i * 4 + 3] = 255 - data[i * 4 + 3]; } } // ITU-R BT.601 fn ycbcr_to_rgb(y: u8, cb: u8, cr: u8) -> (u8, u8, u8) { let y = y as f32; let cb = cb as f32 - 128.0; let cr = cr as f32 - 128.0; let r = y + 1.40200 * cr; let g = y - 0.34414 * cb - 0.71414 * cr; let b = y + 1.77200 * cb; (clamp((r + 0.5) as i32, 0, 255) as u8, clamp((g + 0.5) as i32, 0, 255) as u8, clamp((b + 0.5) as i32, 0, 255) as u8) } fn clamp(value: T, min: T, max: T) -> T { if value < min { return min; } if value > max { return max; } value } jpeg-decoder-0.1.15/src/error.rs010064400007650000024000000057241322634675200146550ustar0000000000000000use std::any::Any; use std::error::Error as StdError; use std::fmt; use std::io::Error as IoError; use std::sync::mpsc::{RecvError, SendError}; pub type Result = ::std::result::Result; /// An enumeration over JPEG features (currently) unsupported by this library. /// /// Support for features listed here may be included in future versions of this library. #[derive(Debug)] pub enum UnsupportedFeature { /// Hierarchical JPEG. Hierarchical, /// Lossless JPEG. Lossless, /// JPEG using arithmetic entropy coding instead of Huffman coding. ArithmeticEntropyCoding, /// Sample precision in bits. 8 bit sample precision is what is currently supported. SamplePrecision(u8), /// Number of components in an image. 1, 3 and 4 components are currently supported. ComponentCount(u8), /// An image can specify a zero height in the frame header and use the DNL (Define Number of /// Lines) marker at the end of the first scan to define the number of lines in the frame. DNL, /// Subsampling ratio. SubsamplingRatio, /// A subsampling ratio not representable as an integer. NonIntegerSubsamplingRatio, } /// Errors that can occur while decoding a JPEG image. #[derive(Debug)] pub enum Error { /// The image is not formatted properly. The string contains detailed information about the /// error. Format(String), /// The image makes use of a JPEG feature not (currently) supported by this library. Unsupported(UnsupportedFeature), /// An I/O error occurred while decoding the image. Io(IoError), /// An internal error occurred while decoding the image. 
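///
/// (The `From` impls below route worker-thread channel failures, `RecvError` and
/// `SendError`, into this variant.)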
Internal(Box), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::Format(ref desc) => write!(f, "invalid JPEG format: {}", desc), Error::Unsupported(ref feat) => write!(f, "unsupported JPEG feature: {:?}", feat), Error::Io(ref err) => err.fmt(f), Error::Internal(ref err) => err.fmt(f), } } } impl StdError for Error { fn description(&self) -> &str { match *self { Error::Format(_) => "invalid JPEG format", Error::Unsupported(_) => "unsupported JPEG feature", Error::Io(ref err) => err.description(), Error::Internal(ref err) => err.description(), } } fn cause(&self) -> Option<&StdError> { match *self { Error::Io(ref err) => Some(err), Error::Internal(ref err) => Some(&**err), _ => None, } } } impl From for Error { fn from(err: IoError) -> Error { Error::Io(err) } } impl From for Error { fn from(err: RecvError) -> Error { Error::Internal(Box::new(err)) } } impl From> for Error { fn from(err: SendError) -> Error { Error::Internal(Box::new(err)) } } jpeg-decoder-0.1.15/src/huffman.rs010064400007650000024000000311461322634675200151450ustar0000000000000000use byteorder::ReadBytesExt; use error::{Error, Result}; use marker::Marker; use parser::ScanInfo; use std::io::Read; use std::iter::repeat; const LUT_BITS: u8 = 8; #[derive(Debug)] pub struct HuffmanDecoder { bits: u64, num_bits: u8, marker: Option, } impl HuffmanDecoder { pub fn new() -> HuffmanDecoder { HuffmanDecoder { bits: 0, num_bits: 0, marker: None, } } // Section F.2.2.3 // Figure F.16 pub fn decode(&mut self, reader: &mut R, table: &HuffmanTable) -> Result { if self.num_bits < 16 { self.read_bits(reader)?; } let (value, size) = table.lut[self.peek_bits(LUT_BITS) as usize]; if size > 0 { self.consume_bits(size); Ok(value) } else { let bits = self.peek_bits(16); for i in LUT_BITS .. 16 { let code = (bits >> (15 - i)) as i32; if code <= table.maxcode[i as usize] { self.consume_bits(i + 1); let index = (code + table.delta[i as usize]) as usize; return Ok(table.values[index]); } } Err(Error::Format("failed to decode huffman code".to_owned())) } } pub fn decode_fast_ac(&mut self, reader: &mut R, table: &HuffmanTable) -> Result> { if let Some(ref ac_lut) = table.ac_lut { if self.num_bits < LUT_BITS { self.read_bits(reader)?; } let (value, run_size) = ac_lut[self.peek_bits(LUT_BITS) as usize]; if run_size != 0 { let run = run_size >> 4; let size = run_size & 0x0f; self.consume_bits(size); return Ok(Some((value, run))); } } Ok(None) } #[inline] pub fn get_bits(&mut self, reader: &mut R, count: u8) -> Result { if self.num_bits < count { self.read_bits(reader)?; } let bits = self.peek_bits(count); self.consume_bits(count); Ok(bits) } #[inline] pub fn receive_extend(&mut self, reader: &mut R, count: u8) -> Result { let value = self.get_bits(reader, count)?; Ok(extend(value, count)) } pub fn reset(&mut self) { self.bits = 0; self.num_bits = 0; } pub fn take_marker(&mut self, reader: &mut R) -> Result> { self.read_bits(reader).map(|_| self.marker.take()) } #[inline] fn peek_bits(&mut self, count: u8) -> u16 { debug_assert!(count <= 16); debug_assert!(self.num_bits >= count); ((self.bits >> (64 - count)) & ((1 << count) - 1)) as u16 } #[inline] fn consume_bits(&mut self, count: u8) { debug_assert!(self.num_bits >= count); self.bits <<= count as usize; self.num_bits -= count; } fn read_bits(&mut self, reader: &mut R) -> Result<()> { while self.num_bits <= 56 { // Fill with zero bits if we have reached the end. 
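// The bit buffer is a u64, so refilling while `num_bits <= 56` always leaves room
// for the 8 bits appended below; once a marker has been seen, zero bytes are used
// instead of consuming more input.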
let byte = match self.marker { Some(_) => 0, None => reader.read_u8()?, }; if byte == 0xFF { let mut next_byte = reader.read_u8()?; // Check for byte stuffing. if next_byte != 0x00 { // We seem to have reached the end of entropy-coded data and encountered a // marker. Since we can't put data back into the reader, we have to continue // reading to identify the marker so we can pass it on. // Section B.1.1.2 // "Any marker may optionally be preceded by any number of fill bytes, which are bytes assigned code X’FF’." while next_byte == 0xFF { next_byte = reader.read_u8()?; } match next_byte { 0x00 => return Err(Error::Format("FF 00 found where marker was expected".to_owned())), _ => self.marker = Some(Marker::from_u8(next_byte).unwrap()), } continue; } } self.bits |= (byte as u64) << (56 - self.num_bits); self.num_bits += 8; } Ok(()) } } // Section F.2.2.1 // Figure F.12 fn extend(value: u16, count: u8) -> i16 { let vt = 1 << (count as u16 - 1); if value < vt { value as i16 + (-1 << count as i16) + 1 } else { value as i16 } } #[derive(Clone, Copy, Debug, PartialEq)] pub enum HuffmanTableClass { DC, AC, } pub struct HuffmanTable { values: Vec, delta: [i32; 16], maxcode: [i32; 16], lut: [(u8, u8); 1 << LUT_BITS], ac_lut: Option<[(i16, u8); 1 << LUT_BITS]>, } impl HuffmanTable { pub fn new(bits: &[u8; 16], values: &[u8], class: HuffmanTableClass) -> Result { let (huffcode, huffsize) = derive_huffman_codes(bits)?; // Section F.2.2.3 // Figure F.15 // delta[i] is set to VALPTR(I) - MINCODE(I) let mut delta = [0i32; 16]; let mut maxcode = [-1i32; 16]; let mut j = 0; for i in 0 .. 16 { if bits[i] != 0 { delta[i] = j as i32 - huffcode[j] as i32; j += bits[i] as usize; maxcode[i] = huffcode[j - 1] as i32; } } // Build a lookup table for faster decoding. let mut lut = [(0u8, 0u8); 1 << LUT_BITS]; for (i, &size) in huffsize.iter().enumerate().filter(|&(_, &size)| size <= LUT_BITS) { let bits_remaining = LUT_BITS - size; let start = (huffcode[i] << bits_remaining) as usize; for j in 0 .. 1 << bits_remaining { lut[start + j] = (values[i], size); } } // Build a lookup table for small AC coefficients which both decodes the value and does the // equivalent of receive_extend. 
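// Each entry stores the already sign-extended coefficient value together with a packed
// byte holding (zero run << 4) | total bits consumed, which is exactly what
// decode_fast_ac unpacks.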
let ac_lut = match class { HuffmanTableClass::DC => None, HuffmanTableClass::AC => { let mut table = [(0i16, 0u8); 1 << LUT_BITS]; for (i, &(value, size)) in lut.iter().enumerate() { let run_length = value >> 4; let magnitude_category = value & 0x0f; if magnitude_category > 0 && size + magnitude_category <= LUT_BITS { let unextended_ac_value = (((i << size) & ((1 << LUT_BITS) - 1)) >> (LUT_BITS - magnitude_category)) as u16; let ac_value = extend(unextended_ac_value, magnitude_category); table[i] = (ac_value, (run_length << 4) | (size + magnitude_category)); } } Some(table) }, }; Ok(HuffmanTable { values: values.to_vec(), delta: delta, maxcode: maxcode, lut: lut, ac_lut: ac_lut, }) } } // Section C.2 fn derive_huffman_codes(bits: &[u8; 16]) -> Result<(Vec, Vec)> { // Figure C.1 let huffsize = bits.iter() .enumerate() .fold(Vec::new(), |mut acc, (i, &value)| { let mut repeated_size: Vec = repeat((i + 1) as u8).take(value as usize).collect(); acc.append(&mut repeated_size); acc }); // Figure C.2 let mut huffcode = vec![0u16; huffsize.len()]; let mut code_size = huffsize[0]; let mut code = 0u32; for (i, &size) in huffsize.iter().enumerate() { while code_size < size { code <<= 1; code_size += 1; } if code >= (1u32 << size) { return Err(Error::Format("bad huffman code length".to_owned())); } huffcode[i] = code as u16; code += 1; } Ok((huffcode, huffsize)) } // https://www.loc.gov/preservation/digital/formats/fdd/fdd000063.shtml // "Avery Lee, writing in the rec.video.desktop newsgroup in 2001, commented that "MJPEG, or at // least the MJPEG in AVIs having the MJPG fourcc, is restricted JPEG with a fixed -- and // *omitted* -- Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, and it must // use basic Huffman encoding, not arithmetic or progressive.... You can indeed extract the // MJPEG frames and decode them with a regular JPEG decoder, but you have to prepend the DHT // segment to them, or else the decoder won't have any idea how to decompress the data. 
// The exact table necessary is given in the OpenDML spec."" pub fn fill_default_mjpeg_tables(scan: &ScanInfo, dc_huffman_tables: &mut[Option], ac_huffman_tables: &mut[Option]) { // Section K.3.3 if dc_huffman_tables[0].is_none() && scan.dc_table_indices.iter().any(|&i| i == 0) { // Table K.3 dc_huffman_tables[0] = Some(HuffmanTable::new( &[0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], &[0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B], HuffmanTableClass::DC).unwrap()); } if dc_huffman_tables[1].is_none() && scan.dc_table_indices.iter().any(|&i| i == 1) { // Table K.4 dc_huffman_tables[1] = Some(HuffmanTable::new( &[0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00], &[0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B], HuffmanTableClass::DC).unwrap()); } if ac_huffman_tables[0].is_none() && scan.ac_table_indices.iter().any(|&i| i == 0) { // Table K.5 ac_huffman_tables[0] = Some(HuffmanTable::new( &[0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D], &[0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA ], HuffmanTableClass::AC).unwrap()); } if ac_huffman_tables[1].is_none() && scan.ac_table_indices.iter().any(|&i| i == 1) { // Table K.6 ac_huffman_tables[1] = Some(HuffmanTable::new( &[0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77], &[0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA ], HuffmanTableClass::AC).unwrap()); } } jpeg-decoder-0.1.15/src/idct.rs010064400007650000024000000200111322634675200144310ustar0000000000000000// Malicious JPEG files can 
cause operations in the idct to overflow. // One example is tests/crashtest/images/imagetestsuite/b0b8914cc5f7a6eff409f16d8cc236c5.jpg // That's why wrapping operators are needed. // This is based on stb_image's 'stbi__idct_block'. pub fn dequantize_and_idct_block(coefficients: &[i16], quantization_table: &[u16; 64], output_linestride: usize, output: &mut [u8]) { debug_assert_eq!(coefficients.len(), 64); let mut temp = [0i32; 64]; // columns for i in 0 .. 8 { // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing if coefficients[i + 8] == 0 && coefficients[i + 16] == 0 && coefficients[i + 24] == 0 && coefficients[i + 32] == 0 && coefficients[i + 40] == 0 && coefficients[i + 48] == 0 && coefficients[i + 56] == 0 { let dcterm = (coefficients[i] as i32 * quantization_table[i] as i32).wrapping_shl(2); temp[i] = dcterm; temp[i + 8] = dcterm; temp[i + 16] = dcterm; temp[i + 24] = dcterm; temp[i + 32] = dcterm; temp[i + 40] = dcterm; temp[i + 48] = dcterm; temp[i + 56] = dcterm; } else { let s0 = coefficients[i] as i32 * quantization_table[i] as i32; let s1 = coefficients[i + 8] as i32 * quantization_table[i + 8] as i32; let s2 = coefficients[i + 16] as i32 * quantization_table[i + 16] as i32; let s3 = coefficients[i + 24] as i32 * quantization_table[i + 24] as i32; let s4 = coefficients[i + 32] as i32 * quantization_table[i + 32] as i32; let s5 = coefficients[i + 40] as i32 * quantization_table[i + 40] as i32; let s6 = coefficients[i + 48] as i32 * quantization_table[i + 48] as i32; let s7 = coefficients[i + 56] as i32 * quantization_table[i + 56] as i32; let p2 = s2; let p3 = s6; let p1 = p2.wrapping_add(p3).wrapping_mul(stbi_f2f(0.5411961)); let t2 = p1.wrapping_add(p3.wrapping_mul(stbi_f2f(-1.847759065))); let t3 = p1.wrapping_add(p2.wrapping_mul(stbi_f2f(0.765366865))); let p2 = s0; let p3 = s4; let t0 = stbi_fsh(p2.wrapping_add(p3)); let t1 = stbi_fsh(p2.wrapping_sub(p3)); let x0 = t0.wrapping_add(t3); let x3 = t0.wrapping_sub(t3); let x1 = t1.wrapping_add(t2); let x2 = t1.wrapping_sub(t2); let t0 = s7; let t1 = s5; let t2 = s3; let t3 = s1; let p3 = t0.wrapping_add(t2); let p4 = t1.wrapping_add(t3); let p1 = t0.wrapping_add(t3); let p2 = t1.wrapping_add(t2); let p5 = p3.wrapping_add(p4).wrapping_mul(stbi_f2f(1.175875602)); let t0 = t0.wrapping_mul(stbi_f2f(0.298631336)); let t1 = t1.wrapping_mul(stbi_f2f(2.053119869)); let t2 = t2.wrapping_mul(stbi_f2f(3.072711026)); let t3 = t3.wrapping_mul(stbi_f2f(1.501321110)); let p1 = p5.wrapping_add(p1.wrapping_mul(stbi_f2f(-0.899976223))); let p2 = p5.wrapping_add(p2.wrapping_mul(stbi_f2f(-2.562915447))); let p3 = p3.wrapping_mul(stbi_f2f(-1.961570560)); let p4 = p4.wrapping_mul(stbi_f2f(-0.390180644)); let t3 = t3.wrapping_add(p1.wrapping_add(p4)); let t2 = t2.wrapping_add(p2.wrapping_add(p3)); let t1 = t1.wrapping_add(p2.wrapping_add(p4)); let t0 = t0.wrapping_add(p1.wrapping_add(p3)); // constants scaled things up by 1<<12; let's bring them back // down, but keep 2 extra bits of precision let x0 = x0.wrapping_add(512); let x1 = x1.wrapping_add(512); let x2 = x2.wrapping_add(512); let x3 = x3.wrapping_add(512); temp[i] = x0.wrapping_add(t3).wrapping_shr(10); temp[i + 56] = x0.wrapping_sub(t3).wrapping_shr(10); temp[i + 8] = x1.wrapping_add(t2).wrapping_shr(10); temp[i + 48] = x1.wrapping_sub(t2).wrapping_shr(10); temp[i + 16] = x2.wrapping_add(t1).wrapping_shr(10); temp[i + 40] = x2.wrapping_sub(t1).wrapping_shr(10); temp[i + 24] = x3.wrapping_add(t0).wrapping_shr(10); temp[i + 32] = x3.wrapping_sub(t0).wrapping_shr(10); } } for i 
in 0 .. 8 { // no fast case since the first 1D IDCT spread components out let s0 = temp[i * 8]; let s1 = temp[i * 8 + 1]; let s2 = temp[i * 8 + 2]; let s3 = temp[i * 8 + 3]; let s4 = temp[i * 8 + 4]; let s5 = temp[i * 8 + 5]; let s6 = temp[i * 8 + 6]; let s7 = temp[i * 8 + 7]; let p2 = s2; let p3 = s6; let p1 = p2.wrapping_add(p3).wrapping_mul(stbi_f2f(0.5411961)); let t2 = p1.wrapping_add(p3.wrapping_mul(stbi_f2f(-1.847759065))); let t3 = p1.wrapping_add(p2.wrapping_mul(stbi_f2f(0.765366865))); let p2 = s0; let p3 = s4; let t0 = stbi_fsh(p2.wrapping_add(p3)); let t1 = stbi_fsh(p2.wrapping_sub(p3)); let x0 = t0.wrapping_add(t3); let x3 = t0.wrapping_sub(t3); let x1 = t1.wrapping_add(t2); let x2 = t1.wrapping_sub(t2); let t0 = s7; let t1 = s5; let t2 = s3; let t3 = s1; let p3 = t0.wrapping_add(t2); let p4 = t1.wrapping_add(t3); let p1 = t0.wrapping_add(t3); let p2 = t1.wrapping_add(t2); let p5 = p3.wrapping_add(p4).wrapping_mul(stbi_f2f(1.175875602)); let t0 = t0.wrapping_mul(stbi_f2f(0.298631336)); let t1 = t1.wrapping_mul(stbi_f2f(2.053119869)); let t2 = t2.wrapping_mul(stbi_f2f(3.072711026)); let t3 = t3.wrapping_mul(stbi_f2f(1.501321110)); let p1 = p5.wrapping_add(p1.wrapping_mul(stbi_f2f(-0.899976223))); let p2 = p5.wrapping_add(p2.wrapping_mul(stbi_f2f(-2.562915447))); let p3 = p3.wrapping_mul(stbi_f2f(-1.961570560)); let p4 = p4.wrapping_mul(stbi_f2f(-0.390180644)); let t3 = t3.wrapping_add(p1.wrapping_add(p4)); let t2 = t2.wrapping_add(p2.wrapping_add(p3)); let t1 = t1.wrapping_add(p2.wrapping_add(p4)); let t0 = t0.wrapping_add(p1.wrapping_add(p3)); // constants scaled things up by 1<<12, plus we had 1<<2 from first // loop, plus horizontal and vertical each scale by sqrt(8) so together // we've got an extra 1<<3, so 1<<17 total we need to remove. // so we want to round that, which means adding 0.5 * 1<<17, // aka 65536. Also, we'll end up with -128 to 127 that we want // to encode as 0..255 by adding 128, so we'll add that before the shift let x0 = x0.wrapping_add(65536 + (128 << 17)); let x1 = x1.wrapping_add(65536 + (128 << 17)); let x2 = x2.wrapping_add(65536 + (128 << 17)); let x3 = x3.wrapping_add(65536 + (128 << 17)); output[i * output_linestride] = stbi_clamp(x0.wrapping_add(t3).wrapping_shr(17)); output[i * output_linestride + 7] = stbi_clamp(x0.wrapping_sub(t3).wrapping_shr(17)); output[i * output_linestride + 1] = stbi_clamp(x1.wrapping_add(t2).wrapping_shr(17)); output[i * output_linestride + 6] = stbi_clamp(x1.wrapping_sub(t2).wrapping_shr(17)); output[i * output_linestride + 2] = stbi_clamp(x2.wrapping_add(t1).wrapping_shr(17)); output[i * output_linestride + 5] = stbi_clamp(x2.wrapping_sub(t1).wrapping_shr(17)); output[i * output_linestride + 3] = stbi_clamp(x3.wrapping_add(t0).wrapping_shr(17)); output[i * output_linestride + 4] = stbi_clamp(x3.wrapping_sub(t0).wrapping_shr(17)); } } // take a -128..127 value and stbi__clamp it and convert to 0..255 fn stbi_clamp(x: i32) -> u8 { // trick to use a single test to catch both cases if x as u32 > 255 { if x < 0 { return 0; } if x > 255 { return 255; } } x as u8 } fn stbi_f2f(x: f32) -> i32 { (x * 4096.0 + 0.5) as i32 } fn stbi_fsh(x: i32) -> i32 { x << 12 } jpeg-decoder-0.1.15/src/lib.rs010064400007650000024000000021731330724424500142570ustar0000000000000000//! This crate contains a JPEG decoder. //! //! # Examples //! //! ``` //! use jpeg_decoder::Decoder; //! use std::fs::File; //! use std::io::BufReader; //! //! let file = File::open("tests/reftest/images/extraneous-data.jpg").expect("failed to open file"); //! 
let mut decoder = Decoder::new(BufReader::new(file)); //! let pixels = decoder.decode().expect("failed to decode image"); //! let metadata = decoder.info().unwrap(); //! ``` //! //! Get metadata from a file without decoding it: //! //! ``` //! use jpeg_decoder::Decoder; //! use std::fs::File; //! use std::io::BufReader; //! //! let file = File::open("tests/reftest/images/extraneous-data.jpg").expect("failed to open file"); //! let mut decoder = Decoder::new(BufReader::new(file)); //! decoder.read_info().expect("failed to read metadata"); //! let metadata = decoder.info().unwrap(); //! ``` #![deny(missing_docs)] extern crate byteorder; #[cfg(feature="rayon")] extern crate rayon; pub use decoder::{Decoder, ImageInfo, PixelFormat}; pub use error::{Error, UnsupportedFeature}; mod decoder; mod error; mod huffman; mod idct; mod marker; mod parser; mod upsampler; mod worker; jpeg-decoder-0.1.15/src/marker.rs010064400007650000024000000103031322634675200147720ustar0000000000000000// Table B.1 #[derive(Clone, Copy, Debug, PartialEq)] pub enum Marker { /// Start Of Frame markers /// /// - SOF(0): Baseline DCT (Huffman coding) /// - SOF(1): Extended sequential DCT (Huffman coding) /// - SOF(2): Progressive DCT (Huffman coding) /// - SOF(3): Lossless (sequential) (Huffman coding) /// - SOF(5): Differential sequential DCT (Huffman coding) /// - SOF(6): Differential progressive DCT (Huffman coding) /// - SOF(7): Differential lossless (sequential) (Huffman coding) /// - SOF(9): Extended sequential DCT (arithmetic coding) /// - SOF(10): Progressive DCT (arithmetic coding) /// - SOF(11): Lossless (sequential) (arithmetic coding) /// - SOF(13): Differential sequential DCT (arithmetic coding) /// - SOF(14): Differential progressive DCT (arithmetic coding) /// - SOF(15): Differential lossless (sequential) (arithmetic coding) SOF(u8), /// Reserved for JPEG extensions JPG, /// Define Huffman table(s) DHT, /// Define arithmetic coding conditioning(s) DAC, /// Restart with modulo 8 count `m` RST(u8), /// Start of image SOI, /// End of image EOI, /// Start of scan SOS, /// Define quantization table(s) DQT, /// Define number of lines DNL, /// Define restart interval DRI, /// Define hierarchical progression DHP, /// Expand reference component(s) EXP, /// Reserved for application segments APP(u8), /// Reserved for JPEG extensions JPGn(u8), /// Comment COM, /// For temporary private use in arithmetic coding TEM, /// Reserved RES, } impl Marker { pub fn has_length(self) -> bool { use self::Marker::*; match self { RST(..) | SOI | EOI | TEM => false, _ => true, } } pub fn from_u8(n: u8) -> Option { use self::Marker::*; match n { 0x00 => None, // Byte stuffing 0x01 => Some(TEM), 0x02 ... 
0xBF => Some(RES), 0xC0 => Some(SOF(0)), 0xC1 => Some(SOF(1)), 0xC2 => Some(SOF(2)), 0xC3 => Some(SOF(3)), 0xC4 => Some(DHT), 0xC5 => Some(SOF(5)), 0xC6 => Some(SOF(6)), 0xC7 => Some(SOF(7)), 0xC8 => Some(JPG), 0xC9 => Some(SOF(9)), 0xCA => Some(SOF(10)), 0xCB => Some(SOF(11)), 0xCC => Some(DAC), 0xCD => Some(SOF(13)), 0xCE => Some(SOF(14)), 0xCF => Some(SOF(15)), 0xD0 => Some(RST(0)), 0xD1 => Some(RST(1)), 0xD2 => Some(RST(2)), 0xD3 => Some(RST(3)), 0xD4 => Some(RST(4)), 0xD5 => Some(RST(5)), 0xD6 => Some(RST(6)), 0xD7 => Some(RST(7)), 0xD8 => Some(SOI), 0xD9 => Some(EOI), 0xDA => Some(SOS), 0xDB => Some(DQT), 0xDC => Some(DNL), 0xDD => Some(DRI), 0xDE => Some(DHP), 0xDF => Some(EXP), 0xE0 => Some(APP(0)), 0xE1 => Some(APP(1)), 0xE2 => Some(APP(2)), 0xE3 => Some(APP(3)), 0xE4 => Some(APP(4)), 0xE5 => Some(APP(5)), 0xE6 => Some(APP(6)), 0xE7 => Some(APP(7)), 0xE8 => Some(APP(8)), 0xE9 => Some(APP(9)), 0xEA => Some(APP(10)), 0xEB => Some(APP(11)), 0xEC => Some(APP(12)), 0xED => Some(APP(13)), 0xEE => Some(APP(14)), 0xEF => Some(APP(15)), 0xF0 => Some(JPGn(0)), 0xF1 => Some(JPGn(1)), 0xF2 => Some(JPGn(2)), 0xF3 => Some(JPGn(3)), 0xF4 => Some(JPGn(4)), 0xF5 => Some(JPGn(5)), 0xF6 => Some(JPGn(6)), 0xF7 => Some(JPGn(7)), 0xF8 => Some(JPGn(8)), 0xF9 => Some(JPGn(9)), 0xFA => Some(JPGn(10)), 0xFB => Some(JPGn(11)), 0xFC => Some(JPGn(12)), 0xFD => Some(JPGn(13)), 0xFE => Some(COM), 0xFF => None, // Fill byte _ => unreachable!(), } } } jpeg-decoder-0.1.15/src/parser.rs010064400007650000024000000445641330724423200150130ustar0000000000000000use byteorder::{BigEndian, ReadBytesExt}; use error::{Error, Result}; use huffman::{HuffmanTable, HuffmanTableClass}; use marker::Marker; use marker::Marker::*; use std::io::Read; use std::ops::Range; #[derive(Clone, Copy, Debug, PartialEq)] pub struct Dimensions { pub width: u16, pub height: u16, } #[derive(Clone, Copy, Debug, PartialEq)] pub enum EntropyCoding { Huffman, Arithmetic, } #[derive(Clone, Copy, Debug, PartialEq)] pub enum CodingProcess { DctSequential, DctProgressive, Lossless, } #[derive(Clone)] pub struct FrameInfo { pub is_baseline: bool, pub is_differential: bool, pub coding_process: CodingProcess, pub entropy_coding: EntropyCoding, pub precision: u8, pub image_size: Dimensions, pub mcu_size: Dimensions, pub components: Vec, } #[derive(Debug)] pub struct ScanInfo { pub component_indices: Vec, pub dc_table_indices: Vec, pub ac_table_indices: Vec, pub spectral_selection: Range, pub successive_approximation_high: u8, pub successive_approximation_low: u8, } #[derive(Clone, Debug)] pub struct Component { pub identifier: u8, pub horizontal_sampling_factor: u8, pub vertical_sampling_factor: u8, pub quantization_table_index: usize, pub size: Dimensions, pub block_size: Dimensions, } #[derive(Debug)] pub enum AppData { Adobe(AdobeColorTransform), Jfif, Avi1, } // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe #[derive(Clone, Copy, Debug, PartialEq)] pub enum AdobeColorTransform { // RGB or CMYK Unknown, YCbCr, // YCbCrK YCCK, } fn read_length(reader: &mut R, marker: Marker) -> Result { assert!(marker.has_length()); // length is including itself. let length = reader.read_u16::()? 
as usize; if length <= 2 { return Err(Error::Format(format!("encountered {:?} with invalid length {}", marker, length))); } Ok(length - 2) } fn skip_bytes(reader: &mut R, length: usize) -> Result<()> { let mut buffer = vec![0u8; length]; reader.read_exact(&mut buffer)?; Ok(()) } // Section B.2.2 pub fn parse_sof(reader: &mut R, marker: Marker) -> Result { let length = read_length(reader, marker)?; if length <= 6 { return Err(Error::Format("invalid length in SOF".to_owned())); } let is_baseline = marker == SOF(0); let is_differential = match marker { SOF(0 ... 3) | SOF(9 ... 11) => false, SOF(5 ... 7) | SOF(13 ... 15) => true, _ => panic!(), }; let coding_process = match marker { SOF(0) | SOF(1) | SOF(5) | SOF(9) | SOF(13) => CodingProcess::DctSequential, SOF(2) | SOF(6) | SOF(10) | SOF(14) => CodingProcess::DctProgressive, SOF(3) | SOF(7) | SOF(11) | SOF(15) => CodingProcess::Lossless, _ => panic!(), }; let entropy_coding = match marker { SOF(0 ... 3) | SOF(5 ... 7) => EntropyCoding::Huffman, SOF(9 ... 11) | SOF(13 ... 15) => EntropyCoding::Arithmetic, _ => panic!(), }; let precision = reader.read_u8()?; match precision { 8 => {}, 12 => { if is_baseline { return Err(Error::Format("12 bit sample precision is not allowed in baseline".to_owned())); } }, _ => { if coding_process != CodingProcess::Lossless { return Err(Error::Format(format!("invalid precision {} in frame header", precision))) } }, } let height = reader.read_u16::()?; let width = reader.read_u16::()?; // height: // "Value 0 indicates that the number of lines shall be defined by the DNL marker and // parameters at the end of the first scan (see B.2.5)." if width == 0 { return Err(Error::Format("zero width in frame header".to_owned())); } let component_count = reader.read_u8()?; if component_count == 0 { return Err(Error::Format("zero component count in frame header".to_owned())); } if coding_process == CodingProcess::DctProgressive && component_count > 4 { return Err(Error::Format("progressive frame with more than 4 components".to_owned())); } if length != 6 + 3 * component_count as usize { return Err(Error::Format("invalid length in SOF".to_owned())); } let mut components: Vec = Vec::with_capacity(component_count as usize); for _ in 0 .. component_count { let identifier = reader.read_u8()?; // Each component's identifier must be unique. 
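        // (JFIF-conforming files normally use component identifiers 1, 2 and 3 for Y, Cb and Cr.)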
if components.iter().any(|c| c.identifier == identifier) { return Err(Error::Format(format!("duplicate frame component identifier {}", identifier))); } let byte = reader.read_u8()?; let horizontal_sampling_factor = byte >> 4; let vertical_sampling_factor = byte & 0x0f; if horizontal_sampling_factor == 0 || horizontal_sampling_factor > 4 { return Err(Error::Format(format!("invalid horizontal sampling factor {}", horizontal_sampling_factor))); } if vertical_sampling_factor == 0 || vertical_sampling_factor > 4 { return Err(Error::Format(format!("invalid vertical sampling factor {}", vertical_sampling_factor))); } let quantization_table_index = reader.read_u8()?; if quantization_table_index > 3 || (coding_process == CodingProcess::Lossless && quantization_table_index != 0) { return Err(Error::Format(format!("invalid quantization table index {}", quantization_table_index))); } components.push(Component { identifier: identifier, horizontal_sampling_factor: horizontal_sampling_factor, vertical_sampling_factor: vertical_sampling_factor, quantization_table_index: quantization_table_index as usize, size: Dimensions {width: 0, height: 0}, block_size: Dimensions {width: 0, height: 0}, }); } let h_max = components.iter().map(|c| c.horizontal_sampling_factor).max().unwrap(); let v_max = components.iter().map(|c| c.vertical_sampling_factor).max().unwrap(); let mcu_size = Dimensions { width: (width as f32 / (h_max as f32 * 8.0)).ceil() as u16, height: (height as f32 / (v_max as f32 * 8.0)).ceil() as u16, }; for component in &mut components { component.size.width = (width as f32 * (component.horizontal_sampling_factor as f32 / h_max as f32)).ceil() as u16; component.size.height = (height as f32 * (component.vertical_sampling_factor as f32 / v_max as f32)).ceil() as u16; component.block_size.width = mcu_size.width * component.horizontal_sampling_factor as u16; component.block_size.height = mcu_size.height * component.vertical_sampling_factor as u16; } Ok(FrameInfo { is_baseline: is_baseline, is_differential: is_differential, coding_process: coding_process, entropy_coding: entropy_coding, precision: precision, image_size: Dimensions {width: width, height: height}, mcu_size: mcu_size, components: components, }) } // Section B.2.3 pub fn parse_sos(reader: &mut R, frame: &FrameInfo) -> Result { let length = read_length(reader, SOS)?; let component_count = reader.read_u8()?; if component_count == 0 || component_count > 4 { return Err(Error::Format(format!("invalid component count {} in scan header", component_count))); } if length != 4 + 2 * component_count as usize { return Err(Error::Format("invalid length in SOF".to_owned())); } let mut component_indices = Vec::with_capacity(component_count as usize); let mut dc_table_indices = Vec::with_capacity(component_count as usize); let mut ac_table_indices = Vec::with_capacity(component_count as usize); for _ in 0 .. component_count { let identifier = reader.read_u8()?; let component_index = match frame.components.iter().position(|c| c.identifier == identifier) { Some(value) => value, None => return Err(Error::Format(format!("scan component identifier {} does not match any of the component identifiers defined in the frame", identifier))), }; // Each of the scan's components must be unique. if component_indices.contains(&component_index) { return Err(Error::Format(format!("duplicate scan component identifier {}", identifier))); } // "... the ordering in the scan header shall follow the ordering in the frame header." 
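        // In other words, the check below rejects scans whose components are not listed in
        // increasing frame-component order (duplicates were already rejected above).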
if component_index < *component_indices.iter().max().unwrap_or(&0) { return Err(Error::Format("the scan component order does not follow the order in the frame header".to_owned())); } let byte = reader.read_u8()?; let dc_table_index = byte >> 4; let ac_table_index = byte & 0x0f; if dc_table_index > 3 || (frame.is_baseline && dc_table_index > 1) { return Err(Error::Format(format!("invalid dc table index {}", dc_table_index))); } if ac_table_index > 3 || (frame.is_baseline && ac_table_index > 1) { return Err(Error::Format(format!("invalid ac table index {}", ac_table_index))); } component_indices.push(component_index); dc_table_indices.push(dc_table_index as usize); ac_table_indices.push(ac_table_index as usize); } let blocks_per_mcu = component_indices.iter().map(|&i| { frame.components[i].horizontal_sampling_factor as u32 * frame.components[i].vertical_sampling_factor as u32 }).fold(0, ::std::ops::Add::add); if component_count > 1 && blocks_per_mcu > 10 { return Err(Error::Format("scan with more than one component and more than 10 blocks per MCU".to_owned())); } let spectral_selection_start = try!(reader.read_u8()); let spectral_selection_end = try!(reader.read_u8()); let byte = reader.read_u8()?; let successive_approximation_high = byte >> 4; let successive_approximation_low = byte & 0x0f; if frame.coding_process == CodingProcess::DctProgressive { if spectral_selection_end > 63 || spectral_selection_start > spectral_selection_end || (spectral_selection_start == 0 && spectral_selection_end != 0) { return Err(Error::Format(format!("invalid spectral selection parameters: ss={}, se={}", spectral_selection_start, spectral_selection_end))); } if spectral_selection_start != 0 && component_count != 1 { return Err(Error::Format("spectral selection scan with AC coefficients can't have more than one component".to_owned())); } if successive_approximation_high > 13 || successive_approximation_low > 13 { return Err(Error::Format(format!("invalid successive approximation parameters: ah={}, al={}", successive_approximation_high, successive_approximation_low))); } // Section G.1.1.1.2 // "Each scan which follows the first scan for a given band progressively improves // the precision of the coefficients by one bit, until full precision is reached." if successive_approximation_high != 0 && successive_approximation_high != successive_approximation_low + 1 { return Err(Error::Format("successive approximation scan with more than one bit of improvement".to_owned())); } } else { if spectral_selection_start != 0 || spectral_selection_end != 63 { return Err(Error::Format("spectral selection is not allowed in non-progressive scan".to_owned())); } if successive_approximation_high != 0 || successive_approximation_low != 0 { return Err(Error::Format("successive approximation is not allowed in non-progressive scan".to_owned())); } } Ok(ScanInfo { component_indices: component_indices, dc_table_indices: dc_table_indices, ac_table_indices: ac_table_indices, spectral_selection: Range { start: spectral_selection_start, end: spectral_selection_end + 1, }, successive_approximation_high: successive_approximation_high, successive_approximation_low: successive_approximation_low, }) } // Section B.2.4.1 pub fn parse_dqt(reader: &mut R) -> Result<[Option<[u16; 64]>; 4]> { let mut length = read_length(reader, DQT)?; let mut tables = [None; 4]; // Each DQT segment may contain multiple quantization tables. 
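    // Each table is prefixed by a Pq/Tq byte: the high nibble selects the element precision
    // (0 = 8-bit, 1 = 16-bit) and the low nibble is the destination index, followed by
    // 64 quantization values in zig-zag order.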
    while length > 0 {
        let byte = reader.read_u8()?;
        let precision = (byte >> 4) as usize;
        let index = (byte & 0x0f) as usize;

        // The combination of 8-bit sample precision and 16-bit quantization tables is explicitly
        // disallowed by the JPEG spec:
        // "An 8-bit DCT-based process shall not use a 16-bit precision quantization table."
        // "Pq: Quantization table element precision – Specifies the precision of the Qk
        //  values. Value 0 indicates 8-bit Qk values; value 1 indicates 16-bit Qk values. Pq
        //  shall be zero for 8 bit sample precision P (see B.2.2)."
        // libjpeg allows this behavior though, and there are images in the wild using it. So to
        // match libjpeg's behavior we are deviating from the JPEG spec here.
        if precision > 1 {
            return Err(Error::Format(format!("invalid precision {} in DQT", precision)));
        }
        if index > 3 {
            return Err(Error::Format(format!("invalid destination identifier {} in DQT", index)));
        }
        if length < 65 + 64 * precision {
            return Err(Error::Format("invalid length in DQT".to_owned()));
        }

        let mut table = [0u16; 64];

        for i in 0 .. 64 {
            table[i] = match precision {
                0 => reader.read_u8()? as u16,
                1 => reader.read_u16::<BigEndian>()?,
                _ => unreachable!(),
            };
        }

        if table.iter().any(|&val| val == 0) {
            return Err(Error::Format("quantization table contains element with a zero value".to_owned()));
        }

        tables[index] = Some(table);
        length -= 65 + 64 * precision;
    }

    Ok(tables)
}

// Section B.2.4.2
pub fn parse_dht<R: Read>(reader: &mut R, is_baseline: Option<bool>) -> Result<(Vec<Option<HuffmanTable>>, Vec<Option<HuffmanTable>>)> {
    let mut length = read_length(reader, DHT)?;
    let mut dc_tables = vec![None, None, None, None];
    let mut ac_tables = vec![None, None, None, None];

    // Each DHT segment may contain multiple huffman tables.
    while length > 17 {
        let byte = reader.read_u8()?;
        let class = byte >> 4;
        let index = (byte & 0x0f) as usize;

        if class != 0 && class != 1 {
            return Err(Error::Format(format!("invalid class {} in DHT", class)));
        }
        if is_baseline == Some(true) && index > 1 {
            return Err(Error::Format("a maximum of two huffman tables per class are allowed in baseline".to_owned()));
        }
        if index > 3 {
            return Err(Error::Format(format!("invalid destination identifier {} in DHT", index)));
        }

        let mut counts = [0u8; 16];
        reader.read_exact(&mut counts)?;

        let size = counts.iter().map(|&val| val as usize).fold(0, ::std::ops::Add::add);

        if size == 0 {
            return Err(Error::Format("encountered table with zero length in DHT".to_owned()));
        }
        else if size > 256 {
            return Err(Error::Format("encountered table with excessive length in DHT".to_owned()));
        }
        else if size > length - 17 {
            return Err(Error::Format("invalid length in DHT".to_owned()));
        }

        let mut values = vec![0u8; size];
        reader.read_exact(&mut values)?;

        match class {
            0 => dc_tables[index] = Some(HuffmanTable::new(&counts, &values, HuffmanTableClass::DC)?),
            1 => ac_tables[index] = Some(HuffmanTable::new(&counts, &values, HuffmanTableClass::AC)?),
            _ => unreachable!(),
        }

        length -= 17 + size;
    }

    if length != 0 {
        return Err(Error::Format("invalid length in DHT".to_owned()));
    }

    Ok((dc_tables, ac_tables))
}

// Section B.2.4.4
pub fn parse_dri<R: Read>(reader: &mut R) -> Result<u16> {
    let length = read_length(reader, DRI)?;

    if length != 2 {
        return Err(Error::Format("DRI with invalid length".to_owned()));
    }

    Ok(reader.read_u16::<BigEndian>()?)
} // Section B.2.4.5 pub fn parse_com(reader: &mut R) -> Result> { let length = read_length(reader, COM)?; let mut buffer = vec![0u8; length]; reader.read_exact(&mut buffer)?; Ok(buffer) } // Section B.2.4.6 pub fn parse_app(reader: &mut R, marker: Marker) -> Result> { let length = read_length(reader, marker)?; let mut bytes_read = 0; let mut result = None; match marker { APP(0) => { if length >= 5 { let mut buffer = [0u8; 5]; reader.read_exact(&mut buffer)?; bytes_read = buffer.len(); // http://www.w3.org/Graphics/JPEG/jfif3.pdf if &buffer[0 .. 5] == &[b'J', b'F', b'I', b'F', b'\0'] { result = Some(AppData::Jfif); // https://sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#AVI1 } else if &buffer[0 .. 5] == &[b'A', b'V', b'I', b'1', b'\0'] { result = Some(AppData::Avi1); } } }, APP(14) => { if length >= 12 { let mut buffer = [0u8; 12]; reader.read_exact(&mut buffer)?; bytes_read = buffer.len(); // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe if &buffer[0 .. 6] == &[b'A', b'd', b'o', b'b', b'e', b'\0'] { let color_transform = match buffer[11] { 0 => AdobeColorTransform::Unknown, 1 => AdobeColorTransform::YCbCr, 2 => AdobeColorTransform::YCCK, _ => return Err(Error::Format("invalid color transform in adobe app segment".to_owned())), }; result = Some(AppData::Adobe(color_transform)); } } }, _ => {}, } skip_bytes(reader, length - bytes_read)?; Ok(result) } jpeg-decoder-0.1.15/src/upsampler.rs010064400007650000024000000212311322634675200155230ustar0000000000000000use error::{Error, Result, UnsupportedFeature}; use parser::Component; pub struct Upsampler { components: Vec, line_buffer_size: usize } struct UpsamplerComponent { upsampler: Box, width: usize, height: usize, row_stride: usize, } impl Upsampler { pub fn new(components: &[Component], output_width: u16, output_height: u16) -> Result { let h_max = components.iter().map(|c| c.horizontal_sampling_factor).max().unwrap(); let v_max = components.iter().map(|c| c.vertical_sampling_factor).max().unwrap(); let mut upsampler_components = Vec::with_capacity(components.len()); for component in components { let upsampler = choose_upsampler((component.horizontal_sampling_factor, component.vertical_sampling_factor), (h_max, v_max), output_width, output_height)?; upsampler_components.push(UpsamplerComponent { upsampler: upsampler, width: component.size.width as usize, height: component.size.height as usize, row_stride: component.block_size.width as usize * 8, }); } let buffer_size = components.iter().map(|c| c.size.width).max().unwrap() as usize * h_max as usize; Ok(Upsampler { components: upsampler_components, line_buffer_size: buffer_size }) } pub fn upsample_and_interleave_row(&self, component_data: &[Vec], row: usize, output_width: usize, output: &mut [u8]) { let component_count = component_data.len(); let mut line_buffer = vec![0u8; self.line_buffer_size]; debug_assert_eq!(component_count, self.components.len()); for (i, component) in self.components.iter().enumerate() { component.upsampler.upsample_row(&component_data[i], component.width, component.height, component.row_stride, row, output_width, &mut line_buffer); for x in 0 .. 
output_width { output[x * component_count + i] = line_buffer[x]; } } } } struct UpsamplerH1V1; struct UpsamplerH2V1; struct UpsamplerH1V2; struct UpsamplerH2V2; struct UpsamplerGeneric { horizontal_scaling_factor: u8, vertical_scaling_factor: u8 } fn choose_upsampler(sampling_factors: (u8, u8), max_sampling_factors: (u8, u8), output_width: u16, output_height: u16) -> Result> { let h1 = sampling_factors.0 == max_sampling_factors.0 || output_width == 1; let v1 = sampling_factors.1 == max_sampling_factors.1 || output_height == 1; let h2 = sampling_factors.0 * 2 == max_sampling_factors.0; let v2 = sampling_factors.1 * 2 == max_sampling_factors.1; if h1 && v1 { Ok(Box::new(UpsamplerH1V1)) } else if h2 && v1 { Ok(Box::new(UpsamplerH2V1)) } else if h1 && v2 { Ok(Box::new(UpsamplerH1V2)) } else if h2 && v2 { Ok(Box::new(UpsamplerH2V2)) } else { if max_sampling_factors.0 % sampling_factors.0 != 0 || max_sampling_factors.1 % sampling_factors.1 != 0 { Err(Error::Unsupported(UnsupportedFeature::NonIntegerSubsamplingRatio)) } else { Ok(Box::new(UpsamplerGeneric { horizontal_scaling_factor: max_sampling_factors.0 / sampling_factors.0, vertical_scaling_factor: max_sampling_factors.1 / sampling_factors.1 })) } } } trait Upsample { fn upsample_row(&self, input: &[u8], input_width: usize, input_height: usize, row_stride: usize, row: usize, output_width: usize, output: &mut [u8]); } impl Upsample for UpsamplerH1V1 { fn upsample_row(&self, input: &[u8], _input_width: usize, _input_height: usize, row_stride: usize, row: usize, output_width: usize, output: &mut [u8]) { let input = &input[row * row_stride ..]; for i in 0 .. output_width { output[i] = input[i]; } } } impl Upsample for UpsamplerH2V1 { fn upsample_row(&self, input: &[u8], input_width: usize, _input_height: usize, row_stride: usize, row: usize, _output_width: usize, output: &mut [u8]) { let input = &input[row * row_stride ..]; if input_width == 1 { output[0] = input[0]; output[1] = input[0]; return; } output[0] = input[0]; output[1] = ((input[0] as u32 * 3 + input[1] as u32 + 2) >> 2) as u8; for i in 1 .. input_width - 1 { let sample = 3 * input[i] as u32 + 2; output[i * 2] = ((sample + input[i - 1] as u32) >> 2) as u8; output[i * 2 + 1] = ((sample + input[i + 1] as u32) >> 2) as u8; } output[(input_width - 1) * 2] = ((input[input_width - 1] as u32 * 3 + input[input_width - 2] as u32 + 2) >> 2) as u8; output[(input_width - 1) * 2 + 1] = input[input_width - 1]; } } impl Upsample for UpsamplerH1V2 { fn upsample_row(&self, input: &[u8], _input_width: usize, input_height: usize, row_stride: usize, row: usize, output_width: usize, output: &mut [u8]) { let row_near = row as f32 / 2.0; // If row_near's fractional is 0.0 we want row_far to be the previous row and if it's 0.5 we // want it to be the next row. let row_far = (row_near + row_near.fract() * 3.0 - 0.25).min((input_height - 1) as f32); let input_near = &input[row_near as usize * row_stride ..]; let input_far = &input[row_far as usize * row_stride ..]; for i in 0 .. output_width { output[i] = ((3 * input_near[i] as u32 + input_far[i] as u32 + 2) >> 2) as u8; } } } impl Upsample for UpsamplerH2V2 { fn upsample_row(&self, input: &[u8], input_width: usize, input_height: usize, row_stride: usize, row: usize, _output_width: usize, output: &mut [u8]) { let row_near = row as f32 / 2.0; // If row_near's fractional is 0.0 we want row_far to be the previous row and if it's 0.5 we // want it to be the next row. 
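        // For example, row 6 gives row_near = 3.0 and row_far = 2.75 (truncates to input row 2,
        // the previous row), while row 7 gives row_near = 3.5 and row_far = 4.75 (truncates to
        // input row 4, the next row).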
        let row_far = (row_near + row_near.fract() * 3.0 - 0.25).min((input_height - 1) as f32);
        let input_near = &input[row_near as usize * row_stride ..];
        let input_far = &input[row_far as usize * row_stride ..];

        if input_width == 1 {
            let value = ((3 * input_near[0] as u32 + input_far[0] as u32 + 2) >> 2) as u8;
            output[0] = value;
            output[1] = value;
            return;
        }

        let mut t1 = 3 * input_near[0] as u32 + input_far[0] as u32;
        output[0] = ((t1 + 2) >> 2) as u8;

        for i in 1 .. input_width {
            let t0 = t1;
            t1 = 3 * input_near[i] as u32 + input_far[i] as u32;

            output[i * 2 - 1] = ((3 * t0 + t1 + 8) >> 4) as u8;
            output[i * 2] = ((3 * t1 + t0 + 8) >> 4) as u8;
        }

        output[input_width * 2 - 1] = ((t1 + 2) >> 2) as u8;
    }
}

impl Upsample for UpsamplerGeneric {
    // Uses nearest neighbor sampling
    fn upsample_row(&self,
                    input: &[u8],
                    input_width: usize,
                    _input_height: usize,
                    row_stride: usize,
                    row: usize,
                    _output_width: usize,
                    output: &mut [u8]) {
        let mut index = 0;
        let start = (row / self.vertical_scaling_factor as usize) * row_stride;
        let input = &input[start..(start + input_width)];

        for val in input {
            for _ in 0..self.horizontal_scaling_factor {
                output[index] = *val;
                index += 1;
            }
        }
    }
}
jpeg-decoder-0.1.15/src/worker/immediate.rs010064400007650000024000000053021330724424500167550ustar0000000000000000
use decoder::MAX_COMPONENTS;
use error::Result;
use idct::dequantize_and_idct_block;
use std::mem;
use std::sync::Arc;
use parser::Component;
use super::{RowData, Worker};

pub struct ImmediateWorker {
    offsets: [usize; MAX_COMPONENTS],
    results: Vec<Vec<u8>>,
    components: Vec<Option<Component>>,
    quantization_tables: Vec<Option<Arc<[u16; 64]>>>,
}

impl ImmediateWorker {
    pub fn new_immediate() -> ImmediateWorker {
        ImmediateWorker {
            offsets: [0; MAX_COMPONENTS],
            results: vec![Vec::new(); MAX_COMPONENTS],
            components: vec![None; MAX_COMPONENTS],
            quantization_tables: vec![None; MAX_COMPONENTS],
        }
    }

    pub fn start_immediate(&mut self, data: RowData) {
        assert!(self.results[data.index].is_empty());

        self.offsets[data.index] = 0;
        self.results[data.index].resize(data.component.block_size.width as usize * data.component.block_size.height as usize * 64, 0u8);
        self.components[data.index] = Some(data.component);
        self.quantization_tables[data.index] = Some(data.quantization_table);
    }

    pub fn append_row_immediate(&mut self, (index, data): (usize, Vec<i16>)) {
        // Convert coefficients from an MCU row to samples.
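        // Each 64-coefficient block is dequantized and inverse-DCT'd into an 8x8 block of
        // samples, written into this component's plane at the offset accumulated from the
        // MCU rows processed so far.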
        let component = self.components[index].as_ref().unwrap();
        let quantization_table = self.quantization_tables[index].as_ref().unwrap();
        let block_count = component.block_size.width as usize * component.vertical_sampling_factor as usize;
        let line_stride = component.block_size.width as usize * 8;

        assert_eq!(data.len(), block_count * 64);

        for i in 0..block_count {
            let x = (i % component.block_size.width as usize) * 8;
            let y = (i / component.block_size.width as usize) * 8;
            dequantize_and_idct_block(&data[i * 64..(i + 1) * 64],
                                      quantization_table,
                                      line_stride,
                                      &mut self.results[index][self.offsets[index] + y * line_stride + x..]);
        }

        self.offsets[index] += data.len();
    }

    pub fn get_result_immediate(&mut self, index: usize) -> Vec<u8> {
        mem::replace(&mut self.results[index], Vec::new())
    }
}

impl Worker for ImmediateWorker {
    fn new() -> Result<Self> {
        Ok(ImmediateWorker::new_immediate())
    }
    fn start(&mut self, data: RowData) -> Result<()> {
        self.start_immediate(data);
        Ok(())
    }
    fn append_row(&mut self, row: (usize, Vec<i16>)) -> Result<()> {
        self.append_row_immediate(row);
        Ok(())
    }
    fn get_result(&mut self, index: usize) -> Result<Vec<u8>> {
        Ok(self.get_result_immediate(index))
    }
}
jpeg-decoder-0.1.15/src/worker/mod.rs010064400007650000024000000012761330724424500156040ustar0000000000000000
mod threaded;
mod immediate;

#[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
pub use self::threaded::ThreadedWorker as PlatformWorker;
#[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
pub use self::immediate::ImmediateWorker as PlatformWorker;

use error::Result;
use parser::Component;
use std::sync::Arc;

pub struct RowData {
    pub index: usize,
    pub component: Component,
    pub quantization_table: Arc<[u16; 64]>,
}

pub trait Worker: Sized {
    fn new() -> Result<Self>;
    fn start(&mut self, row_data: RowData) -> Result<()>;
    fn append_row(&mut self, row: (usize, Vec<i16>)) -> Result<()>;
    fn get_result(&mut self, index: usize) -> Result<Vec<u8>>;
}
jpeg-decoder-0.1.15/src/worker/threaded.rs010064400007650000024000000031741330724424500166040ustar0000000000000000
use error::Result;
use std::sync::mpsc::{self, Sender};
use std::thread;
use super::{RowData, Worker};
use super::immediate::ImmediateWorker;

enum WorkerMsg {
    Start(RowData),
    AppendRow((usize, Vec<i16>)),
    GetResult((usize, Sender<Vec<u8>>)),
}

pub struct ThreadedWorker {
    sender: Sender<WorkerMsg>,
}

impl Worker for ThreadedWorker {
    fn new() -> Result<Self> {
        let thread_builder = thread::Builder::new().name("worker thread".to_owned());
        let (tx, rx) = mpsc::channel();

        thread_builder.spawn(move || {
            let mut worker = ImmediateWorker::new_immediate();

            while let Ok(message) = rx.recv() {
                match message {
                    WorkerMsg::Start(data) => {
                        worker.start_immediate(data);
                    },
                    WorkerMsg::AppendRow(row) => {
                        worker.append_row_immediate(row);
                    },
                    WorkerMsg::GetResult((index, chan)) => {
                        let _ = chan.send(worker.get_result_immediate(index));
                    },
                }
            }
        })?;

        Ok(ThreadedWorker { sender: tx })
    }

    fn start(&mut self, row_data: RowData) -> Result<()> {
        Ok(self.sender.send(WorkerMsg::Start(row_data))?)
    }

    fn append_row(&mut self, row: (usize, Vec<i16>)) -> Result<()> {
        Ok(self.sender.send(WorkerMsg::AppendRow(row))?)
    }

    fn get_result(&mut self, index: usize) -> Result<Vec<u8>> {
        let (tx, rx) = mpsc::channel();
        self.sender.send(WorkerMsg::GetResult((index, tx)))?;
        Ok(rx.recv()?)
    }
}
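// The block below is not part of the crate; it is a minimal sketch of a unit test exercising
// the Worker API defined above. Placed at the end of src/worker/threaded.rs (where it appears
// here), the paths resolve as written. The component dimensions and the all-ones quantization
// table are illustrative assumptions, not values taken from the decoder.
#[cfg(test)]
mod worker_round_trip_sketch {
    use std::sync::Arc;
    use parser::{Component, Dimensions};
    use super::super::{PlatformWorker, RowData, Worker};

    #[test]
    fn one_block_round_trip() {
        // A single 8x8-pixel component; block_size is measured in 8x8 blocks.
        let component = Component {
            identifier: 1,
            horizontal_sampling_factor: 1,
            vertical_sampling_factor: 1,
            quantization_table_index: 0,
            size: Dimensions { width: 8, height: 8 },
            block_size: Dimensions { width: 1, height: 1 },
        };

        let mut worker = PlatformWorker::new().unwrap();
        worker.start(RowData {
            index: 0,
            component: component,
            quantization_table: Arc::new([1u16; 64]),
        }).unwrap();

        // One MCU row of this component is a single block, i.e. 64 coefficients.
        worker.append_row((0, vec![0i16; 64])).unwrap();

        // An all-zero coefficient block comes back as a flat mid-gray 8x8 block.
        let samples = worker.get_result(0).unwrap();
        assert_eq!(samples.len(), 64);
        assert!(samples.iter().all(|&sample| sample == 128));
    }
}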