png-0.15.0/.gitignore010064400017500001750000000000301347544560500126000ustar0000000000000000target Cargo.lock /.ideapng-0.15.0/.travis.yml010064400017500001750000000005711351245535300127240ustar0000000000000000language: rust os: - linux - osx - windows rust: - stable - nightly matrix: include: - rust: stable env: FLAGS="--no-default-features" - rust: nightly env: FLAGS="-Z minimal-versions" allow_failures: - rust: nightly env: FLAGS="-Z minimal-versions" script: - cargo build -v $FLAGS - cargo doc -v $FLAGS - cargo test -v $FLAGS png-0.15.0/Cargo.toml.orig010064400017500001750000000013121351413732300134700ustar0000000000000000[package] name = "png" version = "0.15.0" license = "MIT OR Apache-2.0" description = "PNG decoding and encoding library in pure Rust" categories = ["multimedia::images"] authors = ["nwin "] repository = "https://github.com/image-rs/image-png.git" edition = "2018" exclude = [ "tests/*", ] [dependencies] inflate = "0.4.2" deflate = { version = "0.7.12", optional = true } bitflags = "1.0" crc32fast = "1.2.0" [dev-dependencies] getopts = "0.2.14" term = "0.4" glob = "0.3" rand = "0.5.5" [dev-dependencies.glium] version = "0.22" features = ["glutin"] default-features = false [features] png-encoding = ["deflate"] default = ["png-encoding"] unstable = [] benchmarks = [] png-0.15.0/Cargo.toml0000644000000025510000000000000077430ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] edition = "2018" name = "png" version = "0.15.0" authors = ["nwin "] exclude = ["tests/*"] description = "PNG decoding and encoding library in pure Rust" categories = ["multimedia::images"] license = "MIT OR Apache-2.0" repository = "https://github.com/image-rs/image-png.git" [dependencies.bitflags] version = "1.0" [dependencies.crc32fast] version = "1.2.0" [dependencies.deflate] version = "0.7.12" optional = true [dependencies.inflate] version = "0.4.2" [dev-dependencies.getopts] version = "0.2.14" [dev-dependencies.glium] version = "0.22" features = ["glutin"] default-features = false [dev-dependencies.glob] version = "0.3" [dev-dependencies.rand] version = "0.5.5" [dev-dependencies.term] version = "0.4" [features] benchmarks = [] default = ["png-encoding"] png-encoding = ["deflate"] unstable = [] png-0.15.0/LICENSE-APACHE010064400017500001750000000251371343060310200125260ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. png-0.15.0/LICENSE-MIT010064400017500001750000000020301343060310200122210ustar0000000000000000Copyright (c) 2015 nwin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. png-0.15.0/README.md010064400017500001750000000026331351414243600120700ustar0000000000000000# PNG Decoder/Encoder [![Build Status](https://travis-ci.org/image-rs/image-png.svg?branch=master)](https://travis-ci.org/image-rs/image-png) [![Documentation](https://docs.rs/png/badge.svg)](https://docs.rs/png) [![Crates.io](https://img.shields.io/crates/v/png.svg)](https://crates.io/crates/png) ![Lines of Code](https://tokei.rs/b1/github/image-rs/image-png) [![License](https://img.shields.io/crates/l/png.svg)](https://github.com/image-rs/image-png) PNG decoder/encoder in pure Rust. It contains all features required to handle the entirety of [the PngSuite by Willem van Schaik][PngSuite]. [PngSuite]: http://www.schaik.com/pngsuite2011/pngsuite.html ## pngcheck The `pngcheck` utility is a small demonstration binary that checks and prints metadata on every `.png` image provided as a parameter. You can run it (for example on the test directories) with ```bash cargo run --release --example pngcheck ./tests/pngsuite/* ``` ## License Licensed under either of * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. png-0.15.0/benches/README.md010064400017500001750000000002601347724065700135050ustar0000000000000000# Getting started with benchmarking To run the benchmarks you need a nightly rust toolchain. 
Then you launch it with rustup run nightly cargo bench --features=benchmarks png-0.15.0/benches/decoder.rs010064400017500001750000000013121347724065700142000ustar0000000000000000#![cfg(feature = "benchmarks")] #![feature(test)] extern crate png; extern crate test; use std::fs::File; use std::io::Read; use png::Decoder; #[bench] fn bench_big(b: &mut test::Bencher) { let mut data = Vec::new(); File::open("tests/pngsuite/PngSuite.png").unwrap().read_to_end(&mut data).unwrap(); let decoder = Decoder::new(&*data); let (info, _) = decoder.read_info().unwrap(); let mut image = vec![0; info.buffer_size()]; b.iter(|| { let decoder = Decoder::new(&*data); let (_, mut decoder) = decoder.read_info().unwrap(); test::black_box(decoder.next_frame(&mut image)).unwrap(); }); b.bytes = info.buffer_size() as u64 } png-0.15.0/examples/pngcheck.rs010064400017500001750000000273151351112656600145660ustar0000000000000000#![allow(non_upper_case_globals)] extern crate getopts; extern crate glob; extern crate png; extern crate term; use std::io; use std::io::prelude::*; use std::path::Path; use std::fs::File; use std::env; use getopts::{Matches, Options, ParsingStyle}; use term::{color, Attr}; fn parse_args() -> Matches { let args: Vec = env::args().collect(); let mut opts = Options::new(); opts.optflag("c", "", "colorize output (for ANSI terminals)") .optflag("q", "", "test quietly (output only errors)") //.optflag("t", "", "print contents of tEXt chunks (can be used with -q)"); .optflag("v", "", "test verbosely (print most chunk data)") .parsing_style(ParsingStyle::StopAtFirstFree); if args.len() > 1 { match opts.parse(&args[1..]) { Ok(matches) => return matches, Err(err) => println!("{}", err) } } println!("{}", opts.usage(&format!("Usage: pngcheck [-cpt] [file ...]"))); std::process::exit(0); } #[derive(Clone, Copy)] struct Config { quiet: bool, verbose: bool, color: bool } fn display_interlaced(i: bool) -> &'static str { if i { "interlaced" } else { "non-interlaced" } } fn display_image_type(bits: u8, color: png::ColorType) -> String { use png::ColorType::*; format!( "{}-bit {}", bits, match color { Grayscale => "grayscale", RGB => "RGB", Indexed => "palette", GrayscaleAlpha => "grayscale+alpha", RGBA => "RGB+alpha" } ) } // channels after expansion of tRNS fn final_channels(c: png::ColorType, trns: bool) -> u8 { use png::ColorType::*; match c { Grayscale => 1 + if trns { 1 } else { 0 }, RGB => 3, Indexed => 3 + if trns { 1 } else { 0 }, GrayscaleAlpha => 2, RGBA => 4 } } fn check_image>(c: Config, fname: P) -> io::Result<()> { // TODO improve performance by resusing allocations from decoder use png::Decoded::*; let mut t = term::stdout().ok_or(io::Error::new( io::ErrorKind::Other, "could not open terminal" ))?; let mut data = vec![0; 10*1024]; let data_p = data.as_mut_ptr(); let mut reader = io::BufReader::new(File::open(&fname)?); let fname = fname.as_ref().to_string_lossy(); let n = reader.read(&mut data)?; let mut buf = &data[..n]; let mut pos = 0; let mut decoder = png::StreamingDecoder::new(); // Image data let mut width = 0; let mut height = 0; let mut color = png::ColorType::Grayscale; let mut bits = 0; let mut trns = false; let mut interlaced = false; let mut compressed_size = 0; let mut n_chunks = 0; let mut have_idat = false; macro_rules! 
c_ratio( // TODO add palette entries to compressed_size () => ({ compressed_size as f32/( height as u64 * (width as u64 * final_channels(color, trns) as u64 * bits as u64 + 7)>>3 ) as f32 }); ); let display_error = |err| -> Result<_, io::Error> { let mut t = term::stdout().ok_or(io::Error::new( io::ErrorKind::Other, "could not open terminal" ))?; if c.verbose { if c.color { print!(": "); t.fg(color::RED)?; writeln!(t, "{}", err)?; t.attr(Attr::Bold)?; write!(t, "ERRORS DETECTED")?; t.reset()?; } else { println!(": {}", err); print!("ERRORS DETECTED") } println!(" in {}", fname); } else { if !c.quiet { if c.color { t.fg(color::RED)?; t.attr(Attr::Bold)?; write!(t, "ERROR")?; t.reset()?; write!(t, ": ")?; t.fg(color::YELLOW)?; writeln!(t, "{}", fname)?; t.reset()?; } else { println!("ERROR: {}", fname) }} print!("{}: ", fname); if c.color { t.fg(color::RED)?; writeln!(t, "{}", err)?; t.reset()?; } else { println!("{}", err); } } Ok(()) }; if c.verbose { print!("File: "); if c.color { t.attr(Attr::Bold)?; write!(t, "{}", fname)?; t.reset()?; } else { print!("{}", fname); } print!(" ({}) bytes", data.len()) } loop { if buf.len() == 0 { // circumvent borrow checker assert!(!data.is_empty()); let n = reader.read(unsafe { ::std::slice::from_raw_parts_mut(data_p, data.len()) })?; // EOF if n == 0 { println!("ERROR: premature end of file {}", fname); break; } buf = &data[..n]; } match decoder.update(buf, &mut Vec::new()) { Ok((_, ImageEnd)) => { if !have_idat { display_error(png::DecodingError::Format("IDAT chunk missing".into()))?; break; } if !c.verbose && !c.quiet { if c.color { t.fg(color::GREEN)?; t.attr(Attr::Bold)?; write!(t, "OK")?; t.reset()?; write!(t, ": ")?; t.fg(color::YELLOW)?; write!(t, "{}", fname)?; t.reset()?; } else { print!("OK: {}", fname) } println!( " ({}x{}, {}{}, {}, {:.1}%)", width, height, display_image_type(bits, color), (if trns { "+trns" } else { "" }), display_interlaced(interlaced), 100.0*(1.0-c_ratio!()) ) } else if !c.quiet { println!(""); if c.color { t.fg(color::GREEN)?; t.attr(Attr::Bold)?; write!(t, "No errors detected ")?; t.reset()?; } else { print!("No errors detected "); } println!( "in {} ({} chunks, {:.1}% compression)", fname, n_chunks, 100.0*(1.0-c_ratio!()) ) } break }, Ok((n, res)) => { buf = &buf[n..]; pos += n; match res { Header(w, h, b, c, i) => { width = w; height = h; bits = b as u8; color = c; interlaced = i; } ChunkBegin(len, type_str) => { use png::chunk; n_chunks += 1; if c.verbose { let chunk = String::from_utf8_lossy(&type_str); println!(""); print!(" chunk "); if c.color { t.fg(color::YELLOW)?; write!(t, "{}", chunk)?; t.reset()?; } else { print!("{}", chunk) } print!( " at offset {:#07x}, length {}", pos - 4, // substract chunk name length len ) } match type_str { chunk::IDAT => { have_idat = true; compressed_size += len }, chunk::tRNS => { trns = true; }, _ => () } } ImageData => { //println!("got {} bytes of image data", data.len()) } ChunkComplete(_, type_str) if c.verbose => { use png::chunk::*; match type_str { IHDR => { println!(""); print!( " {} x {} image, {}{}, {}", width, height, display_image_type(bits, color), (if trns { "+trns" } else { "" }), display_interlaced(interlaced), ); } _ => () } } AnimationControl(actl) => { println!(""); print!( " {} frames, {} plays", actl.num_frames, actl.num_plays, ); } FrameControl(fctl) => { println!(""); println!( " sequence #{}, {} x {} pixels @ ({}, {})", fctl.sequence_number, fctl.width, fctl.height, fctl.x_offset, fctl.y_offset, /*fctl.delay_num, fctl.delay_den, fctl.dispose_op, 
fctl.blend_op,*/ ); print!( " {}/{} s delay, dispose: {}, blend: {}", fctl.delay_num, if fctl.delay_den == 0 { 100 } else {fctl.delay_den}, fctl.dispose_op, fctl.blend_op, ); } _ => () } //println!("{} {:?}", n, res) }, Err(err) => { let _ = display_error(err); break } } } Ok(()) } fn main() { let m = parse_args(); let config = Config { quiet: m.opt_present("q"), verbose: m.opt_present("v"), color: m.opt_present("c") }; for file in m.free { let result = if file.contains("*") { glob::glob(&file).map_err(|err| { io::Error::new(io::ErrorKind::Other, err) }).and_then(|mut glob| glob.try_for_each(|entry| { entry.map_err(|err| { io::Error::new(io::ErrorKind::Other, err) }).and_then(|file| { check_image(config, file) }) })) } else { check_image(config, &file) }; result.unwrap_or_else(|err| { println!("{}: {}", file, err); std::process::exit(1) }); } } png-0.15.0/examples/show.rs010064400017500001750000000150721347544560500137700ustar0000000000000000extern crate glium; extern crate glob; extern crate png; use std::env; use std::io; use std::fs::File; use std::borrow::Cow; use std::path; use std::error::Error; use glium::{Surface, Rect, BlitTarget}; use glium::texture::{RawImage2d, ClientFormat}; use glium::glutin::{self, Event, VirtualKeyCode, dpi}; use glium::backend::glutin::Display; /// Load the image using `png` fn load_image(path: &path::PathBuf) -> io::Result> { use png::ColorType::*; let decoder = png::Decoder::new(File::open(path)?); let (info, mut reader) = decoder.read_info()?; let mut img_data = vec![0; info.buffer_size()]; reader.next_frame(&mut img_data)?; let (data, format) = match info.color_type { RGB => (img_data, ClientFormat::U8U8U8), RGBA => (img_data, ClientFormat::U8U8U8U8), Grayscale => ( { let mut vec = Vec::with_capacity(img_data.len()*3); for g in img_data { vec.extend([g, g, g].iter().cloned()) } vec }, ClientFormat::U8U8U8 ), GrayscaleAlpha => ( { let mut vec = Vec::with_capacity(img_data.len()*3); for ga in img_data.chunks(2) { let g = ga[0]; let a = ga[1]; vec.extend([g, g, g, a].iter().cloned()) } vec }, ClientFormat::U8U8U8U8 ), _ => unreachable!("uncovered color type") }; Ok(RawImage2d { data: Cow::Owned(data), width: info.width, height: info.height, format: format }) } fn main_loop(files: Vec) -> io::Result<()> { use glium::glutin::{KeyboardInput, WindowEvent}; let mut files = files.iter(); let image = load_image(files.next().unwrap())?; let mut events_loop = glutin::EventsLoop::new(); let window = glutin::WindowBuilder::new(); let context = glutin::ContextBuilder::new() .with_vsync(true); let display = Display::new(window, context, &events_loop) .map_err(|err| io::Error::new( io::ErrorKind::Other, err.description() ))?; // building the display, ie. the main object resize_window(&display, &image); let mut opengl_texture = glium::Texture2d::new(&display, image).unwrap(); let mut stop = false; let mut res = Ok(()); 'main: loop { let frame = display.draw(); fill_v_flipped(&opengl_texture.as_surface(), &frame, glium::uniforms::MagnifySamplerFilter::Linear); frame.finish().unwrap(); // polling and handling the events received by the window events_loop.poll_events(|event| { if stop {return;} match event { Event::WindowEvent {event: WindowEvent::CloseRequested, ..} => { stop = true; return; } Event::WindowEvent { event: WindowEvent::KeyboardInput { input: KeyboardInput { state: glutin::ElementState::Pressed, virtual_keycode: code, .. }, .. }, .. 
} => match code { Some(VirtualKeyCode::Escape) => { stop = true; return; } Some(VirtualKeyCode::Right) => { match files.next() { Some(path) => { let image = match load_image(path) { Ok(image) => image, Err(err) => { stop = true; res = Err(err); return; } }; resize_window(&display, &image); opengl_texture = glium::Texture2d::new(&display, image).unwrap(); }, None => { stop = true; return; } } }, _ => () }, _ => () } }); if stop {break 'main;} } res } fn fill_v_flipped(src: &S1, target: &S2, filter: glium::uniforms::MagnifySamplerFilter) where S1: Surface, S2: Surface { let src_dim = src.get_dimensions(); let src_rect = Rect { left: 0, bottom: 0, width: src_dim.0 as u32, height: src_dim.1 as u32 }; let target_dim = target.get_dimensions(); let target_rect = BlitTarget { left: 0, bottom: target_dim.1, width: target_dim.0 as i32, height: -(target_dim.1 as i32) }; src.blit_color(&src_rect, target, &target_rect, filter); } fn resize_window(display: &Display, image: &RawImage2d<'static, u8>) { let mut width = image.width; let mut height = image.height; if width < 50 && height < 50 { width *= 10; height *= 10; } else if width < 5 && height < 5 { width *= 10; height *= 10; } display.gl_window().set_inner_size(dpi::LogicalSize::new(f64::from(width), f64::from(height))); } fn main() { let args: Vec = env::args().collect(); if args.len() < 2 { println!("Usage: show files [...]"); } else { let mut files = vec![]; for file in args.iter().skip(1) { match if file.contains("*") { (|| -> io::Result<_> { for entry in glob::glob(&file).map_err(|err| { io::Error::new(io::ErrorKind::Other, err.msg) })? { files.push(entry.map_err(|_| { io::Error::new(io::ErrorKind::Other, "glob error") })?) } Ok(()) })() } else { files.push(path::PathBuf::from(file)); Ok(()) } { Ok(_) => (), Err(err) => { println!("{}: {}", file, err); break } } } // "tests/pngsuite/pngsuite.png" match main_loop(files) { Ok(_) => (), Err(err) => println!("Error: {}", err) } } } png-0.15.0/src/chunk.rs010064400017500001750000000030641343060310200130420ustar0000000000000000//! Chunk types and functions #![allow(dead_code)] #![allow(non_upper_case_globals)] pub type ChunkType = [u8; 4]; // -- Critical chunks -- /// Image header pub const IHDR: ChunkType = [b'I', b'H', b'D', b'R']; /// Palette pub const PLTE: ChunkType = [b'P', b'L', b'T', b'E']; /// Image data pub const IDAT: ChunkType = [b'I', b'D', b'A', b'T']; /// Image trailer pub const IEND: ChunkType = [b'I', b'E', b'N', b'D']; // -- Ancillary chunks -- /// Transparency pub const tRNS: ChunkType = [b't', b'R', b'N', b'S']; /// Background colour pub const bKGD: ChunkType = [b'b', b'K', b'G', b'D']; /// Image last-modification time pub const tIME: ChunkType = [b't', b'I', b'M', b'E']; /// Physical pixel dimensions pub const pHYs: ChunkType = [b'p', b'H', b'Y', b's']; // -- Extension chunks -- /// Animation control pub const acTL: ChunkType = [b'a', b'c', b'T', b'L']; /// Frame control pub const fcTL: ChunkType = [b'f', b'c', b'T', b'L']; /// Frame data pub const fdAT: ChunkType = [b'f', b'd', b'A', b'T']; // -- Chunk type determination -- /// Returns true if the chunk is critical. pub fn is_critical(type_: ChunkType) -> bool { type_[0] & 32 == 0 } /// Returns true if the chunk is private. pub fn is_private(type_: ChunkType) -> bool { type_[1] & 32 != 0 } /// Checks whether the reserved bit of the chunk name is set. /// If it is set the chunk name is invalid. pub fn reserved_set(type_: ChunkType) -> bool { type_[2] & 32 != 0 } /// Returns true if the chunk is safe to copy if unknown. 
pub fn safe_to_copy(type_: ChunkType) -> bool { type_[3] & 32 != 0 }png-0.15.0/src/common.rs010064400017500001750000000301141351414516500132330ustar0000000000000000//! Common types shared between the encoder and decoder use crate::filter; use std::fmt; /// Describes the layout of samples in a pixel #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] pub enum ColorType { Grayscale = 0, RGB = 2, Indexed = 3, GrayscaleAlpha = 4, RGBA = 6 } impl ColorType { /// Returns the number of samples used per pixel of `ColorType` pub fn samples(&self) -> usize { use self::ColorType::*; match *self { Grayscale | Indexed => 1, RGB => 3, GrayscaleAlpha => 2, RGBA => 4 } } /// u8 -> Self. Temporary solution until Rust provides a canonical one. pub fn from_u8(n: u8) -> Option { match n { 0 => Some(ColorType::Grayscale), 2 => Some(ColorType::RGB), 3 => Some(ColorType::Indexed), 4 => Some(ColorType::GrayscaleAlpha), 6 => Some(ColorType::RGBA), _ => None } } } /// Bit depth of the png file #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] pub enum BitDepth { One = 1, Two = 2, Four = 4, Eight = 8, Sixteen = 16, } impl BitDepth { /// u8 -> Self. Temporary solution until Rust provides a canonical one. pub fn from_u8(n: u8) -> Option { match n { 1 => Some(BitDepth::One), 2 => Some(BitDepth::Two), 4 => Some(BitDepth::Four), 8 => Some(BitDepth::Eight), 16 => Some(BitDepth::Sixteen), _ => None } } } /// Pixel dimensions information #[derive(Clone, Copy, Debug)] pub struct PixelDimensions { /// Pixels per unit, X axis pub xppu: u32, /// Pixels per unit, Y axis pub yppu: u32, /// Either *Meter* or *Unspecified* pub unit: Unit, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] /// Physical unit of the pixel dimensions pub enum Unit { Unspecified = 0, Meter = 1, } impl Unit { /// u8 -> Self. Temporary solution until Rust provides a canonical one. pub fn from_u8(n: u8) -> Option { match n { 0 => Some(Unit::Unspecified), 1 => Some(Unit::Meter), _ => None } } } /// How to reset buffer of an animated png (APNG) at the end of a frame. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] pub enum DisposeOp { /// Leave the buffer unchanged. None = 0, /// Clear buffer with the background color. Background = 1, /// Reset the buffer to the state before the current frame. Previous = 2, } impl DisposeOp { /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now. pub fn from_u8(n: u8) -> Option { match n { 0 => Some(DisposeOp::None), 1 => Some(DisposeOp::Background), 2 => Some(DisposeOp::Previous), _ => None } } } impl fmt::Display for DisposeOp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { DisposeOp::None => "DISPOSE_OP_NONE", DisposeOp::Background => "DISPOSE_OP_BACKGROUND", DisposeOp::Previous => "DISPOSE_OP_PREVIOUS", }; write!(f, "{}", name) } } /// How pixels are written into the buffer. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] pub enum BlendOp { /// Pixels overwrite the value at their position. Source = 0, /// The new pixels are blended into the current state based on alpha. Over = 1, } impl BlendOp { /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now. 
pub fn from_u8(n: u8) -> Option { match n { 0 => Some(BlendOp::Source), 1 => Some(BlendOp::Over), _ => None } } } impl fmt::Display for BlendOp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let name = match *self { BlendOp::Source => "BLEND_OP_SOURCE", BlendOp::Over => "BLEND_OP_OVER", }; write!(f, "{}", name) } } /// Frame control information #[derive(Clone, Copy, Debug)] pub struct FrameControl { /// Sequence number of the animation chunk, starting from 0 pub sequence_number: u32, /// Width of the following frame pub width: u32, /// Height of the following frame pub height: u32, /// X position at which to render the following frame pub x_offset: u32, /// Y position at which to render the following frame pub y_offset: u32, /// Frame delay fraction numerator pub delay_num: u16, /// Frame delay fraction denominator pub delay_den: u16, /// Type of frame area disposal to be done after rendering this frame pub dispose_op: DisposeOp, /// Type of frame area rendering for this frame pub blend_op: BlendOp, } impl Default for FrameControl { fn default() -> FrameControl { FrameControl { sequence_number: 0, width: 0, height: 0, x_offset: 0, y_offset: 0, delay_num: 1, delay_den: 30, dispose_op: DisposeOp::None, blend_op: BlendOp::Source, } } } impl FrameControl { pub fn set_seq_num(&mut self, s: u32) { self.sequence_number = s; } pub fn inc_seq_num(&mut self, i: u32) { self.sequence_number += i; } } /// Animation control information #[derive(Clone, Copy, Debug)] pub struct AnimationControl { /// Number of frames pub num_frames: u32, /// Number of times to loop this APNG. 0 indicates infinite looping. pub num_plays: u32, } /// The type and strength of applied compression. #[derive(Debug, Clone)] pub enum Compression { /// Default level Default, /// Fast minimal compression Fast, /// Higher compression level /// /// Best in this context isn't actually the highest possible level /// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2` /// library. Best, Huffman, Rle, } /// PNG info struct #[derive(Debug)] pub struct Info { pub width: u32, pub height: u32, pub bit_depth: BitDepth, pub color_type: ColorType, pub interlaced: bool, pub trns: Option>, pub pixel_dims: Option, pub palette: Option>, pub frame_control: Option, pub animation_control: Option, pub compression: Compression, pub filter: filter::FilterType, } impl Default for Info { fn default() -> Info { Info { width: 0, height: 0, bit_depth: BitDepth::Eight, color_type: ColorType::Grayscale, interlaced: false, palette: None, trns: None, pixel_dims: None, frame_control: None, animation_control: None, // Default to `deflate::Compresion::Fast` and `filter::FilterType::Sub` // to maintain backward compatible output. compression: Compression::Fast, filter: filter::FilterType::Sub, } } } impl Info { /// Size of the image pub fn size(&self) -> (u32, u32) { (self.width, self.height) } /// Returns true if the image is an APNG image. 
pub fn is_animated(&self) -> bool { self.frame_control.is_some() && self.animation_control.is_some() } /// Returns the frame control information of the image pub fn animation_control(&self) -> Option<&AnimationControl> { self.animation_control.as_ref() } /// Returns the frame control information of the current frame pub fn frame_control(&self) -> Option<&FrameControl> { self.frame_control.as_ref() } /// Returns the bits per pixel pub fn bits_per_pixel(&self) -> usize { self.color_type.samples() * self.bit_depth as usize } /// Returns the bytes per pixel pub fn bytes_per_pixel(&self) -> usize { self.color_type.samples() * ((self.bit_depth as usize + 7) >> 3) } /// Returns the number of bytes needed for one deinterlaced image pub fn raw_bytes(&self) -> usize { self.height as usize * self.raw_row_length() } /// Returns the number of bytes needed for one deinterlaced row pub fn raw_row_length(&self) -> usize { let bits = self.width as usize * self.color_type.samples() * self.bit_depth as usize; let extra = bits % 8; bits/8 + match extra { 0 => 0, _ => 1 } + 1 // filter method } /// Returns the number of bytes needed for one deinterlaced row of width `width` pub fn raw_row_length_from_width(&self, width: u32) -> usize { let bits = width as usize * self.color_type.samples() * self.bit_depth as usize; let extra = bits % 8; bits/8 + match extra { 0 => 0, _ => 1 } + 1 // filter method } } bitflags! { /// # Output transformations /// /// Only `IDENTITY` and `TRANSFORM_EXPAND | TRANSFORM_STRIP_ALPHA` can be used at the moment. pub struct Transformations: u32 { /// No transformation const IDENTITY = 0x0000; // read and write */ /// Strip 16-bit samples to 8 bits const STRIP_16 = 0x0001; // read only */ /// Discard the alpha channel const STRIP_ALPHA = 0x0002; // read only */ /// Expand 1; 2 and 4-bit samples to bytes const PACKING = 0x0004; // read and write */ /// Change order of packed pixels to LSB first const PACKSWAP = 0x0008; // read and write */ /// Expand paletted images to RGB; expand grayscale images of /// less than 8-bit depth to 8-bit depth; and expand tRNS chunks /// to alpha channels. const EXPAND = 0x0010; // read only */ /// Invert monochrome images const INVERT_MONO = 0x0020; // read and write */ /// Normalize pixels to the sBIT depth const SHIFT = 0x0040; // read and write */ /// Flip RGB to BGR; RGBA to BGRA const BGR = 0x0080; // read and write */ /// Flip RGBA to ARGB or GA to AG const SWAP_ALPHA = 0x0100; // read and write */ /// Byte-swap 16-bit samples const SWAP_ENDIAN = 0x0200; // read and write */ /// Change alpha from opacity to transparency const INVERT_ALPHA = 0x0400; // read and write */ const STRIP_FILLER = 0x0800; // write only */ const STRIP_FILLER_BEFORE = 0x0800; // write only const STRIP_FILLER_AFTER = 0x1000; // write only */ const GRAY_TO_RGB = 0x2000; // read only */ const EXPAND_16 = 0x4000; // read only */ const SCALE_16 = 0x8000; // read only */ } } /// Mod to encapsulate the converters depending on the `deflate` crate. /// /// Since this only contains trait impls, there is no need to make this public, they are simply /// available when the mod is compiled as well. 
#[cfg(feature = "png-encoding")] mod deflate_convert { extern crate deflate; use super::Compression; impl From for Compression { fn from(c: deflate::Compression) -> Self { match c { deflate::Compression::Default => Compression::Default, deflate::Compression::Fast => Compression::Fast, deflate::Compression::Best => Compression::Best, } } } impl From for deflate::CompressionOptions { fn from(c: Compression) -> Self { match c { Compression::Default => deflate::CompressionOptions::default(), Compression::Fast => deflate::CompressionOptions::fast(), Compression::Best => deflate::CompressionOptions::high(), Compression::Huffman => deflate::CompressionOptions::huffman_only(), Compression::Rle => deflate::CompressionOptions::rle(), } } } } png-0.15.0/src/decoder/mod.rs010064400017500001750000000453571351414516500141460ustar0000000000000000mod stream; pub use self::stream::{StreamingDecoder, Decoded, DecodingError}; use self::stream::{CHUNCK_BUFFER_SIZE, get_info}; use std::mem; use std::borrow; use std::io::{Read, Write, BufReader, BufRead}; use crate::common::{ColorType, BitDepth, Info, Transformations}; use crate::filter::{unfilter, FilterType}; use crate::chunk::IDAT; use crate::utils; /* pub enum InterlaceHandling { /// Outputs the raw rows RawRows, /// Fill missing the pixels from the existing ones Rectangle, /// Only fill the needed pixels Sparkle } */ /// Output info #[derive(Debug)] pub struct OutputInfo { pub width: u32, pub height: u32, pub color_type: ColorType, pub bit_depth: BitDepth, pub line_size: usize, } impl OutputInfo { /// Returns the size needed to hold a decoded frame pub fn buffer_size(&self) -> usize { self.line_size * self.height as usize } } #[derive(Clone, Copy, Debug)] /// Limits on the resources the `Decoder` is allowed too use pub struct Limits { /// maximum number of bytes the decoder is allowed to allocate, default is 64Mib pub bytes: usize, } impl Default for Limits { fn default() -> Limits { Limits { bytes: 1024*1024*64, } } } /// PNG Decoder pub struct Decoder { /// Reader r: R, /// Output transformations transform: Transformations, /// Limits on resources the Decoder is allowed to use limits: Limits, } impl Decoder { pub fn new(r: R) -> Decoder { Decoder::new_with_limits(r, Limits::default()) } pub fn new_with_limits(r: R, l: Limits) -> Decoder { Decoder { r: r, transform: crate::Transformations::EXPAND | crate::Transformations::SCALE_16 | crate::Transformations::STRIP_16, limits: l, } } /// Limit resource usage /// /// ``` /// use std::fs::File; /// use png::{Decoder, Limits}; /// // This image is 32x32 pixels, so the deocder will allocate more than four bytes /// let mut limits = Limits::default(); /// limits.bytes = 4; /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits); /// assert!(decoder.read_info().is_err()); /// // This image is 32x32 pixels, so the decoder will allocate less than 10Kib /// let mut limits = Limits::default(); /// limits.bytes = 10*1024; /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits); /// assert!(decoder.read_info().is_ok()); /// ``` pub fn set_limits(&mut self, limits: Limits) { self.limits = limits; } /// Reads all meta data until the first IDAT chunk pub fn read_info(self) -> Result<(OutputInfo, Reader), DecodingError> { let mut r = Reader::new(self.r, StreamingDecoder::new(), self.transform, self.limits); r.init()?; let (ct, bits) = r.output_color_type(); let info = { let info = r.info(); OutputInfo { width: info.width, 
height: info.height, color_type: ct, bit_depth: bits, line_size: r.output_line_size(info.width), } }; Ok((info, r)) } /// Set the allowed and performed transformations. /// /// A transformation is a pre-processing on the raw image data modifying content or encoding. /// Many options have an impact on memory or CPU usage during decoding. pub fn set_transformations(&mut self, transform: Transformations) { self.transform = transform; } } struct ReadDecoder { reader: BufReader, decoder: StreamingDecoder, at_eof: bool } impl ReadDecoder { /// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written /// into image_data. fn decode_next(&mut self, image_data: &mut Vec) -> Result, DecodingError> { while !self.at_eof { let (consumed, result) = { let buf = self.reader.fill_buf()?; if buf.is_empty() { return Err(DecodingError::Format( "unexpected EOF".into() )) } self.decoder.update(buf, image_data)? }; self.reader.consume(consumed); match result { Decoded::Nothing => (), Decoded::ImageEnd => self.at_eof = true, result => return Ok(Some(result)) } } Ok(None) } fn info(&self) -> Option<&Info> { get_info(&self.decoder) } } /// PNG reader (mostly high-level interface) /// /// Provides a high level that iterates over lines or whole images. pub struct Reader { decoder: ReadDecoder, bpp: usize, rowlen: usize, adam7: Option, /// Previous raw line prev: Vec, /// Current raw line current: Vec, /// Output transformations transform: Transformations, /// Processed line processed: Vec, limits: Limits, } macro_rules! get_info( ($this:expr) => { $this.decoder.info().unwrap() } ); impl Reader { /// Creates a new PNG reader fn new(r: R, d: StreamingDecoder, t: Transformations, limits: Limits) -> Reader { Reader { decoder: ReadDecoder { reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r), decoder: d, at_eof: false }, bpp: 0, rowlen: 0, adam7: None, prev: Vec::new(), current: Vec::new(), transform: t, processed: Vec::new(), limits, } } /// Reads all meta data until the first IDAT chunk fn init(&mut self) -> Result<(), DecodingError> { use crate::Decoded::*; if self.decoder.info().is_some() { Ok(()) } else { loop { match self.decoder.decode_next(&mut Vec::new())? { Some(ChunkBegin(_, IDAT)) => break, None => return Err(DecodingError::Format( "IDAT chunk missing".into() )), _ => (), } } { let info = match self.decoder.info() { Some(info) => info, None => return Err(DecodingError::Format( "IHDR chunk missing".into() )) }; self.bpp = info.bytes_per_pixel(); self.rowlen = info.raw_row_length(); if info.interlaced { self.adam7 = Some(utils::Adam7Iterator::new(info.width, info.height)) } } self.allocate_out_buf()?; self.prev = vec![0; self.rowlen]; Ok(()) } } pub fn info(&self) -> &Info { get_info!(self) } /// Decodes the next frame into `buf` pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<(), DecodingError> { // TODO 16 bit let (color_type, bit_depth) = self.output_color_type(); let width = get_info!(self).width; if buf.len() < self.output_buffer_size() { return Err(DecodingError::Other( "supplied buffer is too small to hold the image".into() )) } if get_info!(self).interlaced { while let Some((row, adam7)) = self.next_interlaced_row()? { let (pass, line, _) = adam7.unwrap(); let samples = color_type.samples() as u8; utils::expand_pass(buf, width, row, pass, line, samples * (bit_depth as u8)); } } else { let mut len = 0; while let Some(row) = self.next_row()? 
{ len += (&mut buf[len..]).write(row)?; } } Ok(()) } /// Returns the next processed row of the image pub fn next_row(&mut self) -> Result, DecodingError> { self.next_interlaced_row().map(|v| v.map(|v| v.0)) } /// Returns the next processed row of the image pub fn next_interlaced_row(&mut self) -> Result)>, DecodingError> { use crate::common::ColorType::*; let transform = self.transform; if transform == crate::Transformations::IDENTITY { self.next_raw_interlaced_row() } else { // swap buffer to circumvent borrow issues let mut buffer = mem::replace(&mut self.processed, Vec::new()); let (got_next, adam7) = if let Some((row, adam7)) = self.next_raw_interlaced_row()? { (&mut buffer[..]).write(row)?; (true, adam7) } else { (false, None) }; // swap back let _ = mem::replace(&mut self.processed, buffer); if got_next { let (color_type, bit_depth, trns) = { let info = get_info!(self); (info.color_type, info.bit_depth as u8, info.trns.is_some()) }; let output_buffer = if let Some((_, _, width)) = adam7 { let width = self.line_size(width); &mut self.processed[..width] } else { &mut *self.processed }; let mut len = output_buffer.len(); if transform.contains(crate::Transformations::EXPAND) { match color_type { Indexed => { expand_paletted(output_buffer, get_info!(self))? } Grayscale | GrayscaleAlpha if bit_depth < 8 => expand_gray_u8( output_buffer, get_info!(self) ), Grayscale | RGB if trns => { let channels = color_type.samples(); let trns = get_info!(self).trns.as_ref().unwrap(); if bit_depth == 8 { utils::expand_trns_line(output_buffer, &*trns, channels); } else { utils::expand_trns_line16(output_buffer, &*trns, channels); } }, _ => () } } if bit_depth == 16 && transform.intersects(crate::Transformations::SCALE_16 | crate::Transformations::STRIP_16) { len /= 2; for i in 0..len { output_buffer[i] = output_buffer[2 * i]; } } Ok(Some(( &output_buffer[..len], adam7 ))) } else { Ok(None) } } } /// Returns the color type and the number of bits per sample /// of the data returned by `Reader::next_row` and Reader::frames`. pub fn output_color_type(&mut self) -> (ColorType, BitDepth) { use crate::common::ColorType::*; let t = self.transform; let info = get_info!(self); if t == crate::Transformations::IDENTITY { (info.color_type, info.bit_depth) } else { let bits = match info.bit_depth as u8 { 16 if t.intersects( crate::Transformations::SCALE_16 | crate::Transformations::STRIP_16 ) => 8, _ if t.contains(crate::Transformations::EXPAND) => 8, n => n }; let color_type = if t.contains(crate::Transformations::EXPAND) { let has_trns = info.trns.is_some(); match info.color_type { Grayscale if has_trns => GrayscaleAlpha, RGB if has_trns => RGBA, Indexed if has_trns => RGBA, Indexed => RGB, ct => ct } } else { info.color_type }; (color_type, BitDepth::from_u8(bits).unwrap()) } } /// Returns the number of bytes required to hold a deinterlaced image frame /// that is decoded using the given input transformations. pub fn output_buffer_size(&self) -> usize { let (width, height) = get_info!(self).size(); let size = self.output_line_size(width); size * height as usize } /// Returns the number of bytes required to hold a deinterlaced row. pub fn output_line_size(&self, width: u32) -> usize { let size = self.line_size(width); if get_info!(self).bit_depth as u8 == 16 && self.transform.intersects( crate::Transformations::SCALE_16 | crate::Transformations::STRIP_16 ) { size / 2 } else { size } } /// Returns the number of bytes required to decode a deinterlaced row. 
fn line_size(&self, width: u32) -> usize { use crate::common::ColorType::*; let t = self.transform; let info = get_info!(self); let trns = info.trns.is_some(); // TODO 16 bit let bits = match info.color_type { Indexed if trns && t.contains(crate::Transformations::EXPAND) => 4 * 8, Indexed if t.contains(crate::Transformations::EXPAND) => 3 * 8, RGB if trns && t.contains(crate::Transformations::EXPAND) => 4 * 8, Grayscale if trns && t.contains(crate::Transformations::EXPAND) => 2 * 8, Grayscale if t.contains(crate::Transformations::EXPAND) => 1 * 8, GrayscaleAlpha if t.contains(crate::Transformations::EXPAND) => 2 * 8, // divide by 2 as it will get mutiplied by two later _ if info.bit_depth as u8 == 16 => info.bits_per_pixel() / 2, _ => info.bits_per_pixel() } * width as usize * if info.bit_depth as u8 == 16 { 2 } else { 1 }; let len = bits / 8; let extra = bits % 8; len + match extra { 0 => 0, _ => 1 } } fn allocate_out_buf(&mut self) -> Result<(), DecodingError> { let width = get_info!(self).width; let bytes = self.limits.bytes; if bytes < self.line_size(width) { return Err(DecodingError::LimitsExceeded); } self.processed = vec![0; self.line_size(width)]; Ok(()) } /// Returns the next raw row of the image fn next_raw_interlaced_row(&mut self) -> Result)>, DecodingError> { let _ = get_info!(self); let bpp = self.bpp; let (rowlen, passdata) = if let Some(ref mut adam7) = self.adam7 { let last_pass = adam7.current_pass(); if let Some((pass, line, len)) = adam7.next() { let rowlen = get_info!(self).raw_row_length_from_width(len); if last_pass != pass { self.prev.clear(); for _ in 0..rowlen { self.prev.push(0); } } (rowlen, Some((pass, line, len))) } else { return Ok(None) } } else { (self.rowlen, None) }; loop { if self.current.len() >= rowlen { if let Some(filter) = FilterType::from_u8(self.current[0]) { if let Err(message) = unfilter(filter, bpp, &self.prev[1..rowlen], &mut self.current[1..rowlen]) { return Err(DecodingError::Format( borrow::Cow::Borrowed(message) )) } self.prev[..rowlen].copy_from_slice(&self.current[..rowlen]); self.current.drain(0..rowlen); return Ok( Some(( &self.prev[1..rowlen], passdata )) ) } else { return Err(DecodingError::Format( format!("invalid filter method ({})", self.current[0]).into() )) } } else { let val = self.decoder.decode_next(&mut self.current)?; match val { Some(Decoded::ImageData) => {} None => { if self.current.len() > 0 { return Err(DecodingError::Format( "file truncated".into() )) } else { return Ok(None) } } _ => () } } } } } fn expand_paletted(buffer: &mut [u8], info: &Info) -> Result<(), DecodingError> { if let Some(palette) = info.palette.as_ref() { if let BitDepth::Sixteen = info.bit_depth { Err(DecodingError::Format("Bit depth '16' is not valid for paletted images".into())) } else { let black = [0, 0, 0]; if let Some(ref trns) = info.trns { utils::unpack_bits(buffer, 4, info.bit_depth as u8, |i, chunk| { let (rgb, a) = ( palette.get(3*i as usize..3*i as usize+3).unwrap_or(&black), *trns.get(i as usize).unwrap_or(&0xFF) ); chunk[0] = rgb[0]; chunk[1] = rgb[1]; chunk[2] = rgb[2]; chunk[3] = a; }); } else { utils::unpack_bits(buffer, 3, info.bit_depth as u8, |i, chunk| { let rgb = palette.get(3*i as usize..3*i as usize+3).unwrap_or(&black); chunk[0] = rgb[0]; chunk[1] = rgb[1]; chunk[2] = rgb[2]; }) } Ok(()) } } else { Err(DecodingError::Format("missing palette".into())) } } fn expand_gray_u8(buffer: &mut [u8], info: &Info) { let rescale = true; let scaling_factor = if rescale { (255)/((1u16 << info.bit_depth as u8) - 1) as u8 } else { 1 }; if 
let Some(ref trns) = info.trns { utils::unpack_bits(buffer, 2, info.bit_depth as u8, |pixel, chunk| { if pixel == trns[0] { chunk[1] = 0 } else { chunk[1] = 0xFF } chunk[0] = pixel * scaling_factor }) } else { utils::unpack_bits(buffer, 1, info.bit_depth as u8, |val, chunk| { chunk[0] = val * scaling_factor }) } } png-0.15.0/src/decoder/stream.rs010064400017500001750000000527551351245535300146630ustar0000000000000000extern crate crc32fast; extern crate inflate; use std::borrow::Cow; use std::default::Default; use std::error; use std::fmt; use std::io; use std::cmp::min; use std::convert::From; use crc32fast::Hasher as Crc32; use self::inflate::InflateStream; use crate::traits::ReadBytesExt; use crate::common::{BitDepth, BlendOp, ColorType, DisposeOp, Info, Unit, PixelDimensions, AnimationControl, FrameControl}; use crate::chunk::{self, ChunkType, IHDR, IDAT, IEND}; /// TODO check if these size are reasonable pub const CHUNCK_BUFFER_SIZE: usize = 32*1024; /// Determines if checksum checks should be disabled globally. /// /// This is used only in fuzzing. `afl` automatically adds `--cfg fuzzing` to RUSTFLAGS which can /// be used to detect that build. const CHECKSUM_DISABLED: bool = cfg!(fuzzing); fn zlib_stream() -> InflateStream { if CHECKSUM_DISABLED { InflateStream::from_zlib_no_checksum() } else { InflateStream::from_zlib() } } #[derive(Debug)] enum U32Value { // CHUNKS Length, Type(u32), Crc(ChunkType) } #[derive(Debug)] enum State { Signature(u8, [u8; 7]), U32Byte3(U32Value, u32), U32Byte2(U32Value, u32), U32Byte1(U32Value, u32), U32(U32Value), ReadChunk(ChunkType, bool), PartialChunk(ChunkType), DecodeData(ChunkType, usize), } #[derive(Debug)] /// Result of the decoding process pub enum Decoded { /// Nothing decoded yet Nothing, Header(u32, u32, BitDepth, ColorType, bool), ChunkBegin(u32, ChunkType), ChunkComplete(u32, ChunkType), PixelDimensions(PixelDimensions), AnimationControl(AnimationControl), FrameControl(FrameControl), /// Decoded raw image data. ImageData, PartialChunk(ChunkType), ImageEnd, } #[derive(Debug)] pub enum DecodingError { IoError(io::Error), Format(Cow<'static, str>), InvalidSignature, CrcMismatch { /// bytes to skip to try to recover from this error recover: usize, /// Stored CRC32 value crc_val: u32, /// Calculated CRC32 sum crc_sum: u32, chunk: ChunkType }, Other(Cow<'static, str>), CorruptFlateStream, LimitsExceeded, } impl error::Error for DecodingError { fn description(&self) -> &str { use self::DecodingError::*; match *self { IoError(ref err) => err.description(), Format(ref desc) | Other(ref desc) => &desc, InvalidSignature => "invalid signature", CrcMismatch { .. 
} => "CRC error", CorruptFlateStream => "compressed data stream corrupted", LimitsExceeded => "limits are exceeded" } } } impl fmt::Display for DecodingError { fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(fmt, "{}", (self as &dyn error::Error).description()) } } impl From for DecodingError { fn from(err: io::Error) -> DecodingError { DecodingError::IoError(err) } } impl From for DecodingError { fn from(err: String) -> DecodingError { DecodingError::Other(err.into()) } } impl From for io::Error { fn from(err: DecodingError) -> io::Error { use std::error::Error; match err { DecodingError::IoError(err) => err, err => io::Error::new( io::ErrorKind::Other, err.description() ) } } } /// PNG StreamingDecoder (low-level interface) pub struct StreamingDecoder { state: Option, current_chunk: (Crc32, u32, Vec), inflater: InflateStream, info: Option, current_seq_no: Option, have_idat: bool, } impl StreamingDecoder { /// Creates a new StreamingDecoder /// /// Allocates the internal buffers. pub fn new() -> StreamingDecoder { StreamingDecoder { state: Some(State::Signature(0, [0; 7])), current_chunk: (Crc32::new(), 0, Vec::with_capacity(CHUNCK_BUFFER_SIZE)), inflater: zlib_stream(), info: None, current_seq_no: None, have_idat: false } } /// Resets the StreamingDecoder pub fn reset(&mut self) { self.state = Some(State::Signature(0, [0; 7])); self.current_chunk.0 = Crc32::new(); self.current_chunk.1 = 0; self.current_chunk.2.clear(); self.inflater = zlib_stream(); self.info = None; self.current_seq_no = None; self.have_idat = false; } /// Low level StreamingDecoder interface. /// /// Allows to stream partial data to the encoder. Returns a tuple containing the bytes that have /// been consumed from the input buffer and the current decoding result. If the decoded chunk /// was an image data chunk, it also appends the read data to `image_data`. pub fn update(&mut self, mut buf: &[u8], image_data: &mut Vec) -> Result<(usize, Decoded), DecodingError> { let len = buf.len(); while buf.len() > 0 && self.state.is_some() { match self.next_state(buf, image_data) { Ok((bytes, Decoded::Nothing)) => { buf = &buf[bytes..] } Ok((bytes, result)) => { buf = &buf[bytes..]; return Ok((len-buf.len(), result)); } Err(err) => return Err(err) } } Ok((len-buf.len(), Decoded::Nothing)) } fn next_state<'a>(&'a mut self, buf: &[u8], image_data: &mut Vec) -> Result<(usize, Decoded), DecodingError> { use self::State::*; macro_rules! 
goto ( ($n:expr, $state:expr) => ({ self.state = Some($state); Ok(($n, Decoded::Nothing)) }); ($state:expr) => ({ self.state = Some($state); Ok((1, Decoded::Nothing)) }); ($n:expr, $state:expr, emit $res:expr) => ({ self.state = Some($state); Ok(($n, $res)) }); ($state:expr, emit $res:expr) => ({ self.state = Some($state); Ok((1, $res)) }) ); let current_byte = buf[0]; // Driver should ensure that state is never None let state = self.state.take().unwrap(); //println!("state: {:?}", state); match state { Signature(i, mut signature) => if i < 7 { signature[i as usize] = current_byte; goto!(Signature(i+1, signature)) } else { if signature == [137, 80, 78, 71, 13, 10, 26] && current_byte == 10 { goto!(U32(U32Value::Length)) } else { Err(DecodingError::InvalidSignature) } }, U32Byte3(type_, mut val) => { use self::U32Value::*; val |= current_byte as u32; match type_ { Length => goto!(U32(Type(val))), Type(length) => { let type_str = [ (val >> 24) as u8, (val >> 16) as u8, (val >> 8) as u8, val as u8 ]; self.current_chunk.0.reset(); self.current_chunk.0.update(&type_str); self.current_chunk.1 = length; goto!( ReadChunk(type_str, true), emit Decoded::ChunkBegin(length, type_str) ) }, Crc(type_str) => { let sum = self.current_chunk.0.clone().finalize(); if CHECKSUM_DISABLED || val == sum { goto!( State::U32(U32Value::Length), emit if type_str == IEND { Decoded::ImageEnd } else { Decoded::ChunkComplete(val, type_str) } ) } else { Err(DecodingError::CrcMismatch { recover: 1, crc_val: val, crc_sum: sum, chunk: type_str }) } }, } }, U32Byte2(type_, val) => { goto!(U32Byte3(type_, val | (current_byte as u32) << 8)) }, U32Byte1(type_, val) => { goto!(U32Byte2(type_, val | (current_byte as u32) << 16)) }, U32(type_) => { goto!(U32Byte1(type_, (current_byte as u32) << 24)) }, PartialChunk(type_str) => { match type_str { IDAT => { self.have_idat = true; goto!( 0, DecodeData(type_str, 0), emit Decoded::PartialChunk(type_str) ) }, chunk::fdAT => { if let Some(seq_no) = self.current_seq_no { let mut buf = &self.current_chunk.2[..]; let next_seq_no = buf.read_be()?; if next_seq_no != seq_no + 1 { return Err(DecodingError::Format(format!( "Sequence is not in order, expected #{} got #{}.", seq_no + 1, next_seq_no ).into())) } self.current_seq_no = Some(next_seq_no); } else { return Err(DecodingError::Format("fcTL chunk missing before fdAT chunk.".into())) } goto!( 0, DecodeData(type_str, 4), emit Decoded::PartialChunk(type_str) ) }, // Handle other chunks _ => { if self.current_chunk.1 == 0 { // complete chunk Ok((0, self.parse_chunk(type_str)?)) } else { goto!( 0, ReadChunk(type_str, true), emit Decoded::PartialChunk(type_str) ) } } } }, ReadChunk(type_str, clear) => { if clear { self.current_chunk.2.clear(); } if self.current_chunk.1 > 0 { let (ref mut crc, ref mut remaining, ref mut c_buf) = self.current_chunk; let buf_avail = c_buf.capacity() - c_buf.len(); let bytes_avail = min(buf.len(), buf_avail); let n = min(*remaining, bytes_avail as u32); if buf_avail == 0 { goto!(0, PartialChunk(type_str)) } else { let buf = &buf[..n as usize]; crc.update(buf); c_buf.extend(buf.iter().map(|&v| v)); *remaining -= n; if *remaining == 0 { goto!(n as usize, PartialChunk(type_str )) } else { goto!(n as usize, ReadChunk(type_str, false)) } } } else { goto!(0, U32(U32Value::Crc(type_str))) } } DecodeData(type_str, mut n) => { let chunk_len = self.current_chunk.2.len(); let (c, data) = self.inflater.update(&self.current_chunk.2[n..])?; image_data.extend_from_slice(data); n += c; if n == chunk_len && data.len() == 0 && c == 0 
{ goto!( 0, ReadChunk(type_str, true), emit Decoded::ImageData ) } else { goto!( 0, DecodeData(type_str, n), emit Decoded::ImageData ) } } } } fn parse_chunk(&mut self, type_str: [u8; 4]) -> Result { self.state = Some(State::U32(U32Value::Crc(type_str))); if self.info.is_none() && type_str != IHDR { return Err(DecodingError::Format(format!( "{} chunk appeared before IHDR chunk", String::from_utf8_lossy(&type_str) ).into())) } match match type_str { IHDR => { self.parse_ihdr() } chunk::PLTE => { self.parse_plte() } chunk::tRNS => { self.parse_trns() } chunk::pHYs => { self.parse_phys() } chunk::acTL => { self.parse_actl() } chunk::fcTL => { self.parse_fctl() } _ => Ok(Decoded::PartialChunk(type_str)) } { Err(err) =>{ // Borrow of self ends here, because Decoding error does not borrow self. self.state = None; Err(err) }, ok => ok } } fn get_info_or_err(&self) -> Result<&Info, DecodingError> { self.info.as_ref().ok_or(DecodingError::Format( "IHDR chunk missing".into() )) } fn parse_fctl(&mut self) -> Result { let mut buf = &self.current_chunk.2[..]; let next_seq_no = buf.read_be()?; // Asuming that fcTL is required before *every* fdAT-sequence self.current_seq_no = Some(if let Some(seq_no) = self.current_seq_no { if next_seq_no != seq_no + 1 { return Err(DecodingError::Format(format!( "Sequence is not in order, expected #{} got #{}.", seq_no + 1, next_seq_no ).into())) } next_seq_no } else { if next_seq_no != 0 { return Err(DecodingError::Format(format!( "Sequence is not in order, expected #{} got #{}.", 0, next_seq_no ).into())) } 0 }); self.inflater = zlib_stream(); let fc = FrameControl { sequence_number: next_seq_no, width: buf.read_be()?, height: buf.read_be()?, x_offset: buf.read_be()?, y_offset: buf.read_be()?, delay_num: buf.read_be()?, delay_den: buf.read_be()?, dispose_op: match DisposeOp::from_u8(buf.read_be()?) { Some(dispose_op) => dispose_op, None => return Err(DecodingError::Format("invalid dispose operation".into())) }, blend_op : match BlendOp::from_u8(buf.read_be()?) { Some(blend_op) => blend_op, None => return Err(DecodingError::Format("invalid blend operation".into())) }, }; self.info.as_mut().unwrap().frame_control = Some(fc.clone()); Ok(Decoded::FrameControl(fc)) } fn parse_actl(&mut self) -> Result { if self.have_idat { Err(DecodingError::Format( "acTL chunk appeared after first IDAT chunk".into() )) } else { let mut buf = &self.current_chunk.2[..]; let actl = AnimationControl { num_frames: buf.read_be()?, num_plays: buf.read_be()? 
}; self.info.as_mut().unwrap().animation_control = Some(actl); Ok(Decoded::AnimationControl(actl)) } } fn parse_plte(&mut self) -> Result { let mut vec = Vec::new(); vec.extend(self.current_chunk.2.iter().map(|&v| v)); self.info.as_mut().map( |info| info.palette = Some(vec) ); Ok(Decoded::Nothing) } fn parse_trns(&mut self) -> Result { use crate::common::ColorType::*; let (color_type, bit_depth) = { let info = self.get_info_or_err()?; (info.color_type, info.bit_depth as u8) }; let mut vec = Vec::new(); vec.extend(self.current_chunk.2.iter().map(|&v| v)); let len = vec.len(); let info = match self.info { Some(ref mut info) => info, None => return Err(DecodingError::Format( "tRNS chunk occured before IHDR chunk".into() )) }; info.trns = Some(vec); let vec = info.trns.as_mut().unwrap(); match color_type { Grayscale => { if len < 2 { return Err(DecodingError::Format( "not enough palette entries".into() )) } if bit_depth < 16 { vec[0] = vec[1]; vec.truncate(1); } Ok(Decoded::Nothing) }, RGB => { if len < 6 { return Err(DecodingError::Format( "not enough palette entries".into() )) } if bit_depth < 16 { vec[0] = vec[1]; vec[1] = vec[3]; vec[2] = vec[5]; vec.truncate(3); } Ok(Decoded::Nothing) }, Indexed => { let _ = info.palette.as_ref().ok_or(DecodingError::Format( "tRNS chunk occured before PLTE chunk".into() )); Ok(Decoded::Nothing) }, c => Err(DecodingError::Format( format!("tRNS chunk found for color type ({})", c as u8).into() )) } } fn parse_phys(&mut self) -> Result { if self.have_idat { Err(DecodingError::Format( "pHYs chunk appeared after first IDAT chunk".into() )) } else { let mut buf = &self.current_chunk.2[..]; let xppu = buf.read_be()?; let yppu = buf.read_be()?; let unit = buf.read_be()?; let unit = match Unit::from_u8(unit) { Some(unit) => unit, None => return Err(DecodingError::Format( format!("invalid unit ({})", unit).into() )) }; let pixel_dims = PixelDimensions { xppu: xppu, yppu: yppu, unit: unit, }; self.info.as_mut().unwrap().pixel_dims = Some(pixel_dims); Ok(Decoded::PixelDimensions(pixel_dims)) } } fn parse_ihdr(&mut self) -> Result { // TODO: check if color/bit depths combination is valid let mut buf = &self.current_chunk.2[..]; let width = buf.read_be()?; let height = buf.read_be()?; let bit_depth = buf.read_be()?; let bit_depth = match BitDepth::from_u8(bit_depth) { Some(bits) => bits, None => return Err(DecodingError::Format( format!("invalid bit depth ({})", bit_depth).into() )) }; let color_type = buf.read_be()?; let color_type = match ColorType::from_u8(color_type) { Some(color_type) => color_type, None => return Err(DecodingError::Format( format!("invalid color type ({})", color_type).into() )) }; match buf.read_be()? { // compression method 0u8 => (), n => return Err(DecodingError::Format( format!("unknown compression method ({})", n).into() )) } match buf.read_be()? { // filter method 0u8 => (), n => return Err(DecodingError::Format( format!("unknown filter method ({})", n).into() )) } let interlaced = match buf.read_be()? 
{ 0u8 => false, 1 => { true }, n => return Err(DecodingError::Format( format!("unknown interlace method ({})", n).into() )) }; let mut info = Info::default(); info.width = width; info.height = height; info.bit_depth = bit_depth; info.color_type = color_type; info.interlaced = interlaced; self.info = Some(info); Ok(Decoded::Header( width, height, bit_depth, color_type, interlaced )) } } #[inline(always)] pub fn get_info(d: &StreamingDecoder) -> Option<&Info> { d.info.as_ref() } png-0.15.0/src/encoder.rs010064400017500001750000000337601351245535300133750ustar0000000000000000extern crate crc32fast; extern crate deflate; use std::borrow::Cow; use std::error; use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::result; use crc32fast::Hasher as Crc32; use crate::chunk; use crate::common::{Info, ColorType, BitDepth, Compression}; use crate::filter::{FilterType, filter}; use crate::traits::WriteBytesExt; pub type Result = result::Result; #[derive(Debug)] pub enum EncodingError { IoError(io::Error), Format(Cow<'static, str>), } impl error::Error for EncodingError { fn description(&self) -> &str { use self::EncodingError::*; match *self { IoError(ref err) => err.description(), Format(ref desc) => &desc, } } } impl fmt::Display for EncodingError { fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { write!(fmt, "{}", (self as &dyn error::Error).description()) } } impl From for EncodingError { fn from(err: io::Error) -> EncodingError { EncodingError::IoError(err) } } impl From for io::Error { fn from(err: EncodingError) -> io::Error { io::Error::new(io::ErrorKind::Other, (&err as &dyn error::Error).description()) } } /// PNG Encoder pub struct Encoder { w: W, info: Info, } impl Encoder { pub fn new(w: W, width: u32, height: u32) -> Encoder { let mut info = Info::default(); info.width = width; info.height = height; Encoder { w: w, info: info } } pub fn write_header(self) -> Result> { Writer::new(self.w, self.info).init() } /// Set the color of the encoded image. /// /// These correspond to the color types in the png IHDR data that will be written. The length /// of the image data that is later supplied must match the color type, otherwise an error will /// be emitted. pub fn set_color(&mut self, color: ColorType) { self.info.color_type = color; } /// Set the indicated depth of the image data. pub fn set_depth(&mut self, depth: BitDepth) { self.info.bit_depth = depth; } /// Set compression parameters. /// /// Accepts a `Compression` or any type that can transform into a `Compression`. Notably `deflate::Compression` and /// `deflate::CompressionOptions` which "just work". pub fn set_compression>(&mut self, compression: C) { self.info.compression = compression.into(); } /// Set the used filter type. /// /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for /// sample values based on the previous. For a potentially better compression ratio, at the /// cost of more complex processing, try out [`FilterType::Paeth`]. 
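///
/// A minimal sketch of picking a different filter before the header is written
/// (`std::io::sink()` is only a stand-in for a real output stream):
///
/// ```ignore
/// let mut encoder = png::Encoder::new(std::io::sink(), 16, 16);
/// // Paeth trades extra per-byte prediction work for (often) better compression.
/// encoder.set_filter(png::FilterType::Paeth);
/// let writer = encoder.write_header().unwrap();
/// ```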
/// /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth pub fn set_filter(&mut self, filter: FilterType) { self.info.filter = filter; } } /// PNG writer pub struct Writer { w: W, info: Info, } impl Writer { fn new(w: W, info: Info) -> Writer { let w = Writer { w: w, info: info }; w } fn init(mut self) -> Result { self.w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10])?; let mut data = [0; 13]; (&mut data[..]).write_be(self.info.width)?; (&mut data[4..]).write_be(self.info.height)?; data[8] = self.info.bit_depth as u8; data[9] = self.info.color_type as u8; data[12] = if self.info.interlaced { 1 } else { 0 }; self.write_chunk(chunk::IHDR, &data)?; Ok(self) } pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> { self.w.write_be(data.len() as u32)?; self.w.write_all(&name)?; self.w.write_all(data)?; let mut crc = Crc32::new(); crc.update(&name); crc.update(data); self.w.write_be(crc.finalize())?; Ok(()) } /// Writes the image data. pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> { let bpp = self.info.bytes_per_pixel(); let in_len = self.info.raw_row_length() - 1; let mut prev = vec![0; in_len]; let mut current = vec![0; in_len]; let data_size = in_len * self.info.height as usize; if data_size != data.len() { let message = format!("wrong data size, expected {} got {}", data_size, data.len()); return Err(EncodingError::Format(message.into())); } let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), self.info.compression.clone()); let filter_method = self.info.filter; for line in data.chunks(in_len) { current.copy_from_slice(&line); zlib.write_all(&[filter_method as u8])?; filter(filter_method, bpp, &prev, &mut current); zlib.write_all(¤t)?; mem::swap(&mut prev, &mut current); } self.write_chunk(chunk::IDAT, &zlib.finish()?) } /// Create an stream writer. /// /// This allows you create images that do not fit /// in memory. The default chunk size is 4K, use /// `stream_writer_with_size` to set another chuck /// size. pub fn stream_writer(&mut self) -> StreamWriter { self.stream_writer_with_size(4 * 1024) } /// Create a stream writer with custom buffer size. /// /// See `stream_writer` pub fn stream_writer_with_size(&mut self, size: usize) -> StreamWriter { StreamWriter::new(self, size) } } impl Drop for Writer { fn drop(&mut self) { let _ = self.write_chunk(chunk::IEND, &[]); } } struct ChunkWriter<'a, W: Write> { writer: &'a mut Writer, buffer: Vec, index: usize, } impl<'a, W: Write> ChunkWriter<'a, W> { fn new(writer: &'a mut Writer, buf_len: usize) -> ChunkWriter<'a, W> { ChunkWriter { writer, buffer: vec![0; buf_len], index: 0, } } } impl<'a, W: Write> Write for ChunkWriter<'a, W> { fn write(&mut self, mut buf: &[u8]) -> io::Result { let written = buf.read(&mut self.buffer[self.index..])?; self.index += written; if self.index + 1 >= self.buffer.len() { self.writer.write_chunk(chunk::IDAT, &self.buffer)?; self.index = 0; } Ok(written) } fn flush(&mut self) -> io::Result<()> { if self.index > 0 { self.writer.write_chunk(chunk::IDAT, &self.buffer[..self.index+1])?; } self.index = 0; Ok(()) } } impl<'a, W: Write> Drop for ChunkWriter<'a, W> { fn drop(&mut self) { let _ = self.flush(); } } /// Streaming png writer /// /// This may may silently fail in the destructor so it is a good idea to call /// `finish` or `flush` before droping. 
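///
/// A rough usage sketch, assuming a 2×1 RGB image; the file name is only an
/// example:
///
/// ```ignore
/// use std::io::Write;
/// let file = std::fs::File::create("example.png").unwrap();
/// let mut encoder = png::Encoder::new(file, 2, 1);
/// encoder.set_color(png::ColorType::RGB);
/// encoder.set_depth(png::BitDepth::Eight);
/// let mut writer = encoder.write_header().unwrap();
/// let mut stream = writer.stream_writer();
/// // One row of raw scanline bytes: a red pixel followed by a black pixel.
/// stream.write_all(&[255, 0, 0, 0, 0, 0]).unwrap();
/// // `finish` flushes the remaining data; relying on `drop` can hide errors.
/// stream.finish().unwrap();
/// ```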
pub struct StreamWriter<'a, W: Write> { writer: deflate::write::ZlibEncoder>, prev_buf: Vec, curr_buf: Vec, index: usize, bpp: usize, filter: FilterType, } impl<'a, W: Write> StreamWriter<'a, W> { fn new(writer: &'a mut Writer, buf_len: usize) -> StreamWriter<'a, W> { let bpp = writer.info.bytes_per_pixel(); let in_len = writer.info.raw_row_length() - 1; let filter = writer.info.filter; let prev_buf = vec![0; in_len]; let curr_buf = vec![0; in_len]; let compression = writer.info.compression.clone(); let chunk_writer = ChunkWriter::new(writer, buf_len); let zlib = deflate::write::ZlibEncoder::new(chunk_writer, compression); StreamWriter { writer: zlib, index: 0, prev_buf, curr_buf, bpp, filter, } } pub fn finish(mut self) -> Result<()> { // TODO: call `writer.finish` somehow? self.flush()?; Ok(()) } } impl<'a, W: Write> Write for StreamWriter<'a, W> { fn write(&mut self, mut buf: &[u8]) -> io::Result { let written = buf.read(&mut self.curr_buf[self.index..])?; self.index += written; if self.index >= self.curr_buf.len() { self.writer.write_all(&[self.filter as u8])?; filter(self.filter, self.bpp, &self.prev_buf, &mut self.curr_buf); self.writer.write_all(&self.curr_buf)?; mem::swap(&mut self.prev_buf, &mut self.curr_buf); self.index = 0; } Ok(written) } fn flush(&mut self) -> io::Result<()> { self.writer.flush()?; if self.index > 0 { let message = format!("wrong data size, got {} bytes too many", self.index); return Err(EncodingError::Format(message.into()).into()); } Ok(()) } } impl<'a, W: Write> Drop for StreamWriter<'a, W> { fn drop(&mut self) { let _ = self.flush(); } } #[cfg(test)] mod tests { use super::*; extern crate rand; extern crate glob; use self::rand::Rng; use std::{io, cmp}; use std::io::Write; use std::fs::File; #[test] fn roundtrip() { // More loops = more random testing, but also more test wait time for _ in 0..10 { for path in glob::glob("tests/pngsuite/*.png").unwrap().map(|r| r.unwrap()) { if path.file_name().unwrap().to_str().unwrap().starts_with("x") { // x* files are expected to fail to decode continue; } // Decode image let decoder = crate::Decoder::new(File::open(path).unwrap()); let (info, mut reader) = decoder.read_info().unwrap(); if info.line_size != 32 { // TODO encoding only works with line size 32? continue; } let mut buf = vec![0; info.buffer_size()]; reader.next_frame(&mut buf).unwrap(); // Encode decoded image let mut out = Vec::new(); { let mut wrapper = RandomChunkWriter { rng: self::rand::thread_rng(), w: &mut out }; let mut encoder = Encoder::new(&mut wrapper, info.width, info.height).write_header().unwrap(); encoder.write_image_data(&buf).unwrap(); } // Decode encoded decoded image let decoder = crate::Decoder::new(&*out); let (info, mut reader) = decoder.read_info().unwrap(); let mut buf2 = vec![0; info.buffer_size()]; reader.next_frame(&mut buf2).unwrap(); // check if the encoded image is ok: assert_eq!(buf, buf2); } } } #[test] fn roundtrip_stream() { // More loops = more random testing, but also more test wait time for _ in 0..10 { for path in glob::glob("tests/pngsuite/*.png").unwrap().map(|r| r.unwrap()) { if path.file_name().unwrap().to_str().unwrap().starts_with("x") { // x* files are expected to fail to decode continue; } // Decode image let decoder = crate::Decoder::new(File::open(path).unwrap()); let (info, mut reader) = decoder.read_info().unwrap(); if info.line_size != 32 { // TODO encoding only works with line size 32? 
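// For now the stream roundtrip only covers images whose `line_size` is exactly
// 32; everything else is skipped below (see the TODO above).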
continue; } let mut buf = vec![0; info.buffer_size()]; reader.next_frame(&mut buf).unwrap(); // Encode decoded image let mut out = Vec::new(); { let mut wrapper = RandomChunkWriter { rng: self::rand::thread_rng(), w: &mut out }; let mut encoder = Encoder::new(&mut wrapper, info.width, info.height).write_header().unwrap(); let mut stream_writer = encoder.stream_writer(); let mut outer_wrapper = RandomChunkWriter { rng: self::rand::thread_rng(), w: &mut stream_writer }; outer_wrapper.write_all(&buf).unwrap(); } // Decode encoded decoded image let decoder = crate::Decoder::new(&*out); let (info, mut reader) = decoder.read_info().unwrap(); let mut buf2 = vec![0; info.buffer_size()]; reader.next_frame(&mut buf2).unwrap(); // check if the encoded image is ok: assert_eq!(buf, buf2); } } } #[test] fn expect_error_on_wrong_image_len() -> Result<()> { use std::io::Cursor; let width = 10; let height = 10; let output = vec![0u8; 1024]; let writer = Cursor::new(output); let mut encoder = Encoder::new(writer, width as u32, height as u32); encoder.set_depth(BitDepth::Eight); encoder.set_color(ColorType::RGB); let mut png_writer = encoder.write_header()?; let correct_image_size = width * height * 3; let image = vec![0u8; correct_image_size + 1]; let result = png_writer.write_image_data(image.as_ref()); assert!(result.is_err()); Ok(()) } /// A Writer that only writes a few bytes at a time struct RandomChunkWriter<'a, R: Rng, W: Write + 'a> { rng: R, w: &'a mut W } impl<'a, R: Rng, W: Write + 'a> Write for RandomChunkWriter<'a, R, W> { fn write(&mut self, buf: &[u8]) -> io::Result { // choose a random length to write let len = cmp::min(self.rng.gen_range(1, 50), buf.len()); self.w.write(&buf[0..len]) } fn flush(&mut self) -> io::Result<()> { self.w.flush() } } } png-0.15.0/src/filter.rs010064400017500001750000000111141351414516500132270ustar0000000000000000use std; /// The byte level filter applied to scanlines to prepare them for compression. /// /// Compression in general benefits from repetitive data. The filter is a content-aware method of /// compressing the range of occurring byte values to help the compression algorithm. Note that /// this does not operate on pixels but on raw bytes of a scanline. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] pub enum FilterType { NoFilter = 0, Sub = 1, Up = 2, Avg = 3, Paeth = 4 } impl FilterType { /// u8 -> Self. Temporary solution until Rust provides a canonical one. 
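///
/// A small sketch of the mapping (anything above 4 is rejected):
///
/// ```ignore
/// assert_eq!(png::FilterType::from_u8(2), Some(png::FilterType::Up));
/// assert_eq!(png::FilterType::from_u8(9), None);
/// ```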
pub fn from_u8(n: u8) -> Option { match n { 0 => Some(FilterType::NoFilter), 1 => Some(FilterType::Sub), 2 => Some(FilterType::Up), 3 => Some(FilterType::Avg), 4 => Some(FilterType::Paeth), _ => None } } } fn filter_paeth(a: u8, b: u8, c: u8) -> u8 { let ia = a as i16; let ib = b as i16; let ic = c as i16; let p = ia + ib - ic; let pa = (p - ia).abs(); let pb = (p - ib).abs(); let pc = (p - ic).abs(); if pa <= pb && pa <= pc { a } else if pb <= pc { b } else { c } } pub fn unfilter(filter: FilterType, bpp: usize, previous: &[u8], current: &mut [u8]) -> std::result::Result<(), &'static str> { use self::FilterType::*; assert!(bpp > 0); let len = current.len(); match filter { NoFilter => Ok(()), Sub => { for i in bpp..len { current[i] = current[i].wrapping_add( current[i - bpp] ); } Ok(()) } Up => { if previous.len() < len { Err("Filtering failed: not enough data in previous row") } else { for i in 0..len { current[i] = current[i].wrapping_add( previous[i] ); } Ok(()) } } Avg => { if previous.len() < len { Err("Filtering failed: not enough data in previous row") } else if bpp > len { Err("Filtering failed: bytes per pixel is greater than length of row") } else { for i in 0..bpp { current[i] = current[i].wrapping_add( previous[i] / 2 ); } for i in bpp..len { current[i] = current[i].wrapping_add( ((current[i - bpp] as i16 + previous[i] as i16) / 2) as u8 ); } Ok(()) } } Paeth => { if previous.len() < len { Err("Filtering failed: not enough data in previous row") } else if bpp > len { Err("Filtering failed: bytes per pixel is greater than length of row") } else { for i in 0..bpp { current[i] = current[i].wrapping_add( filter_paeth(0, previous[i], 0) ); } for i in bpp..len { current[i] = current[i].wrapping_add( filter_paeth(current[i - bpp], previous[i], previous[i - bpp]) ); } Ok(()) } } } } pub fn filter(method: FilterType, bpp: usize, previous: &[u8], current: &mut [u8]) { use self::FilterType::*; assert!(bpp > 0); let len = current.len(); match method { NoFilter => (), Sub => { for i in (bpp..len).rev() { current[i] = current[i].wrapping_sub(current[i - bpp]); } } Up => { for i in 0..len { current[i] = current[i].wrapping_sub(previous[i]); } } Avg => { for i in (bpp..len).rev() { current[i] = current[i].wrapping_sub(current[i - bpp].wrapping_add(previous[i]) / 2); } for i in 0..bpp { current[i] = current[i].wrapping_sub(previous[i] / 2); } } Paeth => { for i in (bpp..len).rev() { current[i] = current[i].wrapping_sub(filter_paeth(current[i - bpp], previous[i], previous[i - bpp])); } for i in 0..bpp { current[i] = current[i].wrapping_sub(filter_paeth(0, previous[i], 0)); } } } } png-0.15.0/src/lib.rs010064400017500001750000000045241351245535300125200ustar0000000000000000//! # PNG encoder and decoder //! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames. //! ## The decoder //! The most important types for decoding purposes are [`Decoder`](struct.Decoder.html) and //! [`Reader`](struct.Reader.html). They both wrap a `std::io::Read`. //! `Decoder` serves as a builder for `Reader`. Calling `Decoder::read_info` reads from the `Read` until the //! image data is reached. //! ### Using the decoder //! use std::fs::File; //! //! // The decoder is a build for reader and can be used to set various decoding options //! // via `Transformations`. The default output transformation is `Transformations::EXPAND //! // | Transformations::STRIP_ALPHA`. //! let decoder = png::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap()); //! 
let (info, mut reader) = decoder.read_info().unwrap(); //! // Allocate the output buffer. //! let mut buf = vec![0; info.buffer_size()]; //! // Read the next frame. Currently this function should only called once. //! // The default options //! reader.next_frame(&mut buf).unwrap(); //! ## Encoder //! ### Using the encoder //! ```ignore //! // For reading and opening files //! use std::path::Path; //! use std::fs::File; //! use std::io::BufWriter; //! // To use encoder.set() //! use png::HasParameters; //! //! let path = Path::new(r"/path/to/image.png"); //! let file = File::create(path).unwrap(); //! let ref mut w = BufWriter::new(file); //! //! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1. //! encoder.set(png::ColorType::RGBA).set(png::BitDepth::Eight); //! let mut writer = encoder.write_header().unwrap(); //! //! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black. //! writer.write_image_data(&data).unwrap(); // Save //! ``` //! //#![cfg_attr(test, feature(test))] #[macro_use] extern crate bitflags; pub mod chunk; mod decoder; #[cfg(feature = "png-encoding")] mod encoder; mod filter; mod traits; mod common; mod utils; pub use crate::common::*; pub use crate::decoder::{Decoder, Reader, OutputInfo, StreamingDecoder, Decoded, DecodingError, Limits}; #[cfg(feature = "png-encoding")] pub use crate::encoder::{Encoder, Writer, StreamWriter, EncodingError}; pub use crate::filter::FilterType; png-0.15.0/src/traits.rs010064400017500001750000000037201351112656600132550ustar0000000000000000use std::io; // Will be replaced by stdlib solution fn read_all(this: &mut R, buf: &mut [u8]) -> io::Result<()> { let mut total = 0; while total < buf.len() { match this.read(&mut buf[total..]) { Ok(0) => return Err(io::Error::new(io::ErrorKind::Other, "failed to read the whole buffer")), Ok(n) => total += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => return Err(e), } } Ok(()) } /// Read extension to read big endian data pub trait ReadBytesExt: io::Read { /// Read `T` from a bytes stream. Most significant byte first. fn read_be(&mut self) -> io::Result; } /// Write extension to write big endian data pub trait WriteBytesExt: io::Write { /// Writes `T` to a bytes stream. Most significant byte first. fn write_be(&mut self, _: T) -> io::Result<()>; } impl ReadBytesExt for W { #[inline] fn read_be(&mut self) -> io::Result { let mut byte = [0]; read_all(self, &mut byte)?; Ok(byte[0]) } } impl ReadBytesExt for W { #[inline] fn read_be(&mut self) -> io::Result { let mut bytes = [0, 0]; read_all(self, &mut bytes)?; Ok((bytes[0] as u16) << 8 | bytes[1] as u16) } } impl ReadBytesExt for W { #[inline] fn read_be(&mut self) -> io::Result { let mut bytes = [0, 0, 0, 0]; read_all(self, &mut bytes)?; Ok( (bytes[0] as u32) << 24 | (bytes[1] as u32) << 16 | (bytes[2] as u32) << 8 | bytes[3] as u32 ) } } impl WriteBytesExt for W { #[inline] fn write_be(&mut self, n: u32) -> io::Result<()> { self.write_all(&[ (n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8 ]) } } png-0.15.0/src/utils.rs010064400017500001750000000256151351414516500131150ustar0000000000000000//! 
Utility functions use std::iter::{repeat, StepBy}; use std::ops::Range; #[inline(always)] pub fn unpack_bits(buf: &mut [u8], channels: usize, bit_depth: u8, func: F) where F: Fn(u8, &mut[u8]) { let bits = buf.len()/channels*bit_depth as usize; let extra_bits = bits % 8; let entries = bits / 8 + match extra_bits { 0 => 0, _ => 1 }; let skip = match extra_bits { 0 => 0, n => (8-n) / bit_depth as usize }; let mask = ((1u16 << bit_depth) - 1) as u8; let i = (0..entries) .rev() // reverse iterator .flat_map(|idx| // this has to be reversed too (0..8).step_by(bit_depth.into()) .zip(repeat(idx)) ) .skip(skip); let j = (0..=buf.len() - channels).rev().step_by(channels); for ((shift, i), j) in i.zip(j) { let pixel = (buf[i] & (mask << shift)) >> shift; func(pixel, &mut buf[j as usize..(j + channels) as usize]) } } pub fn expand_trns_line(buf: &mut[u8], trns: &[u8], channels: usize) { let i = (0..=buf.len() / (channels+1) * channels - channels).rev().step_by(channels); let j = (0..=buf.len() - (channels+1)).rev().step_by(channels+1); for (i, j) in i.zip(j) { let i_pixel = i as usize; let j_chunk = j as usize; if &buf[i_pixel..i_pixel+channels] == trns { buf[j_chunk+channels] = 0 } else { buf[j_chunk+channels] = 0xFF } for k in (0..channels).rev() { buf[j_chunk+k] = buf[i_pixel+k]; } } } pub fn expand_trns_line16(buf: &mut[u8], trns: &[u8], channels: usize) { let c2 = 2 * channels; let i = (0..=buf.len() / (c2+2) * c2 - c2).rev().step_by(c2); let j = (0..=buf.len() - (c2+2)).rev().step_by(c2+2); for (i, j) in i.zip(j) { let i_pixel = i as usize; let j_chunk = j as usize; if &buf[i_pixel..i_pixel+c2] == trns { buf[j_chunk+c2] = 0; buf[j_chunk+c2 + 1] = 0 } else { buf[j_chunk+c2] = 0xFF; buf[j_chunk+c2 + 1] = 0xFF } for k in (0..c2).rev() { buf[j_chunk+k] = buf[i_pixel+k]; } } } /// This iterator iterates over the different passes of an image Adam7 encoded /// PNG image /// The pattern is: /// 16462646 /// 77777777 /// 56565656 /// 77777777 /// 36463646 /// 77777777 /// 56565656 /// 77777777 /// #[derive(Clone)] pub struct Adam7Iterator { line: u32, lines: u32, line_width: u32, current_pass: u8, width: u32, height: u32, } impl Adam7Iterator { pub fn new(width: u32, height: u32) -> Adam7Iterator { let mut this = Adam7Iterator { line: 0, lines: 0, line_width: 0, current_pass: 1, width: width, height: height }; this.init_pass(); this } /// Calculates the bounds of the current pass fn init_pass(&mut self) { let w = self.width as f64; let h = self.height as f64; let (line_width, lines) = match self.current_pass { 1 => (w/8.0, h/8.0), 2 => ((w-4.0)/8.0, h/8.0), 3 => (w/4.0, (h-4.0)/8.0), 4 => ((w-2.0)/4.0, h/4.0), 5 => (w/2.0, (h-2.0)/4.0), 6 => ((w-1.0)/2.0, h/2.0), 7 => (w, (h-1.0)/2.0), _ => unreachable!() }; self.line_width = line_width.ceil() as u32; self.lines = lines.ceil() as u32; self.line = 0; } /// The current pass#. 
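///
/// The pass number is also the first element of every `(pass, line, width)`
/// item the iterator yields; for a hypothetical 4×4 image the sequence is:
///
/// ```ignore
/// let passes: Vec<_> = Adam7Iterator::new(4, 4).collect();
/// assert_eq!(&*passes,
///            &[(1, 0, 1), (4, 0, 1), (5, 0, 2), (6, 0, 2), (6, 1, 2), (7, 0, 4), (7, 1, 4)]);
/// ```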
pub fn current_pass(&self) -> u8 { self.current_pass } } /// Iterates over the (passes, lines, widths) impl Iterator for Adam7Iterator { type Item = (u8, u32, u32); fn next(&mut self) -> Option<(u8, u32, u32)> { if self.line < self.lines && self.line_width > 0 { let this_line = self.line; self.line += 1; Some((self.current_pass, this_line, self.line_width)) } else if self.current_pass < 7 { self.current_pass += 1; self.init_pass(); self.next() } else { None } } } fn subbyte_pixels<'a>(scanline: &'a [u8], bits_pp: usize) -> impl Iterator + 'a { (0..scanline.len() * 8).step_by(bits_pp).map(move |bit_idx| { let byte_idx = bit_idx / 8; // sub-byte samples start in the high-order bits let rem = 8 - bit_idx % 8 - bits_pp; match bits_pp { // evenly divides bytes 1 => (scanline[byte_idx] >> rem) & 1, 2 => (scanline[byte_idx] >> rem) & 3, 4 => (scanline[byte_idx] >> rem) & 15, _ => unreachable!(), } }) } /// Given pass, image width, and line number, produce an iterator of bit positions of pixels to copy /// from the input scanline to the image buffer. fn expand_adam7_bits(pass: u8, width: usize, line_no: usize, bits_pp: usize) -> StepBy> { let (line_mul, line_off, samp_mul, samp_off) = match pass { 1 => (8, 0, 8, 0), 2 => (8, 0, 8, 4), 3 => (8, 4, 4, 0), 4 => (4, 0, 4, 2), 5 => (4, 2, 2, 0), 6 => (2, 0, 2, 1), 7 => (2, 1, 1, 0), _ => panic!("Adam7 pass out of range: {}", pass) }; // the equivalent line number in progressive scan let prog_line = line_mul * line_no + line_off; // line width is rounded up to the next byte let line_width = width * bits_pp + 7 & !7; let line_start = prog_line * line_width; let start = line_start + (samp_off * bits_pp); let stop = line_start + (width * bits_pp); (start .. stop).step_by(bits_pp * samp_mul) } /// Expands an Adam 7 pass pub fn expand_pass( img: &mut [u8], width: u32, scanline: &[u8], pass: u8, line_no: u32, bits_pp: u8) { let width = width as usize; let line_no = line_no as usize; let bits_pp = bits_pp as usize; // pass is out of range but don't blow up if pass == 0 || pass > 7 { return; } let bit_indices = expand_adam7_bits(pass, width, line_no, bits_pp); if bits_pp < 8 { for (pos, px) in bit_indices.zip(subbyte_pixels(scanline, bits_pp)) { let rem = 8 - pos % 8 - bits_pp; img[pos / 8] |= px << rem as u8; } } else { let bytes_pp = bits_pp / 8; for (bitpos, px) in bit_indices.zip(scanline.chunks(bytes_pp)) { for (offset, val) in px.iter().enumerate() { img[bitpos / 8 + offset] = *val; } } } } #[test] fn test_adam7() { /* 1646 7777 5656 7777 */ let it = Adam7Iterator::new(4, 4); let passes: Vec<_> = it.collect(); assert_eq!(&*passes, &[(1, 0, 1), (4, 0, 1), (5, 0, 2), (6, 0, 2), (6, 1, 2), (7, 0, 4), (7, 1, 4)]); } #[test] fn test_subbyte_pixels() { let scanline = &[0b10101010, 0b10101010]; let pixels = subbyte_pixels(scanline, 1).collect::>(); assert_eq!(pixels.len(), 16); assert_eq!(pixels, [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]); } #[test] fn test_expand_adam7_bits() { let width = 32; let bits_pp = 1; let expected = |offset: usize, step: usize, count: usize| (0 .. 
count).map(move |i| step * i + offset).collect::>(); for line_no in 0..8 { let start = 8 * line_no * width; assert_eq!( expand_adam7_bits(1, width, line_no, bits_pp).collect::>(), expected(start, 8, 4) ); let start = start + 4; assert_eq!( expand_adam7_bits(2, width, line_no, bits_pp).collect::>(), expected(start, 8, 4) ); let start = (8 * line_no + 4) as usize * width as usize; assert_eq!( expand_adam7_bits(3, width, line_no, bits_pp).collect::>(), expected(start, 4, 8) ); } for line_no in 0 .. 16 { let start = 4 * line_no * width + 2; assert_eq!( expand_adam7_bits(4, width, line_no, bits_pp).collect::>(), expected(start, 4, 8) ); let start = (4 * line_no + 2) * width; assert_eq!( expand_adam7_bits(5, width, line_no, bits_pp).collect::>(), expected(start, 2, 16) ) } for line_no in 0 .. 32 { let start = 2 * line_no * width + 1; assert_eq!( expand_adam7_bits(6, width, line_no, bits_pp).collect::>(), expected(start, 2, 16), "line_no: {}", line_no ); let start = (2 * line_no + 1) * width; assert_eq!( expand_adam7_bits(7, width, line_no, bits_pp).collect::>(), expected(start, 1, 32) ); } } #[test] fn test_expand_pass_subbyte() { let mut img = [0u8; 8]; let width = 8; let bits_pp = 1; expand_pass(&mut img, width, &[0b10000000], 1, 0, bits_pp); assert_eq!(img, [0b10000000u8, 0, 0, 0, 0, 0, 0, 0]); expand_pass(&mut img, width, &[0b10000000], 2, 0, bits_pp); assert_eq!(img, [0b10001000u8, 0, 0, 0, 0, 0, 0, 0]); expand_pass(&mut img, width, &[0b11000000], 3, 0, bits_pp); assert_eq!(img, [0b10001000u8, 0, 0, 0, 0b10001000, 0, 0, 0]); expand_pass(&mut img, width, &[0b11000000], 4, 0, bits_pp); assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10001000, 0, 0, 0]); expand_pass(&mut img, width, &[0b11000000], 4, 1, bits_pp); assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10101010, 0, 0, 0]); expand_pass(&mut img, width, &[0b11110000], 5, 0, bits_pp); assert_eq!(img, [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0, 0]); expand_pass(&mut img, width, &[0b11110000], 5, 1, bits_pp); assert_eq!(img, [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]); expand_pass(&mut img, width, &[0b11110000], 6, 0, bits_pp); assert_eq!(img, [0b11111111u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]); expand_pass(&mut img, width, &[0b11110000], 6, 1, bits_pp); assert_eq!(img, [0b11111111u8, 0, 0b11111111, 0, 0b10101010, 0, 0b10101010, 0]); expand_pass(&mut img, width, &[0b11110000], 6, 2, bits_pp); assert_eq!(img, [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b10101010, 0]); expand_pass(&mut img, width, &[0b11110000], 6, 3, bits_pp); assert_eq!([0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0], img); expand_pass(&mut img, width, &[0b11111111], 7, 0, bits_pp); assert_eq!([0b11111111u8, 0b11111111, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0], img); expand_pass(&mut img, width, &[0b11111111], 7, 1, bits_pp); assert_eq!([0b11111111u8, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0, 0b11111111, 0], img); expand_pass(&mut img, width, &[0b11111111], 7, 2, bits_pp); assert_eq!([0b11111111u8, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0], img); expand_pass(&mut img, width, &[0b11111111], 7, 3, bits_pp); assert_eq!([0b11111111u8, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111], img); } png-0.15.0/.cargo_vcs_info.json0000644000000001120000000000000117340ustar00{ "git": { "sha1": "1dc53c0f6a541ed66e28a594b953f994b63f3e41" } }