image-webp-0.2.0/.cargo_vcs_info.json

{ "git": { "sha1": "5d8654dd160549777729a3888cb28172d450e078" }, "path_in_vcs": "" }

image-webp-0.2.0/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
rust-version = "1.67.1"
name = "image-webp"
version = "0.2.0"
build = false
include = [
    "/src",
    "LICENSE-APACHE",
    "LICENSE-MIT",
    "README.md",
]
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "WebP encoding and decoding in pure Rust"
homepage = "https://github.com/image-rs/image-webp"
readme = "README.md"
categories = [
    "multimedia::images",
    "multimedia::encoding",
    "encoding",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/image-rs/image-webp"

[lib]
name = "image_webp"
path = "src/lib.rs"

[dependencies.byteorder-lite]
version = "0.1.0"

[dependencies.quick-error]
version = "2.0.1"

[dev-dependencies.paste]
version = "1.0.14"

[dev-dependencies.png]
version = "0.17.12"

[dev-dependencies.rand]
version = "0.8.5"

[dev-dependencies.webp]
version = "0.3.0"

[features]
_benchmarks = []

image-webp-0.2.0/Cargo.toml.orig

[package]
name = "image-webp"
version = "0.2.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.67.1"
description = "WebP encoding and decoding in pure Rust"
homepage = "https://github.com/image-rs/image-webp"
repository = "https://github.com/image-rs/image-webp"
categories = ["multimedia::images", "multimedia::encoding", "encoding"]
include = ["/src", "LICENSE-APACHE", "LICENSE-MIT", "README.md"]

[dependencies]
byteorder-lite = "0.1.0"
quick-error = "2.0.1"

[dev-dependencies]
paste = "1.0.14"
png = "0.17.12"
rand = "0.8.5"
webp = "0.3.0"

[features]
_benchmarks = []

image-webp-0.2.0/LICENSE-APACHE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and
configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS image-webp-0.2.0/LICENSE-MIT000064400000000000000000000020141046102023000132710ustar 00000000000000MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. image-webp-0.2.0/README.md000064400000000000000000000005701046102023000131210ustar 00000000000000# image-webp [![crates.io](https://img.shields.io/crates/v/image-webp.svg)](https://crates.io/crates/image-webp) [![Documentation](https://docs.rs/image-webp/badge.svg)](https://docs.rs/image-webp) [![Build Status](https://github.com/image-rs/image-webp/workflows/Rust%20CI/badge.svg)](https://github.com/image-rs/image-webp/actions) WebP encoding and decoding in pure Rust image-webp-0.2.0/src/decoder.rs000064400000000000000000001065151046102023000144120ustar 00000000000000use byteorder_lite::{LittleEndian, ReadBytesExt}; use quick_error::quick_error; use std::collections::HashMap; use std::io::{self, BufRead, BufReader, Cursor, Read, Seek}; use std::num::NonZeroU16; use std::ops::Range; use crate::extended::{self, get_alpha_predictor, read_alpha_chunk, WebPExtendedInfo}; use super::lossless::LosslessDecoder; use super::vp8::Vp8Decoder; quick_error! 
{ /// Errors that can occur when attempting to decode a WebP image #[derive(Debug)] #[non_exhaustive] pub enum DecodingError { /// An IO error occurred while reading the file IoError(err: io::Error) { from() display("IO Error: {}", err) source(err) } /// RIFF's "RIFF" signature not found or invalid RiffSignatureInvalid(err: [u8; 4]) { display("Invalid RIFF signature: {err:x?}") } /// WebP's "WEBP" signature not found or invalid WebpSignatureInvalid(err: [u8; 4]) { display("Invalid WebP signature: {err:x?}") } /// An expected chunk was missing ChunkMissing { display("An expected chunk was missing") } /// Chunk Header was incorrect or invalid in its usage ChunkHeaderInvalid(err: [u8; 4]) { display("Invalid Chunk header: {err:x?}") } #[allow(deprecated)] #[deprecated] /// Some bits were invalid ReservedBitSet { display("Reserved bits set") } /// The ALPH chunk preprocessing info flag was invalid InvalidAlphaPreprocessing { display("Alpha chunk preprocessing flag invalid") } /// Invalid compression method InvalidCompressionMethod { display("Invalid compression method") } /// Alpha chunk doesn't match the frame's size AlphaChunkSizeMismatch { display("Alpha chunk size mismatch") } /// Image is too large, either for the platform's pointer size or generally ImageTooLarge { display("Image too large") } /// Frame would go out of the canvas FrameOutsideImage { display("Frame outside image") } /// Signature of 0x2f not found LosslessSignatureInvalid(err: u8) { display("Invalid lossless signature: {err:x?}") } /// Version Number was not zero VersionNumberInvalid(err: u8) { display("Invalid lossless version number: {err}") } /// Invalid color cache bits InvalidColorCacheBits(err: u8) { display("Invalid color cache bits: {err}") } /// An invalid Huffman code was encountered HuffmanError { display("Invalid Huffman code") } /// The bitstream was somehow corrupt BitStreamError { display("Corrupt bitstream") } /// The transforms specified were invalid TransformError { display("Invalid transform") } /// VP8's `[0x9D, 0x01, 0x2A]` magic not found or invalid Vp8MagicInvalid(err: [u8; 3]) { display("Invalid VP8 magic: {err:x?}") } /// VP8 Decoder initialisation wasn't provided with enough data NotEnoughInitData { display("Not enough VP8 init data") } /// At time of writing, only the YUV colour-space encoded as `0` is specified ColorSpaceInvalid(err: u8) { display("Invalid VP8 color space: {err}") } /// LUMA prediction mode was not recognised LumaPredictionModeInvalid(err: i8) { display("Invalid VP8 luma prediction mode: {err}") } /// Intra-prediction mode was not recognised IntraPredictionModeInvalid(err: i8) { display("Invalid VP8 intra prediction mode: {err}") } /// Chroma prediction mode was not recognised ChromaPredictionModeInvalid(err: i8) { display("Invalid VP8 chroma prediction mode: {err}") } /// Inconsistent image sizes InconsistentImageSizes { display("Inconsistent image sizes") } /// The file may be valid, but this crate doesn't support decoding it. 
UnsupportedFeature(err: String) { display("Unsupported feature: {err}") } /// Invalid function call or parameter InvalidParameter(err: String) { display("Invalid parameter: {err}") } /// Memory limit exceeded MemoryLimitExceeded { display("Memory limit exceeded") } /// Invalid chunk size InvalidChunkSize { display("Invalid chunk size") } /// No more frames in image NoMoreFrames { display("No more frames") } } } /// All possible RIFF chunks in a WebP image file #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)] pub(crate) enum WebPRiffChunk { RIFF, WEBP, VP8, VP8L, VP8X, ANIM, ANMF, ALPH, ICCP, EXIF, XMP, Unknown([u8; 4]), } impl WebPRiffChunk { pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> Self { match &chunk_fourcc { b"RIFF" => Self::RIFF, b"WEBP" => Self::WEBP, b"VP8 " => Self::VP8, b"VP8L" => Self::VP8L, b"VP8X" => Self::VP8X, b"ANIM" => Self::ANIM, b"ANMF" => Self::ANMF, b"ALPH" => Self::ALPH, b"ICCP" => Self::ICCP, b"EXIF" => Self::EXIF, b"XMP " => Self::XMP, _ => Self::Unknown(chunk_fourcc), } } pub(crate) fn to_fourcc(self) -> [u8; 4] { match self { Self::RIFF => *b"RIFF", Self::WEBP => *b"WEBP", Self::VP8 => *b"VP8 ", Self::VP8L => *b"VP8L", Self::VP8X => *b"VP8X", Self::ANIM => *b"ANIM", Self::ANMF => *b"ANMF", Self::ALPH => *b"ALPH", Self::ICCP => *b"ICCP", Self::EXIF => *b"EXIF", Self::XMP => *b"XMP ", Self::Unknown(fourcc) => fourcc, } } pub(crate) fn is_unknown(&self) -> bool { matches!(self, Self::Unknown(_)) } } // enum WebPImage { // Lossy(VP8Frame), // Lossless(LosslessFrame), // Extended(ExtendedImage), // } enum ImageKind { Lossy, Lossless, Extended(WebPExtendedInfo), } struct AnimationState { next_frame: u32, next_frame_start: u64, dispose_next_frame: bool, previous_frame_width: u32, previous_frame_height: u32, previous_frame_x_offset: u32, previous_frame_y_offset: u32, canvas: Option>, } impl Default for AnimationState { fn default() -> Self { Self { next_frame: 0, next_frame_start: 0, dispose_next_frame: true, previous_frame_width: 0, previous_frame_height: 0, previous_frame_x_offset: 0, previous_frame_y_offset: 0, canvas: None, } } } /// Number of times that an animation loops. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum LoopCount { /// The animation loops forever. Forever, /// Each frame of the animation is displayed the specified number of times. Times(NonZeroU16), } /// WebP image format decoder. pub struct WebPDecoder { r: R, memory_limit: usize, width: u32, height: u32, kind: ImageKind, animation: AnimationState, is_lossy: bool, has_alpha: bool, num_frames: u32, loop_count: LoopCount, loop_duration: u64, chunks: HashMap>, } impl WebPDecoder { /// Create a new WebPDecoder from the reader `r`. The decoder performs many small reads, so the /// reader should be buffered. pub fn new(r: R) -> Result, DecodingError> { let mut decoder = WebPDecoder { r, width: 0, height: 0, num_frames: 0, kind: ImageKind::Lossy, chunks: HashMap::new(), animation: Default::default(), memory_limit: usize::MAX, is_lossy: false, has_alpha: false, loop_count: LoopCount::Times(NonZeroU16::new(1).unwrap()), loop_duration: 0, }; decoder.read_data()?; Ok(decoder) } fn read_data(&mut self) -> Result<(), DecodingError> { let (WebPRiffChunk::RIFF, riff_size, _) = read_chunk_header(&mut self.r)? else { return Err(DecodingError::ChunkHeaderInvalid(*b"RIFF")); }; match &read_fourcc(&mut self.r)? 
{ WebPRiffChunk::WEBP => {} fourcc => return Err(DecodingError::WebpSignatureInvalid(fourcc.to_fourcc())), } let (chunk, chunk_size, chunk_size_rounded) = read_chunk_header(&mut self.r)?; let start = self.r.stream_position()?; match chunk { WebPRiffChunk::VP8 => { let tag = self.r.read_u24::()?; let keyframe = tag & 1 == 0; if !keyframe { return Err(DecodingError::UnsupportedFeature( "Non-keyframe frames".to_owned(), )); } let mut tag = [0u8; 3]; self.r.read_exact(&mut tag)?; if tag != [0x9d, 0x01, 0x2a] { return Err(DecodingError::Vp8MagicInvalid(tag)); } let w = self.r.read_u16::()?; let h = self.r.read_u16::()?; self.width = (w & 0x3FFF) as u32; self.height = (h & 0x3FFF) as u32; if self.width == 0 || self.height == 0 { return Err(DecodingError::InconsistentImageSizes); } self.chunks .insert(WebPRiffChunk::VP8, start..start + chunk_size); self.kind = ImageKind::Lossy; self.is_lossy = true; } WebPRiffChunk::VP8L => { let signature = self.r.read_u8()?; if signature != 0x2f { return Err(DecodingError::LosslessSignatureInvalid(signature)); } let header = self.r.read_u32::()?; let version = header >> 29; if version != 0 { return Err(DecodingError::VersionNumberInvalid(version as u8)); } self.width = (1 + header) & 0x3FFF; self.height = (1 + (header >> 14)) & 0x3FFF; self.chunks .insert(WebPRiffChunk::VP8L, start..start + chunk_size); self.kind = ImageKind::Lossless; self.has_alpha = (header >> 28) & 1 != 0; } WebPRiffChunk::VP8X => { let mut info = extended::read_extended_header(&mut self.r)?; self.width = info.canvas_width; self.height = info.canvas_height; let mut position = start + chunk_size_rounded; let max_position = position + riff_size.saturating_sub(12); self.r.seek(io::SeekFrom::Start(position))?; // Resist denial of service attacks by using a BufReader. In most images there // should be a very small number of chunks. However, nothing prevents a malicious // image from having an extremely large number of "unknown" chunks. Issuing // millions of reads and seeks against the underlying reader might be very // expensive. let mut reader = BufReader::with_capacity(64 << 10, &mut self.r); while position < max_position { match read_chunk_header(&mut reader) { Ok((chunk, chunk_size, chunk_size_rounded)) => { let range = position + 8..position + 8 + chunk_size; position += 8 + chunk_size_rounded; if !chunk.is_unknown() { self.chunks.entry(chunk).or_insert(range); } if let WebPRiffChunk::ANMF = chunk { self.num_frames += 1; if chunk_size < 24 { return Err(DecodingError::InvalidChunkSize); } reader.seek_relative(12)?; let duration = reader.read_u32::()? & 0xffffff; self.loop_duration = self.loop_duration.wrapping_add(u64::from(duration)); // If the image is animated, the image data chunk will be inside the // ANMF chunks, so we must inspect them to determine whether the // image contains any lossy image data. VP8 chunks store lossy data // and the spec says that lossless images SHOULD NOT contain ALPH // chunks, so we treat both as indicators of lossy images. if !self.is_lossy { let (subchunk, ..) 
= read_chunk_header(&mut reader)?; if let WebPRiffChunk::VP8 | WebPRiffChunk::ALPH = subchunk { self.is_lossy = true; } reader.seek_relative(chunk_size_rounded as i64 - 24)?; } else { reader.seek_relative(chunk_size_rounded as i64 - 16)?; } continue; } reader.seek_relative(chunk_size_rounded as i64)?; } Err(DecodingError::IoError(e)) if e.kind() == io::ErrorKind::UnexpectedEof => { break; } Err(e) => return Err(e), } } self.is_lossy = self.is_lossy || self.chunks.contains_key(&WebPRiffChunk::VP8); if info.animation && (!self.chunks.contains_key(&WebPRiffChunk::ANIM) || !self.chunks.contains_key(&WebPRiffChunk::ANMF)) || info.icc_profile && !self.chunks.contains_key(&WebPRiffChunk::ICCP) || info.exif_metadata && !self.chunks.contains_key(&WebPRiffChunk::EXIF) || info.xmp_metadata && !self.chunks.contains_key(&WebPRiffChunk::XMP) || !info.animation && self.chunks.contains_key(&WebPRiffChunk::VP8) == self.chunks.contains_key(&WebPRiffChunk::VP8L) { return Err(DecodingError::ChunkMissing); } // Decode ANIM chunk. if info.animation { match self.read_chunk(WebPRiffChunk::ANIM, 6) { Ok(Some(chunk)) => { let mut cursor = Cursor::new(chunk); cursor.read_exact(&mut info.background_color)?; self.loop_count = match cursor.read_u16::()? { 0 => LoopCount::Forever, n => LoopCount::Times(NonZeroU16::new(n).unwrap()), }; self.animation.next_frame_start = self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8; } Ok(None) => return Err(DecodingError::ChunkMissing), Err(DecodingError::MemoryLimitExceeded) => { return Err(DecodingError::InvalidChunkSize) } Err(e) => return Err(e), } } // If the image is animated, the image data chunk will be inside the ANMF chunks. We // store the ALPH, VP8, and VP8L chunks (as applicable) of the first frame in the // hashmap so that we can read them later. if let Some(range) = self.chunks.get(&WebPRiffChunk::ANMF).cloned() { let mut position = range.start + 16; self.r.seek(io::SeekFrom::Start(position))?; for _ in 0..2 { let (subchunk, subchunk_size, subchunk_size_rounded) = read_chunk_header(&mut self.r)?; let subrange = position + 8..position + 8 + subchunk_size; self.chunks.entry(subchunk).or_insert(subrange.clone()); position += 8 + subchunk_size_rounded; if position + 8 > range.end { break; } } } self.has_alpha = info.alpha; self.kind = ImageKind::Extended(info); } _ => return Err(DecodingError::ChunkHeaderInvalid(chunk.to_fourcc())), }; Ok(()) } /// Sets the maximum amount of memory that the decoder is allowed to allocate at once. /// /// TODO: Some allocations currently ignore this limit. pub fn set_memory_limit(&mut self, limit: usize) { self.memory_limit = limit; } /// Sets the background color if the image is an extended and animated webp. pub fn set_background_color(&mut self, color: [u8; 4]) -> Result<(), DecodingError> { if let ImageKind::Extended(info) = &mut self.kind { info.background_color = color; Ok(()) } else { Err(DecodingError::InvalidParameter( "Background color can only be set on animated webp".to_owned(), )) } } /// Returns the (width, height) of the image in pixels. pub fn dimensions(&self) -> (u32, u32) { (self.width, self.height) } /// Returns whether the image has an alpha channel. If so, the pixel format is Rgba8 and /// otherwise Rgb8. pub fn has_alpha(&self) -> bool { self.has_alpha } /// Returns true if the image is animated. pub fn is_animated(&self) -> bool { match &self.kind { ImageKind::Lossy | ImageKind::Lossless => false, ImageKind::Extended(extended) => extended.animation, } } /// Returns whether the image is lossy. 
For animated images, this is true if any frame is lossy. pub fn is_lossy(&mut self) -> bool { self.is_lossy } /// Returns the number of frames of a single loop of the animation, or zero if the image is not /// animated. pub fn num_frames(&self) -> u32 { self.num_frames } /// Returns the number of times the animation should loop. pub fn loop_count(&self) -> LoopCount { self.loop_count } /// Returns the total duration of one loop through the animation in milliseconds, or zero if the /// image is not animated. /// /// This is the sum of the durations of all individual frames of the image. pub fn loop_duration(&self) -> u64 { self.loop_duration } fn read_chunk( &mut self, chunk: WebPRiffChunk, max_size: usize, ) -> Result>, DecodingError> { match self.chunks.get(&chunk) { Some(range) => { if range.end - range.start > max_size as u64 { return Err(DecodingError::MemoryLimitExceeded); } self.r.seek(io::SeekFrom::Start(range.start))?; let mut data = vec![0; (range.end - range.start) as usize]; self.r.read_exact(&mut data)?; Ok(Some(data)) } None => Ok(None), } } /// Returns the raw bytes of the ICC profile, or None if there is no ICC profile. pub fn icc_profile(&mut self) -> Result>, DecodingError> { self.read_chunk(WebPRiffChunk::ICCP, self.memory_limit) } /// Returns the raw bytes of the EXIF metadata, or None if there is no EXIF metadata. pub fn exif_metadata(&mut self) -> Result>, DecodingError> { self.read_chunk(WebPRiffChunk::EXIF, self.memory_limit) } /// Returns the raw bytes of the XMP metadata, or None if there is no XMP metadata. pub fn xmp_metadata(&mut self) -> Result>, DecodingError> { self.read_chunk(WebPRiffChunk::XMP, self.memory_limit) } /// Returns the number of bytes required to store the image or a single frame, or None if that /// would take more than usize::MAX bytes. pub fn output_buffer_size(&self) -> Option { let bytes_per_pixel = if self.has_alpha() { 4 } else { 3 }; (self.width as usize) .checked_mul(self.height as usize)? .checked_mul(bytes_per_pixel) } /// Returns the raw bytes of the image. For animated images, this is the first frame. pub fn read_image(&mut self, buf: &mut [u8]) -> Result<(), DecodingError> { assert_eq!(Some(buf.len()), self.output_buffer_size()); if self.is_animated() { let saved = std::mem::take(&mut self.animation); self.animation.next_frame_start = self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8; let result = self.read_frame(buf); self.animation = saved; result?; } else if let Some(range) = self.chunks.get(&WebPRiffChunk::VP8L) { let mut decoder = LosslessDecoder::new(range_reader(&mut self.r, range.clone())?); if self.has_alpha { decoder.decode_frame(self.width, self.height, false, buf)?; } else { let mut data = vec![0; self.width as usize * self.height as usize * 4]; decoder.decode_frame(self.width, self.height, false, &mut data)?; for (rgba_val, chunk) in data.chunks_exact(4).zip(buf.chunks_exact_mut(3)) { chunk.copy_from_slice(&rgba_val[..3]); } } } else { let range = self .chunks .get(&WebPRiffChunk::VP8) .ok_or(DecodingError::ChunkMissing)?; // TODO: avoid cloning frame let frame = Vp8Decoder::new(range_reader(&mut self.r, range.start..range.end)?) .decode_frame()? .clone(); if u32::from(frame.width) != self.width || u32::from(frame.height) != self.height { return Err(DecodingError::InconsistentImageSizes); } if self.has_alpha() { frame.fill_rgba(buf); let range = self .chunks .get(&WebPRiffChunk::ALPH) .ok_or(DecodingError::ChunkMissing)? 
.clone(); let alpha_chunk = read_alpha_chunk( &mut range_reader(&mut self.r, range.start..range.end)?, self.width as u16, self.height as u16, )?; for y in 0..frame.height { for x in 0..frame.width { let predictor: u8 = get_alpha_predictor( x.into(), y.into(), frame.width.into(), alpha_chunk.filtering_method, buf, ); let alpha_index = usize::from(y) * usize::from(frame.width) + usize::from(x); let buffer_index = alpha_index * 4 + 3; buf[buffer_index] = predictor.wrapping_add(alpha_chunk.data[alpha_index]); } } } else { frame.fill_rgb(buf); } } Ok(()) } /// Reads the next frame of the animation. /// /// The frame contents are written into `buf` and the method returns the duration of the frame /// in milliseconds. If there are no more frames, the method returns /// `DecodingError::NoMoreFrames` and `buf` is left unchanged. /// /// # Panics /// /// Panics if the image is not animated. pub fn read_frame(&mut self, buf: &mut [u8]) -> Result { assert!(self.is_animated()); assert_eq!(Some(buf.len()), self.output_buffer_size()); if self.animation.next_frame == self.num_frames { return Err(DecodingError::NoMoreFrames); } let ImageKind::Extended(info) = &self.kind else { unreachable!() }; self.r .seek(io::SeekFrom::Start(self.animation.next_frame_start))?; let anmf_size = match read_chunk_header(&mut self.r)? { (WebPRiffChunk::ANMF, size, _) if size >= 32 => size, _ => return Err(DecodingError::ChunkHeaderInvalid(*b"ANMF")), }; // Read ANMF chunk let frame_x = extended::read_3_bytes(&mut self.r)? * 2; let frame_y = extended::read_3_bytes(&mut self.r)? * 2; let frame_width = extended::read_3_bytes(&mut self.r)? + 1; let frame_height = extended::read_3_bytes(&mut self.r)? + 1; if frame_width > 16384 || frame_height > 16384 { return Err(DecodingError::ImageTooLarge); } if frame_x + frame_width > self.width || frame_y + frame_height > self.height { return Err(DecodingError::FrameOutsideImage); } let duration = extended::read_3_bytes(&mut self.r)?; let frame_info = self.r.read_u8()?; let use_alpha_blending = frame_info & 0b00000010 == 0; let dispose = frame_info & 0b00000001 != 0; let clear_color = if self.animation.dispose_next_frame { Some(info.background_color) } else { None }; // Read normal bitstream now let (chunk, chunk_size, chunk_size_rounded) = read_chunk_header(&mut self.r)?; if chunk_size_rounded + 24 > anmf_size { return Err(DecodingError::ChunkHeaderInvalid(chunk.to_fourcc())); } let (frame, frame_has_alpha): (Vec, bool) = match chunk { WebPRiffChunk::VP8 => { let reader = (&mut self.r).take(chunk_size); let mut vp8_decoder = Vp8Decoder::new(reader); let raw_frame = vp8_decoder.decode_frame()?; if raw_frame.width as u32 != frame_width || raw_frame.height as u32 != frame_height { return Err(DecodingError::InconsistentImageSizes); } let mut rgb_frame = vec![0; frame_width as usize * frame_height as usize * 3]; raw_frame.fill_rgb(&mut rgb_frame); (rgb_frame, false) } WebPRiffChunk::VP8L => { let reader = (&mut self.r).take(chunk_size); let mut lossless_decoder = LosslessDecoder::new(reader); let mut rgba_frame = vec![0; frame_width as usize * frame_height as usize * 4]; lossless_decoder.decode_frame(frame_width, frame_height, false, &mut rgba_frame)?; (rgba_frame, true) } WebPRiffChunk::ALPH => { if chunk_size_rounded + 32 > anmf_size { return Err(DecodingError::ChunkHeaderInvalid(chunk.to_fourcc())); } // read alpha let next_chunk_start = self.r.stream_position()? 
+ chunk_size_rounded; let mut reader = (&mut self.r).take(chunk_size); let alpha_chunk = read_alpha_chunk(&mut reader, frame_width as u16, frame_height as u16)?; // read opaque self.r.seek(io::SeekFrom::Start(next_chunk_start))?; let (next_chunk, next_chunk_size, _) = read_chunk_header(&mut self.r)?; if chunk_size + next_chunk_size + 32 > anmf_size { return Err(DecodingError::ChunkHeaderInvalid(next_chunk.to_fourcc())); } let mut vp8_decoder = Vp8Decoder::new((&mut self.r).take(next_chunk_size)); let frame = vp8_decoder.decode_frame()?; let mut rgba_frame = vec![0; frame_width as usize * frame_height as usize * 4]; frame.fill_rgba(&mut rgba_frame); for y in 0..frame.height { for x in 0..frame.width { let predictor: u8 = get_alpha_predictor( x.into(), y.into(), frame.width.into(), alpha_chunk.filtering_method, &rgba_frame, ); let alpha_index = usize::from(y) * usize::from(frame.width) + usize::from(x); let buffer_index = alpha_index * 4 + 3; rgba_frame[buffer_index] = predictor.wrapping_add(alpha_chunk.data[alpha_index]); } } (rgba_frame, true) } _ => return Err(DecodingError::ChunkHeaderInvalid(chunk.to_fourcc())), }; // fill starting canvas with clear color if self.animation.canvas.is_none() { self.animation.canvas = { let mut canvas = vec![0; (self.width * self.height * 4) as usize]; canvas .chunks_exact_mut(4) .for_each(|c| c.copy_from_slice(&info.background_color)); Some(canvas) } } extended::composite_frame( self.animation.canvas.as_mut().unwrap(), self.width, self.height, clear_color, &frame, frame_x, frame_y, frame_width, frame_height, frame_has_alpha, use_alpha_blending, self.animation.previous_frame_width, self.animation.previous_frame_height, self.animation.previous_frame_x_offset, self.animation.previous_frame_y_offset, ); self.animation.previous_frame_width = frame_width; self.animation.previous_frame_height = frame_height; self.animation.previous_frame_x_offset = frame_x; self.animation.previous_frame_y_offset = frame_y; self.animation.dispose_next_frame = dispose; self.animation.next_frame_start += anmf_size + 8; self.animation.next_frame += 1; if self.has_alpha() { buf.copy_from_slice(self.animation.canvas.as_ref().unwrap()); } else { for (b, c) in buf .chunks_exact_mut(3) .zip(self.animation.canvas.as_ref().unwrap().chunks_exact(4)) { b.copy_from_slice(&c[..3]); } } Ok(duration) } /// Resets the animation to the first frame. /// /// # Panics /// /// Panics if the image is not animated. 
pub fn reset_animation(&mut self) { assert!(self.is_animated()); self.animation.next_frame = 0; self.animation.next_frame_start = self.chunks.get(&WebPRiffChunk::ANMF).unwrap().start - 8; self.animation.dispose_next_frame = true; } } pub(crate) fn range_reader( mut r: R, range: Range, ) -> Result { r.seek(io::SeekFrom::Start(range.start))?; Ok(r.take(range.end - range.start)) } pub(crate) fn read_fourcc(mut r: R) -> Result { let mut chunk_fourcc = [0; 4]; r.read_exact(&mut chunk_fourcc)?; Ok(WebPRiffChunk::from_fourcc(chunk_fourcc)) } pub(crate) fn read_chunk_header( mut r: R, ) -> Result<(WebPRiffChunk, u64, u64), DecodingError> { let chunk = read_fourcc(&mut r)?; let chunk_size = r.read_u32::()?; let chunk_size_rounded = chunk_size.saturating_add(chunk_size & 1); Ok((chunk, chunk_size.into(), chunk_size_rounded.into())) } #[cfg(test)] mod tests { use super::*; const RGB_BPP: usize = 3; #[test] fn add_with_overflow_size() { let bytes = vec![ 0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46, ]; let data = std::io::Cursor::new(bytes); let _ = WebPDecoder::new(data); } #[test] fn decode_2x2_single_color_image() { // Image data created from imagemagick and output of xxd: // $ convert -size 2x2 xc:#f00 red.webp // $ xxd -g 1 red.webp | head const NUM_PIXELS: usize = 2 * 2 * RGB_BPP; // 2x2 red pixel image let bytes = [ 0x52, 0x49, 0x46, 0x46, 0x3c, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, 0x56, 0x50, 0x38, 0x20, 0x30, 0x00, 0x00, 0x00, 0xd0, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x02, 0x00, 0x02, 0x00, 0x02, 0x00, 0x34, 0x25, 0xa0, 0x02, 0x74, 0xba, 0x01, 0xf8, 0x00, 0x03, 0xb0, 0x00, 0xfe, 0xf0, 0xc4, 0x0b, 0xff, 0x20, 0xb9, 0x61, 0x75, 0xc8, 0xd7, 0xff, 0x20, 0x3f, 0xe4, 0x07, 0xfc, 0x80, 0xff, 0xf8, 0xf2, 0x00, 0x00, 0x00, ]; let mut data = [0; NUM_PIXELS]; let mut decoder = WebPDecoder::new(std::io::Cursor::new(bytes)).unwrap(); decoder.read_image(&mut data).unwrap(); // All pixels are the same value let first_pixel = &data[..RGB_BPP]; assert!(data.chunks_exact(3).all(|ch| ch.iter().eq(first_pixel))); } #[test] fn decode_3x3_single_color_image() { // Test that any odd pixel "tail" is decoded properly const NUM_PIXELS: usize = 3 * 3 * RGB_BPP; // 3x3 red pixel image let bytes = [ 0x52, 0x49, 0x46, 0x46, 0x3c, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, 0x56, 0x50, 0x38, 0x20, 0x30, 0x00, 0x00, 0x00, 0xd0, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x34, 0x25, 0xa0, 0x02, 0x74, 0xba, 0x01, 0xf8, 0x00, 0x03, 0xb0, 0x00, 0xfe, 0xf0, 0xc4, 0x0b, 0xff, 0x20, 0xb9, 0x61, 0x75, 0xc8, 0xd7, 0xff, 0x20, 0x3f, 0xe4, 0x07, 0xfc, 0x80, 0xff, 0xf8, 0xf2, 0x00, 0x00, 0x00, ]; let mut data = [0; NUM_PIXELS]; let mut decoder = WebPDecoder::new(std::io::Cursor::new(bytes)).unwrap(); decoder.read_image(&mut data).unwrap(); // All pixels are the same value let first_pixel = &data[..RGB_BPP]; assert!(data.chunks_exact(3).all(|ch| ch.iter().eq(first_pixel))); } } image-webp-0.2.0/src/encoder.rs000064400000000000000000000635111046102023000144220ustar 00000000000000//! Encoding of WebP images. 
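//!
//! A minimal usage sketch (an illustrative addition, assuming `WebPEncoder` and
//! `ColorType` are re-exported at the crate root, matching the `crate::ColorType`
//! and `crate::WebPDecoder` paths used by the tests in this module):
//!
//! ```
//! use image_webp::{ColorType, WebPEncoder};
//!
//! // A 2x2 opaque RGB image, 3 bytes per pixel.
//! let pixels = [
//!     255u8, 0, 0,   0, 255, 0,
//!     0, 0, 255,     255, 255, 255,
//! ];
//!
//! let mut output = Vec::new();
//! WebPEncoder::new(&mut output)
//!     .encode(&pixels, 2, 2, ColorType::Rgb8)
//!     .unwrap();
//! // `output` now holds a complete lossless (VP8L) WebP file.
//! ```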
use std::collections::BinaryHeap; use std::io::{self, Write}; use std::slice::ChunksExact; use quick_error::quick_error; /// Color type of the image. /// /// Note that the WebP format doesn't have a concept of color type. All images are encoded as RGBA /// and some decoders may treat them as such. This enum is used to indicate the color type of the /// input data provided to the encoder, which can help improve compression ratio. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum ColorType { /// Opaque image with a single luminance byte per pixel. L8, /// Image with a luminance and alpha byte per pixel. La8, /// Opaque image with a red, green, and blue byte per pixel. Rgb8, /// Image with a red, green, blue, and alpha byte per pixel. Rgba8, } quick_error! { /// Error that can occur during encoding. #[derive(Debug)] #[non_exhaustive] pub enum EncodingError { /// An IO error occurred. IoError(err: io::Error) { from() display("IO error: {}", err) source(err) } /// The image dimensions are not allowed by the WebP format. InvalidDimensions { display("Invalid dimensions") } } } struct BitWriter { writer: W, buffer: u64, nbits: u8, } impl BitWriter { fn write_bits(&mut self, bits: u64, nbits: u8) -> io::Result<()> { debug_assert!(nbits <= 64); self.buffer |= bits << self.nbits; self.nbits += nbits; if self.nbits >= 64 { self.writer.write_all(&self.buffer.to_le_bytes())?; self.nbits -= 64; self.buffer = bits.checked_shr((nbits - self.nbits) as u32).unwrap_or(0); } debug_assert!(self.nbits < 64); Ok(()) } fn flush(&mut self) -> io::Result<()> { if self.nbits % 8 != 0 { self.write_bits(0, 8 - self.nbits % 8)?; } if self.nbits > 0 { self.writer .write_all(&self.buffer.to_le_bytes()[..self.nbits as usize / 8]) .unwrap(); self.buffer = 0; self.nbits = 0; } Ok(()) } } fn write_single_entry_huffman_tree(w: &mut BitWriter, symbol: u8) -> io::Result<()> { w.write_bits(1, 2)?; if symbol <= 1 { w.write_bits(0, 1)?; w.write_bits(symbol as u64, 1)?; } else { w.write_bits(1, 1)?; w.write_bits(symbol as u64, 8)?; } Ok(()) } fn build_huffman_tree( frequencies: &[u32], lengths: &mut [u8], codes: &mut [u16], length_limit: u8, ) -> bool { assert_eq!(frequencies.len(), lengths.len()); assert_eq!(frequencies.len(), codes.len()); if frequencies.iter().filter(|&&f| f > 0).count() <= 1 { lengths.fill(0); codes.fill(0); return false; } #[derive(Eq, PartialEq, Copy, Clone, Debug)] struct Item(u32, u16); impl Ord for Item { fn cmp(&self, other: &Self) -> std::cmp::Ordering { other.0.cmp(&self.0) } } impl PartialOrd for Item { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } // Build a huffman tree let mut internal_nodes = Vec::new(); let mut nodes = BinaryHeap::from_iter( frequencies .iter() .enumerate() .filter(|(_, &frequency)| frequency > 0) .map(|(i, &frequency)| Item(frequency, i as u16)), ); while nodes.len() > 1 { let Item(frequency1, index1) = nodes.pop().unwrap(); let mut root = nodes.peek_mut().unwrap(); internal_nodes.push((index1, root.1)); *root = Item( frequency1 + root.0, internal_nodes.len() as u16 + frequencies.len() as u16 - 1, ); } // Walk the tree to assign code lengths lengths.fill(0); let mut stack = Vec::new(); stack.push((nodes.pop().unwrap().1, 0)); while let Some((node, depth)) = stack.pop() { let node = node as usize; if node < frequencies.len() { lengths[node] = depth as u8; } else { let (left, right) = internal_nodes[node - frequencies.len()]; stack.push((left, depth + 1)); stack.push((right, depth + 1)); } } // Limit the codes to length length_limit let mut max_length 
= 0; for &length in lengths.iter() { max_length = max_length.max(length); } if max_length > length_limit { let mut counts = [0u32; 16]; for &length in lengths.iter() { counts[length.min(length_limit) as usize] += 1; } let mut total = 0; for (i, count) in counts .iter() .enumerate() .skip(1) .take(length_limit as usize) { total += count << (length_limit as usize - i); } while total > 1u32 << length_limit { let mut i = length_limit as usize - 1; while counts[i] == 0 { i -= 1; } counts[i] -= 1; counts[length_limit as usize] -= 1; counts[i + 1] += 2; total -= 1; } // assign new lengths let mut len = length_limit; let mut indexes = frequencies.iter().copied().enumerate().collect::>(); indexes.sort_unstable_by_key(|&(_, frequency)| frequency); for &(i, frequency) in indexes.iter() { if frequency > 0 { while counts[len as usize] == 0 { len -= 1; } lengths[i] = len; counts[len as usize] -= 1; } } } // Assign codes codes.fill(0); let mut code = 0u32; for len in 1..=length_limit { for (i, &length) in lengths.iter().enumerate() { if length == len { codes[i] = (code as u16).reverse_bits() >> (16 - len); code += 1; } } code <<= 1; } assert_eq!(code, 2 << length_limit); true } fn write_huffman_tree( w: &mut BitWriter, frequencies: &[u32], lengths: &mut [u8], codes: &mut [u16], ) -> io::Result<()> { if !build_huffman_tree(frequencies, lengths, codes, 15) { let symbol = frequencies .iter() .position(|&frequency| frequency > 0) .unwrap_or(0); return write_single_entry_huffman_tree(w, symbol as u8); } let mut code_length_lengths = [0u8; 16]; let mut code_length_codes = [0u16; 16]; let mut code_length_frequencies = [0u32; 16]; for &length in lengths.iter() { code_length_frequencies[length as usize] += 1; } let single_code_length_length = !build_huffman_tree( &code_length_frequencies, &mut code_length_lengths, &mut code_length_codes, 7, ); const CODE_LENGTH_ORDER: [usize; 19] = [ 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ]; // Write the huffman tree w.write_bits(0, 1)?; // normal huffman tree w.write_bits(19 - 4, 4)?; // num_code_lengths - 4 for &i in CODE_LENGTH_ORDER.iter() { if i > 15 || code_length_frequencies[i] == 0 { w.write_bits(0, 3)?; } else if single_code_length_length { w.write_bits(1, 3)?; } else { w.write_bits(code_length_lengths[i] as u64, 3)?; } } match lengths.len() { 256 => { w.write_bits(1, 1)?; // max_symbol is stored w.write_bits(3, 3)?; // max_symbol_nbits / 2 - 2 w.write_bits(254, 8)?; // max_symbol - 2 } 280 => w.write_bits(0, 1)?, _ => unreachable!(), } // Write the huffman codes if !single_code_length_length { for &len in lengths.iter() { w.write_bits( code_length_codes[len as usize] as u64, code_length_lengths[len as usize], )?; } } Ok(()) } fn length_to_symbol(len: u16) -> (u16, u8) { let len = len - 1; let highest_bit = 15 - len.leading_zeros() as u16; // TODO: use ilog2 once MSRV >= 1.67 let second_highest_bit = (len >> (highest_bit - 1)) & 1; let extra_bits = highest_bit - 1; let symbol = 2 * highest_bit + second_highest_bit; (symbol, extra_bits as u8) } #[inline(always)] fn count_run( pixel: &[u8], it: &mut std::iter::Peekable>, frequencies1: &mut [u32; 280], ) { let mut run_length = 0; while run_length < 4096 && it.peek() == Some(&pixel) { run_length += 1; it.next(); } if run_length > 0 { if run_length <= 4 { let symbol = 256 + run_length - 1; frequencies1[symbol] += 1; } else { let (symbol, _extra_bits) = length_to_symbol(run_length as u16); frequencies1[256 + symbol as usize] += 1; } } } #[inline(always)] fn write_run( w: &mut BitWriter, pixel: &[u8], 
it: &mut std::iter::Peekable>, codes1: &[u16; 280], lengths1: &[u8; 280], ) -> io::Result<()> { let mut run_length = 0; while run_length < 4096 && it.peek() == Some(&pixel) { run_length += 1; it.next(); } if run_length > 0 { if run_length <= 4 { let symbol = 256 + run_length - 1; w.write_bits(codes1[symbol] as u64, lengths1[symbol])?; } else { let (symbol, extra_bits) = length_to_symbol(run_length as u16); w.write_bits( codes1[256 + symbol as usize] as u64, lengths1[256 + symbol as usize], )?; w.write_bits( (run_length as u64 - 1) & ((1 << extra_bits) - 1), extra_bits, )?; } } Ok(()) } /// Allows fine-tuning some encoder parameters. /// /// Pass to [`WebPEncoder::set_params()`]. #[non_exhaustive] #[derive(Clone, Debug)] pub struct EncoderParams { /// Use a predictor transform. Enabled by default. pub use_predictor_transform: bool, } impl Default for EncoderParams { fn default() -> Self { Self { use_predictor_transform: true, } } } /// Encode image data with the indicated color type. /// /// # Panics /// /// Panics if the image data is not of the indicated dimensions. fn encode_frame( writer: W, data: &[u8], width: u32, height: u32, color: ColorType, params: EncoderParams, ) -> Result<(), EncodingError> { let w = &mut BitWriter { writer, buffer: 0, nbits: 0, }; let (is_color, is_alpha, bytes_per_pixel) = match color { ColorType::L8 => (false, false, 1), ColorType::La8 => (false, true, 2), ColorType::Rgb8 => (true, false, 3), ColorType::Rgba8 => (true, true, 4), }; assert_eq!( (width as u64 * height as u64).saturating_mul(bytes_per_pixel), data.len() as u64 ); if width == 0 || width > 16384 || height == 0 || height > 16384 { return Err(EncodingError::InvalidDimensions); } w.write_bits(0x2f, 8)?; // signature w.write_bits(width as u64 - 1, 14)?; w.write_bits(height as u64 - 1, 14)?; w.write_bits(is_alpha as u64, 1)?; // alpha used w.write_bits(0x0, 3)?; // version // subtract green transform w.write_bits(0b101, 3)?; // predictor transform if params.use_predictor_transform { w.write_bits(0b111001, 6)?; w.write_bits(0x0, 1)?; // no color cache write_single_entry_huffman_tree(w, 2)?; for _ in 0..4 { write_single_entry_huffman_tree(w, 0)?; } } // transforms done w.write_bits(0x0, 1)?; // color cache w.write_bits(0x0, 1)?; // meta-huffman codes w.write_bits(0x0, 1)?; // expand to RGBA let mut pixels = match color { ColorType::L8 => data.iter().flat_map(|&p| [p, p, p, 255]).collect(), ColorType::La8 => data .chunks_exact(2) .flat_map(|p| [p[0], p[0], p[0], p[1]]) .collect(), ColorType::Rgb8 => data .chunks_exact(3) .flat_map(|p| [p[0], p[1], p[2], 255]) .collect(), ColorType::Rgba8 => data.to_vec(), }; // compute subtract green transform for pixel in pixels.chunks_exact_mut(4) { pixel[0] = pixel[0].wrapping_sub(pixel[1]); pixel[2] = pixel[2].wrapping_sub(pixel[1]); } // compute predictor transform if params.use_predictor_transform { let row_bytes = width as usize * 4; for y in (1..height as usize).rev() { let (prev, current) = pixels[(y - 1) * row_bytes..][..row_bytes * 2].split_at_mut(row_bytes); for (c, p) in current.iter_mut().zip(prev) { *c = c.wrapping_sub(*p); } } for i in (4..row_bytes).rev() { pixels[i] = pixels[i].wrapping_sub(pixels[i - 4]); } pixels[3] = pixels[3].wrapping_sub(255); } // compute frequencies let mut frequencies0 = [0u32; 256]; let mut frequencies1 = [0u32; 280]; let mut frequencies2 = [0u32; 256]; let mut frequencies3 = [0u32; 256]; let mut it = pixels.chunks_exact(4).peekable(); match color { ColorType::L8 => { frequencies0[0] = 1; frequencies2[0] = 1; frequencies3[0] = 
1; while let Some(pixel) = it.next() { frequencies1[pixel[1] as usize] += 1; count_run(pixel, &mut it, &mut frequencies1); } } ColorType::La8 => { frequencies0[0] = 1; frequencies2[0] = 1; while let Some(pixel) = it.next() { frequencies1[pixel[1] as usize] += 1; frequencies3[pixel[3] as usize] += 1; count_run(pixel, &mut it, &mut frequencies1); } } ColorType::Rgb8 => { frequencies3[0] = 1; while let Some(pixel) = it.next() { frequencies0[pixel[0] as usize] += 1; frequencies1[pixel[1] as usize] += 1; frequencies2[pixel[2] as usize] += 1; count_run(pixel, &mut it, &mut frequencies1); } } ColorType::Rgba8 => { while let Some(pixel) = it.next() { frequencies0[pixel[0] as usize] += 1; frequencies1[pixel[1] as usize] += 1; frequencies2[pixel[2] as usize] += 1; frequencies3[pixel[3] as usize] += 1; count_run(pixel, &mut it, &mut frequencies1); } } } // compute and write huffman codes let mut lengths0 = [0u8; 256]; let mut lengths1 = [0u8; 280]; let mut lengths2 = [0u8; 256]; let mut lengths3 = [0u8; 256]; let mut codes0 = [0u16; 256]; let mut codes1 = [0u16; 280]; let mut codes2 = [0u16; 256]; let mut codes3 = [0u16; 256]; write_huffman_tree(w, &frequencies1, &mut lengths1, &mut codes1)?; if is_color { write_huffman_tree(w, &frequencies0, &mut lengths0, &mut codes0)?; write_huffman_tree(w, &frequencies2, &mut lengths2, &mut codes2)?; } else { write_single_entry_huffman_tree(w, 0)?; write_single_entry_huffman_tree(w, 0)?; } if is_alpha { write_huffman_tree(w, &frequencies3, &mut lengths3, &mut codes3)?; } else if params.use_predictor_transform { write_single_entry_huffman_tree(w, 0)?; } else { write_single_entry_huffman_tree(w, 255)?; } write_single_entry_huffman_tree(w, 1)?; // Write image data let mut it = pixels.chunks_exact(4).peekable(); match color { ColorType::L8 => { while let Some(pixel) = it.next() { w.write_bits( codes1[pixel[1] as usize] as u64, lengths1[pixel[1] as usize], )?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; } } ColorType::La8 => { while let Some(pixel) = it.next() { let len1 = lengths1[pixel[1] as usize]; let len3 = lengths3[pixel[3] as usize]; let code = codes1[pixel[1] as usize] as u64 | (codes3[pixel[3] as usize] as u64) << len1; w.write_bits(code, len1 + len3)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; } } ColorType::Rgb8 => { while let Some(pixel) = it.next() { let len1 = lengths1[pixel[1] as usize]; let len0 = lengths0[pixel[0] as usize]; let len2 = lengths2[pixel[2] as usize]; let code = codes1[pixel[1] as usize] as u64 | (codes0[pixel[0] as usize] as u64) << len1 | (codes2[pixel[2] as usize] as u64) << (len1 + len0); w.write_bits(code, len1 + len0 + len2)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; } } ColorType::Rgba8 => { while let Some(pixel) = it.next() { let len1 = lengths1[pixel[1] as usize]; let len0 = lengths0[pixel[0] as usize]; let len2 = lengths2[pixel[2] as usize]; let len3 = lengths3[pixel[3] as usize]; let code = codes1[pixel[1] as usize] as u64 | (codes0[pixel[0] as usize] as u64) << len1 | (codes2[pixel[2] as usize] as u64) << (len1 + len0) | (codes3[pixel[3] as usize] as u64) << (len1 + len0 + len2); w.write_bits(code, len1 + len0 + len2 + len3)?; write_run(w, pixel, &mut it, &codes1, &lengths1)?; } } } w.flush()?; Ok(()) } fn chunk_size(inner_bytes: usize) -> u32 { if inner_bytes % 2 == 1 { (inner_bytes + 1) as u32 + 8 } else { inner_bytes as u32 + 8 } } fn write_chunk(mut w: W, name: &[u8], data: &[u8]) -> io::Result<()> { debug_assert!(name.len() == 4); w.write_all(name)?; w.write_all(&(data.len() as 
u32).to_le_bytes())?; w.write_all(data)?; if data.len() % 2 == 1 { w.write_all(&[0])?; } Ok(()) } /// WebP Encoder. pub struct WebPEncoder { writer: W, icc_profile: Vec, exif_metadata: Vec, xmp_metadata: Vec, params: EncoderParams, } impl WebPEncoder { /// Create a new encoder that writes its output to `w`. /// /// Only supports "VP8L" lossless encoding. pub fn new(w: W) -> Self { Self { writer: w, icc_profile: Vec::new(), exif_metadata: Vec::new(), xmp_metadata: Vec::new(), params: EncoderParams::default(), } } /// Set the ICC profile to use for the image. pub fn set_icc_profile(&mut self, icc_profile: Vec) { self.icc_profile = icc_profile; } /// Set the EXIF metadata to use for the image. pub fn set_exif_metadata(&mut self, exif_metadata: Vec) { self.exif_metadata = exif_metadata; } /// Set the XMP metadata to use for the image. pub fn set_xmp_metadata(&mut self, xmp_metadata: Vec) { self.xmp_metadata = xmp_metadata; } /// Set the `EncoderParams` to use. pub fn set_params(&mut self, params: EncoderParams) { self.params = params; } /// Encode image data with the indicated color type. /// /// # Panics /// /// Panics if the image data is not of the indicated dimensions. pub fn encode( mut self, data: &[u8], width: u32, height: u32, color: ColorType, ) -> Result<(), EncodingError> { let mut frame = Vec::new(); encode_frame(&mut frame, data, width, height, color, self.params)?; // If the image has no metadata, it can be encoded with the "simple" WebP container format. if self.icc_profile.is_empty() && self.exif_metadata.is_empty() && self.xmp_metadata.is_empty() { self.writer.write_all(b"RIFF")?; self.writer .write_all(&(chunk_size(frame.len()) + 4).to_le_bytes())?; self.writer.write_all(b"WEBP")?; write_chunk(&mut self.writer, b"VP8L", &frame)?; } else { let mut total_bytes = 22 + chunk_size(frame.len()); if !self.icc_profile.is_empty() { total_bytes += chunk_size(self.icc_profile.len()); } if !self.exif_metadata.is_empty() { total_bytes += chunk_size(self.exif_metadata.len()); } if !self.xmp_metadata.is_empty() { total_bytes += chunk_size(self.xmp_metadata.len()); } let mut flags = 0; if !self.xmp_metadata.is_empty() { flags |= 1 << 2; } if !self.exif_metadata.is_empty() { flags |= 1 << 3; } if let ColorType::La8 | ColorType::Rgba8 = color { flags |= 1 << 4; } if !self.icc_profile.is_empty() { flags |= 1 << 5; } self.writer.write_all(b"RIFF")?; self.writer.write_all(&total_bytes.to_le_bytes())?; self.writer.write_all(b"WEBP")?; let mut vp8x = Vec::new(); vp8x.write_all(&[flags])?; // flags vp8x.write_all(&[0; 3])?; // reserved vp8x.write_all(&(width - 1).to_le_bytes()[..3])?; // canvas width vp8x.write_all(&(height - 1).to_le_bytes()[..3])?; // canvas height write_chunk(&mut self.writer, b"VP8X", &vp8x)?; if !self.icc_profile.is_empty() { write_chunk(&mut self.writer, b"ICCP", &self.icc_profile)?; } write_chunk(&mut self.writer, b"VP8L", &frame)?; if !self.exif_metadata.is_empty() { write_chunk(&mut self.writer, b"EXIF", &self.exif_metadata)?; } if !self.xmp_metadata.is_empty() { write_chunk(&mut self.writer, b"XMP ", &self.xmp_metadata)?; } } Ok(()) } } #[cfg(test)] mod tests { use rand::RngCore; use super::*; #[test] fn write_webp() { let mut img = vec![0; 256 * 256 * 4]; rand::thread_rng().fill_bytes(&mut img); let mut output = Vec::new(); WebPEncoder::new(&mut output) .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let mut decoder = crate::WebPDecoder::new(std::io::Cursor::new(output)).unwrap(); let mut img2 = vec![0; 256 * 256 * 4]; decoder.read_image(&mut img2).unwrap(); 
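        // VP8L encoding is lossless, so the decoded pixels must match the encoder input exactly.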
assert_eq!(img, img2); } #[test] fn write_webp_exif() { let mut img = vec![0; 256 * 256 * 3]; rand::thread_rng().fill_bytes(&mut img); let mut exif = vec![0; 10]; rand::thread_rng().fill_bytes(&mut exif); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_exif_metadata(exif.clone()); encoder .encode(&img, 256, 256, crate::ColorType::Rgb8) .unwrap(); let mut decoder = crate::WebPDecoder::new(std::io::Cursor::new(output)).unwrap(); let mut img2 = vec![0; 256 * 256 * 3]; decoder.read_image(&mut img2).unwrap(); assert_eq!(img, img2); let exif2 = decoder.exif_metadata().unwrap(); assert_eq!(Some(exif), exif2); } #[test] fn roundtrip_libwebp() { roundtrip_libwebp_params(EncoderParams::default()); roundtrip_libwebp_params(EncoderParams { use_predictor_transform: false, ..Default::default() }); } fn roundtrip_libwebp_params(params: EncoderParams) { println!("Testing {params:?}"); let mut img = vec![0; 256 * 256 * 4]; rand::thread_rng().fill_bytes(&mut img); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_params(params.clone()); encoder .encode(&img[..256 * 256 * 3], 256, 256, crate::ColorType::Rgb8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); assert!(&img[..256 * 256 * 3] == &*decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_params(params.clone()); encoder .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); assert!(&img == &*decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_params(params.clone()); encoder.set_icc_profile(vec![0; 10]); encoder .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); assert!(&img == &*decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_params(params.clone()); encoder.set_exif_metadata(vec![0; 10]); encoder .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); assert!(&img == &*decoded); let mut output = Vec::new(); let mut encoder = WebPEncoder::new(&mut output); encoder.set_params(params.clone()); encoder.set_xmp_metadata(vec![0; 7]); encoder.set_icc_profile(vec![0; 8]); encoder.set_icc_profile(vec![0; 9]); encoder .encode(&img, 256, 256, crate::ColorType::Rgba8) .unwrap(); let decoded = webp::Decoder::new(&output).decode().unwrap(); assert!(&img == &*decoded); } } image-webp-0.2.0/src/extended.rs000064400000000000000000000264601046102023000146050ustar 00000000000000use super::lossless::LosslessDecoder; use crate::decoder::DecodingError; use byteorder_lite::ReadBytesExt; use std::io::{BufRead, Read}; #[derive(Debug, Clone)] pub(crate) struct WebPExtendedInfo { pub(crate) alpha: bool, pub(crate) canvas_width: u32, pub(crate) canvas_height: u32, pub(crate) icc_profile: bool, pub(crate) exif_metadata: bool, pub(crate) xmp_metadata: bool, pub(crate) animation: bool, pub(crate) background_color: [u8; 4], } /// Composites a frame onto a canvas. /// /// Starts by filling the rectangle occupied by the previous frame with the background /// color, if provided. Then copies or blends the frame onto the canvas. 
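// Note on the indexing used below: pixels are tightly packed, RGBA (4 bytes per pixel)
// when alpha is involved and RGB (3 bytes per pixel) otherwise, so a frame pixel at
// (x, y) lands at the canvas byte offset
//     ((x + frame_offset_x) + (y + frame_offset_y) * canvas_width) * channels
// and the copied region is clipped with
//     width = frame_width.min(canvas_width.saturating_sub(frame_offset_x)).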
#[allow(clippy::too_many_arguments)] pub(crate) fn composite_frame( canvas: &mut [u8], canvas_width: u32, canvas_height: u32, clear_color: Option<[u8; 4]>, frame: &[u8], frame_offset_x: u32, frame_offset_y: u32, frame_width: u32, frame_height: u32, frame_has_alpha: bool, frame_use_alpha_blending: bool, previous_frame_width: u32, previous_frame_height: u32, previous_frame_offset_x: u32, previous_frame_offset_y: u32, ) { let frame_is_full_size = frame_offset_x == 0 && frame_offset_y == 0 && frame_width == canvas_width && frame_height == canvas_height; if frame_is_full_size && !frame_use_alpha_blending { if frame_has_alpha { canvas.copy_from_slice(frame); } else { for (input, output) in frame.chunks_exact(3).zip(canvas.chunks_exact_mut(4)) { output[..3].copy_from_slice(input); output[3] = 255; } } return; } // clear rectangle occupied by previous frame if let Some(clear_color) = clear_color { match (frame_is_full_size, frame_has_alpha) { (true, true) => { for pixel in canvas.chunks_exact_mut(4) { pixel.copy_from_slice(&clear_color); } } (true, false) => { for pixel in canvas.chunks_exact_mut(3) { pixel.copy_from_slice(&clear_color[..3]); } } (false, true) => { for y in 0..previous_frame_height as usize { for x in 0..previous_frame_width as usize { let canvas_index = ((x + previous_frame_offset_x as usize) + (y + previous_frame_offset_y as usize) * canvas_width as usize) * 4; let output = &mut canvas[canvas_index..][..4]; output.copy_from_slice(&clear_color); } } } (false, false) => { for y in 0..previous_frame_height as usize { for x in 0..previous_frame_width as usize { // let frame_index = (x + y * frame_width as usize) * 4; let canvas_index = ((x + previous_frame_offset_x as usize) + (y + previous_frame_offset_y as usize) * canvas_width as usize) * 3; let output = &mut canvas[canvas_index..][..3]; output.copy_from_slice(&clear_color[..3]); } } } } } let width = frame_width.min(canvas_width.saturating_sub(frame_offset_x)) as usize; let height = frame_height.min(canvas_height.saturating_sub(frame_offset_y)) as usize; if frame_has_alpha && frame_use_alpha_blending { for y in 0..height { for x in 0..width { let frame_index = (x + y * frame_width as usize) * 4; let canvas_index = ((x + frame_offset_x as usize) + (y + frame_offset_y as usize) * canvas_width as usize) * 4; let input = &frame[frame_index..][..4]; let output = &mut canvas[canvas_index..][..4]; let blended = do_alpha_blending(input.try_into().unwrap(), output.try_into().unwrap()); output.copy_from_slice(&blended); } } } else if frame_has_alpha { for y in 0..height { let frame_index = (y * frame_width as usize) * 4; let canvas_index = (frame_offset_x as usize + (y + frame_offset_y as usize) * canvas_width as usize) * 4; canvas[canvas_index..][..width * 4].copy_from_slice(&frame[frame_index..][..width * 4]); } } else { for y in 0..height { let index = (y * frame_width as usize) * 3; let canvas_index = (frame_offset_x as usize + (y + frame_offset_y as usize) * canvas_width as usize) * 4; let input = &frame[index..][..width * 3]; let output = &mut canvas[canvas_index..][..width * 4]; for (input, output) in input.chunks_exact(3).zip(output.chunks_exact_mut(4)) { output[..3].copy_from_slice(input); output[3] = 255; } } } } fn do_alpha_blending(buffer: [u8; 4], canvas: [u8; 4]) -> [u8; 4] { let canvas_alpha = f64::from(canvas[3]); let buffer_alpha = f64::from(buffer[3]); let blend_alpha_f64 = buffer_alpha + canvas_alpha * (1.0 - buffer_alpha / 255.0); //value should be between 0 and 255, this truncates the fractional part let blend_alpha: 
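// This is ordinary source-over alpha compositing, with `buffer` as the incoming frame
// and `canvas` as the destination, all values kept in the 0..=255 range:
//     alpha_out = alpha_src + alpha_dst * (1 - alpha_src / 255)
// e.g. alpha_src = 128 over alpha_dst = 255 gives 128 + 255 * (1 - 128/255) = 255.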
u8 = blend_alpha_f64 as u8; let blend_rgb: [u8; 3] = if blend_alpha == 0 { [0, 0, 0] } else { let mut rgb = [0u8; 3]; for i in 0..3 { let canvas_f64 = f64::from(canvas[i]); let buffer_f64 = f64::from(buffer[i]); let val = (buffer_f64 * buffer_alpha + canvas_f64 * canvas_alpha * (1.0 - buffer_alpha / 255.0)) / blend_alpha_f64; //value should be between 0 and 255, this truncates the fractional part rgb[i] = val as u8; } rgb }; [blend_rgb[0], blend_rgb[1], blend_rgb[2], blend_alpha] } pub(crate) fn get_alpha_predictor( x: usize, y: usize, width: usize, filtering_method: FilteringMethod, image_slice: &[u8], ) -> u8 { match filtering_method { FilteringMethod::None => 0, FilteringMethod::Horizontal => { if x == 0 && y == 0 { 0 } else if x == 0 { let index = (y - 1) * width + x; image_slice[index * 4 + 3] } else { let index = y * width + x - 1; image_slice[index * 4 + 3] } } FilteringMethod::Vertical => { if x == 0 && y == 0 { 0 } else if y == 0 { let index = y * width + x - 1; image_slice[index * 4 + 3] } else { let index = (y - 1) * width + x; image_slice[index * 4 + 3] } } FilteringMethod::Gradient => { let (left, top, top_left) = match (x, y) { (0, 0) => (0, 0, 0), (0, y) => { let above_index = (y - 1) * width + x; let val = image_slice[above_index * 4 + 3]; (val, val, val) } (x, 0) => { let before_index = y * width + x - 1; let val = image_slice[before_index * 4 + 3]; (val, val, val) } (x, y) => { let left_index = y * width + x - 1; let left = image_slice[left_index * 4 + 3]; let top_index = (y - 1) * width + x; let top = image_slice[top_index * 4 + 3]; let top_left_index = (y - 1) * width + x - 1; let top_left = image_slice[top_left_index * 4 + 3]; (left, top, top_left) } }; let combination = i16::from(left) + i16::from(top) - i16::from(top_left); i16::clamp(combination, 0, 255).try_into().unwrap() } } } pub(crate) fn read_extended_header( reader: &mut R, ) -> Result { let chunk_flags = reader.read_u8()?; let icc_profile = chunk_flags & 0b00100000 != 0; let alpha = chunk_flags & 0b00010000 != 0; let exif_metadata = chunk_flags & 0b00001000 != 0; let xmp_metadata = chunk_flags & 0b00000100 != 0; let animation = chunk_flags & 0b00000010 != 0; // reserved bytes are ignored let _reserved_bytes = read_3_bytes(reader)?; let canvas_width = read_3_bytes(reader)? + 1; let canvas_height = read_3_bytes(reader)? 
+ 1; //product of canvas dimensions cannot be larger than u32 max if u32::checked_mul(canvas_width, canvas_height).is_none() { return Err(DecodingError::ImageTooLarge); } let info = WebPExtendedInfo { icc_profile, alpha, exif_metadata, xmp_metadata, animation, canvas_width, canvas_height, background_color: [0; 4], }; Ok(info) } pub(crate) fn read_3_bytes(reader: &mut R) -> Result { let mut buffer: [u8; 3] = [0; 3]; reader.read_exact(&mut buffer)?; let value: u32 = (u32::from(buffer[2]) << 16) | (u32::from(buffer[1]) << 8) | u32::from(buffer[0]); Ok(value) } #[derive(Debug)] pub(crate) struct AlphaChunk { _preprocessing: bool, pub(crate) filtering_method: FilteringMethod, pub(crate) data: Vec, } #[derive(Debug, Copy, Clone)] pub(crate) enum FilteringMethod { None, Horizontal, Vertical, Gradient, } pub(crate) fn read_alpha_chunk( reader: &mut R, width: u16, height: u16, ) -> Result { let info_byte = reader.read_u8()?; let preprocessing = (info_byte & 0b00110000) >> 4; let filtering = (info_byte & 0b00001100) >> 2; let compression = info_byte & 0b00000011; let preprocessing = match preprocessing { 0 => false, 1 => true, _ => return Err(DecodingError::InvalidAlphaPreprocessing), }; let filtering_method = match filtering { 0 => FilteringMethod::None, 1 => FilteringMethod::Horizontal, 2 => FilteringMethod::Vertical, 3 => FilteringMethod::Gradient, _ => unreachable!(), }; let lossless_compression = match compression { 0 => false, 1 => true, _ => return Err(DecodingError::InvalidCompressionMethod), }; let data = if lossless_compression { let mut decoder = LosslessDecoder::new(reader); let mut data = vec![0; usize::from(width) * usize::from(height) * 4]; decoder.decode_frame(width as u32, height as u32, true, &mut data)?; let mut green = vec![0; usize::from(width) * usize::from(height)]; for (rgba_val, green_val) in data.chunks_exact(4).zip(green.iter_mut()) { *green_val = rgba_val[1]; } green } else { let mut framedata = vec![0; width as usize * height as usize]; reader.read_exact(&mut framedata)?; framedata }; let chunk = AlphaChunk { _preprocessing: preprocessing, filtering_method, data, }; Ok(chunk) } image-webp-0.2.0/src/huffman.rs000064400000000000000000000212441046102023000144240ustar 00000000000000use std::io::BufRead; use crate::decoder::DecodingError; use super::lossless::BitReader; /// Rudimentary utility for reading Canonical Huffman Codes. 
/// Based off https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c /// const MAX_ALLOWED_CODE_LENGTH: usize = 15; const MAX_TABLE_BITS: u8 = 10; #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum HuffmanTreeNode { Branch(usize), //offset in vector to children Leaf(u16), //symbol stored in leaf Empty, } #[derive(Clone, Debug)] enum HuffmanTreeInner { Single(u16), Tree { tree: Vec, table: Vec, table_mask: u16, }, } /// Huffman tree #[derive(Clone, Debug)] pub(crate) struct HuffmanTree(HuffmanTreeInner); impl Default for HuffmanTree { fn default() -> Self { Self(HuffmanTreeInner::Single(0)) } } impl HuffmanTree { /// Builds a tree implicitly, just from code lengths pub(crate) fn build_implicit(code_lengths: Vec) -> Result { // Count symbols and build histogram let mut num_symbols = 0; let mut code_length_hist = [0; MAX_ALLOWED_CODE_LENGTH + 1]; for &length in code_lengths.iter().filter(|&&x| x != 0) { code_length_hist[usize::from(length)] += 1; num_symbols += 1; } // Handle special cases if num_symbols == 0 { return Err(DecodingError::HuffmanError); } else if num_symbols == 1 { let root_symbol = code_lengths.iter().position(|&x| x != 0).unwrap() as u16; return Ok(Self::build_single_node(root_symbol)); }; // Assign codes let mut curr_code = 0; let mut next_codes = [0; MAX_ALLOWED_CODE_LENGTH + 1]; let max_code_length = code_length_hist.iter().rposition(|&x| x != 0).unwrap() as u16; for code_len in 1..=usize::from(max_code_length) { next_codes[code_len] = curr_code; curr_code = (curr_code + code_length_hist[code_len]) << 1; } // Confirm that the huffman tree is valid if curr_code != 2 << max_code_length { return Err(DecodingError::HuffmanError); } // Calculate table/tree parameters let table_bits = max_code_length.min(MAX_TABLE_BITS as u16); let table_size = (1 << table_bits) as usize; let table_mask = table_size as u16 - 1; let tree_size = code_length_hist[table_bits as usize + 1..=max_code_length as usize] .iter() .sum::() as usize; // Populate decoding table let mut tree = Vec::with_capacity(2 * tree_size); let mut table = vec![0; table_size]; for (symbol, &length) in code_lengths.iter().enumerate() { if length == 0 { continue; } let code = next_codes[length as usize]; next_codes[length as usize] += 1; if length <= table_bits { let mut j = (u16::reverse_bits(code) >> (16 - length)) as usize; let entry = ((length as u32) << 16) | symbol as u32; while j < table_size { table[j] = entry; j += 1 << length as usize; } } else { let table_index = ((u16::reverse_bits(code) >> (16 - length)) & table_mask) as usize; let table_value = table[table_index]; debug_assert_eq!(table_value >> 16, 0); let mut node_index = if table_value == 0 { let node_index = tree.len(); table[table_index] = (node_index + 1) as u32; tree.push(HuffmanTreeNode::Empty); node_index } else { (table_value - 1) as usize }; let code = usize::from(code); for depth in (0..length - table_bits).rev() { let node = tree[node_index]; let offset = match node { HuffmanTreeNode::Empty => { // Turns a node from empty into a branch and assigns its children let offset = tree.len() - node_index; tree[node_index] = HuffmanTreeNode::Branch(offset); tree.push(HuffmanTreeNode::Empty); tree.push(HuffmanTreeNode::Empty); offset } HuffmanTreeNode::Leaf(_) => return Err(DecodingError::HuffmanError), HuffmanTreeNode::Branch(offset) => offset, }; node_index += offset + ((code >> depth) & 1); } match tree[node_index] { HuffmanTreeNode::Empty => { tree[node_index] = HuffmanTreeNode::Leaf(symbol as u16) } 
HuffmanTreeNode::Leaf(_) => return Err(DecodingError::HuffmanError), HuffmanTreeNode::Branch(_offset) => return Err(DecodingError::HuffmanError), } } } Ok(Self(HuffmanTreeInner::Tree { tree, table, table_mask, })) } pub(crate) fn build_single_node(symbol: u16) -> HuffmanTree { Self(HuffmanTreeInner::Single(symbol)) } pub(crate) fn build_two_node(zero: u16, one: u16) -> HuffmanTree { Self(HuffmanTreeInner::Tree { tree: vec![ HuffmanTreeNode::Leaf(zero), HuffmanTreeNode::Leaf(one), HuffmanTreeNode::Empty, ], table: vec![1 << 16 | zero as u32, 1 << 16 | one as u32], table_mask: 0x1, }) } pub(crate) fn is_single_node(&self) -> bool { matches!(self.0, HuffmanTreeInner::Single(_)) } #[inline(never)] fn read_symbol_slowpath( tree: &[HuffmanTreeNode], mut v: usize, start_index: usize, bit_reader: &mut BitReader, ) -> Result { let mut depth = MAX_TABLE_BITS; let mut index = start_index; loop { match &tree[index] { HuffmanTreeNode::Branch(children_offset) => { index += children_offset + (v & 1); depth += 1; v >>= 1; } HuffmanTreeNode::Leaf(symbol) => { bit_reader.consume(depth)?; return Ok(*symbol); } HuffmanTreeNode::Empty => return Err(DecodingError::HuffmanError), } } } /// Reads a symbol using the bit reader. /// /// You must call `bit_reader.fill()` before calling this function or it may erroneously /// detect the end of the stream and return a bitstream error. pub(crate) fn read_symbol( &self, bit_reader: &mut BitReader, ) -> Result { match &self.0 { HuffmanTreeInner::Tree { tree, table, table_mask, } => { let v = bit_reader.peek_full() as u16; let entry = table[(v & table_mask) as usize]; if entry >> 16 != 0 { bit_reader.consume((entry >> 16) as u8)?; return Ok(entry as u16); } Self::read_symbol_slowpath( tree, (v >> MAX_TABLE_BITS) as usize, ((entry & 0xffff) - 1) as usize, bit_reader, ) } HuffmanTreeInner::Single(symbol) => Ok(*symbol), } } /// Peek at the next symbol in the bitstream if it can be read with only a primary table lookup. /// /// Returns a tuple of the code length and symbol value. This function may return wrong /// information if there aren't enough bits in the bit reader to read the next symbol. pub(crate) fn peek_symbol( &self, bit_reader: &mut BitReader, ) -> Option<(u8, u16)> { match &self.0 { HuffmanTreeInner::Tree { table, table_mask, .. } => { let v = bit_reader.peek_full() as u16; let entry = table[(v & table_mask) as usize]; if entry >> 16 != 0 { return Some(((entry >> 16) as u8, entry as u16)); } None } HuffmanTreeInner::Single(symbol) => Some((0, *symbol)), } } } image-webp-0.2.0/src/lib.rs000064400000000000000000000012261046102023000135440ustar 00000000000000//! Decoding and Encoding of WebP Images #![forbid(unsafe_code)] #![deny(missing_docs)] // Increase recursion limit for the `quick_error!` macro. #![recursion_limit = "256"] // Enable nightly benchmark functionality if "_benchmarks" feature is enabled. #![cfg_attr(all(test, feature = "_benchmarks"), feature(test))] #[cfg(all(test, feature = "_benchmarks"))] extern crate test; pub use self::decoder::{DecodingError, LoopCount, WebPDecoder}; pub use self::encoder::{ColorType, EncoderParams, EncodingError, WebPEncoder}; mod decoder; mod encoder; mod extended; mod huffman; mod loop_filter; mod lossless; mod lossless_transform; mod transform; pub mod vp8; image-webp-0.2.0/src/loop_filter.rs000064400000000000000000000102751046102023000153200ustar 00000000000000//! 
Does loop filtering on WebP lossy images #[inline] fn c(val: i32) -> i32 { val.clamp(-128, 127) } //unsigned to signed #[inline] fn u2s(val: u8) -> i32 { i32::from(val) - 128 } //signed to unsigned #[inline] fn s2u(val: i32) -> u8 { (c(val) + 128) as u8 } #[inline] fn diff(val1: u8, val2: u8) -> u8 { if val1 > val2 { val1 - val2 } else { val2 - val1 } } //15.2 fn common_adjust(use_outer_taps: bool, pixels: &mut [u8], point: usize, stride: usize) -> i32 { let p1 = u2s(pixels[point - 2 * stride]); let p0 = u2s(pixels[point - stride]); let q0 = u2s(pixels[point]); let q1 = u2s(pixels[point + stride]); //value for the outer 2 pixels let outer = if use_outer_taps { c(p1 - q1) } else { 0 }; let mut a = c(outer + 3 * (q0 - p0)); let b = (c(a + 3)) >> 3; a = (c(a + 4)) >> 3; pixels[point] = s2u(q0 - a); pixels[point - stride] = s2u(p0 + b); a } fn simple_threshold(filter_limit: i32, pixels: &[u8], point: usize, stride: usize) -> bool { i32::from(diff(pixels[point - stride], pixels[point])) * 2 + i32::from(diff(pixels[point - 2 * stride], pixels[point + stride])) / 2 <= filter_limit } fn should_filter( interior_limit: u8, edge_limit: u8, pixels: &[u8], point: usize, stride: usize, ) -> bool { simple_threshold(i32::from(edge_limit), pixels, point, stride) && diff(pixels[point - 4 * stride], pixels[point - 3 * stride]) <= interior_limit && diff(pixels[point - 3 * stride], pixels[point - 2 * stride]) <= interior_limit && diff(pixels[point - 2 * stride], pixels[point - stride]) <= interior_limit && diff(pixels[point + 3 * stride], pixels[point + 2 * stride]) <= interior_limit && diff(pixels[point + 2 * stride], pixels[point + stride]) <= interior_limit && diff(pixels[point + stride], pixels[point]) <= interior_limit } fn high_edge_variance(threshold: u8, pixels: &[u8], point: usize, stride: usize) -> bool { diff(pixels[point - 2 * stride], pixels[point - stride]) > threshold || diff(pixels[point + stride], pixels[point]) > threshold } //simple filter //affects 4 pixels on an edge (2 each side) pub(crate) fn simple_segment(edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize) { if simple_threshold(i32::from(edge_limit), pixels, point, stride) { common_adjust(true, pixels, point, stride); } } //normal filter //works on the 8 pixels on the edges between subblocks inside a macroblock pub(crate) fn subblock_filter( hev_threshold: u8, interior_limit: u8, edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize, ) { if should_filter(interior_limit, edge_limit, pixels, point, stride) { let hv = high_edge_variance(hev_threshold, pixels, point, stride); let a = (common_adjust(hv, pixels, point, stride) + 1) >> 1; if !hv { pixels[point + stride] = s2u(u2s(pixels[point + stride]) - a); pixels[point - 2 * stride] = s2u(u2s(pixels[point - 2 * stride]) - a); } } } //normal filter //works on the 8 pixels on the edges between macroblocks pub(crate) fn macroblock_filter( hev_threshold: u8, interior_limit: u8, edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize, ) { let mut spixels = [0i32; 8]; for i in 0..8 { spixels[i] = u2s(pixels[point + i * stride - 4 * stride]); } if should_filter(interior_limit, edge_limit, pixels, point, stride) { if !high_edge_variance(hev_threshold, pixels, point, stride) { let w = c(c(spixels[2] - spixels[5]) + 3 * (spixels[4] - spixels[3])); let mut a = c((27 * w + 63) >> 7); pixels[point] = s2u(spixels[4] - a); pixels[point - stride] = s2u(spixels[3] + a); a = c((18 * w + 63) >> 7); pixels[point + stride] = s2u(spixels[5] - a); pixels[point - 2 * stride] = 
s2u(spixels[2] + a); a = c((9 * w + 63) >> 7); pixels[point + 2 * stride] = s2u(spixels[6] - a); pixels[point - 3 * stride] = s2u(spixels[1] + a); } else { common_adjust(true, pixels, point, stride); } } } image-webp-0.2.0/src/lossless.rs000064400000000000000000000711401046102023000146470ustar 00000000000000//! Decoding of lossless WebP images //! //! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification) //! use std::io::BufRead; use std::mem; use crate::decoder::DecodingError; use crate::lossless_transform::{ apply_color_indexing_transform, apply_color_transform, apply_predictor_transform, apply_subtract_green_transform, }; use super::huffman::HuffmanTree; use super::lossless_transform::TransformType; const CODE_LENGTH_CODES: usize = 19; const CODE_LENGTH_CODE_ORDER: [usize; CODE_LENGTH_CODES] = [ 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ]; #[rustfmt::skip] const DISTANCE_MAP: [(i8, i8); 120] = [ (0, 1), (1, 0), (1, 1), (-1, 1), (0, 2), (2, 0), (1, 2), (-1, 2), (2, 1), (-2, 1), (2, 2), (-2, 2), (0, 3), (3, 0), (1, 3), (-1, 3), (3, 1), (-3, 1), (2, 3), (-2, 3), (3, 2), (-3, 2), (0, 4), (4, 0), (1, 4), (-1, 4), (4, 1), (-4, 1), (3, 3), (-3, 3), (2, 4), (-2, 4), (4, 2), (-4, 2), (0, 5), (3, 4), (-3, 4), (4, 3), (-4, 3), (5, 0), (1, 5), (-1, 5), (5, 1), (-5, 1), (2, 5), (-2, 5), (5, 2), (-5, 2), (4, 4), (-4, 4), (3, 5), (-3, 5), (5, 3), (-5, 3), (0, 6), (6, 0), (1, 6), (-1, 6), (6, 1), (-6, 1), (2, 6), (-2, 6), (6, 2), (-6, 2), (4, 5), (-4, 5), (5, 4), (-5, 4), (3, 6), (-3, 6), (6, 3), (-6, 3), (0, 7), (7, 0), (1, 7), (-1, 7), (5, 5), (-5, 5), (7, 1), (-7, 1), (4, 6), (-4, 6), (6, 4), (-6, 4), (2, 7), (-2, 7), (7, 2), (-7, 2), (3, 7), (-3, 7), (7, 3), (-7, 3), (5, 6), (-5, 6), (6, 5), (-6, 5), (8, 0), (4, 7), (-4, 7), (7, 4), (-7, 4), (8, 1), (8, 2), (6, 6), (-6, 6), (8, 3), (5, 7), (-5, 7), (7, 5), (-7, 5), (8, 4), (6, 7), (-6, 7), (7, 6), (-7, 6), (8, 5), (7, 7), (-7, 7), (8, 6), (8, 7) ]; const GREEN: usize = 0; const RED: usize = 1; const BLUE: usize = 2; const ALPHA: usize = 3; const DIST: usize = 4; const HUFFMAN_CODES_PER_META_CODE: usize = 5; type HuffmanCodeGroup = [HuffmanTree; HUFFMAN_CODES_PER_META_CODE]; const ALPHABET_SIZE: [u16; HUFFMAN_CODES_PER_META_CODE] = [256 + 24, 256, 256, 256, 40]; #[inline] pub(crate) fn subsample_size(size: u16, bits: u8) -> u16 { ((u32::from(size) + (1u32 << bits) - 1) >> bits) .try_into() .unwrap() } const NUM_TRANSFORM_TYPES: usize = 4; //Decodes lossless WebP images #[derive(Debug)] pub(crate) struct LosslessDecoder { bit_reader: BitReader, transforms: [Option; NUM_TRANSFORM_TYPES], transform_order: Vec, width: u16, height: u16, } impl LosslessDecoder { /// Create a new decoder pub(crate) fn new(r: R) -> LosslessDecoder { LosslessDecoder { bit_reader: BitReader::new(r), transforms: [None, None, None, None], transform_order: Vec::new(), width: 0, height: 0, } } /// Decodes a frame. /// /// In an alpha chunk the width and height are not included in the header, so they should be /// provided by setting the `implicit_dimensions` argument. Otherwise that argument should be /// `None` and the frame dimensions will be determined by reading the VP8L header. 
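// For reference, the VP8L header parsed below (when `implicit_dimensions` is false)
// is, in bit-stream order:
//     8 bits   signature, must be 0x2f
//     14 bits  width - 1
//     14 bits  height - 1
//     1 bit    "alpha is used" hint (read but ignored here)
//     3 bits   version, must be 0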
pub(crate) fn decode_frame( &mut self, width: u32, height: u32, implicit_dimensions: bool, buf: &mut [u8], ) -> Result<(), DecodingError> { if implicit_dimensions { self.width = width as u16; self.height = height as u16; } else { let signature = self.bit_reader.read_bits::(8)?; if signature != 0x2f { return Err(DecodingError::LosslessSignatureInvalid(signature)); } self.width = self.bit_reader.read_bits::(14)? + 1; self.height = self.bit_reader.read_bits::(14)? + 1; if u32::from(self.width) != width || u32::from(self.height) != height { return Err(DecodingError::InconsistentImageSizes); } let _alpha_used = self.bit_reader.read_bits::(1)?; let version_num = self.bit_reader.read_bits::(3)?; if version_num != 0 { return Err(DecodingError::VersionNumberInvalid(version_num)); } } let transformed_width = self.read_transforms()?; let transformed_size = usize::from(transformed_width) * usize::from(self.height) * 4; self.decode_image_stream( transformed_width, self.height, true, &mut buf[..transformed_size], )?; let mut image_size = transformed_size; let mut width = transformed_width; for &trans_index in self.transform_order.iter().rev() { let transform = self.transforms[usize::from(trans_index)].as_ref().unwrap(); match transform { TransformType::PredictorTransform { size_bits, predictor_data, } => apply_predictor_transform( &mut buf[..image_size], width, self.height, *size_bits, predictor_data, )?, TransformType::ColorTransform { size_bits, transform_data, } => { apply_color_transform(&mut buf[..image_size], width, *size_bits, transform_data) } TransformType::SubtractGreen => { apply_subtract_green_transform(&mut buf[..image_size]) } TransformType::ColorIndexingTransform { table_size, table_data, } => { width = self.width; image_size = usize::from(width) * usize::from(self.height) * 4; apply_color_indexing_transform(buf, width, self.height, *table_size, table_data) } } } Ok(()) } /// Reads Image data from the bitstream /// /// Can be in any of the 5 roles described in the Specification. ARGB Image role has different /// behaviour to the other 4. xsize and ysize describe the size of the blocks where each block /// has its own entropy code fn decode_image_stream( &mut self, xsize: u16, ysize: u16, is_argb_img: bool, data: &mut [u8], ) -> Result<(), DecodingError> { let color_cache_bits = self.read_color_cache()?; let color_cache = color_cache_bits.map(|bits| ColorCache { color_cache_bits: bits, color_cache: vec![[0; 4]; 1 << bits], }); let huffman_info = self.read_huffman_codes(is_argb_img, xsize, ysize, color_cache)?; self.decode_image_data(xsize, ysize, huffman_info, data) } /// Reads transforms and their data from the bitstream fn read_transforms(&mut self) -> Result { let mut xsize = self.width; while self.bit_reader.read_bits::(1)? == 1 { let transform_type_val = self.bit_reader.read_bits::(2)?; if self.transforms[usize::from(transform_type_val)].is_some() { //can only have one of each transform, error return Err(DecodingError::TransformError); } self.transform_order.push(transform_type_val); let transform_type = match transform_type_val { 0 => { //predictor let size_bits = self.bit_reader.read_bits::(3)? 
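// `size_bits` here is the 3-bit value just read plus 2, so predictor blocks range
// from 4x4 up to 512x512 pixels. `subsample_size` (defined above) is a ceiling
// division by the block size, e.g. subsample_size(100, 4) = (100 + 15) >> 4 = 7
// blocks across a 100-pixel-wide image.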
+ 2; let block_xsize = subsample_size(xsize, size_bits); let block_ysize = subsample_size(self.height, size_bits); let mut predictor_data = vec![0; usize::from(block_xsize) * usize::from(block_ysize) * 4]; self.decode_image_stream(block_xsize, block_ysize, false, &mut predictor_data)?; TransformType::PredictorTransform { size_bits, predictor_data, } } 1 => { //color transform let size_bits = self.bit_reader.read_bits::(3)? + 2; let block_xsize = subsample_size(xsize, size_bits); let block_ysize = subsample_size(self.height, size_bits); let mut transform_data = vec![0; usize::from(block_xsize) * usize::from(block_ysize) * 4]; self.decode_image_stream(block_xsize, block_ysize, false, &mut transform_data)?; TransformType::ColorTransform { size_bits, transform_data, } } 2 => { //subtract green TransformType::SubtractGreen } 3 => { let color_table_size = self.bit_reader.read_bits::(8)? + 1; let mut color_map = vec![0; usize::from(color_table_size) * 4]; self.decode_image_stream(color_table_size, 1, false, &mut color_map)?; let bits = if color_table_size <= 2 { 3 } else if color_table_size <= 4 { 2 } else if color_table_size <= 16 { 1 } else { 0 }; xsize = subsample_size(xsize, bits); Self::adjust_color_map(&mut color_map); TransformType::ColorIndexingTransform { table_size: color_table_size, table_data: color_map, } } _ => unreachable!(), }; self.transforms[usize::from(transform_type_val)] = Some(transform_type); } Ok(xsize) } /// Adjusts the color map since it's subtraction coded fn adjust_color_map(color_map: &mut [u8]) { for i in 4..color_map.len() { color_map[i] = color_map[i].wrapping_add(color_map[i - 4]); } } /// Reads huffman codes associated with an image fn read_huffman_codes( &mut self, read_meta: bool, xsize: u16, ysize: u16, color_cache: Option, ) -> Result { let mut num_huff_groups = 1u32; let mut huffman_bits = 0; let mut huffman_xsize = 1; let mut huffman_ysize = 1; let mut entropy_image = Vec::new(); if read_meta && self.bit_reader.read_bits::(1)? == 1 { //meta huffman codes huffman_bits = self.bit_reader.read_bits::(3)? + 2; huffman_xsize = subsample_size(xsize, huffman_bits); huffman_ysize = subsample_size(ysize, huffman_bits); let mut data = vec![0; usize::from(huffman_xsize) * usize::from(huffman_ysize) * 4]; self.decode_image_stream(huffman_xsize, huffman_ysize, false, &mut data)?; entropy_image = data .chunks_exact(4) .map(|pixel| { let meta_huff_code = u16::from(pixel[0]) << 8 | u16::from(pixel[1]); if u32::from(meta_huff_code) >= num_huff_groups { num_huff_groups = u32::from(meta_huff_code) + 1; } meta_huff_code }) .collect::>(); } let mut hufftree_groups = Vec::new(); for _i in 0..num_huff_groups { let mut group: HuffmanCodeGroup = Default::default(); for j in 0..HUFFMAN_CODES_PER_META_CODE { let mut alphabet_size = ALPHABET_SIZE[j]; if j == 0 { if let Some(color_cache) = color_cache.as_ref() { alphabet_size += 1 << color_cache.color_cache_bits; } } let tree = self.read_huffman_code(alphabet_size)?; group[j] = tree; } hufftree_groups.push(group); } let huffman_mask = if huffman_bits == 0 { !0 } else { (1 << huffman_bits) - 1 }; let info = HuffmanInfo { xsize: huffman_xsize, _ysize: huffman_ysize, color_cache, image: entropy_image, bits: huffman_bits, mask: huffman_mask, huffman_code_groups: hufftree_groups, }; Ok(info) } /// Decodes and returns a single huffman tree fn read_huffman_code(&mut self, alphabet_size: u16) -> Result { let simple = self.bit_reader.read_bits::(1)? == 1; if simple { let num_symbols = self.bit_reader.read_bits::(1)? 
+ 1; let is_first_8bits = self.bit_reader.read_bits::(1)?; let zero_symbol = self.bit_reader.read_bits::(1 + 7 * is_first_8bits)?; if zero_symbol >= alphabet_size { return Err(DecodingError::BitStreamError); } if num_symbols == 1 { Ok(HuffmanTree::build_single_node(zero_symbol)) } else { let one_symbol = self.bit_reader.read_bits::(8)?; if one_symbol >= alphabet_size { return Err(DecodingError::BitStreamError); } Ok(HuffmanTree::build_two_node(zero_symbol, one_symbol)) } } else { let mut code_length_code_lengths = vec![0; CODE_LENGTH_CODES]; let num_code_lengths = 4 + self.bit_reader.read_bits::(4)?; for i in 0..num_code_lengths { code_length_code_lengths[CODE_LENGTH_CODE_ORDER[i]] = self.bit_reader.read_bits(3)?; } let new_code_lengths = self.read_huffman_code_lengths(code_length_code_lengths, alphabet_size)?; HuffmanTree::build_implicit(new_code_lengths) } } /// Reads huffman code lengths fn read_huffman_code_lengths( &mut self, code_length_code_lengths: Vec, num_symbols: u16, ) -> Result, DecodingError> { let table = HuffmanTree::build_implicit(code_length_code_lengths)?; let mut max_symbol = if self.bit_reader.read_bits::(1)? == 1 { let length_nbits = 2 + 2 * self.bit_reader.read_bits::(3)?; let max_minus_two = self.bit_reader.read_bits::(length_nbits)?; if max_minus_two > num_symbols - 2 { return Err(DecodingError::BitStreamError); } 2 + max_minus_two } else { num_symbols }; let mut code_lengths = vec![0; usize::from(num_symbols)]; let mut prev_code_len = 8; //default code length let mut symbol = 0; while symbol < num_symbols { if max_symbol == 0 { break; } max_symbol -= 1; self.bit_reader.fill()?; let code_len = table.read_symbol(&mut self.bit_reader)?; if code_len < 16 { code_lengths[usize::from(symbol)] = code_len; symbol += 1; if code_len != 0 { prev_code_len = code_len; } } else { let use_prev = code_len == 16; let slot = code_len - 16; let extra_bits = match slot { 0 => 2, 1 => 3, 2 => 7, _ => return Err(DecodingError::BitStreamError), }; let repeat_offset = match slot { 0 | 1 => 3, 2 => 11, _ => return Err(DecodingError::BitStreamError), }; let mut repeat = self.bit_reader.read_bits::(extra_bits)? + repeat_offset; if symbol + repeat > num_symbols { return Err(DecodingError::BitStreamError); } else { let length = if use_prev { prev_code_len } else { 0 }; while repeat > 0 { repeat -= 1; code_lengths[usize::from(symbol)] = length; symbol += 1; } } } } Ok(code_lengths) } /// Decodes the image data using the huffman trees and either of the 3 methods of decoding fn decode_image_data( &mut self, width: u16, height: u16, mut huffman_info: HuffmanInfo, data: &mut [u8], ) -> Result<(), DecodingError> { let num_values = usize::from(width) * usize::from(height); let huff_index = huffman_info.get_huff_index(0, 0); let mut tree = &huffman_info.huffman_code_groups[huff_index]; let mut index = 0; let mut next_block_start = 0; while index < num_values { self.bit_reader.fill()?; if index >= next_block_start { let x = index % usize::from(width); let y = index / usize::from(width); next_block_start = (x | usize::from(huffman_info.mask)).min(usize::from(width - 1)) + y * usize::from(width) + 1; let huff_index = huffman_info.get_huff_index(x as u16, y as u16); tree = &huffman_info.huffman_code_groups[huff_index]; // Fast path: If all the codes each contain only a single // symbol, then the pixel data isn't written to the bitstream // and we can just fill the output buffer with the symbol // directly. 
if tree[..4].iter().all(|t| t.is_single_node()) { let code = tree[GREEN].read_symbol(&mut self.bit_reader)?; if code < 256 { let n = if huffman_info.bits == 0 { num_values } else { next_block_start - index }; let red = tree[RED].read_symbol(&mut self.bit_reader)?; let blue = tree[BLUE].read_symbol(&mut self.bit_reader)?; let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)?; let value = [red as u8, code as u8, blue as u8, alpha as u8]; for i in 0..n { data[index * 4 + i * 4..][..4].copy_from_slice(&value); } if let Some(color_cache) = huffman_info.color_cache.as_mut() { color_cache.insert(value); } index += n; continue; } } } let code = tree[GREEN].read_symbol(&mut self.bit_reader)?; //check code if code < 256 { //literal, so just use huffman codes and read as argb let green = code as u8; let red = tree[RED].read_symbol(&mut self.bit_reader)? as u8; let blue = tree[BLUE].read_symbol(&mut self.bit_reader)? as u8; if self.bit_reader.nbits < 15 { self.bit_reader.fill()?; } let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)? as u8; data[index * 4] = red; data[index * 4 + 1] = green; data[index * 4 + 2] = blue; data[index * 4 + 3] = alpha; if let Some(color_cache) = huffman_info.color_cache.as_mut() { color_cache.insert([red, green, blue, alpha]); } index += 1; } else if code < 256 + 24 { //backward reference, so go back and use that to add image data let length_symbol = code - 256; let length = Self::get_copy_distance(&mut self.bit_reader, length_symbol)?; let dist_symbol = tree[DIST].read_symbol(&mut self.bit_reader)?; let dist_code = Self::get_copy_distance(&mut self.bit_reader, dist_symbol)?; let dist = Self::plane_code_to_distance(width, dist_code); if index < dist || num_values - index < length { return Err(DecodingError::BitStreamError); } if dist == 1 { let value: [u8; 4] = data[(index - dist) * 4..][..4].try_into().unwrap(); for i in 0..length { data[index * 4 + i * 4..][..4].copy_from_slice(&value); } } else { if index + length + 3 <= num_values { let start = (index - dist) * 4; data.copy_within(start..start + 16, index * 4); if length > 4 || dist < 4 { for i in (0..length * 4).step_by((dist * 4).min(16)).skip(1) { data.copy_within(start + i..start + i + 16, index * 4 + i); } } } else { for i in 0..length * 4 { data[index * 4 + i] = data[index * 4 + i - dist * 4]; } } if let Some(color_cache) = huffman_info.color_cache.as_mut() { for pixel in data[index * 4..][..length * 4].chunks_exact(4) { color_cache.insert(pixel.try_into().unwrap()); } } } index += length; } else { //color cache, so use previously stored pixels to get this pixel let color_cache = huffman_info .color_cache .as_mut() .ok_or(DecodingError::BitStreamError)?; let color = color_cache.lookup((code - 280).into()); data[index * 4..][..4].copy_from_slice(&color); index += 1; if index < next_block_start { if let Some((bits, code)) = tree[GREEN].peek_symbol(&mut self.bit_reader) { if code >= 280 { self.bit_reader.consume(bits)?; data[index * 4..][..4] .copy_from_slice(&color_cache.lookup((code - 280).into())); index += 1; } } } } } Ok(()) } /// Reads color cache data from the bitstream fn read_color_cache(&mut self) -> Result, DecodingError> { if self.bit_reader.read_bits::(1)? 
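// The color cache read here holds 1 << code_bits recently decoded pixels
// (code_bits is 1..=11). `ColorCache::insert` below indexes it with the
// multiplicative hash (0x1e35a7bd * ARGB) >> (32 - code_bits), and green symbols
// >= 280 in the image data select a cached entry directly.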
== 1 { let code_bits = self.bit_reader.read_bits::(4)?; if !(1..=11).contains(&code_bits) { return Err(DecodingError::InvalidColorCacheBits(code_bits)); } Ok(Some(code_bits)) } else { Ok(None) } } /// Gets the copy distance from the prefix code and bitstream fn get_copy_distance( bit_reader: &mut BitReader, prefix_code: u16, ) -> Result { if prefix_code < 4 { return Ok(usize::from(prefix_code + 1)); } let extra_bits: u8 = ((prefix_code - 2) >> 1).try_into().unwrap(); let offset = (2 + (usize::from(prefix_code) & 1)) << extra_bits; let bits = bit_reader.peek(extra_bits) as usize; bit_reader.consume(extra_bits)?; Ok(offset + bits + 1) } /// Gets distance to pixel fn plane_code_to_distance(xsize: u16, plane_code: usize) -> usize { if plane_code > 120 { plane_code - 120 } else { let (xoffset, yoffset) = DISTANCE_MAP[plane_code - 1]; let dist = i32::from(xoffset) + i32::from(yoffset) * i32::from(xsize); if dist < 1 { return 1; } dist.try_into().unwrap() } } } #[derive(Debug, Clone)] struct HuffmanInfo { xsize: u16, _ysize: u16, color_cache: Option, image: Vec, bits: u8, mask: u16, huffman_code_groups: Vec, } impl HuffmanInfo { fn get_huff_index(&self, x: u16, y: u16) -> usize { if self.bits == 0 { return 0; } let position = usize::from(y >> self.bits) * usize::from(self.xsize) + usize::from(x >> self.bits); let meta_huff_code: usize = usize::from(self.image[position]); meta_huff_code } } #[derive(Debug, Clone)] struct ColorCache { color_cache_bits: u8, color_cache: Vec<[u8; 4]>, } impl ColorCache { #[inline(always)] fn insert(&mut self, color: [u8; 4]) { let [r, g, b, a] = color; let color_u32 = (u32::from(r) << 16) | (u32::from(g) << 8) | (u32::from(b)) | (u32::from(a) << 24); let index = (0x1e35a7bdu32.wrapping_mul(color_u32)) >> (32 - self.color_cache_bits); self.color_cache[index as usize] = color; } #[inline(always)] fn lookup(&self, index: usize) -> [u8; 4] { self.color_cache[index] } } #[derive(Debug, Clone)] pub(crate) struct BitReader { reader: R, buffer: u64, nbits: u8, } impl BitReader { fn new(reader: R) -> Self { Self { reader, buffer: 0, nbits: 0, } } /// Fills the buffer with bits from the input stream. /// /// After this function, the internal buffer will contain 64-bits or have reached the end of /// the input stream. pub(crate) fn fill(&mut self) -> Result<(), DecodingError> { debug_assert!(self.nbits < 64); let mut buf = self.reader.fill_buf()?; if buf.len() >= 8 { let lookahead = u64::from_le_bytes(buf[..8].try_into().unwrap()); self.reader.consume(usize::from((63 - self.nbits) / 8)); self.buffer |= lookahead << self.nbits; self.nbits |= 56; } else { while !buf.is_empty() && self.nbits < 56 { self.buffer |= u64::from(buf[0]) << self.nbits; self.nbits += 8; self.reader.consume(1); buf = self.reader.fill_buf()?; } } Ok(()) } /// Peeks at the next `num` bits in the buffer. pub(crate) fn peek(&self, num: u8) -> u64 { self.buffer & ((1 << num) - 1) } /// Peeks at the full buffer. pub(crate) fn peek_full(&self) -> u64 { self.buffer } /// Consumes `num` bits from the buffer returning an error if there are not enough bits. pub(crate) fn consume(&mut self, num: u8) -> Result<(), DecodingError> { if self.nbits < num { return Err(DecodingError::BitStreamError); } self.buffer >>= num; self.nbits -= num; Ok(()) } /// Convenience function to read a number of bits and convert them to a type. 
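// Bits are consumed least-significant-bit first within each byte, as the VP8L
// bitstream requires; e.g. for the byte 0x9C (0b1001_1100) the first 3-bit read
// returns 0b100 = 4, as exercised by `bit_read_test` below.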
pub(crate) fn read_bits>(&mut self, num: u8) -> Result { debug_assert!(num as usize <= 8 * mem::size_of::()); debug_assert!(num <= 32); if self.nbits < num { self.fill()?; } let value = self.peek(num) as u32; self.consume(num)?; match value.try_into() { Ok(value) => Ok(value), Err(_) => unreachable!("Value too large to fit in type"), } } } #[cfg(test)] mod test { use std::io::Cursor; use super::BitReader; #[test] fn bit_read_test() { //10011100 01000001 11100001 let mut bit_reader = BitReader::new(Cursor::new(vec![0x9C, 0x41, 0xE1])); assert_eq!(bit_reader.read_bits::(3).unwrap(), 4); //100 assert_eq!(bit_reader.read_bits::(2).unwrap(), 3); //11 assert_eq!(bit_reader.read_bits::(6).unwrap(), 12); //001100 assert_eq!(bit_reader.read_bits::(10).unwrap(), 40); //0000101000 assert_eq!(bit_reader.read_bits::(3).unwrap(), 7); //111 } #[test] fn bit_read_error_test() { //01101010 let mut bit_reader = BitReader::new(Cursor::new(vec![0x6A])); assert_eq!(bit_reader.read_bits::(3).unwrap(), 2); //010 assert_eq!(bit_reader.read_bits::(5).unwrap(), 13); //01101 assert!(bit_reader.read_bits::(4).is_err()); //error } } image-webp-0.2.0/src/lossless_transform.rs000064400000000000000000000513331046102023000167440ustar 00000000000000use std::ops::Range; use crate::decoder::DecodingError; use super::lossless::subsample_size; #[derive(Debug, Clone)] pub(crate) enum TransformType { PredictorTransform { size_bits: u8, predictor_data: Vec, }, ColorTransform { size_bits: u8, transform_data: Vec, }, SubtractGreen, ColorIndexingTransform { table_size: u16, table_data: Vec, }, } pub(crate) fn apply_predictor_transform( image_data: &mut [u8], width: u16, height: u16, size_bits: u8, predictor_data: &[u8], ) -> Result<(), DecodingError> { let block_xsize = usize::from(subsample_size(width, size_bits)); let width = usize::from(width); let height = usize::from(height); // Handle top and left borders specially. This involves ignoring mode and using specific // predictors for each. 
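// Concretely: the top-left pixel is predicted as opaque black, so only its alpha
// byte gets +255; the rest of the first row uses the left neighbour (predictor 1);
// and the first pixel of every following row uses the pixel directly above
// (predictor 2). Only after that are the per-block predictor modes applied.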
image_data[3] = image_data[3].wrapping_add(255); apply_predictor_transform_1(image_data, 4..width * 4, width); for y in 1..height { for i in 0..4 { image_data[y * width * 4 + i] = image_data[y * width * 4 + i].wrapping_add(image_data[(y - 1) * width * 4 + i]); } } for y in 1..height { for block_x in 0..block_xsize { let block_index = (y >> size_bits) * block_xsize + block_x; let predictor = predictor_data[block_index * 4 + 1]; let start_index = (y * width + (block_x << size_bits).max(1)) * 4; let end_index = (y * width + ((block_x + 1) << size_bits).min(width)) * 4; match predictor { 0 => apply_predictor_transform_0(image_data, start_index..end_index, width), 1 => apply_predictor_transform_1(image_data, start_index..end_index, width), 2 => apply_predictor_transform_2(image_data, start_index..end_index, width), 3 => apply_predictor_transform_3(image_data, start_index..end_index, width), 4 => apply_predictor_transform_4(image_data, start_index..end_index, width), 5 => apply_predictor_transform_5(image_data, start_index..end_index, width), 6 => apply_predictor_transform_6(image_data, start_index..end_index, width), 7 => apply_predictor_transform_7(image_data, start_index..end_index, width), 8 => apply_predictor_transform_8(image_data, start_index..end_index, width), 9 => apply_predictor_transform_9(image_data, start_index..end_index, width), 10 => apply_predictor_transform_10(image_data, start_index..end_index, width), 11 => apply_predictor_transform_11(image_data, start_index..end_index, width), 12 => apply_predictor_transform_12(image_data, start_index..end_index, width), 13 => apply_predictor_transform_13(image_data, start_index..end_index, width), _ => {} } } } Ok(()) } pub fn apply_predictor_transform_0(image_data: &mut [u8], range: Range, _width: usize) { for i in ((range.start + 3)..range.end).step_by(4) { image_data[i] = image_data[i].wrapping_add(0xff); } } pub fn apply_predictor_transform_1(image_data: &mut [u8], range: Range, _width: usize) { let mut prev: [u8; 4] = image_data[range.start - 4..][..4].try_into().unwrap(); for chunk in image_data[range].chunks_exact_mut(4) { prev = [ chunk[0].wrapping_add(prev[0]), chunk[1].wrapping_add(prev[1]), chunk[2].wrapping_add(prev[2]), chunk[3].wrapping_add(prev[3]), ]; chunk.copy_from_slice(&prev); } } pub fn apply_predictor_transform_2(image_data: &mut [u8], range: Range, width: usize) { for i in range { image_data[i] = image_data[i].wrapping_add(image_data[i - width * 4]); } } pub fn apply_predictor_transform_3(image_data: &mut [u8], range: Range, width: usize) { for i in range { image_data[i] = image_data[i].wrapping_add(image_data[i - width * 4 + 4]); } } pub fn apply_predictor_transform_4(image_data: &mut [u8], range: Range, width: usize) { for i in range { image_data[i] = image_data[i].wrapping_add(image_data[i - width * 4 - 4]); } } pub fn apply_predictor_transform_5(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top_right = &old[range.start - width * 4 + 4..]; let top = &old[range.start - width * 4..]; for ((chunk, tr), t) in current .chunks_exact_mut(4) .zip(top_right.chunks_exact(4)) .zip(top.chunks_exact(4)) { prev = [ chunk[0].wrapping_add(average2(average2(prev[0], tr[0]), t[0])), chunk[1].wrapping_add(average2(average2(prev[1], tr[1]), t[1])), chunk[2].wrapping_add(average2(average2(prev[2], tr[2]), t[2])), chunk[3].wrapping_add(average2(average2(prev[3], tr[3]), t[3])), ]; 
chunk.copy_from_slice(&prev); } } pub fn apply_predictor_transform_6(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top_left = &old[range.start - width * 4 - 4..]; for (chunk, tl) in current.chunks_exact_mut(4).zip(top_left.chunks_exact(4)) { for i in 0..4 { chunk[i] = chunk[i].wrapping_add(average2(prev[i], tl[i])); } prev.copy_from_slice(chunk); } } pub fn apply_predictor_transform_7(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top = &old[range.start - width * 4..][..(range.end - range.start)]; let mut current_chunks = current.chunks_exact_mut(64); let mut top_chunks = top.chunks_exact(64); for (current, top) in (&mut current_chunks).zip(&mut top_chunks) { for (chunk, t) in current.chunks_exact_mut(4).zip(top.chunks_exact(4)) { prev = [ chunk[0].wrapping_add(average2(prev[0], t[0])), chunk[1].wrapping_add(average2(prev[1], t[1])), chunk[2].wrapping_add(average2(prev[2], t[2])), chunk[3].wrapping_add(average2(prev[3], t[3])), ]; chunk.copy_from_slice(&prev); } } for (chunk, t) in current_chunks .into_remainder() .chunks_exact_mut(4) .zip(top_chunks.remainder().chunks_exact(4)) { prev = [ chunk[0].wrapping_add(average2(prev[0], t[0])), chunk[1].wrapping_add(average2(prev[1], t[1])), chunk[2].wrapping_add(average2(prev[2], t[2])), chunk[3].wrapping_add(average2(prev[3], t[3])), ]; chunk.copy_from_slice(&prev); } } pub fn apply_predictor_transform_8(image_data: &mut [u8], range: Range, width: usize) { for i in range { image_data[i] = image_data[i].wrapping_add(average2( image_data[i - width * 4 - 4], image_data[i - width * 4], )); } } pub fn apply_predictor_transform_9(image_data: &mut [u8], range: Range, width: usize) { for i in range { image_data[i] = image_data[i].wrapping_add(average2( image_data[i - width * 4], image_data[i - width * 4 + 4], )); } } pub fn apply_predictor_transform_10(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top_left = &old[range.start - width * 4 - 4..]; let top = &old[range.start - width * 4..]; let top_right = &old[range.start - width * 4 + 4..]; for (((chunk, tl), t), tr) in current .chunks_exact_mut(4) .zip(top_left.chunks_exact(4)) .zip(top.chunks_exact(4)) .zip(top_right.chunks_exact(4)) { prev = [ chunk[0].wrapping_add(average2(average2(prev[0], tl[0]), average2(t[0], tr[0]))), chunk[1].wrapping_add(average2(average2(prev[1], tl[1]), average2(t[1], tr[1]))), chunk[2].wrapping_add(average2(average2(prev[2], tl[2]), average2(t[2], tr[2]))), chunk[3].wrapping_add(average2(average2(prev[3], tl[3]), average2(t[3], tr[3]))), ]; chunk.copy_from_slice(&prev); } } pub fn apply_predictor_transform_11(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let top = &old[range.start - width * 4..]; let mut l = [ old[range.start - 4] as i16, old[range.start - 3] as i16, old[range.start - 2] as i16, old[range.start - 1] as i16, ]; let mut tl = [ old[range.start - width * 4 - 4] as i16, old[range.start - width * 4 - 3] as i16, old[range.start - width * 4 - 2] as i16, old[range.start - width * 4 - 1] as i16, ]; for (chunk, top) in 
current.chunks_exact_mut(4).zip(top.chunks_exact(4)) { let t = [top[0] as i16, top[1] as i16, top[2] as i16, top[3] as i16]; let mut predict_left = 0; let mut predict_top = 0; for i in 0..4 { let predict = l[i] + t[i] - tl[i]; predict_left += i16::abs(predict - l[i]); predict_top += i16::abs(predict - t[i]); } if predict_left < predict_top { chunk.copy_from_slice(&[ chunk[0].wrapping_add(l[0] as u8), chunk[1].wrapping_add(l[1] as u8), chunk[2].wrapping_add(l[2] as u8), chunk[3].wrapping_add(l[3] as u8), ]); } else { chunk.copy_from_slice(&[ chunk[0].wrapping_add(t[0] as u8), chunk[1].wrapping_add(t[1] as u8), chunk[2].wrapping_add(t[2] as u8), chunk[3].wrapping_add(t[3] as u8), ]); } tl = t; l = [ chunk[0] as i16, chunk[1] as i16, chunk[2] as i16, chunk[3] as i16, ]; } } pub fn apply_predictor_transform_12(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top_left = &old[range.start - width * 4 - 4..]; let top = &old[range.start - width * 4..]; for ((chunk, tl), t) in current .chunks_exact_mut(4) .zip(top_left.chunks_exact(4)) .zip(top.chunks_exact(4)) { prev = [ chunk[0].wrapping_add(clamp_add_subtract_full( i16::from(prev[0]), i16::from(t[0]), i16::from(tl[0]), )), chunk[1].wrapping_add(clamp_add_subtract_full( i16::from(prev[1]), i16::from(t[1]), i16::from(tl[1]), )), chunk[2].wrapping_add(clamp_add_subtract_full( i16::from(prev[2]), i16::from(t[2]), i16::from(tl[2]), )), chunk[3].wrapping_add(clamp_add_subtract_full( i16::from(prev[3]), i16::from(t[3]), i16::from(tl[3]), )), ]; chunk.copy_from_slice(&prev); } } pub fn apply_predictor_transform_13(image_data: &mut [u8], range: Range, width: usize) { let (old, current) = image_data[..range.end].split_at_mut(range.start); let mut prev: [u8; 4] = old[range.start - 4..][..4].try_into().unwrap(); let top_left = &old[range.start - width * 4 - 4..][..(range.end - range.start)]; let top = &old[range.start - width * 4..][..(range.end - range.start)]; for ((chunk, tl), t) in current .chunks_exact_mut(4) .zip(top_left.chunks_exact(4)) .zip(top.chunks_exact(4)) { prev = [ chunk[0].wrapping_add(clamp_add_subtract_half( (i16::from(prev[0]) + i16::from(t[0])) / 2, i16::from(tl[0]), )), chunk[1].wrapping_add(clamp_add_subtract_half( (i16::from(prev[1]) + i16::from(t[1])) / 2, i16::from(tl[1]), )), chunk[2].wrapping_add(clamp_add_subtract_half( (i16::from(prev[2]) + i16::from(t[2])) / 2, i16::from(tl[2]), )), chunk[3].wrapping_add(clamp_add_subtract_half( (i16::from(prev[3]) + i16::from(t[3])) / 2, i16::from(tl[3]), )), ]; chunk.copy_from_slice(&prev); } } pub(crate) fn apply_color_transform( image_data: &mut [u8], width: u16, size_bits: u8, transform_data: &[u8], ) { let block_xsize = usize::from(subsample_size(width, size_bits)); let width = usize::from(width); for (y, row) in image_data.chunks_exact_mut(width * 4).enumerate() { for (block_x, block) in row.chunks_mut(4 << size_bits).enumerate() { let block_index = (y >> size_bits) * block_xsize + block_x; let red_to_blue = transform_data[block_index * 4]; let green_to_blue = transform_data[block_index * 4 + 1]; let green_to_red = transform_data[block_index * 4 + 2]; for pixel in block.chunks_exact_mut(4) { let green = u32::from(pixel[1]); let mut temp_red = u32::from(pixel[0]); let mut temp_blue = u32::from(pixel[2]); temp_red += color_transform_delta(green_to_red as i8, green as i8); temp_blue += color_transform_delta(green_to_blue as i8, green as 
i8); temp_blue += color_transform_delta(red_to_blue as i8, temp_red as i8); pixel[0] = (temp_red & 0xff) as u8; pixel[2] = (temp_blue & 0xff) as u8; } } } } pub(crate) fn apply_subtract_green_transform(image_data: &mut [u8]) { for pixel in image_data.chunks_exact_mut(4) { pixel[0] = pixel[0].wrapping_add(pixel[1]); pixel[2] = pixel[2].wrapping_add(pixel[1]); } } pub(crate) fn apply_color_indexing_transform( image_data: &mut [u8], width: u16, height: u16, table_size: u16, table_data: &[u8], ) { // TODO: Replace with built-in div_ceil when MSRV is 1.73+ fn div_ceil(a: u16, b: u16) -> u16 { let d = a / b; let r = a % b; if r > 0 && b > 0 { d + 1 } else { d } } if table_size > 16 { let mut table = table_data.chunks_exact(4).collect::>(); table.resize(256, &[0; 4]); for pixel in image_data.chunks_exact_mut(4) { pixel.copy_from_slice(table[pixel[1] as usize]); } } else { let width_bits: u8 = if table_size <= 2 { 3 } else if table_size <= 4 { 2 } else if table_size <= 16 { 1 } else { unreachable!() }; let bits_per_entry = 8 / (1 << width_bits); let mask = (1 << bits_per_entry) - 1; let table = (0..256) .flat_map(|i| { let mut entry = Vec::new(); for j in 0..(1 << width_bits) { let k = i >> (j * bits_per_entry) & mask; if k < table_size { entry.extend_from_slice(&table_data[usize::from(k) * 4..][..4]); } else { entry.extend_from_slice(&[0; 4]); } } entry }) .collect::>(); let table = table.chunks_exact(4 << width_bits).collect::>(); let entry_size = 4 << width_bits; let index_image_width = div_ceil(width, 1 << width_bits) as usize; let final_entry_size = width as usize * 4 - entry_size * (index_image_width - 1); for y in (0..height as usize).rev() { for x in (0..index_image_width).rev() { let input_index = y * index_image_width * 4 + x * 4 + 1; let output_index = y * width as usize * 4 + x * entry_size; let table_index = image_data[input_index] as usize; if x == index_image_width - 1 { image_data[output_index..][..final_entry_size] .copy_from_slice(&table[table_index][..final_entry_size]); } else { image_data[output_index..][..entry_size].copy_from_slice(table[table_index]); } } } } } //predictor functions /// Get average of 2 bytes fn average2(a: u8, b: u8) -> u8 { ((u16::from(a) + u16::from(b)) / 2) as u8 } /// Clamp add subtract full on one part fn clamp_add_subtract_full(a: i16, b: i16, c: i16) -> u8 { // Clippy suggests the clamp method, but it seems to optimize worse as of rustc 1.82.0 nightly. #![allow(clippy::manual_clamp)] (a + b - c).max(0).min(255) as u8 } /// Clamp add subtract half on one part fn clamp_add_subtract_half(a: i16, b: i16) -> u8 { // Clippy suggests the clamp method, but it seems to optimize worse as of rustc 1.82.0 nightly. 
#![allow(clippy::manual_clamp)] (a + (a - b) / 2).max(0).min(255) as u8 } /// Does color transform on 2 numbers fn color_transform_delta(t: i8, c: i8) -> u32 { (i32::from(t) * i32::from(c)) as u32 >> 5 } #[cfg(all(test, feature = "_benchmarks"))] mod benches { use rand::Rng; use test::{black_box, Bencher}; fn measure_predictor(b: &mut Bencher, predictor: fn(&mut [u8], std::ops::Range, usize)) { let width = 256; let mut data = vec![0u8; width * 8]; rand::thread_rng().fill(&mut data[..]); b.bytes = 4 * width as u64 - 4; b.iter(|| { predictor( black_box(&mut data), black_box(width * 4 + 4..width * 8), black_box(width), ) }); } #[bench] fn predictor00(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_0); } #[bench] fn predictor01(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_1); } #[bench] fn predictor02(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_2); } #[bench] fn predictor03(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_3); } #[bench] fn predictor04(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_4); } #[bench] fn predictor05(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_5); } #[bench] fn predictor06(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_6); } #[bench] fn predictor07(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_7); } #[bench] fn predictor08(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_8); } #[bench] fn predictor09(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_9); } #[bench] fn predictor10(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_10); } #[bench] fn predictor11(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_11); } #[bench] fn predictor12(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_12); } #[bench] fn predictor13(b: &mut Bencher) { measure_predictor(b, super::apply_predictor_transform_13); } #[bench] fn color_transform(b: &mut Bencher) { let width = 256; let height = 256; let size_bits = 3; let mut data = vec![0u8; width * height * 4]; let mut transform_data = vec![0u8; (width * height * 4) >> (size_bits * 2)]; rand::thread_rng().fill(&mut data[..]); rand::thread_rng().fill(&mut transform_data[..]); b.bytes = 4 * width as u64 * height as u64; b.iter(|| { super::apply_color_transform( black_box(&mut data), black_box(width as u16), black_box(size_bits), black_box(&transform_data), ); }); } #[bench] fn subtract_green(b: &mut Bencher) { let mut data = vec![0u8; 1024 * 4]; rand::thread_rng().fill(&mut data[..]); b.bytes = data.len() as u64; b.iter(|| { super::apply_subtract_green_transform(black_box(&mut data)); }); } } image-webp-0.2.0/src/mod.rs000064400000000000000000000010601046102023000135510ustar 00000000000000//! 
Decoding and Encoding of WebP Images #[cfg(feature = "webp-encoder")] pub use self::encoder::{WebPEncoder, WebPQuality}; #[cfg(feature = "webp-encoder")] mod encoder; #[cfg(feature = "webp")] pub use self::decoder::WebPDecoder; #[cfg(feature = "webp")] mod decoder; #[cfg(feature = "webp")] mod extended; #[cfg(feature = "webp")] mod huffman; #[cfg(feature = "webp")] mod loop_filter; #[cfg(feature = "webp")] mod lossless; #[cfg(feature = "webp")] mod lossless_transform; #[cfg(feature = "webp")] mod transform; #[cfg(feature = "webp")] pub mod vp8; image-webp-0.2.0/src/transform.rs000064400000000000000000000052351046102023000150150ustar 00000000000000static CONST1: i64 = 20091; static CONST2: i64 = 35468; pub(crate) fn idct4x4(block: &mut [i32]) { // The intermediate results may overflow the types, so we stretch the type. fn fetch(block: &[i32], idx: usize) -> i64 { i64::from(block[idx]) } // Perform one lenght check up front to avoid subsequent bounds checks in this function assert!(block.len() >= 16); for i in 0usize..4 { let a1 = fetch(block, i) + fetch(block, 8 + i); let b1 = fetch(block, i) - fetch(block, 8 + i); let t1 = (fetch(block, 4 + i) * CONST2) >> 16; let t2 = fetch(block, 12 + i) + ((fetch(block, 12 + i) * CONST1) >> 16); let c1 = t1 - t2; let t1 = fetch(block, 4 + i) + ((fetch(block, 4 + i) * CONST1) >> 16); let t2 = (fetch(block, 12 + i) * CONST2) >> 16; let d1 = t1 + t2; block[i] = (a1 + d1) as i32; block[4 + i] = (b1 + c1) as i32; block[4 * 3 + i] = (a1 - d1) as i32; block[4 * 2 + i] = (b1 - c1) as i32; } for i in 0usize..4 { let a1 = fetch(block, 4 * i) + fetch(block, 4 * i + 2); let b1 = fetch(block, 4 * i) - fetch(block, 4 * i + 2); let t1 = (fetch(block, 4 * i + 1) * CONST2) >> 16; let t2 = fetch(block, 4 * i + 3) + ((fetch(block, 4 * i + 3) * CONST1) >> 16); let c1 = t1 - t2; let t1 = fetch(block, 4 * i + 1) + ((fetch(block, 4 * i + 1) * CONST1) >> 16); let t2 = (fetch(block, 4 * i + 3) * CONST2) >> 16; let d1 = t1 + t2; block[4 * i] = ((a1 + d1 + 4) >> 3) as i32; block[4 * i + 3] = ((a1 - d1 + 4) >> 3) as i32; block[4 * i + 1] = ((b1 + c1 + 4) >> 3) as i32; block[4 * i + 2] = ((b1 - c1 + 4) >> 3) as i32; } } // 14.3 pub(crate) fn iwht4x4(block: &mut [i32]) { // Perform one length check up front to avoid subsequent bounds checks in this function assert!(block.len() >= 16); for i in 0usize..4 { let a1 = block[i] + block[12 + i]; let b1 = block[4 + i] + block[8 + i]; let c1 = block[4 + i] - block[8 + i]; let d1 = block[i] - block[12 + i]; block[i] = a1 + b1; block[4 + i] = c1 + d1; block[8 + i] = a1 - b1; block[12 + i] = d1 - c1; } for block in block.chunks_exact_mut(4) { let a1 = block[0] + block[3]; let b1 = block[1] + block[2]; let c1 = block[1] - block[2]; let d1 = block[0] - block[3]; let a2 = a1 + b1; let b2 = c1 + d1; let c2 = a1 - b1; let d2 = d1 - c1; block[0] = (a2 + 3) >> 3; block[1] = (b2 + 3) >> 3; block[2] = (c2 + 3) >> 3; block[3] = (d2 + 3) >> 3; } } image-webp-0.2.0/src/vp8.rs000064400000000000000000003036201046102023000135160ustar 00000000000000//! An implementation of the VP8 Video Codec //! //! This module contains a partial implementation of the //! VP8 video format as defined in RFC-6386. //! //! It decodes Keyframes only. //! VP8 is the underpinning of the WebP image format //! //! # Related Links //! * [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide //! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview of //! of the VP8 format //! 
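//!
//! A minimal usage sketch (assuming this module stays exposed as `vp8` from the
//! crate root and that the input is a raw VP8 keyframe bitstream, e.g. the
//! payload of a WebP `VP8 ` chunk):
//!
//! ```ignore
//! use std::io::Cursor;
//! use image_webp::vp8::Vp8Decoder;
//!
//! // "keyframe.vp8" is a placeholder path to a raw VP8 keyframe bitstream.
//! let raw = std::fs::read("keyframe.vp8").unwrap();
//! let mut decoder = Vp8Decoder::new(Cursor::new(raw));
//! let frame = decoder.decode_frame().unwrap();
//! println!("{}x{}, {} luma samples", frame.width, frame.height, frame.ybuf.len());
//! ```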
use byteorder_lite::BigEndian; use byteorder_lite::{LittleEndian, ReadBytesExt}; use std::cmp; use std::default::Default; use std::io::Read; use std::io::{Cursor, ErrorKind}; use crate::decoder::DecodingError; use super::loop_filter; use super::transform; const MAX_SEGMENTS: usize = 4; const NUM_DCT_TOKENS: usize = 12; // Prediction modes const DC_PRED: i8 = 0; const V_PRED: i8 = 1; const H_PRED: i8 = 2; const TM_PRED: i8 = 3; const B_PRED: i8 = 4; const B_DC_PRED: i8 = 0; const B_TM_PRED: i8 = 1; const B_VE_PRED: i8 = 2; const B_HE_PRED: i8 = 3; const B_LD_PRED: i8 = 4; const B_RD_PRED: i8 = 5; const B_VR_PRED: i8 = 6; const B_VL_PRED: i8 = 7; const B_HD_PRED: i8 = 8; const B_HU_PRED: i8 = 9; // Prediction mode enum #[repr(i8)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] enum LumaMode { /// Predict DC using row above and column to the left. #[default] DC = DC_PRED, /// Predict rows using row above. V = V_PRED, /// Predict columns using column to the left. H = H_PRED, /// Propagate second differences. TM = TM_PRED, /// Each Y subblock is independently predicted. B = B_PRED, } #[repr(i8)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] enum ChromaMode { /// Predict DC using row above and column to the left. #[default] DC = DC_PRED, /// Predict rows using row above. V = V_PRED, /// Predict columns using column to the left. H = H_PRED, /// Propagate second differences. TM = TM_PRED, } #[repr(i8)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] enum IntraMode { #[default] DC = B_DC_PRED, TM = B_TM_PRED, VE = B_VE_PRED, HE = B_HE_PRED, LD = B_LD_PRED, RD = B_RD_PRED, VR = B_VR_PRED, VL = B_VL_PRED, HD = B_HD_PRED, HU = B_HU_PRED, } type Prob = u8; static SEGMENT_ID_TREE: [i8; 6] = [2, 4, -0, -1, -2, -3]; // Section 11.2 // Tree for determining the keyframe luma intra prediction modes: static KEYFRAME_YMODE_TREE: [i8; 8] = [-B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED]; // Default probabilities for decoding the keyframe luma modes static KEYFRAME_YMODE_PROBS: [Prob; 4] = [145, 156, 163, 128]; // Tree for determining the keyframe B_PRED mode: static KEYFRAME_BPRED_MODE_TREE: [i8; 18] = [ -B_DC_PRED, 2, -B_TM_PRED, 4, -B_VE_PRED, 6, 8, 12, -B_HE_PRED, 10, -B_RD_PRED, -B_VR_PRED, -B_LD_PRED, 14, -B_VL_PRED, 16, -B_HD_PRED, -B_HU_PRED, ]; // Probabilities for the BPRED_MODE_TREE static KEYFRAME_BPRED_MODE_PROBS: [[[u8; 9]; 10]; 10] = [ [ [231, 120, 48, 89, 115, 113, 120, 152, 112], [152, 179, 64, 126, 170, 118, 46, 70, 95], [175, 69, 143, 80, 85, 82, 72, 155, 103], [56, 58, 10, 171, 218, 189, 17, 13, 152], [144, 71, 10, 38, 171, 213, 144, 34, 26], [114, 26, 17, 163, 44, 195, 21, 10, 173], [121, 24, 80, 195, 26, 62, 44, 64, 85], [170, 46, 55, 19, 136, 160, 33, 206, 71], [63, 20, 8, 114, 114, 208, 12, 9, 226], [81, 40, 11, 96, 182, 84, 29, 16, 36], ], [ [134, 183, 89, 137, 98, 101, 106, 165, 148], [72, 187, 100, 130, 157, 111, 32, 75, 80], [66, 102, 167, 99, 74, 62, 40, 234, 128], [41, 53, 9, 178, 241, 141, 26, 8, 107], [104, 79, 12, 27, 217, 255, 87, 17, 7], [74, 43, 26, 146, 73, 166, 49, 23, 157], [65, 38, 105, 160, 51, 52, 31, 115, 128], [87, 68, 71, 44, 114, 51, 15, 186, 23], [47, 41, 14, 110, 182, 183, 21, 17, 194], [66, 45, 25, 102, 197, 189, 23, 18, 22], ], [ [88, 88, 147, 150, 42, 46, 45, 196, 205], [43, 97, 183, 117, 85, 38, 35, 179, 61], [39, 53, 200, 87, 26, 21, 43, 232, 171], [56, 34, 51, 104, 114, 102, 29, 93, 77], [107, 54, 32, 26, 51, 1, 81, 43, 31], [39, 28, 85, 171, 58, 165, 90, 98, 64], [34, 22, 116, 206, 23, 34, 43, 166, 73], [68, 25, 106, 22, 64, 
171, 36, 225, 114], [34, 19, 21, 102, 132, 188, 16, 76, 124], [62, 18, 78, 95, 85, 57, 50, 48, 51], ], [ [193, 101, 35, 159, 215, 111, 89, 46, 111], [60, 148, 31, 172, 219, 228, 21, 18, 111], [112, 113, 77, 85, 179, 255, 38, 120, 114], [40, 42, 1, 196, 245, 209, 10, 25, 109], [100, 80, 8, 43, 154, 1, 51, 26, 71], [88, 43, 29, 140, 166, 213, 37, 43, 154], [61, 63, 30, 155, 67, 45, 68, 1, 209], [142, 78, 78, 16, 255, 128, 34, 197, 171], [41, 40, 5, 102, 211, 183, 4, 1, 221], [51, 50, 17, 168, 209, 192, 23, 25, 82], ], [ [125, 98, 42, 88, 104, 85, 117, 175, 82], [95, 84, 53, 89, 128, 100, 113, 101, 45], [75, 79, 123, 47, 51, 128, 81, 171, 1], [57, 17, 5, 71, 102, 57, 53, 41, 49], [115, 21, 2, 10, 102, 255, 166, 23, 6], [38, 33, 13, 121, 57, 73, 26, 1, 85], [41, 10, 67, 138, 77, 110, 90, 47, 114], [101, 29, 16, 10, 85, 128, 101, 196, 26], [57, 18, 10, 102, 102, 213, 34, 20, 43], [117, 20, 15, 36, 163, 128, 68, 1, 26], ], [ [138, 31, 36, 171, 27, 166, 38, 44, 229], [67, 87, 58, 169, 82, 115, 26, 59, 179], [63, 59, 90, 180, 59, 166, 93, 73, 154], [40, 40, 21, 116, 143, 209, 34, 39, 175], [57, 46, 22, 24, 128, 1, 54, 17, 37], [47, 15, 16, 183, 34, 223, 49, 45, 183], [46, 17, 33, 183, 6, 98, 15, 32, 183], [65, 32, 73, 115, 28, 128, 23, 128, 205], [40, 3, 9, 115, 51, 192, 18, 6, 223], [87, 37, 9, 115, 59, 77, 64, 21, 47], ], [ [104, 55, 44, 218, 9, 54, 53, 130, 226], [64, 90, 70, 205, 40, 41, 23, 26, 57], [54, 57, 112, 184, 5, 41, 38, 166, 213], [30, 34, 26, 133, 152, 116, 10, 32, 134], [75, 32, 12, 51, 192, 255, 160, 43, 51], [39, 19, 53, 221, 26, 114, 32, 73, 255], [31, 9, 65, 234, 2, 15, 1, 118, 73], [88, 31, 35, 67, 102, 85, 55, 186, 85], [56, 21, 23, 111, 59, 205, 45, 37, 192], [55, 38, 70, 124, 73, 102, 1, 34, 98], ], [ [102, 61, 71, 37, 34, 53, 31, 243, 192], [69, 60, 71, 38, 73, 119, 28, 222, 37], [68, 45, 128, 34, 1, 47, 11, 245, 171], [62, 17, 19, 70, 146, 85, 55, 62, 70], [75, 15, 9, 9, 64, 255, 184, 119, 16], [37, 43, 37, 154, 100, 163, 85, 160, 1], [63, 9, 92, 136, 28, 64, 32, 201, 85], [86, 6, 28, 5, 64, 255, 25, 248, 1], [56, 8, 17, 132, 137, 255, 55, 116, 128], [58, 15, 20, 82, 135, 57, 26, 121, 40], ], [ [164, 50, 31, 137, 154, 133, 25, 35, 218], [51, 103, 44, 131, 131, 123, 31, 6, 158], [86, 40, 64, 135, 148, 224, 45, 183, 128], [22, 26, 17, 131, 240, 154, 14, 1, 209], [83, 12, 13, 54, 192, 255, 68, 47, 28], [45, 16, 21, 91, 64, 222, 7, 1, 197], [56, 21, 39, 155, 60, 138, 23, 102, 213], [85, 26, 85, 85, 128, 128, 32, 146, 171], [18, 11, 7, 63, 144, 171, 4, 4, 246], [35, 27, 10, 146, 174, 171, 12, 26, 128], ], [ [190, 80, 35, 99, 180, 80, 126, 54, 45], [85, 126, 47, 87, 176, 51, 41, 20, 32], [101, 75, 128, 139, 118, 146, 116, 128, 85], [56, 41, 15, 176, 236, 85, 37, 9, 62], [146, 36, 19, 30, 171, 255, 97, 27, 20], [71, 30, 17, 119, 118, 255, 17, 18, 138], [101, 38, 60, 138, 55, 70, 43, 26, 142], [138, 45, 61, 62, 219, 1, 81, 188, 64], [32, 41, 20, 117, 151, 142, 20, 21, 163], [112, 19, 12, 61, 195, 128, 48, 4, 24], ], ]; // Section 11.4 Tree for determining macroblock the chroma mode static KEYFRAME_UV_MODE_TREE: [i8; 6] = [-DC_PRED, 2, -V_PRED, 4, -H_PRED, -TM_PRED]; // Probabilities for determining macroblock mode static KEYFRAME_UV_MODE_PROBS: [Prob; 3] = [142, 114, 183]; // Section 13.4 type TokenProbTables = [[[[Prob; NUM_DCT_TOKENS - 1]; 3]; 8]; 4]; // Probabilities that a token's probability will be updated static COEFF_UPDATE_PROBS: TokenProbTables = [ [ [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255], [223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255], [249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255], [234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255], [239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255], [251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255], [250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255], [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], ], [ [ [217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255], [234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255], ], [ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], [223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255], [249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255], [247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255], [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], ], [ [ [186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255], [234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255], [251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255], ], [ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], [254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], ], [ [ [248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255], [248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255], [246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255], [252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255], ], [ [255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255], [248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255], [253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255], [245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255], [253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255], [252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255], [249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255], [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], [ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], ], ], ]; // Section 13.5 // Default Probabilities for tokens static COEFF_PROBS: TokenProbTables = [ [ [ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], ], [ [253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128], [189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128], [106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128], ], [ [1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128], [181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128], [78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128], ], [ [1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128], [184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128], [77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128], ], [ [1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128], [170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128], [37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128], ], [ [1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128], [207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128], [102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128], ], [ [1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128], [177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128], [80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128], ], [ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], [246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], [255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], ], ], [ [ [198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62], [131, 45, 198, 221, 172, 176, 220, 
157, 252, 221, 1], [68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128], ], [ [1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128], [184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128], [81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128], ], [ [1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128], [99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128], [23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128], ], [ [1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128], [109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128], [44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128], ], [ [1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128], [94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128], [22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128], ], [ [1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128], [124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128], [35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128], ], [ [1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128], [121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128], [45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128], ], [ [1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128], [203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128], [137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128], ], ], [ [ [253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128], [175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128], [73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128], ], [ [1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128], [239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128], [155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128], ], [ [1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128], [201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128], [69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128], ], [ [1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128], [223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128], [141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128], ], [ [1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128], [190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128], [149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], ], [ [1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128], [247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128], [240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128], ], [ [1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128], [213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128], [55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128], ], [ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], ], ], [ [ [202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255], [126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128], [61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128], ], [ [1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128], [166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128], [39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128], ], [ [1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128], [124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128], [24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128], ], [ [1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128], [149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128], [28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128], ], [ [1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128], [123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128], [20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128], ], [ [1, 222, 248, 255, 216, 213, 
128, 128, 128, 128, 128], [168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128], [47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128], ], [ [1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128], [141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128], [42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128], ], [ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], [244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], [238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], ], ], ]; // DCT Tokens const DCT_0: i8 = 0; const DCT_1: i8 = 1; const DCT_2: i8 = 2; const DCT_3: i8 = 3; const DCT_4: i8 = 4; const DCT_CAT1: i8 = 5; const DCT_CAT2: i8 = 6; const DCT_CAT3: i8 = 7; const DCT_CAT4: i8 = 8; const DCT_CAT5: i8 = 9; const DCT_CAT6: i8 = 10; const DCT_EOB: i8 = 11; static DCT_TOKEN_TREE: [i8; 22] = [ -DCT_EOB, 2, -DCT_0, 4, -DCT_1, 6, 8, 12, -DCT_2, 10, -DCT_3, -DCT_4, 14, 16, -DCT_CAT1, -DCT_CAT2, 18, 20, -DCT_CAT3, -DCT_CAT4, -DCT_CAT5, -DCT_CAT6, ]; static PROB_DCT_CAT: [[Prob; 12]; 6] = [ [159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [165, 145, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0], [176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0], [180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0], [254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0], ]; static DCT_CAT_BASE: [u8; 6] = [5, 7, 11, 19, 35, 67]; static COEFF_BANDS: [u8; 16] = [0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7]; #[rustfmt::skip] static DC_QUANT: [i16; 128] = [ 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 93, 95, 96, 98, 100, 101, 102, 104, 106, 108, 110, 112, 114, 116, 118, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 148, 151, 154, 157, ]; #[rustfmt::skip] static AC_QUANT: [i16; 128] = [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 119, 122, 125, 128, 131, 134, 137, 140, 143, 146, 149, 152, 155, 158, 161, 164, 167, 170, 173, 177, 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 234, 239, 245, 249, 254, 259, 264, 269, 274, 279, 284, ]; static ZIGZAG: [u8; 16] = [0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15]; struct BoolReader { reader: Cursor>, range: u32, value: u32, bit_count: u8, eof: bool, } impl BoolReader { pub(crate) fn new() -> BoolReader { BoolReader { reader: Default::default(), range: 0, value: 0, bit_count: 0, eof: false, } } pub(crate) fn init(&mut self, buf: Vec) -> Result<(), DecodingError> { if buf.len() < 2 { return Err(DecodingError::NotEnoughInitData); } self.reader = Cursor::new(buf); self.value = self.reader.read_u16::()? 
as u32; self.range = 255; self.bit_count = 0; Ok(()) } pub(crate) fn read_bool(&mut self, probability: u8) -> Result<bool, DecodingError> { let split = 1 + (((self.range - 1) * u32::from(probability)) >> 8); let bigsplit = split << 8; let retval = if self.value >= bigsplit { self.range -= split; self.value -= bigsplit; true } else { self.range = split; false }; if self.range < 128 { // Compute shift required to satisfy `self.range >= 128`. // Apply that shift to `self.range`, `self.value`, and `self.bit_count`. // // Subtract 24 because we only care about leading zeros in the // lowest byte of `self.range` which is a `u32`. let shift = self.range.leading_zeros() - 24; self.value <<= shift; self.range <<= shift; self.bit_count += shift as u8; if self.bit_count >= 8 { self.bit_count %= 8; // libwebp seems to (sometimes?) allow bitstreams that read one byte past the end. // This match statement replicates that logic. match self.reader.read_u8() { Ok(v) => self.value |= u32::from(v) << self.bit_count, Err(e) if e.kind() == ErrorKind::UnexpectedEof && !self.eof => self.eof = true, Err(e) => return Err(DecodingError::IoError(e)), }; } } Ok(retval) } pub(crate) fn read_literal(&mut self, n: u8) -> Result<u8, DecodingError> { let mut v = 0u8; let mut n = n; while n != 0 { v = (v << 1) + self.read_bool(128u8)? as u8; n -= 1; } Ok(v) } pub(crate) fn read_magnitude_and_sign(&mut self, n: u8) -> Result<i32, DecodingError> { let magnitude = self.read_literal(n)?; let sign = self.read_literal(1)?; if sign == 1 { Ok(-i32::from(magnitude)) } else { Ok(i32::from(magnitude)) } } pub(crate) fn read_with_tree( &mut self, tree: &[i8], probs: &[Prob], start: isize, ) -> Result<i8, DecodingError> { let mut index = start; loop { let a = self.read_bool(probs[index as usize >> 1])?; let b = index + a as isize; index = tree[b as usize] as isize; if index <= 0 { break; } } Ok(-index as i8) } pub(crate) fn read_flag(&mut self) -> Result<bool, DecodingError> { Ok(0 != self.read_literal(1)?)
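// Worked example for `read_bool` above: with `range = 254` and `probability = 128`, `split = 1 + ((253 * 128) >> 8) = 127`; decoding a `false` sets `range = 127`, and the renormalization then shifts `range` and `value` left by one bit (`leading_zeros(127) - 24 == 1`) to restore `range >= 128`.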
} } #[derive(Default, Clone, Copy)] struct MacroBlock { bpred: [IntraMode; 16], complexity: [u8; 9], luma_mode: LumaMode, chroma_mode: ChromaMode, segmentid: u8, coeffs_skipped: bool, } /// A representation of the last decoded video frame #[derive(Default, Debug, Clone)] pub struct Frame { /// The width of the luma plane pub width: u16, /// The height of the luma plane pub height: u16, /// The luma plane of the frame pub ybuf: Vec<u8>, /// The U (blue-difference) chroma plane of the frame pub ubuf: Vec<u8>, /// The V (red-difference) chroma plane of the frame pub vbuf: Vec<u8>, /// Indicates whether this frame is a keyframe pub keyframe: bool, version: u8, /// Indicates whether this frame is intended for display pub for_display: bool, // Section 9.2 /// The pixel type of the frame as defined by Section 9.2 /// of the VP8 Specification pub pixel_type: u8, // Section 9.4 and 15 filter_type: bool, // if true uses the simple filter, if false the normal filter filter_level: u8, sharpness_level: u8, } impl Frame { /// Chroma planes are half the size of the luma plane fn chroma_width(&self) -> u16 { (self.width + 1) / 2 } fn chroma_height(&self) -> u16 { (self.height + 1) / 2 } /// Fills an rgb buffer with the image pub(crate) fn fill_rgb(&self, buf: &mut [u8]) { const BPP: usize = 3; let mut index = 0_usize; for (y, row) in buf .chunks_exact_mut(usize::from(self.width) * BPP) .enumerate() { let chroma_index = usize::from(self.chroma_width()) * (y / 2); let next_index = index + usize::from(self.width); Frame::fill_rgb_row( &self.ybuf[index..next_index], &self.ubuf[chroma_index..], &self.vbuf[chroma_index..], row, ); index = next_index; } } fn fill_rgb_row(y_vec: &[u8], u_vec: &[u8], v_vec: &[u8], rgb: &mut [u8]) { // Fill 2 pixels per iteration: these pixels share `u` and `v` components let mut rgb_chunks = rgb.chunks_exact_mut(6); let mut y_chunks = y_vec.chunks_exact(2); let mut u_iter = u_vec.iter(); let mut v_iter = v_vec.iter(); for (((rgb, y), &u), &v) in (&mut rgb_chunks) .zip(&mut y_chunks) .zip(&mut u_iter) .zip(&mut v_iter) { let coeffs = [ mulhi(v, 26149), mulhi(u, 6419), mulhi(v, 13320), mulhi(u, 33050), ]; rgb[0] = clip(mulhi(y[0], 19077) + coeffs[0] - 14234); rgb[1] = clip(mulhi(y[0], 19077) - coeffs[1] - coeffs[2] + 8708); rgb[2] = clip(mulhi(y[0], 19077) + coeffs[3] - 17685); rgb[3] = clip(mulhi(y[1], 19077) + coeffs[0] - 14234); rgb[4] = clip(mulhi(y[1], 19077) - coeffs[1] - coeffs[2] + 8708); rgb[5] = clip(mulhi(y[1], 19077) + coeffs[3] - 17685); } let remainder = rgb_chunks.into_remainder(); if remainder.len() >= 3 { if let (Some(&y), Some(&u), Some(&v)) = ( y_chunks.remainder().iter().next(), u_iter.next(), v_iter.next(), ) { let coeffs = [ mulhi(v, 26149), mulhi(u, 6419), mulhi(v, 13320), mulhi(u, 33050), ]; remainder[0] = clip(mulhi(y, 19077) + coeffs[0] - 14234); remainder[1] = clip(mulhi(y, 19077) - coeffs[1] - coeffs[2] + 8708); remainder[2] = clip(mulhi(y, 19077) + coeffs[3] - 17685); } } } /// Fills an rgba buffer, leaving the alpha values untouched pub(crate) fn fill_rgba(&self, buf: &mut [u8]) { let mut index = 0_usize; for (y, row) in buf .chunks_exact_mut(usize::from(self.width) * 4) .enumerate() { let chroma_index_row = usize::from(self.chroma_width()) * (y / 2); for (x, rgb_chunk) in row.chunks_exact_mut(4).enumerate() { let chroma_index = chroma_index_row + x / 2; Frame::fill_single( self.ybuf[index], self.ubuf[chroma_index], self.vbuf[chroma_index], rgb_chunk, ); index += 1; } } } fn fill_single(y: u8, u: u8, v: u8, rgb: &mut [u8]) { // // Conversion values from
https://docs.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888 // let c: i32 = i32::from(y) - 16; // let d: i32 = i32::from(u) - 128; // let e: i32 = i32::from(v) - 128; // let r: u8 = clamp((298 * c + 409 * e + 128) >> 8, 0, 255) // .try_into() // .unwrap(); // let g: u8 = clamp((298 * c - 100 * d - 208 * e + 128) >> 8, 0, 255) // .try_into() // .unwrap(); // let b: u8 = clamp((298 * c + 516 * d + 128) >> 8, 0, 255) // .try_into() // .unwrap(); rgb[0] = clip(mulhi(y, 19077) + mulhi(v, 26149) - 14234); rgb[1] = clip(mulhi(y, 19077) - mulhi(u, 6419) - mulhi(v, 13320) + 8708); rgb[2] = clip(mulhi(y, 19077) + mulhi(u, 33050) - 17685); } /// Gets the size of the RGB buffer needed to hold this frame pub fn get_buf_size(&self) -> usize { self.ybuf.len() * 3 } } /// `_mm_mulhi_epu16` emulation used in `Frame::fill_rgb` and `Frame::fill_rgba`. fn mulhi(v: u8, coeff: u16) -> i32 { ((u32::from(v) * u32::from(coeff)) >> 8) as i32 } /// Used in `Frame::fill_rgb` and `Frame::fill_rgba`. /// This function has been rewritten to encourage auto-vectorization. /// /// Based on [src/dsp/yuv.h](https://github.com/webmproject/libwebp/blob/8534f53960befac04c9631e6e50d21dcb42dfeaf/src/dsp/yuv.h#L79) /// from the libwebp source. /// ```text /// const YUV_FIX2: i32 = 6; /// const YUV_MASK2: i32 = (256 << YUV_FIX2) - 1; /// fn clip(v: i32) -> u8 { /// if (v & !YUV_MASK2) == 0 { /// (v >> YUV_FIX2) as u8 /// } else if v < 0 { /// 0 /// } else { /// 255 /// } /// } /// ``` // Clippy suggests the clamp method, but it seems to optimize worse as of rustc 1.82.0 nightly. #[allow(clippy::manual_clamp)] fn clip(v: i32) -> u8 { const YUV_FIX2: i32 = 6; (v >> YUV_FIX2).max(0).min(255) as u8 } #[derive(Clone, Copy, Default)] struct Segment { ydc: i16, yac: i16, y2dc: i16, y2ac: i16, uvdc: i16, uvac: i16, delta_values: bool, quantizer_level: i8, loopfilter_level: i8, } /// VP8 Decoder /// /// Only decodes keyframes pub struct Vp8Decoder<R> { r: R, b: BoolReader, mbwidth: u16, mbheight: u16, macroblocks: Vec<MacroBlock>, frame: Frame, segments_enabled: bool, segments_update_map: bool, segment: [Segment; MAX_SEGMENTS], ref_delta: [i32; 4], mode_delta: [i32; 4], partitions: [BoolReader; 8], num_partitions: u8, segment_tree_probs: [Prob; 3], token_probs: Box<TokenProbTables>, // Section 9.10 prob_intra: Prob, // Section 9.11 prob_skip_false: Option<Prob>, top: Vec<MacroBlock>, left: MacroBlock, top_border: Vec<u8>, left_border: Vec<u8>, } impl<R: Read> Vp8Decoder<R> { /// Create a new decoder.
/// The reader must present a raw vp8 bitstream to the decoder pub fn new(r: R) -> Vp8Decoder { let f = Frame::default(); let s = Segment::default(); let m = MacroBlock::default(); Vp8Decoder { r, b: BoolReader::new(), mbwidth: 0, mbheight: 0, macroblocks: Vec::new(), frame: f, segments_enabled: false, segments_update_map: false, segment: [s; MAX_SEGMENTS], ref_delta: [0; 4], mode_delta: [0; 4], partitions: [ BoolReader::new(), BoolReader::new(), BoolReader::new(), BoolReader::new(), BoolReader::new(), BoolReader::new(), BoolReader::new(), BoolReader::new(), ], num_partitions: 1, segment_tree_probs: [255u8; 3], token_probs: Box::new(COEFF_PROBS), // Section 9.10 prob_intra: 0u8, // Section 9.11 prob_skip_false: None, top: Vec::new(), left: m, top_border: Vec::new(), left_border: Vec::new(), } } fn update_token_probabilities(&mut self) -> Result<(), DecodingError> { for (i, is) in COEFF_UPDATE_PROBS.iter().enumerate() { for (j, js) in is.iter().enumerate() { for (k, ks) in js.iter().enumerate() { for (t, prob) in ks.iter().enumerate().take(NUM_DCT_TOKENS - 1) { if self.b.read_bool(*prob)? { let v = self.b.read_literal(8)?; self.token_probs[i][j][k][t] = v; } } } } } Ok(()) } fn init_partitions(&mut self, n: usize) -> Result<(), DecodingError> { if n > 1 { let mut sizes = vec![0; 3 * n - 3]; self.r.read_exact(sizes.as_mut_slice())?; for (i, s) in sizes.chunks(3).enumerate() { let size = { s } .read_u24::() .expect("Reading from &[u8] can't fail and the chunk is complete"); let mut buf = vec![0; size as usize]; self.r.read_exact(buf.as_mut_slice())?; self.partitions[i].init(buf)?; } } let mut buf = Vec::new(); self.r.read_to_end(&mut buf)?; self.partitions[n - 1].init(buf)?; Ok(()) } fn read_quantization_indices(&mut self) -> Result<(), DecodingError> { fn dc_quant(index: i32) -> i16 { DC_QUANT[index.clamp(0, 127) as usize] } fn ac_quant(index: i32) -> i16 { AC_QUANT[index.clamp(0, 127) as usize] } let yac_abs = self.b.read_literal(7)?; let ydc_delta = if self.b.read_flag()? { self.b.read_magnitude_and_sign(4)? } else { 0 }; let y2dc_delta = if self.b.read_flag()? { self.b.read_magnitude_and_sign(4)? } else { 0 }; let y2ac_delta = if self.b.read_flag()? { self.b.read_magnitude_and_sign(4)? } else { 0 }; let uvdc_delta = if self.b.read_flag()? { self.b.read_magnitude_and_sign(4)? } else { 0 }; let uvac_delta = if self.b.read_flag()? { self.b.read_magnitude_and_sign(4)? } else { 0 }; let n = if self.segments_enabled { MAX_SEGMENTS } else { 1 }; for i in 0usize..n { let base = i32::from(if self.segments_enabled { if !self.segment[i].delta_values { i16::from(self.segment[i].quantizer_level) } else { i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs) } } else { i16::from(yac_abs) }); self.segment[i].ydc = dc_quant(base + ydc_delta); self.segment[i].yac = ac_quant(base); self.segment[i].y2dc = dc_quant(base + y2dc_delta) * 2; // The intermediate result (max`284*155`) can be larger than the `i16` range. self.segment[i].y2ac = (i32::from(ac_quant(base + y2ac_delta)) * 155 / 100) as i16; self.segment[i].uvdc = dc_quant(base + uvdc_delta); self.segment[i].uvac = ac_quant(base + uvac_delta); if self.segment[i].y2ac < 8 { self.segment[i].y2ac = 8; } if self.segment[i].uvdc > 132 { self.segment[i].uvdc = 132; } } Ok(()) } fn read_loop_filter_adjustments(&mut self) -> Result<(), DecodingError> { if self.b.read_flag()? { for i in 0usize..4 { let ref_frame_delta_update_flag = self.b.read_flag()?; self.ref_delta[i] = if ref_frame_delta_update_flag { self.b.read_magnitude_and_sign(6)? 
} else { 0i32 }; } for i in 0usize..4 { let mb_mode_delta_update_flag = self.b.read_flag()?; self.mode_delta[i] = if mb_mode_delta_update_flag { self.b.read_magnitude_and_sign(6)? } else { 0i32 }; } } Ok(()) } fn read_segment_updates(&mut self) -> Result<(), DecodingError> { // Section 9.3 self.segments_update_map = self.b.read_flag()?; let update_segment_feature_data = self.b.read_flag()?; if update_segment_feature_data { let segment_feature_mode = self.b.read_flag()?; for i in 0usize..MAX_SEGMENTS { self.segment[i].delta_values = !segment_feature_mode; } for i in 0usize..MAX_SEGMENTS { let update = self.b.read_flag()?; self.segment[i].quantizer_level = if update { self.b.read_magnitude_and_sign(7)? } else { 0i32 } as i8; } for i in 0usize..MAX_SEGMENTS { let update = self.b.read_flag()?; self.segment[i].loopfilter_level = if update { self.b.read_magnitude_and_sign(6)? } else { 0i32 } as i8; } } if self.segments_update_map { for i in 0usize..3 { let update = self.b.read_flag()?; self.segment_tree_probs[i] = if update { self.b.read_literal(8)? } else { 255 }; } } Ok(()) } fn read_frame_header(&mut self) -> Result<(), DecodingError> { let tag = self.r.read_u24::()?; self.frame.keyframe = tag & 1 == 0; self.frame.version = ((tag >> 1) & 7) as u8; self.frame.for_display = (tag >> 4) & 1 != 0; let first_partition_size = tag >> 5; if self.frame.keyframe { let mut tag = [0u8; 3]; self.r.read_exact(&mut tag)?; if tag != [0x9d, 0x01, 0x2a] { return Err(DecodingError::Vp8MagicInvalid(tag)); } let w = self.r.read_u16::()?; let h = self.r.read_u16::()?; self.frame.width = w & 0x3FFF; self.frame.height = h & 0x3FFF; self.top = init_top_macroblocks(self.frame.width as usize); // Almost always the first macro block, except when non exists (i.e. `width == 0`) self.left = self.top.first().cloned().unwrap_or_default(); self.mbwidth = (self.frame.width + 15) / 16; self.mbheight = (self.frame.height + 15) / 16; self.frame.ybuf = vec![0u8; self.frame.width as usize * self.frame.height as usize]; self.frame.ubuf = vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize]; self.frame.vbuf = vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize]; self.top_border = vec![127u8; self.frame.width as usize + 4 + 16]; self.left_border = vec![129u8; 1 + 16]; } let mut buf = vec![0; first_partition_size as usize]; self.r.read_exact(&mut buf)?; // initialise binary decoder self.b.init(buf)?; if self.frame.keyframe { let color_space = self.b.read_literal(1)?; self.frame.pixel_type = self.b.read_literal(1)?; if color_space != 0 { return Err(DecodingError::ColorSpaceInvalid(color_space)); } } self.segments_enabled = self.b.read_flag()?; if self.segments_enabled { self.read_segment_updates()?; } self.frame.filter_type = self.b.read_flag()?; self.frame.filter_level = self.b.read_literal(6)?; self.frame.sharpness_level = self.b.read_literal(3)?; let lf_adjust_enable = self.b.read_flag()?; if lf_adjust_enable { self.read_loop_filter_adjustments()?; } self.num_partitions = (1usize << self.b.read_literal(2)? as usize) as u8; let num_partitions = self.num_partitions as usize; self.init_partitions(num_partitions)?; self.read_quantization_indices()?; if !self.frame.keyframe { // 9.7 refresh golden frame and altref frame // FIXME: support this? return Err(DecodingError::UnsupportedFeature( "Non-keyframe frames".to_owned(), )); } else { // Refresh entropy probs ????? 
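// This single bit appears to be the `refresh_entropy_probs` flag from RFC 6386 section 9.8; since this decoder only handles keyframes, the bit is read and discarded on the next line.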
let _ = self.b.read_literal(1); } self.update_token_probabilities()?; let mb_no_skip_coeff = self.b.read_literal(1)?; self.prob_skip_false = if mb_no_skip_coeff == 1 { Some(self.b.read_literal(8)?) } else { None }; if !self.frame.keyframe { // 9.10 remaining frame data self.prob_intra = 0; // FIXME: support this? return Err(DecodingError::UnsupportedFeature( "Non-keyframe frames".to_owned(), )); } else { // Reset motion vectors } Ok(()) } fn read_macroblock_header(&mut self, mbx: usize) -> Result { let mut mb = MacroBlock::default(); if self.segments_enabled && self.segments_update_map { mb.segmentid = self .b .read_with_tree(&SEGMENT_ID_TREE, &self.segment_tree_probs, 0)? as u8; }; mb.coeffs_skipped = if self.prob_skip_false.is_some() { self.b.read_bool(*self.prob_skip_false.as_ref().unwrap())? } else { false }; let inter_predicted = if !self.frame.keyframe { self.b.read_bool(self.prob_intra)? } else { false }; if inter_predicted { return Err(DecodingError::UnsupportedFeature( "VP8 inter-prediction".to_owned(), )); } if self.frame.keyframe { // intra prediction let luma = self .b .read_with_tree(&KEYFRAME_YMODE_TREE, &KEYFRAME_YMODE_PROBS, 0)?; mb.luma_mode = LumaMode::from_i8(luma).ok_or(DecodingError::LumaPredictionModeInvalid(luma))?; match mb.luma_mode.into_intra() { // `LumaMode::B` - This is predicted individually None => { for y in 0usize..4 { for x in 0usize..4 { let top = self.top[mbx].bpred[12 + x]; let left = self.left.bpred[y]; let intra = self.b.read_with_tree( &KEYFRAME_BPRED_MODE_TREE, &KEYFRAME_BPRED_MODE_PROBS[top as usize][left as usize], 0, )?; let bmode = IntraMode::from_i8(intra) .ok_or(DecodingError::IntraPredictionModeInvalid(intra))?; mb.bpred[x + y * 4] = bmode; self.top[mbx].bpred[12 + x] = bmode; self.left.bpred[y] = bmode; } } } Some(mode) => { for i in 0usize..4 { mb.bpred[12 + i] = mode; self.left.bpred[i] = mode; } } } let chroma = self.b .read_with_tree(&KEYFRAME_UV_MODE_TREE, &KEYFRAME_UV_MODE_PROBS, 0)?; mb.chroma_mode = ChromaMode::from_i8(chroma) .ok_or(DecodingError::ChromaPredictionModeInvalid(chroma))?; } self.top[mbx].chroma_mode = mb.chroma_mode; self.top[mbx].luma_mode = mb.luma_mode; self.top[mbx].bpred = mb.bpred; Ok(mb) } fn intra_predict_luma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) { let stride = 1usize + 16 + 4; let w = self.frame.width as usize; let mw = self.mbwidth as usize; let mut ws = create_border_luma(mbx, mby, mw, &self.top_border, &self.left_border); match mb.luma_mode { LumaMode::V => predict_vpred(&mut ws, 16, 1, 1, stride), LumaMode::H => predict_hpred(&mut ws, 16, 1, 1, stride), LumaMode::TM => predict_tmpred(&mut ws, 16, 1, 1, stride), LumaMode::DC => predict_dcpred(&mut ws, 16, stride, mby != 0, mbx != 0), LumaMode::B => predict_4x4(&mut ws, stride, &mb.bpred, resdata), } if mb.luma_mode != LumaMode::B { for y in 0usize..4 { for x in 0usize..4 { let i = x + y * 4; // Create a reference to a [i32; 16] array for add_residue (slices of size 16 do not work). let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap(); let y0 = 1 + y * 4; let x0 = 1 + x * 4; add_residue(&mut ws, rb, y0, x0, stride); } } } self.left_border[0] = ws[16]; for (i, left) in self.left_border[1..][..16].iter_mut().enumerate() { *left = ws[(i + 1) * stride + 16]; } for (top, &w) in self.top_border[mbx * 16..][..16] .iter_mut() .zip(&ws[16 * stride + 1..][..16]) { *top = w; } // Length is the remainder to the border, but maximally the current chunk. 
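// For example, with `frame.width == 100` and `mbx == 6` the macroblock starts at x == 96, so xlength == min(100 - 96, 16) == 4 and only those four columns are copied back into `ybuf`.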
let ylength = cmp::min(self.frame.height as usize - mby * 16, 16); let xlength = cmp::min(self.frame.width as usize - mbx * 16, 16); for y in 0usize..ylength { for (ybuf, &ws) in self.frame.ybuf[(mby * 16 + y) * w + mbx * 16..][..xlength] .iter_mut() .zip(ws[(1 + y) * stride + 1..][..xlength].iter()) { *ybuf = ws; } } } fn intra_predict_chroma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) { let stride = 1usize + 8; let w = self.frame.chroma_width() as usize; //8x8 with left top border of 1 let mut uws = [0u8; (8 + 1) * (8 + 1)]; let mut vws = [0u8; (8 + 1) * (8 + 1)]; let ylength = cmp::min(self.frame.chroma_height() as usize - mby * 8, 8); let xlength = cmp::min(self.frame.chroma_width() as usize - mbx * 8, 8); //left border for y in 0usize..8 { let (uy, vy) = if mbx == 0 || y >= ylength { (129, 129) } else { let index = (mby * 8 + y) * w + ((mbx - 1) * 8 + 7); (self.frame.ubuf[index], self.frame.vbuf[index]) }; uws[(y + 1) * stride] = uy; vws[(y + 1) * stride] = vy; } //top border for x in 0usize..8 { let (ux, vx) = if mby == 0 || x >= xlength { (127, 127) } else { let index = ((mby - 1) * 8 + 7) * w + (mbx * 8 + x); (self.frame.ubuf[index], self.frame.vbuf[index]) }; uws[x + 1] = ux; vws[x + 1] = vx; } //top left point let (u1, v1) = if mby == 0 { (127, 127) } else if mbx == 0 { (129, 129) } else { let index = ((mby - 1) * 8 + 7) * w + (mbx - 1) * 8 + 7; if index >= self.frame.ubuf.len() { (127, 127) } else { (self.frame.ubuf[index], self.frame.vbuf[index]) } }; uws[0] = u1; vws[0] = v1; match mb.chroma_mode { ChromaMode::DC => { predict_dcpred(&mut uws, 8, stride, mby != 0, mbx != 0); predict_dcpred(&mut vws, 8, stride, mby != 0, mbx != 0); } ChromaMode::V => { predict_vpred(&mut uws, 8, 1, 1, stride); predict_vpred(&mut vws, 8, 1, 1, stride); } ChromaMode::H => { predict_hpred(&mut uws, 8, 1, 1, stride); predict_hpred(&mut vws, 8, 1, 1, stride); } ChromaMode::TM => { predict_tmpred(&mut uws, 8, 1, 1, stride); predict_tmpred(&mut vws, 8, 1, 1, stride); } } for y in 0usize..2 { for x in 0usize..2 { let i = x + y * 2; let urb: &[i32; 16] = resdata[16 * 16 + i * 16..][..16].try_into().unwrap(); let y0 = 1 + y * 4; let x0 = 1 + x * 4; add_residue(&mut uws, urb, y0, x0, stride); let vrb: &[i32; 16] = resdata[20 * 16 + i * 16..][..16].try_into().unwrap(); add_residue(&mut vws, vrb, y0, x0, stride); } } for y in 0usize..ylength { let uv_buf_index = (mby * 8 + y) * w + mbx * 8; let ws_index = (1 + y) * stride + 1; for (((ub, vb), &uw), &vw) in self.frame.ubuf[uv_buf_index..][..xlength] .iter_mut() .zip(self.frame.vbuf[uv_buf_index..][..xlength].iter_mut()) .zip(uws[ws_index..][..xlength].iter()) .zip(vws[ws_index..][..xlength].iter()) { *ub = uw; *vb = vw; } } } fn read_coefficients( &mut self, block: &mut [i32], p: usize, plane: usize, complexity: usize, dcq: i16, acq: i16, ) -> Result { let first = if plane == 0 { 1usize } else { 0usize }; let probs = &self.token_probs[plane]; let tree = &DCT_TOKEN_TREE; let mut complexity = complexity; let mut has_coefficients = false; let mut skip = false; for i in first..16usize { let table = &probs[COEFF_BANDS[i] as usize][complexity]; let token = if !skip { self.partitions[p].read_with_tree(tree, table, 0)? } else { self.partitions[p].read_with_tree(tree, table, 2)? 
}; let mut abs_value = i32::from(match token { DCT_EOB => break, DCT_0 => { skip = true; has_coefficients = true; complexity = 0; continue; } literal @ DCT_1..=DCT_4 => i16::from(literal), category @ DCT_CAT1..=DCT_CAT6 => { let t = PROB_DCT_CAT[(category - DCT_CAT1) as usize]; let mut extra = 0i16; let mut j = 0; while t[j] > 0 { extra = extra + extra + self.partitions[p].read_bool(t[j])? as i16; j += 1; } i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra } c => panic!("unknown token: {}", c), }); skip = false; complexity = if abs_value == 0 { 0 } else if abs_value == 1 { 1 } else { 2 }; if self.partitions[p].read_bool(128)? { abs_value = -abs_value; } block[ZIGZAG[i] as usize] = abs_value * i32::from(if ZIGZAG[i] > 0 { acq } else { dcq }); has_coefficients = true; } Ok(has_coefficients) } fn read_residual_data( &mut self, mb: &MacroBlock, mbx: usize, p: usize, ) -> Result<[i32; 384], DecodingError> { let sindex = mb.segmentid as usize; let mut blocks = [0i32; 384]; let mut plane = if mb.luma_mode == LumaMode::B { 3 } else { 1 }; if plane == 1 { let complexity = self.top[mbx].complexity[0] + self.left.complexity[0]; let mut block = [0i32; 16]; let dcq = self.segment[sindex].y2dc; let acq = self.segment[sindex].y2ac; let n = self.read_coefficients(&mut block, p, plane, complexity as usize, dcq, acq)?; self.left.complexity[0] = if n { 1 } else { 0 }; self.top[mbx].complexity[0] = if n { 1 } else { 0 }; transform::iwht4x4(&mut block); for k in 0usize..16 { blocks[16 * k] = block[k]; } plane = 0; } for y in 0usize..4 { let mut left = self.left.complexity[y + 1]; for x in 0usize..4 { let i = x + y * 4; let block = &mut blocks[i * 16..i * 16 + 16]; let complexity = self.top[mbx].complexity[x + 1] + left; let dcq = self.segment[sindex].ydc; let acq = self.segment[sindex].yac; let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq)?; if block[0] != 0 || n { transform::idct4x4(block); } left = if n { 1 } else { 0 }; self.top[mbx].complexity[x + 1] = if n { 1 } else { 0 }; } self.left.complexity[y + 1] = left; } plane = 2; for &j in &[5usize, 7usize] { for y in 0usize..2 { let mut left = self.left.complexity[y + j]; for x in 0usize..2 { let i = x + y * 2 + if j == 5 { 16 } else { 20 }; let block = &mut blocks[i * 16..i * 16 + 16]; let complexity = self.top[mbx].complexity[x + j] + left; let dcq = self.segment[sindex].uvdc; let acq = self.segment[sindex].uvac; let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq)?; if block[0] != 0 || n { transform::idct4x4(block); } left = if n { 1 } else { 0 }; self.top[mbx].complexity[x + j] = if n { 1 } else { 0 }; } self.left.complexity[y + j] = left; } } Ok(blocks) } /// Does loop filtering on the macroblock fn loop_filter(&mut self, mbx: usize, mby: usize, mb: &MacroBlock) { let luma_w = self.frame.width as usize; let luma_h = self.frame.height as usize; let chroma_w = self.frame.chroma_width() as usize; let chroma_h = self.frame.chroma_height() as usize; let (filter_level, interior_limit, hev_threshold) = self.calculate_filter_parameters(mb); if filter_level > 0 { let mbedge_limit = (filter_level + 2) * 2 + interior_limit; let sub_bedge_limit = (filter_level * 2) + interior_limit; let luma_ylength = cmp::min(luma_h - 16 * mby, 16); let luma_xlength = cmp::min(luma_w - 16 * mbx, 16); let chroma_ylength = cmp::min(chroma_h - 8 * mby, 8); let chroma_xlength = cmp::min(chroma_w - 8 * mbx, 8); //filter across left of macroblock if mbx > 0 { //simple loop filtering if self.frame.filter_type { if 
luma_xlength >= 2 { for y in 0usize..luma_ylength { let y0 = mby * 16 + y; let x0 = mbx * 16; loop_filter::simple_segment( mbedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, 1, ); } } } else { if luma_xlength >= 4 { for y in 0usize..luma_ylength { let y0 = mby * 16 + y; let x0 = mbx * 16; loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, 1, ); } } if chroma_xlength >= 4 { for y in 0usize..chroma_ylength { let y0 = mby * 8 + y; let x0 = mbx * 8; loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.ubuf[..], y0 * chroma_w + x0, 1, ); loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.vbuf[..], y0 * chroma_w + x0, 1, ); } } } } //filter across vertical subblocks in macroblock if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped { if self.frame.filter_type { for x in (4usize..luma_xlength - 1).step_by(4) { for y in 0..luma_ylength { let y0 = mby * 16 + y; let x0 = mbx * 16 + x; loop_filter::simple_segment( sub_bedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, 1, ); } } } else { if luma_xlength > 3 { for x in (4usize..luma_xlength - 3).step_by(4) { for y in 0..luma_ylength { let y0 = mby * 16 + y; let x0 = mbx * 16 + x; loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, 1, ); } } } if chroma_xlength == 8 { for y in 0usize..chroma_ylength { let y0 = mby * 8 + y; let x0 = mbx * 8 + 4; loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.ubuf[..], y0 * chroma_w + x0, 1, ); loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.vbuf[..], y0 * chroma_w + x0, 1, ); } } } } //filter across top of macroblock if mby > 0 { if self.frame.filter_type { if luma_ylength >= 2 { for x in 0usize..luma_xlength { let y0 = mby * 16; let x0 = mbx * 16 + x; loop_filter::simple_segment( mbedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, luma_w, ); } } } else { //if bottom macroblock, can only filter if there is 3 pixels below if luma_ylength >= 4 { for x in 0usize..luma_xlength { let y0 = mby * 16; let x0 = mbx * 16 + x; loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, luma_w, ); } } if chroma_ylength >= 4 { for x in 0usize..chroma_xlength { let y0 = mby * 8; let x0 = mbx * 8 + x; loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.ubuf[..], y0 * chroma_w + x0, chroma_w, ); loop_filter::macroblock_filter( hev_threshold, interior_limit, mbedge_limit, &mut self.frame.vbuf[..], y0 * chroma_w + x0, chroma_w, ); } } } } //filter across horizontal subblock edges within the macroblock if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped { if self.frame.filter_type { for y in (4usize..luma_ylength - 1).step_by(4) { for x in 0..luma_xlength { let y0 = mby * 16 + y; let x0 = mbx * 16 + x; loop_filter::simple_segment( sub_bedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, luma_w, ); } } } else { if luma_ylength > 3 { for y in (4usize..luma_ylength - 3).step_by(4) { for x in 0..luma_xlength { let y0 = mby * 16 + y; let x0 = mbx * 16 + x; loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.ybuf[..], y0 * luma_w + x0, luma_w, ); } } } if chroma_ylength == 8 { for x in 0..chroma_xlength { let y0 = mby * 8 + 4; let x0 = mbx * 8 + x; 
loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.ubuf[..], y0 * chroma_w + x0, chroma_w, ); loop_filter::subblock_filter( hev_threshold, interior_limit, sub_bedge_limit, &mut self.frame.vbuf[..], y0 * chroma_w + x0, chroma_w, ); } } } } } } //return values are the filter level, interior limit and hev threshold fn calculate_filter_parameters(&self, macroblock: &MacroBlock) -> (u8, u8, u8) { let segment = self.segment[macroblock.segmentid as usize]; let mut filter_level = self.frame.filter_level as i32; if self.segments_enabled { if segment.delta_values { filter_level += i32::from(segment.loopfilter_level); } else { filter_level = i32::from(segment.loopfilter_level); } } filter_level = filter_level.clamp(0, 63); if macroblock.luma_mode == LumaMode::B { filter_level += self.mode_delta[0]; } let filter_level = filter_level.clamp(0, 63) as u8; //interior limit let mut interior_limit = filter_level; if self.frame.sharpness_level > 0 { interior_limit >>= if self.frame.sharpness_level > 4 { 2 } else { 1 }; if interior_limit > 9 - self.frame.sharpness_level { interior_limit = 9 - self.frame.sharpness_level; } } if interior_limit == 0 { interior_limit = 1; } //high edge variance threshold let mut hev_threshold = 0; #[allow(clippy::collapsible_else_if)] if self.frame.keyframe { if filter_level >= 40 { hev_threshold = 2; } else { hev_threshold = 1; } } else { if filter_level >= 40 { hev_threshold = 3; } else if filter_level >= 20 { hev_threshold = 2; } else if filter_level >= 15 { hev_threshold = 1; } } (filter_level, interior_limit, hev_threshold) } /// Decodes the current frame pub fn decode_frame(&mut self) -> Result<&Frame, DecodingError> { self.read_frame_header()?; for mby in 0..self.mbheight as usize { let p = mby % self.num_partitions as usize; self.left = MacroBlock::default(); for mbx in 0..self.mbwidth as usize { let mb = self.read_macroblock_header(mbx)?; let blocks = if !mb.coeffs_skipped { self.read_residual_data(&mb, mbx, p)? 
                } else {
                    if mb.luma_mode != LumaMode::B {
                        self.left.complexity[0] = 0;
                        self.top[mbx].complexity[0] = 0;
                    }
                    for i in 1usize..9 {
                        self.left.complexity[i] = 0;
                        self.top[mbx].complexity[i] = 0;
                    }

                    [0i32; 384]
                };

                self.intra_predict_luma(mbx, mby, &mb, &blocks);
                self.intra_predict_chroma(mbx, mby, &mb, &blocks);

                self.macroblocks.push(mb);
            }

            self.left_border = vec![129u8; 1 + 16];
        }

        //do loop filtering
        for mby in 0..self.mbheight as usize {
            for mbx in 0..self.mbwidth as usize {
                let mb = self.macroblocks[mby * self.mbwidth as usize + mbx];
                self.loop_filter(mbx, mby, &mb);
            }
        }

        Ok(&self.frame)
    }
}

impl LumaMode {
    fn from_i8(val: i8) -> Option<Self> {
        Some(match val {
            DC_PRED => LumaMode::DC,
            V_PRED => LumaMode::V,
            H_PRED => LumaMode::H,
            TM_PRED => LumaMode::TM,
            B_PRED => LumaMode::B,
            _ => return None,
        })
    }

    fn into_intra(self) -> Option<IntraMode> {
        Some(match self {
            LumaMode::DC => IntraMode::DC,
            LumaMode::V => IntraMode::VE,
            LumaMode::H => IntraMode::HE,
            LumaMode::TM => IntraMode::TM,
            LumaMode::B => return None,
        })
    }
}

impl ChromaMode {
    fn from_i8(val: i8) -> Option<Self> {
        Some(match val {
            DC_PRED => ChromaMode::DC,
            V_PRED => ChromaMode::V,
            H_PRED => ChromaMode::H,
            TM_PRED => ChromaMode::TM,
            _ => return None,
        })
    }
}

impl IntraMode {
    fn from_i8(val: i8) -> Option<Self> {
        Some(match val {
            B_DC_PRED => IntraMode::DC,
            B_TM_PRED => IntraMode::TM,
            B_VE_PRED => IntraMode::VE,
            B_HE_PRED => IntraMode::HE,
            B_LD_PRED => IntraMode::LD,
            B_RD_PRED => IntraMode::RD,
            B_VR_PRED => IntraMode::VR,
            B_VL_PRED => IntraMode::VL,
            B_HD_PRED => IntraMode::HD,
            B_HU_PRED => IntraMode::HU,
            _ => return None,
        })
    }
}

fn init_top_macroblocks(width: usize) -> Vec<MacroBlock> {
    let mb_width = (width + 15) / 16;

    let mb = MacroBlock {
        // Section 11.3 #3
        bpred: [IntraMode::DC; 16],
        luma_mode: LumaMode::DC,
        ..MacroBlock::default()
    };

    vec![mb; mb_width]
}

fn create_border_luma(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8]) -> [u8; 357] {
    let stride = 1usize + 16 + 4;
    let mut ws = [0u8; (1 + 16) * (1 + 16 + 4)];

    // A
    {
        let above = &mut ws[1..stride];
        if mby == 0 {
            for above in above.iter_mut() {
                *above = 127;
            }
        } else {
            for (above, &top) in above[..16].iter_mut().zip(&top[mbx * 16..]) {
                *above = top;
            }

            if mbx == mbw - 1 {
                for above in above[16..].iter_mut() {
                    *above = top[mbx * 16 + 15];
                }
            } else {
                for (above, &top) in above[16..].iter_mut().zip(&top[mbx * 16 + 16..]) {
                    *above = top;
                }
            }
        }
    }

    for i in 17usize..stride {
        ws[4 * stride + i] = ws[i];
        ws[8 * stride + i] = ws[i];
        ws[12 * stride + i] = ws[i];
    }

    // L
    if mbx == 0 {
        for i in 0usize..16 {
            ws[(i + 1) * stride] = 129;
        }
    } else {
        for (i, &left) in (0usize..16).zip(&left[1..]) {
            ws[(i + 1) * stride] = left;
        }
    }

    // P
    ws[0] = if mby == 0 {
        127
    } else if mbx == 0 {
        129
    } else {
        left[0]
    };

    ws
}

fn avg3(left: u8, this: u8, right: u8) -> u8 {
    let avg = (u16::from(left) + 2 * u16::from(this) + u16::from(right) + 2) >> 2;
    avg as u8
}

fn avg2(this: u8, right: u8) -> u8 {
    let avg = (u16::from(this) + u16::from(right) + 1) >> 1;
    avg as u8
}

// Only 16 elements from rblock are used to add residue, so it is restricted to 16 elements
// to enable SIMD and other optimizations.
//
// Clippy suggests the clamp method, but it seems to optimize worse as of rustc 1.82.0 nightly.
#[allow(clippy::manual_clamp)]
fn add_residue(pblock: &mut [u8], rblock: &[i32; 16], y0: usize, x0: usize, stride: usize) {
    let mut pos = y0 * stride + x0;
    for row in rblock.chunks(4) {
        for (p, &a) in pblock[pos..][..4].iter_mut().zip(row.iter()) {
            *p = (a + i32::from(*p)).max(0).min(255) as u8;
        }
        pos += stride;
    }
}

fn predict_4x4(ws: &mut [u8], stride: usize, modes: &[IntraMode], resdata: &[i32]) {
    for sby in 0usize..4 {
        for sbx in 0usize..4 {
            let i = sbx + sby * 4;
            let y0 = sby * 4 + 1;
            let x0 = sbx * 4 + 1;

            match modes[i] {
                IntraMode::TM => predict_tmpred(ws, 4, x0, y0, stride),
                IntraMode::VE => predict_bvepred(ws, x0, y0, stride),
                IntraMode::HE => predict_bhepred(ws, x0, y0, stride),
                IntraMode::DC => predict_bdcpred(ws, x0, y0, stride),
                IntraMode::LD => predict_bldpred(ws, x0, y0, stride),
                IntraMode::RD => predict_brdpred(ws, x0, y0, stride),
                IntraMode::VR => predict_bvrpred(ws, x0, y0, stride),
                IntraMode::VL => predict_bvlpred(ws, x0, y0, stride),
                IntraMode::HD => predict_bhdpred(ws, x0, y0, stride),
                IntraMode::HU => predict_bhupred(ws, x0, y0, stride),
            }

            let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap();
            add_residue(ws, rb, y0, x0, stride);
        }
    }
}

fn predict_vpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
    // This pass copies the top row to the rows below it.
    let (above, curr) = a.split_at_mut(stride * y0);
    let above_slice = &above[x0..];

    for curr_chunk in curr.chunks_exact_mut(stride).take(size) {
        for (curr, &above) in curr_chunk[1..].iter_mut().zip(above_slice) {
            *curr = above;
        }
    }
}

fn predict_hpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
    // This pass copies the first value of a row to the values right of it.
    for chunk in a.chunks_exact_mut(stride).skip(y0).take(size) {
        let left = chunk[x0 - 1];
        chunk[x0..].iter_mut().for_each(|a| *a = left);
    }
}

fn predict_dcpred(a: &mut [u8], size: usize, stride: usize, above: bool, left: bool) {
    let mut sum = 0;
    let mut shf = if size == 8 { 2 } else { 3 };

    if left {
        for y in 0usize..size {
            sum += u32::from(a[(y + 1) * stride]);
        }
        shf += 1;
    }

    if above {
        sum += a[1..=size].iter().fold(0, |acc, &x| acc + u32::from(x));
        shf += 1;
    }

    let dcval = if !left && !above {
        128
    } else {
        (sum + (1 << (shf - 1))) >> shf
    };

    for y in 0usize..size {
        a[1 + stride * (y + 1)..][..size]
            .iter_mut()
            .for_each(|a| *a = dcval as u8);
    }
}

// Clippy suggests the clamp method, but it seems to optimize worse as of rustc 1.82.0 nightly.
#[allow(clippy::manual_clamp)]
fn predict_tmpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
    // The formula for tmpred is:
    // X_ij = L_i + A_j - P (i, j=0, 1, 2, 3)
    //
    // |-----|-----|-----|-----|-----|
    // | P   | A0  | A1  | A2  | A3  |
    // |-----|-----|-----|-----|-----|
    // | L0  | X00 | X01 | X02 | X03 |
    // |-----|-----|-----|-----|-----|
    // | L1  | X10 | X11 | X12 | X13 |
    // |-----|-----|-----|-----|-----|
    // | L2  | X20 | X21 | X22 | X23 |
    // |-----|-----|-----|-----|-----|
    // | L3  | X30 | X31 | X32 | X33 |
    // |-----|-----|-----|-----|-----|
    // Diagram from p. 52 of RFC 6386
    // Split at L0
    let (above, x_block) = a.split_at_mut(y0 * stride + (x0 - 1));

    let p = i32::from(above[(y0 - 1) * stride + x0 - 1]);
    let above_slice = &above[(y0 - 1) * stride + x0..];

    for y in 0usize..size {
        let left_minus_p = i32::from(x_block[y * stride]) - p;

        // Add 1 to skip over L0 byte
        x_block[y * stride + 1..][..size]
            .iter_mut()
            .zip(above_slice)
            .for_each(|(cur, &abv)| *cur = (left_minus_p + i32::from(abv)).max(0).min(255) as u8);
    }
}

fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let mut v = 4;
    a[(y0 - 1) * stride + x0..][..4]
        .iter()
        .for_each(|&a| v += u32::from(a));

    for i in 0usize..4 {
        v += u32::from(a[(y0 + i) * stride + x0 - 1]);
    }

    v >>= 3;
    for chunk in a.chunks_exact_mut(stride).skip(y0).take(4) {
        for ch in chunk[x0..][..4].iter_mut() {
            *ch = v as u8;
        }
    }
}

fn topleft_pixel(a: &[u8], x0: usize, y0: usize, stride: usize) -> u8 {
    a[(y0 - 1) * stride + x0 - 1]
}

fn top_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8, u8, u8, u8, u8) {
    let pos = (y0 - 1) * stride + x0;
    let a_slice = &a[pos..pos + 8];
    let a0 = a_slice[0];
    let a1 = a_slice[1];
    let a2 = a_slice[2];
    let a3 = a_slice[3];
    let a4 = a_slice[4];
    let a5 = a_slice[5];
    let a6 = a_slice[6];
    let a7 = a_slice[7];

    (a0, a1, a2, a3, a4, a5, a6, a7)
}

fn left_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8) {
    let l0 = a[y0 * stride + x0 - 1];
    let l1 = a[(y0 + 1) * stride + x0 - 1];
    let l2 = a[(y0 + 2) * stride + x0 - 1];
    let l3 = a[(y0 + 3) * stride + x0 - 1];

    (l0, l1, l2, l3)
}

fn edge_pixels(
    a: &[u8],
    x0: usize,
    y0: usize,
    stride: usize,
) -> (u8, u8, u8, u8, u8, u8, u8, u8, u8) {
    let pos = (y0 - 1) * stride + x0 - 1;
    let a_slice = &a[pos..=pos + 4];
    let e0 = a[pos + 4 * stride];
    let e1 = a[pos + 3 * stride];
    let e2 = a[pos + 2 * stride];
    let e3 = a[pos + stride];
    let e4 = a_slice[0];
    let e5 = a_slice[1];
    let e6 = a_slice[2];
    let e7 = a_slice[3];
    let e8 = a_slice[4];

    (e0, e1, e2, e3, e4, e5, e6, e7, e8)
}

fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let p = topleft_pixel(a, x0, y0, stride);
    let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride);
    let avg_1 = avg3(p, a0, a1);
    let avg_2 = avg3(a0, a1, a2);
    let avg_3 = avg3(a1, a2, a3);
    let avg_4 = avg3(a2, a3, a4);

    let avg = [avg_1, avg_2, avg_3, avg_4];

    let mut pos = y0 * stride + x0;
    for _ in 0..4 {
        a[pos..=pos + 3].copy_from_slice(&avg);
        pos += stride;
    }
}

fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let p = topleft_pixel(a, x0, y0, stride);
    let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);

    let avgs = [
        avg3(p, l0, l1),
        avg3(l0, l1, l2),
        avg3(l1, l2, l3),
        avg3(l2, l3, l3),
    ];

    let mut pos = y0 * stride + x0;
    for &avg in avgs.iter() {
        for a_p in a[pos..=pos + 3].iter_mut() {
            *a_p = avg;
        }
        pos += stride;
    }
}

fn predict_bldpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);

    let avgs = [
        avg3(a0, a1, a2),
        avg3(a1, a2, a3),
        avg3(a2, a3, a4),
        avg3(a3, a4, a5),
        avg3(a4, a5, a6),
        avg3(a5, a6, a7),
        avg3(a6, a7, a7),
    ];

    let mut pos = y0 * stride + x0;
    for i in 0..4 {
        a[pos..=pos + 3].copy_from_slice(&avgs[i..=i + 3]);
        pos += stride;
    }
}

fn predict_brdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);

    let avgs = [
        avg3(e0, e1, e2),
        avg3(e1, e2, e3),
        avg3(e2, e3, e4),
        avg3(e3, e4, e5),
        avg3(e4, e5, e6),
        avg3(e5, e6, e7),
        avg3(e6, e7, e8),
    ];

    let mut pos = y0 * stride + x0;
    for i in 0..4 {
        a[pos..=pos + 3].copy_from_slice(&avgs[3 - i..7 - i]);
        pos += stride;
    }
}
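// The remaining 4x4 prediction modes (VR: vertical-right, VL: vertical-left,
// HD: horizontal-down, HU: horizontal-up) predict along diagonal directions.
// Each one writes the 16 pixels of the subblock individually from two- and
// three-tap averages (avg2/avg3) of the surrounding edge, top or left pixels.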
fn predict_bvrpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (_, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);

    a[(y0 + 3) * stride + x0] = avg3(e1, e2, e3);
    a[(y0 + 2) * stride + x0] = avg3(e2, e3, e4);
    a[(y0 + 3) * stride + x0 + 1] = avg3(e3, e4, e5);
    a[(y0 + 1) * stride + x0] = avg3(e3, e4, e5);
    a[(y0 + 2) * stride + x0 + 1] = avg2(e4, e5);
    a[y0 * stride + x0] = avg2(e4, e5);
    a[(y0 + 3) * stride + x0 + 2] = avg3(e4, e5, e6);
    a[(y0 + 1) * stride + x0 + 1] = avg3(e4, e5, e6);
    a[(y0 + 2) * stride + x0 + 2] = avg2(e5, e6);
    a[y0 * stride + x0 + 1] = avg2(e5, e6);
    a[(y0 + 3) * stride + x0 + 3] = avg3(e5, e6, e7);
    a[(y0 + 1) * stride + x0 + 2] = avg3(e5, e6, e7);
    a[(y0 + 2) * stride + x0 + 3] = avg2(e6, e7);
    a[y0 * stride + x0 + 2] = avg2(e6, e7);
    a[(y0 + 1) * stride + x0 + 3] = avg3(e6, e7, e8);
    a[y0 * stride + x0 + 3] = avg2(e7, e8);
}

fn predict_bvlpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);

    a[y0 * stride + x0] = avg2(a0, a1);
    a[(y0 + 1) * stride + x0] = avg3(a0, a1, a2);
    a[(y0 + 2) * stride + x0] = avg2(a1, a2);
    a[y0 * stride + x0 + 1] = avg2(a1, a2);
    a[(y0 + 1) * stride + x0 + 1] = avg3(a1, a2, a3);
    a[(y0 + 3) * stride + x0] = avg3(a1, a2, a3);
    a[(y0 + 2) * stride + x0 + 1] = avg2(a2, a3);
    a[y0 * stride + x0 + 2] = avg2(a2, a3);
    a[(y0 + 3) * stride + x0 + 1] = avg3(a2, a3, a4);
    a[(y0 + 1) * stride + x0 + 2] = avg3(a2, a3, a4);
    a[(y0 + 2) * stride + x0 + 2] = avg2(a3, a4);
    a[y0 * stride + x0 + 3] = avg2(a3, a4);
    a[(y0 + 3) * stride + x0 + 2] = avg3(a3, a4, a5);
    a[(y0 + 1) * stride + x0 + 3] = avg3(a3, a4, a5);
    a[(y0 + 2) * stride + x0 + 3] = avg3(a4, a5, a6);
    a[(y0 + 3) * stride + x0 + 3] = avg3(a5, a6, a7);
}

fn predict_bhdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (e0, e1, e2, e3, e4, e5, e6, e7, _) = edge_pixels(a, x0, y0, stride);

    a[(y0 + 3) * stride + x0] = avg2(e0, e1);
    a[(y0 + 3) * stride + x0 + 1] = avg3(e0, e1, e2);
    a[(y0 + 2) * stride + x0] = avg2(e1, e2);
    a[(y0 + 3) * stride + x0 + 2] = avg2(e1, e2);
    a[(y0 + 2) * stride + x0 + 1] = avg3(e1, e2, e3);
    a[(y0 + 3) * stride + x0 + 3] = avg3(e1, e2, e3);
    a[(y0 + 2) * stride + x0 + 2] = avg2(e2, e3);
    a[(y0 + 1) * stride + x0] = avg2(e2, e3);
    a[(y0 + 2) * stride + x0 + 3] = avg3(e2, e3, e4);
    a[(y0 + 1) * stride + x0 + 1] = avg3(e2, e3, e4);
    a[(y0 + 1) * stride + x0 + 2] = avg2(e3, e4);
    a[y0 * stride + x0] = avg2(e3, e4);
    a[(y0 + 1) * stride + x0 + 3] = avg3(e3, e4, e5);
    a[y0 * stride + x0 + 1] = avg3(e3, e4, e5);
    a[y0 * stride + x0 + 2] = avg3(e4, e5, e6);
    a[y0 * stride + x0 + 3] = avg3(e5, e6, e7);
}

fn predict_bhupred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
    let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);

    a[y0 * stride + x0] = avg2(l0, l1);
    a[y0 * stride + x0 + 1] = avg3(l0, l1, l2);
    a[y0 * stride + x0 + 2] = avg2(l1, l2);
    a[(y0 + 1) * stride + x0] = avg2(l1, l2);
    a[y0 * stride + x0 + 3] = avg3(l1, l2, l3);
    a[(y0 + 1) * stride + x0 + 1] = avg3(l1, l2, l3);
    a[(y0 + 1) * stride + x0 + 2] = avg2(l2, l3);
    a[(y0 + 2) * stride + x0] = avg2(l2, l3);
    a[(y0 + 1) * stride + x0 + 3] = avg3(l2, l3, l3);
    a[(y0 + 2) * stride + x0 + 1] = avg3(l2, l3, l3);
    a[(y0 + 2) * stride + x0 + 2] = l3;
    a[(y0 + 2) * stride + x0 + 3] = l3;
    a[(y0 + 3) * stride + x0] = l3;
    a[(y0 + 3) * stride + x0 + 1] = l3;
    a[(y0 + 3) * stride + x0 + 2] = l3;
    a[(y0 + 3) * stride + x0 + 3] = l3;
}

#[cfg(all(test, feature = "_benchmarks"))]
mod benches {
    use super::*;
    use test::{black_box, Bencher};

    const W: usize = 256;
    const H: usize = 256;

    fn make_sample_image() -> Vec<u8> {
        let mut v = Vec::with_capacity((W * H * 4) as usize);
        for c in 0u8..=255 {
            for k in 0u8..=255 {
                v.push(c);
                v.push(0);
                v.push(0);
                v.push(k);
            }
        }
        v
    }

    #[bench]
    fn bench_predict_4x4(b: &mut Bencher) {
        let mut v = black_box(make_sample_image());
        let res_data = vec![1i32; W * H * 4];
        let modes = [
            IntraMode::TM, IntraMode::VE, IntraMode::HE, IntraMode::DC,
            IntraMode::LD, IntraMode::RD, IntraMode::VR, IntraMode::VL,
            IntraMode::HD, IntraMode::HU, IntraMode::TM, IntraMode::VE,
            IntraMode::HE, IntraMode::DC, IntraMode::LD, IntraMode::RD,
        ];

        b.iter(|| {
            black_box(predict_4x4(&mut v, W * 2, &modes, &res_data));
        });
    }

    #[bench]
    fn bench_predict_bvepred(b: &mut Bencher) {
        let mut v = make_sample_image();

        b.iter(|| {
            predict_bvepred(black_box(&mut v), 5, 5, W * 2);
        });
    }

    #[bench]
    fn bench_predict_bldpred(b: &mut Bencher) {
        let mut v = black_box(make_sample_image());

        b.iter(|| {
            black_box(predict_bldpred(black_box(&mut v), 5, 5, W * 2));
        });
    }

    #[bench]
    fn bench_predict_brdpred(b: &mut Bencher) {
        let mut v = black_box(make_sample_image());

        b.iter(|| {
            black_box(predict_brdpred(black_box(&mut v), 5, 5, W * 2));
        });
    }

    #[bench]
    fn bench_predict_bhepred(b: &mut Bencher) {
        let mut v = black_box(make_sample_image());

        b.iter(|| {
            black_box(predict_bhepred(black_box(&mut v), 5, 5, W * 2));
        });
    }

    #[bench]
    fn bench_top_pixels(b: &mut Bencher) {
        let v = black_box(make_sample_image());

        b.iter(|| {
            black_box(top_pixels(black_box(&v), 5, 5, W * 2));
        });
    }

    #[bench]
    fn bench_edge_pixels(b: &mut Bencher) {
        let v = black_box(make_sample_image());

        b.iter(|| {
            black_box(edge_pixels(black_box(&v), 5, 5, W * 2));
        });
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_avg2() {
        for i in 0u8..=255 {
            for j in 0u8..=255 {
                let ceil_avg = ((i as f32) + (j as f32)) / 2.0;
                let ceil_avg = ceil_avg.ceil() as u8;
                assert_eq!(
                    ceil_avg,
                    avg2(i, j),
                    "avg2({}, {}), expected {}, got {}.",
                    i, j, ceil_avg, avg2(i, j)
                );
            }
        }
    }

    #[test]
    fn test_avg2_specific() {
        assert_eq!(
            255,
            avg2(255, 255),
            "avg2(255, 255), expected 255, got {}.",
            avg2(255, 255)
        );
        assert_eq!(1, avg2(1, 1), "avg2(1, 1), expected 1, got {}.", avg2(1, 1));
        assert_eq!(2, avg2(2, 1), "avg2(2, 1), expected 2, got {}.", avg2(2, 1));
    }

    #[test]
    fn test_avg3() {
        for i in 0u8..=255 {
            for j in 0u8..=255 {
                for k in 0u8..=255 {
                    let floor_avg = ((i as f32) + 2.0 * (j as f32) + { k as f32 } + 2.0) / 4.0;
                    let floor_avg = floor_avg.floor() as u8;
                    assert_eq!(
                        floor_avg,
                        avg3(i, j, k),
                        "avg3({}, {}, {}), expected {}, got {}.",
                        i, j, k, floor_avg, avg3(i, j, k)
                    );
                }
            }
        }
    }

    #[test]
    fn test_edge_pixels() {
        #[rustfmt::skip]
        let im = vec![5, 6, 7, 8, 9,
                      4, 0, 0, 0, 0,
                      3, 0, 0, 0, 0,
                      2, 0, 0, 0, 0,
                      1, 0, 0, 0, 0];
        let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(&im, 1, 1, 5);
        assert_eq!(e0, 1);
        assert_eq!(e1, 2);
        assert_eq!(e2, 3);
        assert_eq!(e3, 4);
        assert_eq!(e4, 5);
        assert_eq!(e5, 6);
        assert_eq!(e6, 7);
        assert_eq!(e7, 8);
        assert_eq!(e8, 9);
    }

    #[test]
    fn test_top_pixels() {
        #[rustfmt::skip]
        let im = vec![1, 2, 3, 4, 5, 6, 7, 8,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0];
        let (e0, e1, e2, e3, e4, e5, e6, e7) = top_pixels(&im, 0, 1, 8);
        assert_eq!(e0, 1);
        assert_eq!(e1, 2);
        assert_eq!(e2, 3);
        assert_eq!(e3, 4);
        assert_eq!(e4, 5);
        assert_eq!(e5, 6);
        assert_eq!(e6, 7);
        assert_eq!(e7, 8);
    }

    #[test]
    fn test_add_residue() {
        let mut pblock = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
        let rblock = [
            -1, -2, -3, -4, 250, 249, 248, 250, -10, -18, -192, -17, -3, 15, 18, 9,
        ];
        let expected: [u8; 16] = [0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 10, 29, 33, 25];

        add_residue(&mut pblock, &rblock, 0, 0, 4);

        for (&e, &i) in expected.iter().zip(&pblock) {
            assert_eq!(e, i);
        }
    }

    #[test]
    fn test_predict_bhepred() {
        #[rustfmt::skip]
        let expected: Vec<u8> = vec![5, 0, 0, 0, 0,
                                     4, 4, 4, 4, 4,
                                     3, 3, 3, 3, 3,
                                     2, 2, 2, 2, 2,
                                     1, 1, 1, 1, 1];
        #[rustfmt::skip]
        let mut im = vec![5, 0, 0, 0, 0,
                          4, 0, 0, 0, 0,
                          3, 0, 0, 0, 0,
                          2, 0, 0, 0, 0,
                          1, 0, 0, 0, 0];
        predict_bhepred(&mut im, 1, 1, 5);
        for (&e, i) in expected.iter().zip(im) {
            assert_eq!(e, i);
        }
    }

    #[test]
    fn test_predict_brdpred() {
        #[rustfmt::skip]
        let expected: Vec<u8> = vec![5, 6, 7, 8, 9,
                                     4, 5, 6, 7, 8,
                                     3, 4, 5, 6, 7,
                                     2, 3, 4, 5, 6,
                                     1, 2, 3, 4, 5];
        #[rustfmt::skip]
        let mut im = vec![5, 6, 7, 8, 9,
                          4, 0, 0, 0, 0,
                          3, 0, 0, 0, 0,
                          2, 0, 0, 0, 0,
                          1, 0, 0, 0, 0];
        predict_brdpred(&mut im, 1, 1, 5);
        for (&e, i) in expected.iter().zip(im) {
            assert_eq!(e, i);
        }
    }

    #[test]
    fn test_predict_bldpred() {
        #[rustfmt::skip]
        let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0];
        let avg_1 = 2u8;
        let avg_2 = 3u8;
        let avg_3 = 4u8;
        let avg_4 = 5u8;
        let avg_5 = 6u8;
        let avg_6 = 7u8;
        let avg_7 = 8u8;

        predict_bldpred(&mut im, 0, 1, 8);

        assert_eq!(im[8], avg_1);
        assert_eq!(im[9], avg_2);
        assert_eq!(im[10], avg_3);
        assert_eq!(im[11], avg_4);
        assert_eq!(im[16], avg_2);
        assert_eq!(im[17], avg_3);
        assert_eq!(im[18], avg_4);
        assert_eq!(im[19], avg_5);
        assert_eq!(im[24], avg_3);
        assert_eq!(im[25], avg_4);
        assert_eq!(im[26], avg_5);
        assert_eq!(im[27], avg_6);
        assert_eq!(im[32], avg_4);
        assert_eq!(im[33], avg_5);
        assert_eq!(im[34], avg_6);
        assert_eq!(im[35], avg_7);
    }

    #[test]
    fn test_predict_bvepred() {
        #[rustfmt::skip]
        let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0];
        let avg_1 = 2u8;
        let avg_2 = 3u8;
        let avg_3 = 4u8;
        let avg_4 = 5u8;

        predict_bvepred(&mut im, 1, 1, 9);

        assert_eq!(im[10], avg_1);
        assert_eq!(im[11], avg_2);
        assert_eq!(im[12], avg_3);
        assert_eq!(im[13], avg_4);
        assert_eq!(im[19], avg_1);
        assert_eq!(im[20], avg_2);
        assert_eq!(im[21], avg_3);
        assert_eq!(im[22], avg_4);
        assert_eq!(im[28], avg_1);
        assert_eq!(im[29], avg_2);
        assert_eq!(im[30], avg_3);
        assert_eq!(im[31], avg_4);
        assert_eq!(im[37], avg_1);
        assert_eq!(im[38], avg_2);
        assert_eq!(im[39], avg_3);
        assert_eq!(im[40], avg_4);
    }
}
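
// A couple of additional worked examples for the predictors documented above.
// These are illustrative sanity checks, not an exhaustive suite: the buffers
// assume the same small border-plus-4x4-block layout as the tests in
// `mod tests`, and the expected values are derived by hand from the TM
// formula `X_ij = L_i + A_j - P` and from predict_hpred's left-pixel copy.
#[cfg(test)]
mod prediction_examples {
    use super::*;

    // TM prediction on a 5x5 buffer (stride 5) with P = 1, A = [2, 3, 4, 5]
    // and L = [6, 7, 8, 9]; every output pixel is L_i + A_j - P.
    #[test]
    fn test_predict_tmpred() {
        #[rustfmt::skip]
        let mut im: Vec<u8> = vec![1, 2, 3, 4, 5,
                                   6, 0, 0, 0, 0,
                                   7, 0, 0, 0, 0,
                                   8, 0, 0, 0, 0,
                                   9, 0, 0, 0, 0];
        #[rustfmt::skip]
        let expected: Vec<u8> = vec![1, 2,  3,  4,  5,
                                     6, 7,  8,  9, 10,
                                     7, 8,  9, 10, 11,
                                     8, 9, 10, 11, 12,
                                     9, 10, 11, 12, 13];
        predict_tmpred(&mut im, 4, 1, 1, 5);
        assert_eq!(im, expected);
    }

    // predict_hpred copies the pixel to the left of each row across the rest
    // of that row.
    #[test]
    fn test_predict_hpred() {
        #[rustfmt::skip]
        let mut im: Vec<u8> = vec![0, 0, 0, 0, 0,
                                   4, 0, 0, 0, 0,
                                   3, 0, 0, 0, 0,
                                   2, 0, 0, 0, 0,
                                   1, 0, 0, 0, 0];
        #[rustfmt::skip]
        let expected: Vec<u8> = vec![0, 0, 0, 0, 0,
                                     4, 4, 4, 4, 4,
                                     3, 3, 3, 3, 3,
                                     2, 2, 2, 2, 2,
                                     1, 1, 1, 1, 1];
        predict_hpred(&mut im, 4, 1, 1, 5);
        assert_eq!(im, expected);
    }
}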