hayro-jpeg2000-0.3.2/.cargo_vcs_info.json0000644000000001540000000000100134050ustar { "git": { "sha1": "8be3586edbbb80065017dc2e50af13ba3c40bbab" }, "path_in_vcs": "hayro-jpeg2000" }hayro-jpeg2000-0.3.2/.gitignore000064400000000000000000000000341046102023000141620ustar 00000000000000diffs snapshots test-inputs hayro-jpeg2000-0.3.2/Cargo.lock0000644000000334040000000000100113640ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 4 [[package]] name = "adler2" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bumpalo" version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytemuck" version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" [[package]] name = "byteorder-lite" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "console" version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" dependencies = [ "encode_unicode", "libc", "once_cell", "unicode-width", "windows-sys", ] [[package]] name = "crc32fast" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "encode_unicode" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "fdeflate" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" dependencies = [ "simd-adler32", ] [[package]] name = "fearless_simd" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fb2907d1f08b2b316b9223ced5b0e89d87028ba8deae9764741dba8ff7f3903" dependencies = [ "bytemuck", ] [[package]] name = "flate2" version = "1.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "hayro-jpeg2000" version = "0.3.2" dependencies = [ "fearless_simd", "image", "indicatif", "log", "moxcms", "rayon", "serde", "serde_json", ] [[package]] name = "image" version = "0.25.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6506c6c10786659413faa717ceebcb8f70731c0a60cbae39795fdf114519c1a" dependencies = [ "bytemuck", "byteorder-lite", "moxcms", "num-traits", "png", ] [[package]] name = "indicatif" version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" dependencies = [ "console", "number_prefix", "portable-atomic", "rayon", "unicode-width", "web-time", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "libc" version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miniz_oxide" version = "0.8.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "simd-adler32", ] [[package]] name = "moxcms" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac9557c559cd6fc9867e122e20d2cbefc9ca29d80d027a8e39310920ed2f0a97" dependencies = [ "num-traits", "pxfm", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "png" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97baced388464909d42d89643fe4361939af9b7ce7a31ee32a168f832a70f2a0" dependencies = [ "bitflags", "crc32fast", "fdeflate", "flate2", "miniz_oxide", ] [[package]] name = "portable-atomic" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "proc-macro2" version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] [[package]] name = "pxfm" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7186d3822593aa4393561d186d1393b3923e9d6163d3fbfd6e825e3e6cf3e6a8" dependencies = [ "num-traits", ] [[package]] name = "quote" version = "1.0.43" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", 
"serde_core", ] [[package]] name = "simd-adler32" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "syn" version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-width" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "wasm-bindgen" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "web-time" version = "1.1.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" hayro-jpeg2000-0.3.2/Cargo.toml0000644000000052670000000000100114150ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2024" name = "hayro-jpeg2000" version = "0.3.2" authors = ["Laurenz Stampfl "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A memory-safe, pure-Rust JPEG 2000 decoder." 
readme = "README.md" license = "Apache-2.0 OR MIT" repository = "https://github.com/LaurenzV/hayro" resolver = "2" [features] default = [ "std", "simd", "image", ] image = [ "dep:image", "dep:moxcms", "std", ] logging = ["dep:log"] simd = [ "fearless_simd", "std", ] std = [] [lib] name = "hayro_jpeg2000" path = "src/lib.rs" [[example]] name = "png" path = "examples/png.rs" [[test]] name = "asset_suite" path = "tests/mod.rs" harness = false [dependencies.fearless_simd] version = "0.3.0" optional = true [dependencies.image] version = "0.25" optional = true default-features = false [dependencies.log] version = "0.4" optional = true [dependencies.moxcms] version = "0.7.11" optional = true [dev-dependencies.image] version = "0.25" features = ["png"] default-features = false [dev-dependencies.indicatif] version = "0.17" features = ["rayon"] [dev-dependencies.rayon] version = "1.11" [dev-dependencies.serde] version = "1" features = ["derive"] [dev-dependencies.serde_json] version = "1" [lints.clippy] collection_is_never_read = "warn" dbg_macro = "warn" debug_assert_with_mut_call = "warn" default_trait_access = "warn" doc_markdown = "warn" fn_to_numeric_cast_any = "warn" infinite_loop = "warn" large_stack_arrays = "warn" mismatching_type_param_order = "warn" negative_feature_names = "warn" redundant_feature_names = "warn" same_functions_in_if_condition = "warn" semicolon_if_nothing_returned = "warn" todo = "warn" too_many_arguments = "allow" unseparated_literal_suffix = "warn" use_self = "warn" wildcard_dependencies = "warn" [lints.rust] elided_lifetimes_in_paths = "warn" keyword_idents_2024 = "forbid" missing_docs = "warn" non_ascii_idents = "forbid" non_local_definitions = "forbid" trivial_numeric_casts = "warn" unnameable_types = "warn" unreachable_pub = "warn" unsafe_op_in_unsafe_fn = "forbid" unused_import_braces = "warn" unused_lifetimes = "warn" unused_macro_rules = "warn" unused_qualifications = "warn" 
hayro-jpeg2000-0.3.2/Cargo.toml.orig000064400000000000000000000021721046102023000150660ustar 00000000000000[package] name = "hayro-jpeg2000" version = "0.3.2" description = "A memory-safe, pure-Rust JPEG 2000 decoder." authors = { workspace = true } edition = { workspace = true } repository = { workspace = true } license = { workspace = true } readme = "README.md" [dependencies] log = { workspace = true, optional = true } fearless_simd = { workspace = true, optional = true } image = { workspace = true, default-features = false, optional = true } moxcms = { workspace = true, optional = true } [features] default = ["std", "simd", "image"] # Enable std library support. Disable for no_std environments. std = [] # Enable SIMD via `fearless_simd`. simd = ["fearless_simd", "std"] # Integration with the `image` crate. image = ["dep:image", "dep:moxcms", "std"] # Enable logging via the `log` crate. logging = ["dep:log"] [lints] workspace = true [dev-dependencies] image = { workspace = true, default-features = false, features = ["png"] } indicatif = { version = "0.17", features = ["rayon"] } rayon = "1.11" serde = { version = "1", features = ["derive"] } serde_json = "1" [[test]] name = "asset_suite" path = "tests/mod.rs" harness = false hayro-jpeg2000-0.3.2/README.md000064400000000000000000000060451046102023000134610ustar 00000000000000# hayro-jpeg2000 [![Crates.io](https://img.shields.io/crates/v/hayro-jpeg2000.svg)](https://crates.io/crates/hayro-jpeg2000) [![Documentation](https://docs.rs/hayro-jpeg2000/badge.svg)](https://docs.rs/hayro-jpeg2000) A memory-safe, pure-Rust JPEG 2000 decoder. `hayro-jpeg2000` can decode both raw JPEG 2000 codestreams (`.j2c`) and images wrapped inside the JP2 container format. The decoder supports the vast majority of features defined in the JPEG2000 core coding system (ISO/IEC 15444-1) as well as some color spaces from the extensions (ISO/IEC 15444-2). 
There are still some missing pieces for some "obscure" features(like for example support for progression order changes in tile-parts), but all features that actually commonly appear in real-life images should be supported (if not, please open an issue!). The decoder abstracts away most of the internal complexity of JPEG2000 and yields a simple 8-bit image with either greyscale, RGB, CMYK or an ICC-based color space, which can then be processed further according to your needs. ## Example ```rust use hayro_jpeg2000::{Image, DecodeSettings}; let data = std::fs::read("image.jp2").unwrap(); let image = Image::new(&data, &DecodeSettings::default()).unwrap(); println!( "{}x{} image in {:?} with alpha={}", image.width, image.height, image.color_space, image.has_alpha, ); let bitmap = image.decode().unwrap(); ``` If you want to see a more comprehensive example, please take a look at the example in [GitHub](https://github.com/LaurenzV/hayro/blob/main/hayro-jpeg2000/examples/png.rs), which shows you the main steps needed to convert a JPEG2000 image into PNG for example. ## Testing The decoder has been tested against 20.000+ images scraped from random PDFs on the internet and also passes a large part of the `OpenJPEG` test suite. So you can expect the crate to perform decently in terms of decoding correctness. ## Performance A decent amount of effort has already been put into optimizing this crate (both in terms of raw performance but also memory allocations). However, there are some more important optimizations that have not been implemented yet, so there is definitely still room for improvement (and I am planning on implementing them eventually). Overall, you should expect this crate to have worse performance than `OpenJPEG`, but the difference gap should not be too large. ## Safety By default, the crate has the `simd` feature enabled, which uses the [`fearless_simd`](https://github.com/linebender/fearless_simd) crate to accelerate important parts of the pipeline. 
If you want to eliminate any usage of unsafe in this crate as well as its dependencies, you can simply disable this feature, at the cost of worse decoding performance. Unsafe code is forbidden via a crate-level attribute. ## License Licensed under either of - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or ) - MIT license ([LICENSE-MIT](LICENSE-MIT) or ) at your option. hayro-jpeg2000-0.3.2/TESTS.md000064400000000000000000000036711046102023000134300ustar 00000000000000Test inputs are not committed to git. Instead, they are downloaded on demand from https://hayro-assets.dev under the `jpeg2000` namespace. Each namespace has a manifest file that describes every entry and optional metadata: - A plain string uses the same value for the test id and the file path, and the test is expected to render and match a snapshot. - A JSON object uses the following fields: - `id`: human-readable test id (typically the filename without the extension). - `path` (or `file`): the actual filename to download and decode. - `render` (optional, default `true`): set to `false` for crash-only coverage without snapshot checks. - `strict` / `resolve_palette_indices` (optional): override the default decode settings for the test. - `target_resolution` (optional): a `[width, height]` hint for decoding at a reduced resolution. The manifests live next to the crate (currently `manifest_serenity.json`, `manifest_openjpeg.json`, and `manifest_custom.json`). Files are stored locally under `test-inputs//` and ignored by git. ## Synchronizing inputs Run the helper script whenever you need to populate or refresh the inputs: ```bash python3 sync.py ``` Use `--force` to redownload files even if a cached copy exists. The script downloads every entry in every manifest and mirrors the remote directory layout under `test-inputs/`. ## Generating baseline snapshots Snapshots are stored under `snapshots/` and also ignored by git. 
To seed them, first ensure the decoder is built from a known-good revision, then run the harness once: ```bash REPLACE=1 cargo test --release ``` This renders every manifest entry, writes the PNG snapshots, and logs any failures. After the baseline exists, run the suite normally to verify changes: ```bash cargo test --release ``` If the decoder output changes intentionally, rerun with `REPLACE=1` to update the affected snapshots and then rerun without `REPLACE` to confirm everything passes. hayro-jpeg2000-0.3.2/assets/CGATS001Compat-v2-micro.icc000064400000000000000000000204201046102023000200370ustar 00000000000000!ADBEscnrCMYKLab  *acspMSFTsawsctrl-handgi>90Xdesc_cprt wtptA2B0 descuCMYtextCC0XYZ -mft20 ##)%/D5S;ZAZGNMJS0Y^djp+u{OkΚ(3@GQ[gdUA!Wh b#M(.3V8>)CI%NT3Y_.dj okty$RboӭX4ZӚH ,c#(-3#8T=BH!M~RXY]cJhn/sx~;~ZO)'Er̤ޡI 9s<{!j&^+\0c5n:?DIO#TMYm^chmrw|pJ#ߙɞ£ѨC$đ}Ѕ֦]v(n~V~X5~SVN2~dx}h}.}‹j ~%T9~Ć>cg{ L{|D|ՓRn}tI~jzɷzͮͦ{#}*{ȟ4P|R} z ay_zJ{{FO{ɣ{g*y߄2yb}IylzszkVN{8`zȎ鎨}9Ќ6|̊|t2||JTy}"I[trWH-ʅl XșpF"a:]9˔̅nԄE뗩 +q-.W?+_mD)| 5cȤlgCт! 
D՝{.y{z5u{e {> | TTIk͗ƈن*c.IԛۏHwTW[pQ:-R ގ1yOƶyd6yJG*z$-c{y HKcӭFso#ʣ|Հ.~*b@EZ#v!_}iޘb5,,E"fw#<A|laͪFDң#!jC9 D|E(a~RDƪ4#&ПxlxkS˽x8y?zQTlfPSi8K]I %CNl8äkS0O)84#:ĉ)I#l7)8S<ĕ8U*ꐚelB6Snd8PxylX S8L!&cx_uƏygvzjwx{uxN=|z.g~4{+uvXx.evy|DLdzÁf {`s8.t앗v]ttwݍJy6yrsptrvI&w4^w܋q 4 riȗsquGvl$v߁px!q^s &pDtȱfFv;uʊss{Wetӏ4v(jԃwB˃y, DzJ,&flioA, / Dv*j' B·&o~m~RdM~(d=h~( ~i4*B~HAj}c6}o<}qx}jq"=sb}rt\6Jv6-xX{1H1@{{㍢ZNj~5Y]J@ubz.7YMG4/̈& 3pxW䈬r3%R_ݲI>ٕS{w=ģ^Vчɞe2`;fnJv0uU1٠.Xh'ph'r^ksMNu)vB~4N~:Z^}j~DLL)~r(~t<ĊiETK(d'2J"T⃭~h J1H'X{v.#|vÚ!g 3IXa&Ґd+(>Ӟ򸩂 fM|H&_$psSqaZ_Ur>YCtNܢuLx=}js|Y٧k|Q=>|ˠm}r5X=8h%AyqjX2e<nQ€eXpީ*Wdi<4j\<бpW:p}kb0rn QtpnOvsExnuc)zv%m}=o})r}lnt:}RDCv}vswd|Ljcm!QojorBs uAjhpᮔk>،mhpnA@rI\ s,Jgi嫊ldgo!@p r`3CfUiji gken'?%q qʿ3}i̢X~(l˂~uo|`~r:tWu;zzey{rz&{{B_F|{9W}=|h8~|xhy ~y2]wz7{,z}K߸vwEE}wz[xܐ6yz{KzuNu{vZw5x6{ t|u-ziuƭ*Yv4w[:zKۨ fhkrTnbS p.2rwæwxqpyDQz+-zo}s:[ЅfoWP],3!mvN%m+ދPS߈Eml4›M*upBQ;k)PuL\ @F;u@bw[3?㊸| fg~'ioP k59xno{tfuOu^4vXw {*eUN^ 4C~'&zWDeY;Mќ43[zy¥d,IM6w3:0:yPw6d3cLɚv.2ܖbhexTg>βj%9 kɨnghq8Tuq>ݯr%sGv gι|T|#>|%s| }gSױ>S%EҠgGS˰5>A %| /g"vSگ*>Bؚ%Q )Æh'a2kbednWiVckqm=usNpi vqxcr%gt juamw*;p#xl sxŽ`8ed<ԁBg_kEP:um^r6,]taew]i90jbrυ[1_ؠ}c\gP81ilrXZiVs^|bbe[i,7Tgs9]t`07vJdv\whVyl922zo-W~mrVUpp[rr#ttmsUv u1w5w+}ynmorqS\sm/tbJ}^ jm6po?Qq3}.qE}Fwi:vk~o;mPo-p {}MhrjR*n#o*OnK*-n{}_kcfgI$k0%Hmjw %~m~~pGd~rUG1t2$u(}Y{||{}+cX|1}tF|}$=|}yT0{`yayEPzd#Xz0kwz$x`x[DGxȑ@"xx]y9xZ_xCux̜X"wSƑc^mяMbV f{:isk|Ikl\n@Tq9s$;r(y%kzS0{R8F{ :{K~ׇ2iŇbR8L}ShQb7ZR+}OhܞP86 *>0ng]q[@$a=Ebsd+gW iZmiZpgkDØm+>o iqCluYMvCӕww*x CyZkX\B듚I* jј WB>)4 jJWSA֐v)= Zٳ [H#^4azcgiYfHzh3ߨjЧk xqyYhqGwr3qsͤrs[TweX᫈}G?}.3:}̡}~XdF楉2`;ǒ=XBFݤC2Νݏޑ^WHsb\xcfb"Xjhg5lkfvmxXh,]kw$u{SxEYByt^%zOUb{ 2HdM{w|O}U1rZS_x^12azxiLwScpXbRb]0g^xe KIoQ=oVQ[V}/\ZxjjV܄kmL\qioaL?r.fM)qsi pše4fCxhpih3k\lJmo(o{qy4w{`ud4wfg|x7I>jay>'^kyeU}U\؅t}`Ѕ-dNdVGgX&{hZhZ f|^3baۏKFd%f(Rv7XJ{\{pa`Ec(%&d^J2SwVwrx[Zy`?{e/|gvVrudq't h Y7uk>=vn ?xoo|Sn ro"ptYWqu kIhSj9kmtfUa MZJG_0Ņcd߇ep{rb_fI,iA/km7Oq|ow]}@qsHI}s(.}tkVju~oy|X\rz*|G z}8.Bz}c*|}n_w[uwϊ 
Fx-^x&zfmyYlZywETvsW,vNxW`ݕqTROY:]D `ߎHdL_m7`Mca9f] vhl^$FkL2m8?oڋpo_'~FD%eM-~IR~)U}99}7B*dmIHOQ!(R[}236|)?"cFH3L0(O]|8_ NvRbT]faZBDi`~ ldp&WE\t+\ak\'`Ie@ci f{lEwgPckqUnuZZp?U^ysatl}pJzoP{vXWV| >#Y|k^g}/ʂF/n(L̈oW!Rl6=$V8U\ Bm+IוVHO*u0b$%v~e,{h{bkPfQ"mi=ok$pmwxp`f rOht/;kut$kRvK[vex_a΂CNae :g.#*g(t^^Mb9d0"cssT%wKCQ00 V9Xa~R /VA[,.^҄M`i|PU{wt,xjv%Syqj}uMqyd=sz+ t~zyu>z}9~Ln}CR/VYG[gm=D]P.`,abNcu<;h,Pi`j-l{P:؄0r+nstev": ~'*c~=9~ ~hayro-jpeg2000-0.3.2/assets/LAB.icc000064400000000000000000000007441046102023000145620ustar 00000000000000lcms@abstLab Lab   "acspAPPL-lcmsdescFcprtLwtptTchadh,A2B0Pmluc enUS*Lab identity built-inmluc enUS0No copyright, use freelyXYZ -sf32mAB  paraparaparahayro-jpeg2000-0.3.2/assets/LICENSE.txt000064400000000000000000000146301046102023000153260ustar 00000000000000CC0 1.0 Universal Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. 
For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; ii. moral rights retained by the original author(s) and/or performer(s); iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; v. rights protecting the extraction, dissemination, use and reuse of data in a Work; vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. 
To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). 
The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 
For more information, please see http://creativecommons.org/publicdomain/zero/1.0/hayro-jpeg2000-0.3.2/assets/ProPhoto-v2-micro.icc000064400000000000000000000007601046102023000173700ustar 00000000000000lcmsmntrRGB XYZ  acspMSFTsawsctrl-hand1믿ea$ desc_cprt wtptrXYZ,gXYZ@bXYZTrTRChgTRChbTRChdescuROMtextCC0XYZ :XYZ 7IXYZ "=XYZ ,curv>C/) {qZ"b%(,9/3c7%;?C GZKP$TYb^,chm3rmw}4j- Iu͎vw{hayro-jpeg2000-0.3.2/assets/README.md000064400000000000000000000003311046102023000147530ustar 00000000000000- The LAB profile was created manually via lcms. - The CGATS001 and ProPhoto profiles are taken from https://github.com/saucecontrol/Compact-ICC-Profiles. All profiles can be used under the CC0 1.0 Universal license.hayro-jpeg2000-0.3.2/examples/png.rs000064400000000000000000000020421046102023000151430ustar 00000000000000//! This example shows you how you can convert a JPEG2000 image into PNG using //! the `image` crate. use std::env; use std::error::Error; use std::path::PathBuf; fn main() -> Result<(), Box> { #[cfg(feature = "logging")] if let Ok(()) = log::set_logger(&LOGGER) { log::set_max_level(log::LevelFilter::Warn); } let target = env::args() .nth(1) .map(PathBuf::from) .unwrap_or_else(|| PathBuf::from("test.jp2")); hayro_jpeg2000::integration::register_decoding_hook(); let image = image::ImageReader::open(target)?.decode()?; image.save("out.png")?; Ok(()) } #[cfg(feature = "logging")] static LOGGER: SimpleLogger = SimpleLogger; #[cfg(feature = "logging")] struct SimpleLogger; #[cfg(feature = "logging")] impl log::Log for SimpleLogger { fn enabled(&self, metadata: &log::Metadata<'_>) -> bool { metadata.level() <= log::LevelFilter::Warn } fn log(&self, record: &log::Record<'_>) { eprintln!("{}", record.args()); } fn flush(&self) {} } hayro-jpeg2000-0.3.2/manifest_custom.json000064400000000000000000000000031046102023000162610ustar 00000000000000[] hayro-jpeg2000-0.3.2/manifest_openjpeg.json000064400000000000000000000077621046102023000166010ustar 00000000000000[ 
"Bretagne1_0.j2k", "Bretagne1_1.j2k", "Bretagne1_2.j2k", "Bretagne2_0.j2k", "Bretagne2_1.j2k", "Bretagne2_2.j2k", "Bretagne2_3.j2k", "Bretagne2_4.j2k", "Bretagne2_bypass_pterm.j2k", "Bretagne2_bypass_termall_pterm.j2k", "Bretagne2_bypass_termall.j2k", "Bretagne2_bypass_vsc_reset_termall_pterm_segsym.j2k", "Bretagne2_bypass.j2k", "Bretagne2_pterm.j2k", "Bretagne2_reset.j2k", "Bretagne2_segsym.j2k", "Bretagne2_termall_pterm.j2k", "Bretagne2_termall.j2k", "Bretagne2_vsc.j2k", "Cevennes1.j2k", "Cevennes2.jp2", "Rome.jp2", "X_4_2K_24_185_CBR_WB_000.j2k", "X_5_2K_24_235_CBR_STEM24_000.j2k", "X_6_2K_24_FULL_CBR_CIRCLE_000.j2k", "p0_08.j2k", "zoo2.jp2", "file7.jp2", "file1.jp2", "file5.jp2", "file5_no_icc.jp2", "file6.jp2", "subsampling_2.jp2", "p0_04.j2k", "file4.jp2", "file8.jp2", "p1_04.j2k", "d2_colr.j2c", "d1_colr.j2c", "a2_colr.j2c", "f2_mono.j2c", "f1_mono.j2c", "b1_mono.j2c", "a5_mono.j2c", "c2_mono.j2c", "a3_mono.j2c", "b3_mono.j2c", "c1_mono.j2c", "a1_mono.j2c", "p0_16.j2k", "p0_14.j2k", "p0_09.j2k", "p0_12.j2k", "p0_11.j2k", "Bretagne2.j2k", "kakadu_v4-4_openjpegv2_broken.j2k", "123.j2c", "bug.j2c", "illegalcolortransform.j2k", "Cannotreaddatawithnosizeknown.j2k", "MarkerIsNotCompliant.j2k", "test_lossless.j2k", "issue228.j2k", "pacs.ge.j2k", "buxR.j2k", "CT_Phillips_JPEG2K_Decompr_Problem.j2k", "buxI.j2k", "issue171.jp2", "movie_00000.j2k", "movie_00001.j2k", "movie_00002.j2k", "_00042.j2k", "small_world_non_consecutive_tilepart_tlm.jp2", "tnsot_zero.jp2", "j2k32.j2k", "issue188_beach_64bitsbox.jp2", "issue399.j2k", "text_GBR.jp2", "cthead1.j2k", "relax.jp2", "Marrin.jp2", "issue206_image-000.jp2", "tnsot_zero_missing_eoc.jp2", "basn6a08.jp2", "basn4a08.jp2", "huge-tile-size.jp2", "issue653-zero-unknownbox.jp2", "issue818.jp2", "a4_colr.j2c", "b2_mono.j2c", "p1_02.j2k", "file9.jp2", "p0_01.j2k", "p0_10.j2k", "issue104_jpxstream.jp2", "issue135.j2k", "issue254.jp2", "kodak_2layers_lrcp.j2c", "issue235.jp2", "e2_colr.j2c", "p1_01.j2k", "orb-blue10-lin-j2k.j2k", 
"orb-blue10-lin-jp2.jp2", "orb-blue10-win-j2k.j2k", "orb-blue10-win-jp2.jp2", "issue979.j2k", "issue979.j2k", "issue412.jp2", "issue458.jp2", "p0_02.j2k", "p1_06.j2k", "g4_colr.j2c", { "id": "4241ac039aba57e6a9c948d519d94216_asan_heap-oob_14650f2_7469_602", "path": "4241ac039aba57e6a9c948d519d94216_asan_heap-oob_14650f2_7469_602.jp2", "render": false }, { "id": "dwt_interleave_h.gsr105", "path": "dwt_interleave_h.gsr105.jp2", "render": false }, { "id": "mem-b2b86b74-2753", "path": "mem-b2b86b74-2753.jp2", "render": false }, "file2.jp2", "file3.jp2", "subsampling_1.jp2", "zoo1.jp2", "g1_colr.j2c", "g2_colr.j2c", "p1_05.j2k", "issue559-eci-090-CIELab.jp2", "issue559-eci-091-CIELab.jp2", { "id": "file9_no_palette", "path": "file9.jp2", "resolve_palette_indices": false }, { "id": "Bretagne2_res1", "path": "Bretagne2_0.j2k", "target_resolution": [1900, 1900] }, { "id": "Bretagne2_res2", "path": "Bretagne2_0.j2k", "target_resolution": [900, 900] }, { "id": "Bretagne2_res3", "path": "Bretagne2_0.j2k", "target_resolution": [450, 450] }, { "id": "Bretagne2_res4", "path": "Bretagne2_0.j2k", "target_resolution": [225, 225] }, { "id": "Bretagne2_res5", "path": "Bretagne2_0.j2k", "target_resolution": [120, 120] }, { "id": "Bretagne2_res6", "path": "Bretagne2_0.j2k", "target_resolution": [60, 60] }, { "id": "zoo1_res2", "path": "zoo1.jp2", "target_resolution": [1200, 1200] }, { "id": "zoo1_res3", "path": "zoo1.jp2", "target_resolution": [600, 600] }, { "id": "zoo1_res4", "path": "zoo1.jp2", "target_resolution": [300, 300] }, { "id": "p0_08_res1", "path": "p0_08.j2k", "target_resolution": [220, 200] } ] hayro-jpeg2000-0.3.2/manifest_serenity.json000064400000000000000000000046701046102023000166270ustar 00000000000000[ "buggie-gray.jpf", "cmyk-small.jpf", "jasper-rgba-u8-cbstyle-01-bypass-finer-layers.jp2", "jasper-rgba-u8-cbstyle-01-bypass-layers.jp2", "jasper-rgba-u8-cbstyle-01-bypass.jp2", "jasper-rgba-u8-cbstyle-02-resetprob.jp2", "jasper-rgba-u8-cbstyle-04-termall-layers.jp2", 
"jasper-rgba-u8-cbstyle-04-termall.jp2", "jasper-rgba-u8-cbstyle-05-bypass-termall.jp2", "jasper-rgba-u8-cbstyle-06-resetprob-termall.jp2", "jasper-rgba-u8-cbstyle-08-vcausal.jp2", "jasper-rgba-u8-cbstyle-16-pterm.jp2", "jasper-rgba-u8-cbstyle-32-segsym.jp2", "jasper-rgba-u8-cbstyle-36-termall-segsym.jp2", "jasper-rgba-u8-cbstyle-59-all-but-termall.jp2", "jasper-rgba-u8-cbstyle-63-all.jp2", "jasper-rgba-u8-solid-alpha-cbstyle-04-termall.jp2", "jasper-tile4x2-res5.jp2", "kakadu-lossless-cmyk-u8-prog1-layers1-res6.jp2", "kakadu-lossless-gray-alpha-u8-prog1-layers1-res6.jp2", "kakadu-lossless-gray-u8-prog1-layers1-res6.jp2", "kakadu-lossless-rgb-u8-prog1-layers1-res6-mct.jp2", "kakadu-lossless-rgba-u16-prog1-layers1-res6.jp2", "kakadu-lossless-rgba-u8-prog1-layers1-res6-mct.jp2", "kakadu-lossy-rgba-u8-prog0-layers1-res6-mct.jp2", "openjpeg-lossless-RGN.jp2", "openjpeg-lossless-rgba-u4.jp2", "openjpeg-lossless-rgba-u8-PLT.jp2", "openjpeg-lossless-rgba-u8-TLM.jp2", "openjpeg-lossless-rgba-u8-prog0-EPH-SOP.jp2", "openjpeg-lossless-rgba-u8-prog0-EPH-empty-packets.jp2", "openjpeg-lossless-rgba-u8-prog0-EPH.jp2", "openjpeg-lossless-rgba-u8-prog0-SOP.jp2", "openjpeg-lossless-rgba-u8-prog0-tile-part-index-overflow.jp2", "openjpeg-lossless-rgba-u8-prog0-tile4x2-cblk4x16-tp3-layers3-res2.jp2", "openjpeg-lossless-rgba-u8-prog1-tile4x2-cblk4x16-tp3-layers3-res2.jp2", "openjpeg-lossless-rgba-u8-prog2-tile4x2-cblk4x16-tp3-layers3-res2.jp2", "openjpeg-lossless-rgba-u8-prog3-tile4x2-cblk4x16-tp3-layers3-res2.jp2", "openjpeg-lossless-rgba-u8-prog4-tile4x2-cblk4x16-tp3-layers3-res2.jp2", "openjpeg-lossy-quantization-scalar-derived.jp2", "indexed-small.jp2", "openjpeg-lossless-indexed-u8-rgb-u8.jp2", "kakadu-lossless-cmyka-u8-prog1-layers1-res6.jp2", "openjpeg-lossless-bgra-u8.jp2", "kakadu-lossless-lab-alpha-u8-prog1-layers1-res6.jp2", "kakadu-lossless-lab-u8-prog1-layers1-res6.jp2", { "id": "indexed-small_res1", "path": "indexed-small.jp2", "target_resolution": [1, 1] }, { "id": 
"large_target_resolution", "path": "jasper-tile4x2-res5.jp2", "target_resolution": [1000, 1000] } ] hayro-jpeg2000-0.3.2/src/error.rs000064400000000000000000000222671046102023000144740ustar 00000000000000//! Error types for JPEG 2000 decoding. use core::fmt; /// The main error type for JPEG 2000 decoding operations. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum DecodeError { /// Errors related to JP2 file format and box parsing. Format(FormatError), /// Errors related to codestream markers. Marker(MarkerError), /// Errors related to tile processing. Tile(TileError), /// Errors related to image dimensions and validation. Validation(ValidationError), /// Errors related to decoding operations. Decoding(DecodingError), /// Errors related to color space and component handling. Color(ColorError), } /// Errors related to JP2 file format and box parsing. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum FormatError { /// Invalid JP2 signature. InvalidSignature, /// Invalid JP2 file type. InvalidFileType, /// Invalid or malformed JP2 box. InvalidBox, /// Missing codestream data. MissingCodestream, /// Unsupported JP2 image format. Unsupported, } /// Errors related to codestream markers. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum MarkerError { /// Invalid marker encountered. Invalid, /// Unsupported marker encountered. Unsupported, /// Expected a specific marker. Expected(&'static str), /// Missing a required marker. Missing(&'static str), /// Failed to read or parse a marker. ParseFailure(&'static str), } /// Errors related to tile processing. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TileError { /// Invalid image tile was encountered. Invalid, /// Invalid tile index in tile-part header. InvalidIndex, /// Invalid tile or image offsets. InvalidOffsets, /// PPT marker present when PPM marker exists in main header. PpmPptConflict, } /// Errors related to image dimensions and validation. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ValidationError { /// Invalid image dimensions. InvalidDimensions, /// Image dimensions exceed supported limits. ImageTooLarge, /// Image has too many channels. TooManyChannels, /// Invalid component metadata. InvalidComponentMetadata, /// Invalid progression order. InvalidProgressionOrder, /// Invalid transformation type. InvalidTransformation, /// Invalid quantization style. InvalidQuantizationStyle, /// Missing exponents for precinct sizes. MissingPrecinctExponents, /// Not enough exponents provided in header. InsufficientExponents, /// Missing exponent step size. MissingStepSize, /// Invalid quantization exponents. InvalidExponents, } /// Errors related to decoding operations. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum DecodingError { /// An error occurred while decoding a code-block. CodeBlockDecodeFailure, /// Number of bitplanes in a code-block is too large. TooManyBitplanes, /// A code-block contains too many coding passes. TooManyCodingPasses, /// Invalid number of bitplanes in a code-block. InvalidBitplaneCount, /// A precinct was invalid. InvalidPrecinct, /// A progression iterator ver invalid. InvalidProgressionIterator, /// Unexpected end of data. UnexpectedEof, } /// Errors related to color space and component handling. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ColorError { /// Multi-component transform failed. Mct, /// Failed to resolve palette indices. PaletteResolutionFailed, /// Failed to convert from sYCC to RGB. SyccConversionFailed, /// Failed to convert from LAB to RGB. 
LabConversionFailed, } impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Format(e) => write!(f, "{e}"), Self::Marker(e) => write!(f, "{e}"), Self::Tile(e) => write!(f, "{e}"), Self::Validation(e) => write!(f, "{e}"), Self::Decoding(e) => write!(f, "{e}"), Self::Color(e) => write!(f, "{e}"), } } } impl fmt::Display for FormatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::InvalidSignature => write!(f, "invalid JP2 signature"), Self::InvalidFileType => write!(f, "invalid JP2 file type"), Self::InvalidBox => write!(f, "invalid JP2 box"), Self::MissingCodestream => write!(f, "missing codestream data"), Self::Unsupported => write!(f, "unsupported JP2 image"), } } } impl fmt::Display for MarkerError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Invalid => write!(f, "invalid marker"), Self::Unsupported => write!(f, "unsupported marker"), Self::Expected(marker) => write!(f, "expected {marker} marker"), Self::Missing(marker) => write!(f, "missing {marker} marker"), Self::ParseFailure(marker) => write!(f, "failed to parse {marker} marker"), } } } impl fmt::Display for TileError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Invalid => write!(f, "image contains no tiles"), Self::InvalidIndex => write!(f, "invalid tile index in tile-part header"), Self::InvalidOffsets => write!(f, "invalid tile offsets"), Self::PpmPptConflict => { write!( f, "PPT marker present when PPM marker exists in main header" ) } } } } impl fmt::Display for ValidationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::InvalidDimensions => write!(f, "invalid image dimensions"), Self::ImageTooLarge => write!(f, "image is too large"), Self::TooManyChannels => write!(f, "image has too many channels"), Self::InvalidComponentMetadata => write!(f, "invalid component metadata"), Self::InvalidProgressionOrder => 
write!(f, "invalid progression order"), Self::InvalidTransformation => write!(f, "invalid transformation type"), Self::InvalidQuantizationStyle => write!(f, "invalid quantization style"), Self::MissingPrecinctExponents => { write!(f, "missing exponents for precinct sizes") } Self::InsufficientExponents => { write!(f, "not enough exponents provided in header") } Self::MissingStepSize => write!(f, "missing exponent step size"), Self::InvalidExponents => write!(f, "invalid quantization exponents"), } } } impl fmt::Display for DecodingError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::CodeBlockDecodeFailure => write!(f, "failed to decode code-block"), Self::TooManyBitplanes => write!(f, "number of bitplanes is too large"), Self::TooManyCodingPasses => { write!(f, "code-block contains too many coding passes") } Self::InvalidBitplaneCount => write!(f, "invalid number of bitplanes"), Self::InvalidPrecinct => write!(f, "a precinct was invalid"), Self::InvalidProgressionIterator => { write!(f, "a progression iterator was invalid") } Self::UnexpectedEof => write!(f, "unexpected end of data"), } } } impl fmt::Display for ColorError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Mct => write!(f, "multi-component transform failed"), Self::PaletteResolutionFailed => write!(f, "failed to resolve palette indices"), Self::SyccConversionFailed => write!(f, "failed to convert from sYCC to RGB"), Self::LabConversionFailed => write!(f, "failed to convert from LAB to RGB"), } } } impl core::error::Error for DecodeError {} impl core::error::Error for FormatError {} impl core::error::Error for MarkerError {} impl core::error::Error for TileError {} impl core::error::Error for ValidationError {} impl core::error::Error for DecodingError {} impl core::error::Error for ColorError {} impl From for DecodeError { fn from(e: FormatError) -> Self { Self::Format(e) } } impl From for DecodeError { fn from(e: MarkerError) -> Self { 
Self::Marker(e) } } impl From for DecodeError { fn from(e: TileError) -> Self { Self::Tile(e) } } impl From for DecodeError { fn from(e: ValidationError) -> Self { Self::Validation(e) } } impl From for DecodeError { fn from(e: DecodingError) -> Self { Self::Decoding(e) } } impl From for DecodeError { fn from(e: ColorError) -> Self { Self::Color(e) } } /// Result type for JPEG 2000 decoding operations. pub type Result = core::result::Result; macro_rules! bail { ($err:expr) => { return Err($err.into()) }; } macro_rules! err { ($err:expr) => { Err($err.into()) }; } pub(crate) use bail; pub(crate) use err; hayro-jpeg2000-0.3.2/src/integration.rs000064400000000000000000000276141046102023000156670ustar 00000000000000//! Integration with the [image] crate use std::{ ffi::OsStr, io::{BufRead, Seek}, }; use crate::{ColorSpace, DecodeSettings, Image}; use ::image::error::{DecodingError, ImageFormatHint}; use ::image::{ColorType, ExtendedColorType, ImageDecoder, ImageError, ImageResult}; use image::hooks::{decoding_hook_registered, register_format_detection_hook}; use moxcms::{CmsError, ColorProfile, Layout, TransformOptions}; const CMYK_PROFILE: &[u8] = include_bytes!("../assets/CGATS001Compat-v2-micro.icc"); impl ImageDecoder for Image<'_> { fn dimensions(&self) -> (u32, u32) { (self.width(), self.height()) } fn color_type(&self) -> ColorType { let channel_count = self.color_space.num_channels(); let has_alpha = self.has_alpha; match (channel_count, has_alpha) { (1, false) => ColorType::L8, (1, true) => ColorType::La8, (3, false) => ColorType::Rgb8, (3, true) => ColorType::Rgba8, // We convert CMYK to RGB. (4, false) => ColorType::Rgb8, (4, true) => ColorType::Rgba8, // We have to return something... 
_ => ColorType::Rgb8, } } fn original_color_type(&self) -> ExtendedColorType { let channel_count = self.color_space.num_channels(); let has_alpha = self.has_alpha; let depth = self.original_bit_depth(); // match logic based on color_type() above match (channel_count, depth, has_alpha) { // Grayscale (1, 1, false) => ExtendedColorType::L1, (1, 1, true) => ExtendedColorType::La1, (1, 2, false) => ExtendedColorType::L2, (1, 2, true) => ExtendedColorType::La2, (1, 4, false) => ExtendedColorType::L4, (1, 4, true) => ExtendedColorType::La4, (1, 8, false) => ExtendedColorType::L8, (1, 8, true) => ExtendedColorType::La8, (1, 16, false) => ExtendedColorType::L8, (1, 16, true) => ExtendedColorType::La8, // RGB (3, 1, false) => ExtendedColorType::Rgb1, (3, 1, true) => ExtendedColorType::Rgba1, (3, 2, false) => ExtendedColorType::Rgb2, (3, 2, true) => ExtendedColorType::Rgba2, (3, 4, false) => ExtendedColorType::Rgb4, (3, 4, true) => ExtendedColorType::Rgba4, (3, 8, false) => ExtendedColorType::Rgb8, (3, 8, true) => ExtendedColorType::Rgba8, (3, 16, false) => ExtendedColorType::Rgb8, (3, 16, true) => ExtendedColorType::Rgba8, // CMYK (4, 8, false) => ExtendedColorType::Cmyk8, (4, 16, false) => ExtendedColorType::Cmyk16, // CMYK with alpha is not representable _ => ExtendedColorType::Unknown(orig_bits_per_pixel(self)), } } fn read_image(self, buf: &mut [u8]) -> ImageResult<()> where Self: Sized, { convert_inner(&self, buf) } fn read_image_boxed(self: Box, buf: &mut [u8]) -> ImageResult<()> { convert_inner(&self, buf) } } #[doc(hidden)] /// JPEG2000 decoder compatible with `image` decoding hook APIs that pass an `impl Read + Seek` pub struct Jp2Decoder { // Lots of fields from `crate::Image` are duplicated here; // this is necessary because `crate::Image` borrows a slice and keeping it in the same struct // as `input: Vec` would create a self-referential struct that Rust cannot easily express. 
// // This approach is modeled after the integration of early versions of zune-jpeg into image: // https://docs.rs/image/0.25.6/src/image/codecs/jpeg/decoder.rs.html#27-58 // // Buffering the entire input in memory is not an issue for lossy formats like JPEG. // The compression ratio is so high that an image that expands to hundreds of MB when decoded // only takes up a single-digit number of MB in a compressed form. input: Vec, width: u32, height: u32, color_type: ColorType, orig_color_type: ExtendedColorType, } impl Jp2Decoder { /// Create a new decoder that decodes from the stream ```r``` pub fn new(r: R) -> ImageResult { let mut input = Vec::new(); let mut r = r; r.read_to_end(&mut input)?; let headers = Image::new(&input, &DecodeSettings::default())?; Ok(Self { width: headers.width(), height: headers.height(), color_type: headers.color_type(), orig_color_type: headers.original_color_type(), input, }) } } impl ImageDecoder for Jp2Decoder { fn dimensions(&self) -> (u32, u32) { (self.width, self.height) } fn color_type(&self) -> ColorType { self.color_type } fn original_color_type(&self) -> ExtendedColorType { self.orig_color_type } fn read_image(self, buf: &mut [u8]) -> ImageResult<()> where Self: Sized, { // we can safely .unwrap() because we've already done this on decoder creation and know this works let decoder = Image::new(&self.input, &DecodeSettings::default()).unwrap(); decoder.read_image(buf) } fn read_image_boxed(self: Box, buf: &mut [u8]) -> ImageResult<()> { // we can safely .unwrap() because we've already done this on decoder creation and know this works let decoder = Image::new(&self.input, &DecodeSettings::default()).unwrap(); decoder.read_image(buf) } } /// Private convenience function for `image` integration fn orig_bits_per_pixel(img: &Image<'_>) -> u8 { let mut channel_count = img.color_space().num_channels(); if img.has_alpha { channel_count += 1; } channel_count * img.original_bit_depth() } fn convert_inner(image: &Image<'_>, buf: &mut [u8]) 
-> ImageResult<()> { let width = image.width(); let height = image.height(); let color_space = image.color_space().clone(); let has_alpha = image.has_alpha(); fn from_icc( icc: &[u8], num_channels: u8, has_alpha: bool, width: u32, height: u32, input_data: &[u8], ) -> Result, CmsError> { let src_profile = ColorProfile::new_from_slice(icc)?; let dest_profile = ColorProfile::new_srgb(); let (src_layout, dest_layout, out_channels) = match (num_channels, has_alpha) { (1, false) => (Layout::Gray, Layout::Gray, 1), (1, true) => (Layout::GrayAlpha, Layout::GrayAlpha, 2), (3, false) => (Layout::Rgb, Layout::Rgb, 3), (3, true) => (Layout::Rgba, Layout::Rgba, 4), // CMYK will be converted to RGB. (4, false) => (Layout::Rgba, Layout::Rgb, 3), _ => { return Err(CmsError::UnsupportedChannelConfiguration); } }; let transform = src_profile.create_transform_8bit( src_layout, &dest_profile, dest_layout, TransformOptions::default(), )?; let mut transformed = vec![0; (width * height * out_channels) as usize]; transform.transform(input_data, &mut transformed)?; Ok(transformed) } fn process( image: &Image<'_>, buf: &mut [u8], width: u32, height: u32, has_alpha: bool, cs: ColorSpace, ) -> Result<(), ImageError> { match (cs, has_alpha) { (ColorSpace::Gray, false) => { image.decode_into(buf)?; } (ColorSpace::Gray, true) => { image.decode_into(buf)?; } (ColorSpace::RGB, false) => { image.decode_into(buf)?; } (ColorSpace::RGB, true) => { image.decode_into(buf)?; } (ColorSpace::CMYK, false) => { let decoded = image.decode()?; let transformed = from_icc(CMYK_PROFILE, 4, has_alpha, width, height, &decoded) .map_err(icc_err_to_image)?; buf.copy_from_slice(&transformed); } (ColorSpace::CMYK, true) => { // moxcms doesn't support CMYK interleaved with alpha, so we // need to split it. 
let decoded = image.decode()?; let mut cmyk = vec![]; let mut alpha = vec![]; for sample in decoded.chunks_exact(5) { cmyk.extend_from_slice(&sample[..4]); alpha.push(sample[4]); } let rgb = from_icc(CMYK_PROFILE, 4, false, width, height, &cmyk) .map_err(icc_err_to_image)?; for (out, pixel) in buf.chunks_exact_mut(4).zip( rgb.chunks_exact(3) .zip(alpha) .map(|(rgb, alpha)| [rgb[0], rgb[1], rgb[2], alpha]), ) { out.copy_from_slice(&pixel); } } ( ColorSpace::Icc { profile, num_channels: num_components, }, has_alpha, ) => { let decoded = image.decode()?; let transformed = from_icc(&profile, num_components, has_alpha, width, height, &decoded); if let Ok(transformed) = transformed { buf.copy_from_slice(&transformed); } else { match num_components { 1 => process(image, buf, width, height, has_alpha, ColorSpace::Gray)?, 3 => process(image, buf, width, height, has_alpha, ColorSpace::RGB)?, 4 => process(image, buf, width, height, has_alpha, ColorSpace::CMYK)?, _ => { return Err(unsupported_color_error(image.original_color_type())); } } }; } (ColorSpace::Unknown { .. 
}, _) => { return Err(unsupported_color_error(image.original_color_type())); } }; Ok(()) } process(image, buf, width, height, has_alpha, color_space) } impl From for DecodingError { fn from(value: crate::DecodeError) -> Self { let format = ImageFormatHint::Name("JPEG2000".to_owned()); Self::new(format, value) } } impl From for ImageError { fn from(value: crate::DecodeError) -> Self { Self::Decoding(value.into()) } } fn icc_err_to_image(err: CmsError) -> ImageError { let format = ImageFormatHint::Name("JPEG2000".to_owned()); ImageError::Decoding(DecodingError::new(format, err)) } fn unsupported_color_error(color: ExtendedColorType) -> ImageError { ImageError::Unsupported(image::error::UnsupportedError::from_format_and_kind( ImageFormatHint::Name("JPEG2000".to_owned()), image::error::UnsupportedErrorKind::Color(color), )) } /// Registers the decoder with the `image` crate so that non-format-specific calls such as /// `ImageReader::open("image.jp2")?.decode()?;` work with JPEG2000 files. /// /// Returns `true` on success, or `false` if the hook for JPEG2000 is already registered. pub fn register_decoding_hook() -> bool { if decoding_hook_registered(OsStr::new("jp2")) { return false; } for extension in ["jp2", "jpg2", "j2k", "jpf"] { image::hooks::register_decoding_hook( extension.into(), Box::new(|r| Ok(Box::new(Jp2Decoder::new(r)?))), ); register_format_detection_hook(extension.into(), crate::JP2_MAGIC, None); } for extension in ["j2c", "jpc"] { image::hooks::register_decoding_hook( extension.into(), Box::new(|r| Ok(Box::new(Jp2Decoder::new(r)?))), ); register_format_detection_hook(extension.into(), crate::CODESTREAM_MAGIC, None); } true } hayro-jpeg2000-0.3.2/src/j2c/arithmetic_decoder.rs000064400000000000000000000230441046102023000176310ustar 00000000000000//! The arithmetic decoder, described in Annex C. //! //! The arithmetic decoder keeps track of some state and continuously receives //! 
context labels as input, each time yielding a new bit from the original data //! as output. pub(crate) struct ArithmeticDecoder<'a> { /// The underlying encoded data. data: &'a [u8], /// The C-register (see Table C.1). c: u32, /// The A-register (see Table C.1). a: u32, /// The pointer to the current byte. base_pointer: u32, /// The bit shift counter. shift_count: u32, } impl<'a> ArithmeticDecoder<'a> { pub(crate) fn new(data: &'a [u8]) -> Self { let mut decoder = ArithmeticDecoder { data, c: 0, a: 0, base_pointer: 0, shift_count: 0, }; decoder.initialize(); decoder } /// Read the next bit using the given context label. #[inline(always)] pub(crate) fn read_bit(&mut self, context: &mut ArithmeticDecoderContext) -> u32 { self.decode(context) } /// The INITDEC procedure from C.3.5. /// /// We use the version from Annex G in . fn initialize(&mut self) { self.c = ((self.current_byte() as u32) ^ 0xff) << 16; self.read_byte(); self.c <<= 7; self.shift_count -= 7; self.a = 0x8000; } /// The BYTEIN procedure from C.3.4. /// /// We use the version from Annex G from . #[inline(always)] fn read_byte(&mut self) { if self.current_byte() == 0xff { let b1 = self.next_byte(); if b1 > 0x8f { self.shift_count = 8; } else { self.base_pointer += 1; self.c = self .c .wrapping_add(0xfe00) .wrapping_sub((self.current_byte() as u32) << 9); self.shift_count = 7; } } else { self.base_pointer += 1; self.c = self .c .wrapping_add(0xff00) .wrapping_sub((self.current_byte() as u32) << 8); self.shift_count = 8; } } /// The RENORMD procedure from C.3.3. #[inline(always)] fn renormalize(&mut self) { // Original code: // loop { // if self.shift_count == 0 { // self.read_byte(); // } // // self.a <<= 1; // self.c <<= 1; // self.shift_count -= 1; // // if self.a & 0x8000 != 0 { // break; // } // } // Optimization: Batch shifts. 
while self.a & 0x8000 == 0 { if self.shift_count == 0 { self.read_byte(); } let shifts_needed = self.a.leading_zeros() - 16; let batch = shifts_needed.min(self.shift_count); self.a <<= batch; self.c <<= batch; self.shift_count -= batch; } } /// The `LPS_EXCHANGE` procedure from C.3.2. #[inline(always)] fn exchange_lps(&mut self, context: &mut ArithmeticDecoderContext, qe_entry: &QeData) -> u32 { // Original code: // let d; // // if self.a < qe_entry.qe { // self.a = qe_entry.qe; // d = context.mps; // context.index = qe_entry.nmps; // } else { // self.a = qe_entry.qe; // d = 1 - context.mps; // // if qe_entry.switch { // context.mps = 1 - context.mps; // } // // context.index = qe_entry.nlps; // } // Branchless version, shows better performance. let cond = (self.a < qe_entry.qe) as u32; let inv_cond = 1 - cond; self.a = qe_entry.qe; // d = if cond { mps } else { 1 - mps } let d = context.mps() ^ inv_cond; // flip mps only when !cond && switch context.xor_mps(inv_cond & (qe_entry.switch as u32)); // index = if cond { nmps } else { nlps } let cond_u8 = cond as u8; let inv_cond_u8 = inv_cond as u8; context.set_index(cond_u8 * qe_entry.nmps + inv_cond_u8 * qe_entry.nlps); d } /// The DECODE procedure from C.3.2. /// /// We use the version from Annex G from . #[inline(always)] fn decode(&mut self, context: &mut ArithmeticDecoderContext) -> u32 { let qe_entry = &QE_TABLE[context.index() as usize]; self.a -= qe_entry.qe; let d; if (self.c >> 16) < self.a { if self.a & 0x8000 == 0 { d = self.exchange_mps(context, qe_entry); self.renormalize(); } else { d = context.mps(); } } else { self.c -= self.a << 16; d = self.exchange_lps(context, qe_entry); self.renormalize(); } d } /// The `MPS_EXCHANGE` procedure from C.3.2. 
#[inline(always)] fn exchange_mps(&mut self, context: &mut ArithmeticDecoderContext, qe_entry: &QeData) -> u32 { // Original code: // let d; // // if self.a < qe_entry.qe { // d = 1 - context.mps; // // if qe_entry.switch { // context.mps = 1 - context.mps; // } // // context.index = qe_entry.nlps; // } else { // d = context.mps; // context.index = qe_entry.nmps; // } // Branchless version, shows better performance. let cond = (self.a < qe_entry.qe) as u32; let inv_cond = 1 - cond; // d = if cond { 1 - mps } else { mps } let d = context.mps() ^ cond; // flip mps only when cond && switch context.xor_mps(cond & (qe_entry.switch as u32)); // index = if cond { nlps } else { nmps } let cond_u8 = cond as u8; let inv_cond_u8 = inv_cond as u8; context.set_index(cond_u8 * qe_entry.nlps + inv_cond_u8 * qe_entry.nmps); d } #[inline(always)] fn current_byte(&self) -> u8 { self.data .get(self.base_pointer as usize) .copied() // "The number of bytes corresponding to the coding passes is // specified in the packet header. Often at that point there are // more symbols to be decoded. Therefore, the decoder shall extend // the input bit stream to the arithmetic coder with 0xFF bytes, // as necessary, until all symbols have been decoded." .unwrap_or(0xFF) } #[inline(always)] fn next_byte(&self) -> u8 { self.data .get((self.base_pointer + 1) as usize) .copied() .unwrap_or(0xFF) } } // Previously, we stored the context as 2 u32's, but doing it with a bit-packed // u8 seems to be slightly better (though it doesn't make that huge of a // difference). /// Bits 0-6 = index (0-46). /// Bit 7 = mps (0 or 1). 
#[derive(Copy, Clone, Debug, Default)]
pub(crate) struct ArithmeticDecoderContext(u8);

impl ArithmeticDecoderContext {
    /// The current index into `QE_TABLE` (low 7 bits).
    #[inline(always)]
    pub(crate) fn index(self) -> u32 {
        (self.0 & 0x7F) as u32
    }

    /// The current "more probable symbol", 0 or 1 (stored in bit 7).
    #[inline(always)]
    pub(crate) fn mps(self) -> u32 {
        (self.0 >> 7) as u32
    }

    /// Overwrite the table index while preserving the MPS bit.
    #[inline(always)]
    fn set_index(&mut self, index: u8) {
        self.0 = (self.0 & 0x80) | index;
    }

    /// Flip the MPS bit iff the lowest bit of `val` is 1 (branchless toggle).
    #[inline(always)]
    fn xor_mps(&mut self, val: u32) {
        self.0 ^= ((val & 1) << 7) as u8;
    }

    /// Reset to index 0 with an MPS of 0.
    #[inline(always)]
    pub(crate) fn reset(&mut self) {
        self.0 = 0;
    }

    /// Reset to the given table index with an MPS of 0.
    /// (Valid indices are 0-46, so bit 7 always ends up cleared.)
    #[inline(always)]
    pub(crate) fn reset_with_index(&mut self, index: u8) {
        self.0 = index;
    }
}

/// One row of the probability-state table: the probability estimate `qe`,
/// the next-state indices after an MPS (`nmps`) / LPS (`nlps`) decision,
/// and whether an LPS triggers an MPS sense switch.
#[derive(Debug, Clone, Copy)]
struct QeData {
    qe: u32,
    nmps: u8,
    nlps: u8,
    switch: bool,
}

/// Build a `[QeData; N]` array from a flat list of
/// (qe, nmps, nlps, switch) tuples.
macro_rules! qe {
    ($($qe:expr, $nmps:expr, $nlps:expr, $switch:expr),+ $(,)?) => {
        [
            $(
                QeData {
                    qe: $qe,
                    nmps: $nmps,
                    nlps: $nlps,
                    switch: $switch,
                }
            ),+
        ]
    };
}

/// QE values and associated data from Table C.2.
#[rustfmt::skip]
static QE_TABLE: [QeData; 47] = qe!(
    0x5601,  1,  1, true,
    0x3401,  2,  6, false,
    0x1801,  3,  9, false,
    0x0AC1,  4, 12, false,
    0x0521,  5, 29, false,
    0x0221, 38, 33, false,
    0x5601,  7,  6, true,
    0x5401,  8, 14, false,
    0x4801,  9, 14, false,
    0x3801, 10, 14, false,
    0x3001, 11, 17, false,
    0x2401, 12, 18, false,
    0x1C01, 13, 20, false,
    0x1601, 29, 21, false,
    0x5601, 15, 14, true,
    0x5401, 16, 14, false,
    0x5101, 17, 15, false,
    0x4801, 18, 16, false,
    0x3801, 19, 17, false,
    0x3401, 20, 18, false,
    0x3001, 21, 19, false,
    0x2801, 22, 19, false,
    0x2401, 23, 20, false,
    0x2201, 24, 21, false,
    0x1C01, 25, 22, false,
    0x1801, 26, 23, false,
    0x1601, 27, 24, false,
    0x1401, 28, 25, false,
    0x1201, 29, 26, false,
    0x1101, 30, 27, false,
    0x0AC1, 31, 28, false,
    0x09C1, 32, 29, false,
    0x08A1, 33, 30, false,
    0x0521, 34, 31, false,
    0x0441, 35, 32, false,
    0x02A1, 36, 33, false,
    0x0221, 37, 34, false,
    0x0141, 38, 35, false,
    0x0111, 39, 36, false,
    0x0085, 40, 37, false,
    0x0049, 41, 38, false,
    0x0025, 42, 39, false,
    0x0015, 43, 40, false,
    0x0009, 44, 41, false,
    0x0005, 45, 42, false,
    0x0001, 45, 43, false,
    0x5601, 46, 46, false,
);
hayro-jpeg2000-0.3.2/src/j2c/bitplane.rs000064400000000000000000001120251046102023000156070ustar 00000000000000//! Bitplane decoding, described in Annex D.
//!
//! JPEG2000 groups the samples of each component into their constituent
//! bit planes and uses a special context-modeling approach to encode the
//! bits using the arithmetic encoder. In this stage, we need to "revert" the
//! context-modeling so that we can extract the magnitudes and signs of each
//! sample.
//!
//! Some of the references are taken from the
//! "JPEG2000 Standard for Image Compression" book instead of the specification.

use alloc::vec;
use alloc::vec::Vec;

use super::arithmetic_decoder::{ArithmeticDecoder, ArithmeticDecoderContext};
use super::build::{CodeBlock, SubBandType};
use super::codestream::CodeBlockStyle;
use super::decode::{DecompositionStorage, TileDecodeContext};
use crate::error::{DecodingError, Result, bail};
use crate::reader::BitReader;

/// Decode the layers of the given code block into coefficients.
///
/// The result will be stored in the form of a vector of signs and magnitudes
/// in the bitplane decoder context.
pub(crate) fn decode(
    code_block: &CodeBlock,
    sub_band_type: SubBandType,
    total_bitplanes: u8,
    style: &CodeBlockStyle,
    tile_ctx: &mut TileDecodeContext<'_>,
    storage: &DecompositionStorage<'_>,
    strict: bool,
) -> Result<()> {
    // Re-initialize the reusable per-tile scratch state for this code-block.
    tile_ctx.bit_plane_decode_context.reset(
        code_block,
        sub_band_type,
        style,
        total_bitplanes,
        strict,
    )?;
    tile_ctx.bit_plane_decode_buffers.reset();

    // `decode_inner` reports failure as `None`; surface it as a decode error.
    decode_inner(
        code_block,
        storage,
        &mut tile_ctx.bit_plane_decode_context,
        &mut tile_ctx.bit_plane_decode_buffers,
    )
    .ok_or(DecodingError::CodeBlockDecodeFailure)?;

    Ok(())
}

/// Concatenate the code-block's layer segments, then run the coding passes,
/// splitting at termination points as dictated by the code-block style.
///
/// Returns `None` on any decoding failure.
fn decode_inner(
    code_block: &CodeBlock,
    storage: &DecompositionStorage<'_>,
    ctx: &mut BitPlaneDecodeContext,
    bp_buffers: &mut BitPlaneDecodeBuffers,
) -> Option<()> {
    bp_buffers.reset();

    let mut last_segment_idx = 0;
    let mut coding_passes = 0;

    // Build a list so that we can associate coding passes with their segments
    // and data more easily.
    for layer in &storage.layers[code_block.layers.start..code_block.layers.end] {
        if let Some(range) = layer.segments.clone() {
            let layer_segments = &storage.segments[range.clone()];

            for segment in layer_segments {
                if segment.idx != last_segment_idx {
                    // Segment indices must be contiguous; record the boundary
                    // (byte offset + cumulative coding-pass count) of the
                    // segment that just ended.
                    assert_eq!(segment.idx, last_segment_idx + 1);
                    bp_buffers
                        .segment_ranges
                        .push(bp_buffers.combined_layers.len());
                    bp_buffers.segment_coding_passes.push(coding_passes);
                    last_segment_idx += 1;
                }

                bp_buffers.combined_layers.extend(segment.data);
                coding_passes += segment.coding_pases;
            }
        }
    }

    assert_eq!(coding_passes, code_block.number_of_coding_passes);

    // Close the final segment so that ranges can always be read as
    // [idx, idx + 1).
    bp_buffers
        .segment_ranges
        .push(bp_buffers.combined_layers.len());
    bp_buffers.segment_coding_passes.push(coding_passes);

    let is_normal_mode =
        !ctx.style.selective_arithmetic_coding_bypass && !ctx.style.termination_on_each_pass;

    if is_normal_mode {
        // Only one termination per code block, so we can just decode the
        // whole range in one single go, processing all coding passes at once.
        let mut decoder = ArithmeticDecoder::new(&bp_buffers.combined_layers);
        handle_coding_passes(
            0,
            code_block
                .number_of_coding_passes
                .min(ctx.max_coding_passes),
            ctx,
            &mut decoder,
        )?;
    } else {
        // Otherwise, each segment introduces a termination. For "termination on
        // each pass", each segment only covers one coding pass
        // and a termination is introduced every time. Otherwise, for only
        // arithmetic coding bypass, terminations are introduced based on the
        // exact index of the covered coding passes (see Table D.9).
        for segment in 0..bp_buffers.segment_coding_passes.len() - 1 {
            let start_coding_pass = bp_buffers.segment_coding_passes[segment];
            let end_coding_pass =
                bp_buffers.segment_coding_passes[segment + 1].min(ctx.max_coding_passes);
            let data = &bp_buffers.combined_layers
                [bp_buffers.segment_ranges[segment]..bp_buffers.segment_ranges[segment + 1]];

            // With selective bypass, the first 10 coding passes (and every
            // cleanup pass after that) stay arithmetically coded; the other
            // passes are raw.
            let use_arithmetic = if ctx.style.selective_arithmetic_coding_bypass {
                if start_coding_pass <= 9 {
                    true
                } else {
                    // Only for cleanup pass.
                    start_coding_pass.is_multiple_of(3)
                }
            } else {
                true
            };

            if use_arithmetic {
                let mut decoder = ArithmeticDecoder::new(data);
                handle_coding_passes(start_coding_pass, end_coding_pass, ctx, &mut decoder)?;
            } else {
                let mut decoder = BypassDecoder::new(data, ctx.strict);
                handle_coding_passes(start_coding_pass, end_coding_pass, ctx, &mut decoder)?;
            }
        }
    }

    Some(())
}

/// Run the coding passes `start..end` against `decoder`, dispatching each to
/// the cleanup / significance-propagation / magnitude-refinement routine.
fn handle_coding_passes(
    start: u8,
    end: u8,
    ctx: &mut BitPlaneDecodeContext,
    decoder: &mut impl BitDecoder,
) -> Option<()> {
    for coding_pass in start..end {
        enum PassType {
            Cleanup,
            SignificancePropagation,
            MagnitudeRefinement,
        }

        // The first bitplane only has a cleanup pass, all other bitplanes
        // are in the order SPP -> MRR -> C.
        let pass = match coding_pass % 3 {
            0 => PassType::Cleanup,
            1 => PassType::SignificancePropagation,
            2 => PassType::MagnitudeRefinement,
            _ => unreachable!(),
        };

        // Pass 0 belongs to bitplane 0; passes 1-3 to bitplane 1; etc.
        let current_bitplane = coding_pass.div_ceil(3);
        ctx.current_bit_position = ctx.bitplanes - 1 - current_bitplane;

        match pass {
            PassType::Cleanup => {
                cleanup_pass(ctx, decoder)?;

                if ctx.style.segmentation_symbols {
                    // The segmentation symbol is the fixed sequence 1010,
                    // coded with the UNIFORM context (label 18); a mismatch
                    // indicates corruption.
                    let b0 = decoder.read_bit(ctx.arithmetic_decoder_context(18))?;
                    let b1 = decoder.read_bit(ctx.arithmetic_decoder_context(18))?;
                    let b2 = decoder.read_bit(ctx.arithmetic_decoder_context(18))?;
                    let b3 = decoder.read_bit(ctx.arithmetic_decoder_context(18))?;

                    if (b0 != 1 || b1 != 0 || b2 != 1 || b3 != 0) && ctx.strict {
                        return None;
                    }
                }

                ctx.reset_for_next_bitplane();
            }
            PassType::SignificancePropagation => {
                significance_propagation_pass(ctx, decoder)?;
            }
            PassType::MagnitudeRefinement => {
                magnitude_refinement_pass(ctx, decoder)?;
            }
        }

        if ctx.style.reset_context_probabilities {
            ctx.reset_contexts();
        }
    }

    Some(())
}

// We only allow 31 bit planes because we need one bit for the sign.
pub(crate) const BITPLANE_BIT_SIZE: u32 = size_of::() as u32 * 8 - 1; const SIGNIFICANCE_SHIFT: u8 = 7; const HAS_MAGNITUDE_REFINEMENT_SHIFT: u8 = 6; const HAS_ZERO_CODING_SHIFT: u8 = 5; /// Bit-packed coefficient state (only 3 bits used): /// - Bit 7: significance state (set when first non-zero bit is encountered) /// - Bit 6: has had magnitude refinement pass /// - Bit 5: zero coded in current bitplane's significance propagation pass #[derive(Default, Copy, Clone)] pub(crate) struct CoefficientState(u8); impl CoefficientState { #[inline(always)] fn set_bit(&mut self, shift: u8, value: u8) { debug_assert!(value < 2); self.0 &= !(1_u8 << shift); self.0 |= value << shift; } #[inline(always)] fn set_significant(&mut self) { self.set_bit(SIGNIFICANCE_SHIFT, 1); } #[inline(always)] fn set_zero_coded(&mut self, value: u8) { self.set_bit(HAS_ZERO_CODING_SHIFT, value & 1); } #[inline(always)] fn set_magnitude_refined(&mut self) { self.set_bit(HAS_MAGNITUDE_REFINEMENT_SHIFT, 1); } #[inline(always)] fn is_significant(&self) -> bool { self.significance() == 1 } #[inline(always)] fn significance(&self) -> u8 { (self.0 >> SIGNIFICANCE_SHIFT) & 1 } #[inline(always)] fn magnitude_refinement(&self) -> u8 { (self.0 >> HAS_MAGNITUDE_REFINEMENT_SHIFT) & 1 } #[inline(always)] fn is_zero_coded(&self) -> bool { (self.0 >> HAS_ZERO_CODING_SHIFT) & 1 == 1 } } #[derive(Copy, Clone, Debug, Default)] pub(crate) struct Coefficient(u32); impl Coefficient { pub(crate) fn get(&self) -> i32 { let mut magnitude = (self.0 & !0x80000000) as i32; // Map sign (0 for positive, 1 for negative) to 1, -1. magnitude *= 1 - 2 * (self.sign() as i32); magnitude } fn set_sign(&mut self, sign: u8) { self.0 |= (sign as u32) << 31; } fn sign(&self) -> u32 { (self.0 >> 31) & 1 } fn push_bit_at(&mut self, bit: u32, position: u8) { self.0 |= bit << position; } } const COEFFICIENTS_PADDING: u32 = 1; /// Store the significances of each neighbor for a specific coefficient. 
/// The order from MSB to LSB is as follows: /// /// top-left, top, top-right, left, bottom-left, right, bottom-right, bottom. /// /// See the `context_label_sign_coding` method for why we aren't simply using /// row-major order. #[derive(Default, Copy, Clone)] struct NeighborSignificances(u8); impl NeighborSignificances { fn set_top_left(&mut self) { self.0 |= 1 << 7; } fn set_top(&mut self) { self.0 |= 1 << 6; } fn set_top_right(&mut self) { self.0 |= 1 << 5; } fn set_left(&mut self) { self.0 |= 1 << 4; } fn set_bottom_left(&mut self) { self.0 |= 1 << 3; } fn set_right(&mut self) { self.0 |= 1 << 2; } fn set_bottom_right(&mut self) { self.0 |= 1 << 1; } fn set_bottom(&mut self) { self.0 |= 1; } fn all(&self) -> u8 { self.0 } // Needed for vertically causal context. fn all_without_bottom(&self) -> u8 { self.0 & 0b11110100 } } #[derive(Default)] pub(crate) struct BitPlaneDecodeBuffers { combined_layers: Vec, segment_ranges: Vec, segment_coding_passes: Vec, } impl BitPlaneDecodeBuffers { fn reset(&mut self) { self.combined_layers.clear(); self.segment_ranges.clear(); self.segment_coding_passes.clear(); // The design of these two buffers is that the ranges are stored // as [idx, idx + 1), so we need to store the first 0 when resetting. self.segment_ranges.push(0); self.segment_coding_passes.push(0); } } pub(crate) struct BitPlaneDecodeContext { /// A vector of bit-packed fields for each coefficient in the code-block. coefficient_states: Vec, /// The neighbor significances for each coefficient. neighbor_significances: Vec, /// The magnitude and signs of each coefficient that is successively built /// as we advance through the bitplanes. coefficients: Vec, /// The width of the code-block we are processing. width: u32, /// The width of the code-block we are processing, with padding. padded_width: u32, /// The height of the code-block we are processing. height: u32, /// The code-block style for the current code-block. 
style: CodeBlockStyle, /// The number of bitplanes (minus implicitly missing bitplanes) to decode. bitplanes: u8, /// Whether strict mode is enabled. strict: bool, /// The maximum number of coding passes to process. max_coding_passes: u8, /// The type of sub-band the current code block belongs to. sub_band_type: SubBandType, /// The arithmetic decoder contexts for each context label. contexts: [ArithmeticDecoderContext; 19], /// The bit position for the current bitplane. current_bit_position: u8, } impl Default for BitPlaneDecodeContext { fn default() -> Self { Self { coefficient_states: vec![], coefficients: vec![], neighbor_significances: vec![], width: 0, padded_width: COEFFICIENTS_PADDING * 2, height: 0, style: CodeBlockStyle::default(), bitplanes: 0, max_coding_passes: 0, strict: false, sub_band_type: SubBandType::LowLow, contexts: [ArithmeticDecoderContext::default(); 19], current_bit_position: 0, } } } impl BitPlaneDecodeContext { /// Completely reset context so that it can be reused for a new code-block. 
pub(crate) fn reset( &mut self, code_block: &CodeBlock, sub_band_type: SubBandType, code_block_style: &CodeBlockStyle, total_bitplanes: u8, strict: bool, ) -> Result<()> { let (width, height) = (code_block.rect.width(), code_block.rect.height()); let padded_width = width + COEFFICIENTS_PADDING * 2; let padded_height = height + COEFFICIENTS_PADDING * 2; let num_coefficients = padded_width as usize * padded_height as usize; self.coefficients.clear(); self.coefficients .resize(num_coefficients, Coefficient::default()); self.neighbor_significances.clear(); self.neighbor_significances .resize(num_coefficients, NeighborSignificances::default()); self.coefficient_states.clear(); self.coefficient_states .resize(num_coefficients, CoefficientState::default()); self.width = width; self.padded_width = padded_width; self.height = height; self.sub_band_type = sub_band_type; self.style = *code_block_style; self.reset_contexts(); // "The maximum number of bit-planes available for the representation of // coefficients in any sub-band, b, is given by Mb as defined in Equation // (E-2). In general however, the number of actual bit-planes for which // coding passes are generated is Mb – P, where the number of missing most // significant bit-planes, P, may vary from code-block to code-block." // See issue 399. If this subtraction fails the file is in theory invalid, // but we still try to be lenient. self.bitplanes = if strict { total_bitplanes .checked_sub(code_block.missing_bit_planes) .ok_or(DecodingError::InvalidBitplaneCount)? } else { total_bitplanes.saturating_sub(code_block.missing_bit_planes) }; self.max_coding_passes = if self.bitplanes == 0 { 0 } else { 1 + 3 * (self.bitplanes - 1) }; if self.max_coding_passes < code_block.number_of_coding_passes && strict { bail!(DecodingError::TooManyCodingPasses); } Ok(()) } pub(crate) fn coefficient_rows(&self) -> impl Iterator { self.coefficients .chunks_exact(self.padded_width as usize) // Exclude the padding that we added. 
.map(|row| &row[COEFFICIENTS_PADDING as usize..][..self.width as usize]) .skip(COEFFICIENTS_PADDING as usize) .take(self.height as usize) } fn set_sign(&mut self, pos: Position, sign: u8) { self.coefficients[pos.index(self.padded_width)].set_sign(sign); } fn arithmetic_decoder_context(&mut self, ctx_label: u8) -> &mut ArithmeticDecoderContext { &mut self.contexts[ctx_label as usize] } /// Reset each context to the initial state defined in table D.7. fn reset_contexts(&mut self) { for context in &mut self.contexts { context.reset(); } self.contexts[0].reset_with_index(4); self.contexts[17].reset_with_index(3); self.contexts[18].reset_with_index(46); } /// Reset state that is transient for each bitplane that is decoded. fn reset_for_next_bitplane(&mut self) { for el in &mut self.coefficient_states { el.set_zero_coded(0); } } #[inline(always)] fn is_significant(&self, position: Position) -> bool { self.coefficient_states[position.index(self.padded_width)].is_significant() } #[inline(always)] fn set_significant(&mut self, position: Position) { let idx = position.index(self.padded_width); let is_significant = self.coefficient_states[idx].is_significant(); if !is_significant { self.coefficient_states[idx].set_significant(); // Update all neighbors so they know this coefficient is significant // now. 
self.neighbor_significances[position.top_left().index(self.padded_width)] .set_bottom_right(); self.neighbor_significances[position.top().index(self.padded_width)].set_bottom(); self.neighbor_significances[position.top_right().index(self.padded_width)] .set_bottom_left(); self.neighbor_significances[position.left().index(self.padded_width)].set_right(); self.neighbor_significances[position.right().index(self.padded_width)].set_left(); self.neighbor_significances[position.bottom_left().index(self.padded_width)] .set_top_right(); self.neighbor_significances[position.bottom().index(self.padded_width)].set_top(); self.neighbor_significances[position.bottom_right().index(self.padded_width)] .set_top_left(); } } #[inline(always)] fn set_zero_coded(&mut self, position: Position) { self.coefficient_states[position.index(self.padded_width)].set_zero_coded(1); } #[inline(always)] fn set_magnitude_refined(&mut self, position: Position) { self.coefficient_states[position.index(self.padded_width)].set_magnitude_refined(); } #[inline(always)] fn magnitude_refinement(&self, position: Position) -> u8 { self.coefficient_states[position.index(self.padded_width)].magnitude_refinement() } #[inline(always)] fn is_zero_coded(&self, position: Position) -> bool { self.coefficient_states[position.index(self.padded_width)].is_zero_coded() } #[inline(always)] fn push_magnitude_bit(&mut self, position: Position, bit: u32) { let idx = position.index(self.padded_width); self.coefficients[idx].push_bit_at(bit, self.current_bit_position); } #[inline(always)] fn sign(&self, position: Position) -> u8 { self.coefficients[position.index(self.padded_width)].sign() as u8 } #[inline(always)] fn neighbor_in_next_stripe(&self, pos: Position) -> bool { let neighbor = pos.bottom(); neighbor.real_y() < self.height && (neighbor.real_y() >> 2) > (pos.real_y() >> 2) } #[inline(always)] fn neighborhood_significance_states(&self, pos: Position) -> u8 { let neighbors = 
&self.neighbor_significances[pos.index(self.padded_width)]; if self.style.vertically_causal_context && self.neighbor_in_next_stripe(pos) { neighbors.all_without_bottom() } else { neighbors.all() } } } /// Perform the cleanup pass, specified in D.3.4. /// /// See also the flow chart in Figure 7.3 in the JPEG2000 book. fn cleanup_pass(ctx: &mut BitPlaneDecodeContext, decoder: &mut impl BitDecoder) -> Option<()> { for_each_position( ctx.width, ctx.height, #[inline(always)] |cur_pos| { if !ctx.is_significant(*cur_pos) && !ctx.is_zero_coded(*cur_pos) { let use_rl = cur_pos.real_y() % 4 == 0 && (ctx.height - cur_pos.real_y()) >= 4 && ctx.neighborhood_significance_states(*cur_pos) == 0 && ctx.neighborhood_significance_states(Position::new_index( cur_pos.index_x, cur_pos.index_y + 1, )) == 0 && ctx.neighborhood_significance_states(Position::new_index( cur_pos.index_x, cur_pos.index_y + 2, )) == 0 && ctx.neighborhood_significance_states(Position::new_index( cur_pos.index_x, cur_pos.index_y + 3, )) == 0; let bit = if use_rl { // "If the four contiguous coefficients in the column being scanned are all decoded // in the cleanup pass and the context label for all is 0 (including context // coefficients from previous magnitude, significance and cleanup passes), then the // unique run-length context is given to the arithmetic decoder along with the bit // stream." let bit = decoder.read_bit(ctx.arithmetic_decoder_context(17))?; if bit == 0 { // "If the symbol 0 is returned, then all four contiguous coefficients in // the column remain insignificant and are set to zero." ctx.push_magnitude_bit(*cur_pos, 0); for _ in 0..3 { cur_pos.index_y += 1; ctx.push_magnitude_bit(*cur_pos, 0); } return Some(()); } else { // "Otherwise, if the symbol 1 is returned, then at least // one of the four contiguous coefficients in the column is // significant. 
The next two bits, returned with the // UNIFORM context (index 46 in Table C.2), denote which // coefficient from the top of the column down is the first // to be found significant." let mut num_zeroes = decoder.read_bit(ctx.arithmetic_decoder_context(18))?; num_zeroes = (num_zeroes << 1) | decoder.read_bit(ctx.arithmetic_decoder_context(18))?; for _ in 0..num_zeroes { ctx.push_magnitude_bit(*cur_pos, 0); cur_pos.index_y += 1; } 1 } } else { let ctx_label = context_label_zero_coding(*cur_pos, ctx); decoder.read_bit(ctx.arithmetic_decoder_context(ctx_label))? }; ctx.push_magnitude_bit(*cur_pos, bit); if bit == 1 { decode_sign_bit(*cur_pos, ctx, decoder); ctx.set_significant(*cur_pos); } } Some(()) }, ) } /// Perform the significance propagation pass (Section D.3.1). /// /// See also the flow chart in Figure 7.4 in the JPEG2000 book. fn significance_propagation_pass( ctx: &mut BitPlaneDecodeContext, decoder: &mut impl BitDecoder, ) -> Option<()> { for_each_position( ctx.width, ctx.height, #[inline(always)] |cur_pos| { // "The significance propagation pass only includes bits of coefficients // that were insignificant (the significance state has yet to be set) // and have a non-zero context." if !ctx.is_significant(*cur_pos) && ctx.neighborhood_significance_states(*cur_pos) != 0 { let ctx_label = context_label_zero_coding(*cur_pos, ctx); let bit = decoder.read_bit(ctx.arithmetic_decoder_context(ctx_label))?; ctx.push_magnitude_bit(*cur_pos, bit); ctx.set_zero_coded(*cur_pos); // "If the value of this bit is 1 then the significance // state is set to 1 and the immediate next bit to be decoded is // the sign bit for the coefficient. Otherwise, the significance // state remains 0." if bit == 1 { decode_sign_bit(*cur_pos, ctx, decoder)?; ctx.set_significant(*cur_pos); } } Some(()) }, ) } /// Perform the magnitude refinement pass, specified in Section D.3.3. /// /// See also the flow chart in Figure 7.5 in the JPEG2000 book. 
fn magnitude_refinement_pass(
    ctx: &mut BitPlaneDecodeContext,
    decoder: &mut impl BitDecoder,
) -> Option<()> {
    for_each_position(
        ctx.width,
        ctx.height,
        #[inline(always)]
        |cur_pos| {
            // Only already-significant coefficients are refined, and ones that
            // were zero coded in the current bitplane's significance
            // propagation pass are excluded.
            if ctx.is_significant(*cur_pos) && !ctx.is_zero_coded(*cur_pos) {
                let ctx_label = context_label_magnitude_refinement_coding(*cur_pos, ctx);
                let bit = decoder.read_bit(ctx.arithmetic_decoder_context(ctx_label))?;
                ctx.push_magnitude_bit(*cur_pos, bit);
                ctx.set_magnitude_refined(*cur_pos);
            }

            Some(())
        },
    )
}

/// Visit every coefficient position of a `width` x `height` code-block in
/// stripe scan order, calling `action` for each. `action` may advance the
/// position's `index_y` itself (used by the run-length mode of the cleanup
/// pass) and aborts the whole scan by returning `None`.
fn for_each_position(
    width: u32,
    height: u32,
    mut action: impl FnMut(&mut Position) -> Option<()>,
) -> Option<()> {
    // "Each bit-plane of a code-block is scanned in a particular order.
    // Starting at the top left, the first four coefficients of the
    // first column are scanned, followed by the first four coefficients of
    // the second column and so on, until the right side of the code-block
    // is reached. The scan then returns to the left of the code-block and
    // the second set of four coefficients in each column is scanned. The
    // process is continued to the bottom of the code-block. If the
    // code-block height is not divisible by 4, the last set of coefficients
    // scanned in each column will contain fewer than 4 members."
    for base_row in (0..height).step_by(4) {
        for x in 0..width {
            let mut cur_pos = Position::new(x, base_row);

            while cur_pos.real_y() < (base_row + 4).min(height) {
                action(&mut cur_pos)?;
                cur_pos.index_y += 1;
            }
        }
    }

    Some(())
}

/// See `context_label_sign_coding`. This table contains all context labels
/// for each combination of the bit-packed field. (0, 0) represent
/// impossible combinations.
#[rustfmt::skip] const SIGN_CONTEXT_LOOKUP: [(u8, u8); 256] = [ (9,0), (10,0), (10,1), (0,0), (12,0), (13,0), (11,0), (0,0), (12,1), (11,1), (13,1), (0,0), (0,0), (0,0), (0,0), (0,0), (12,0), (13,0), (11,0), (0,0), (12,0), (13,0), (11,0), (0,0), (9,0), (10,0), (10,1), (0,0), (0,0), (0,0), (0,0), (0,0), (12,1), (11,1), (13,1), (0,0), (9,0), (10,0), (10,1), (0,0), (12,1), (11,1), (13,1), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (10,0), (10,0), (9,0), (0,0), (13,0), (13,0), (12,0), (0,0), (11,1), (11,1), (12,1), (0,0), (0,0), (0,0), (0,0), (0,0), (13,0), (13,0), (12,0), (0,0), (13,0), (13,0), (12,0), (0,0), (10,0), (10,0), (9,0), (0,0), (0,0), (0,0), (0,0), (0,0), (11,1), (11,1), (12,1), (0,0), (10,0), (10,0), (9,0), (0,0), (11,1), (11,1), (12,1), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (10,1), (9,0), (10,1), (0,0), (11,0), (12,0), (11,0), (0,0), (13,1), (12,1), (13,1), (0,0), (0,0), (0,0), (0,0), (0,0), (11,0), (12,0), (11,0), (0,0), (11,0), (12,0), (11,0), (0,0), (10,1), (9,0), (10,1), (0,0), (0,0), (0,0), (0,0), (0,0), (13,1), (12,1), (13,1), (0,0), (10,1), (9,0), (10,1), (0,0), (13,1), (12,1), (13,1), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0), ]; #[rustfmt::skip] const ZERO_CTX_LL_LH_LOOKUP: [u8; 256] = [ 0, 3, 1, 
3, 5, 7, 6, 7, 1, 3, 2, 3, 6, 7, 6, 7, 5, 7, 6, 7, 8, 8, 8, 8, 6, 7, 6, 7, 8, 8, 8, 8, 1, 3, 2, 3, 6, 7, 6, 7, 2, 3, 2, 3, 6, 7, 6, 7, 6, 7, 6, 7, 8, 8, 8, 8, 6, 7, 6, 7, 8, 8, 8, 8, 3, 4, 3, 4, 7, 7, 7, 7, 3, 4, 3, 4, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 8, 8, 8, 8, 3, 4, 3, 4, 7, 7, 7, 7, 3, 4, 3, 4, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 8, 8, 8, 8, 1, 3, 2, 3, 6, 7, 6, 7, 2, 3, 2, 3, 6, 7, 6, 7, 6, 7, 6, 7, 8, 8, 8, 8, 6, 7, 6, 7, 8, 8, 8, 8, 2, 3, 2, 3, 6, 7, 6, 7, 2, 3, 2, 3, 6, 7, 6, 7, 6, 7, 6, 7, 8, 8, 8, 8, 6, 7, 6, 7, 8, 8, 8, 8, 3, 4, 3, 4, 7, 7, 7, 7, 3, 4, 3, 4, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 8, 8, 8, 8, 3, 4, 3, 4, 7, 7, 7, 7, 3, 4, 3, 4, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 7, 7, 8, 8, 8, 8, ]; #[rustfmt::skip] const ZERO_CTX_HL_LOOKUP: [u8; 256] = [ 0, 5, 1, 6, 3, 7, 3, 7, 1, 6, 2, 6, 3, 7, 3, 7, 3, 7, 3, 7, 4, 7, 4, 7, 3, 7, 3, 7, 4, 7, 4, 7, 1, 6, 2, 6, 3, 7, 3, 7, 2, 6, 2, 6, 3, 7, 3, 7, 3, 7, 3, 7, 4, 7, 4, 7, 3, 7, 3, 7, 4, 7, 4, 7, 5, 8, 6, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 1, 6, 2, 6, 3, 7, 3, 7, 2, 6, 2, 6, 3, 7, 3, 7, 3, 7, 3, 7, 4, 7, 4, 7, 3, 7, 3, 7, 4, 7, 4, 7, 2, 6, 2, 6, 3, 7, 3, 7, 2, 6, 2, 6, 3, 7, 3, 7, 3, 7, 3, 7, 4, 7, 4, 7, 3, 7, 3, 7, 4, 7, 4, 7, 6, 8, 6, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 6, 8, 6, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, ]; #[rustfmt::skip] const ZERO_CTX_HH_LOOKUP: [u8; 256] = [ 0, 1, 3, 4, 1, 2, 4, 5, 3, 4, 6, 7, 4, 5, 7, 7, 1, 2, 4, 5, 2, 2, 5, 5, 4, 5, 7, 7, 5, 5, 7, 7, 3, 4, 6, 7, 4, 5, 7, 7, 6, 7, 8, 8, 7, 7, 8, 8, 4, 5, 7, 7, 5, 5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 1, 2, 4, 5, 2, 2, 5, 5, 4, 5, 7, 7, 5, 5, 7, 7, 2, 2, 5, 5, 2, 2, 5, 5, 5, 5, 7, 7, 5, 5, 7, 7, 4, 5, 7, 7, 5, 5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 5, 5, 7, 7, 5, 
5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 3, 4, 6, 7, 4, 5, 7, 7, 6, 7, 8, 8, 7, 7, 8, 8, 4, 5, 7, 7, 5, 5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 6, 7, 8, 8, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 8, 8, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 5, 7, 7, 5, 5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 5, 5, 7, 7, 5, 5, 7, 7, 7, 7, 8, 8, 7, 7, 8, 8, 7, 7, 8, 8, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 8, 8, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, ]; /// Decode a sign bit (Section D.3.2). #[inline(always)] fn decode_sign_bit( pos: Position, ctx: &mut BitPlaneDecodeContext, decoder: &mut T, ) -> Option<()> { /// Based on Table D.2. #[inline(always)] fn context_label_sign_coding(pos: Position, ctx: &BitPlaneDecodeContext) -> (u8, u8) { // A lot of subtleties going on here, all in the interest of achieving // the best performance. Fundamentally, we need to determine the // significances as well as signs of the four neighbors (i.e. not // including the diagonal neighbors) and based on what the sum of signs // is, we assign a context label. // First, let's get all neighbor significances and mask out the diagonals. let significances = ctx.neighborhood_significance_states(pos) & 0b0101_0101; // Get all the signs. let left_sign = ctx.sign(pos.left()); let right_sign = ctx.sign(pos.right()); let top_sign = ctx.sign(pos.top()); let bottom_sign = if ctx.style.vertically_causal_context && ctx.neighbor_in_next_stripe(pos) { 0 } else { ctx.sign(pos.bottom()) }; // Due to the specific layout of `NeighborSignificances`, direct neighbors // and diagonals are interleaved. Therefore, we create a new bit-packed // representation that indicates whether the top/left/right/bottom sign // is positive, negative, or insignificant. We need two bits for this. // 00 represents insignificant, 01 positive and 10 negative. 11 // is an invalid combination. 
let signs = (top_sign << 6) | (left_sign << 4) | (right_sign << 2) | bottom_sign; let negative_significances = significances & signs; let positive_significances = significances & !signs; let merged_significances = (negative_significances << 1) | positive_significances; // Now we can just perform one single lookup! SIGN_CONTEXT_LOOKUP[merged_significances as usize] } let (ctx_label, xor_bit) = context_label_sign_coding(pos, ctx); let ad_ctx = ctx.arithmetic_decoder_context(ctx_label); let sign_bit = if T::IS_BYPASS { decoder.read_bit(ad_ctx)? } else { decoder.read_bit(ad_ctx)? ^ xor_bit as u32 }; ctx.set_sign(pos, sign_bit as u8); Some(()) } /// Return the context label for zero coding (Section D.3.1). #[inline(always)] fn context_label_zero_coding(pos: Position, ctx: &BitPlaneDecodeContext) -> u8 { let neighbors = ctx.neighborhood_significance_states(pos); // Once again, the neighbors field is bit-packed, so we can just generate // a table for all u8 values and assign the correct context based on the // exact value of that field. match ctx.sub_band_type { SubBandType::LowLow | SubBandType::LowHigh => ZERO_CTX_LL_LH_LOOKUP[neighbors as usize], SubBandType::HighLow => ZERO_CTX_HL_LOOKUP[neighbors as usize], SubBandType::HighHigh => ZERO_CTX_HH_LOOKUP[neighbors as usize], } } /// Return the context label for magnitude refinement coding (Table D.4). fn context_label_magnitude_refinement_coding(pos: Position, ctx: &BitPlaneDecodeContext) -> u8 { // If magnitude refined, then 16. let m1 = ctx.magnitude_refinement(pos) * 16; // Else: If at least one neighbor is significant then 15, else 14. let m2 = 14 + ctx.neighborhood_significance_states(pos).min(1); u8::max(m1, m2) } #[derive(Default, Copy, Clone, Debug)] struct Position { // Since we use a padding scheme for bitplane decoding (so that we don't need // to special-case the neighbors of border values), these x and y values // are always COEFFICIENTS_PADDING more than the actual x and y index. 
index_x: u32, index_y: u32, } impl Position { fn new(x: u32, y: u32) -> Self { Self { index_x: x + COEFFICIENTS_PADDING, index_y: y + COEFFICIENTS_PADDING, } } fn new_index(x: u32, y: u32) -> Self { Self { index_x: x, index_y: y, } } fn left(&self) -> Self { Self::new_index(self.index_x - 1, self.index_y) } fn right(&self) -> Self { Self::new_index(self.index_x + 1, self.index_y) } fn top(&self) -> Self { Self::new_index(self.index_x, self.index_y - 1) } fn bottom(&self) -> Self { Self::new_index(self.index_x, self.index_y + 1) } fn top_left(&self) -> Self { Self::new_index(self.index_x - 1, self.index_y - 1) } fn top_right(&self) -> Self { Self::new_index(self.index_x + 1, self.index_y - 1) } fn bottom_left(&self) -> Self { Self::new_index(self.index_x - 1, self.index_y + 1) } fn bottom_right(&self) -> Self { Self::new_index(self.index_x + 1, self.index_y + 1) } fn real_y(&self) -> u32 { self.index_y - 1 } fn index(&self, padded_width: u32) -> usize { self.index_x as usize + self.index_y as usize * padded_width as usize } } // We use a trait so that we can mock the arithmetic decoder for tests. trait BitDecoder { const IS_BYPASS: bool; fn read_bit(&mut self, context: &mut ArithmeticDecoderContext) -> Option; } impl BitDecoder for ArithmeticDecoder<'_> { const IS_BYPASS: bool = false; #[inline(always)] fn read_bit(&mut self, context: &mut ArithmeticDecoderContext) -> Option { Some(Self::read_bit(self, context)) } } struct BypassDecoder<'a>(BitReader<'a>, bool); impl<'a> BypassDecoder<'a> { fn new(data: &'a [u8], strict: bool) -> Self { Self(BitReader::new(data), strict) } } impl BitDecoder for BypassDecoder<'_> { const IS_BYPASS: bool = true; fn read_bit(&mut self, _: &mut ArithmeticDecoderContext) -> Option { self.0.read_bits_with_stuffing(1).or({ if !self.1 { // If not in strict mode, just pad with ones. Not sure if // zeroes would be better here, but since the arithmetic decoder // is also padded with 0xFF maybe 1 is the better choice? 
Some(1) } else { // We have too little data, return `None`. None } }) } } hayro-jpeg2000-0.3.2/src/j2c/build.rs000064400000000000000000000247011046102023000151130ustar 00000000000000//! Building and setting up decompositions, sub-bands, precincts and code-blocks. use super::decode::{DecompositionStorage, TileDecodeContext, TileDecompositions}; use super::rect::IntRect; use super::tag_tree::TagTree; use super::tile::{ResolutionTile, Tile}; use crate::error::{DecodingError, Result}; use alloc::vec; use core::iter; use core::ops::Range; /// Build and allocate all necessary structures to process the code-blocks /// for a specific tile. Also parses the segments for each code-block. pub(crate) fn build( tile: &Tile<'_>, tile_ctx: &mut TileDecodeContext<'_>, storage: &mut DecompositionStorage<'_>, ) -> Result<()> { build_decompositions(tile, tile_ctx, storage) } fn build_decompositions( tile: &Tile<'_>, tile_ctx: &mut TileDecodeContext<'_>, storage: &mut DecompositionStorage<'_>, ) -> Result<()> { let mut total_coefficients = 0; for component_tile in tile.component_tiles() { total_coefficients += component_tile.rect.width() as usize * component_tile.rect.height() as usize; } if storage.coefficients.is_empty() { // Fast path that requests pre-zeroed memory from the OS where available. 
storage.coefficients = vec![0.0; total_coefficients]; } else { storage.coefficients.resize(total_coefficients, 0.0); } let mut coefficient_counter = 0; for (component_idx, component_tile) in tile.component_tiles().enumerate() { let d_start = storage.decompositions.len(); let mut resolution_tiles = component_tile.resolution_tiles(); let mut build_sub_band = |sub_band_type: SubBandType, resolution_tile: &ResolutionTile<'_>, storage: &mut DecompositionStorage<'_>| -> Result { let sub_band_rect = resolution_tile.sub_band_rect(sub_band_type); ltrace!( "r {} making sub-band {} for component {}", resolution_tile.resolution, sub_band_type as u8, component_idx ); ltrace!( "Sub-band rect: [{},{} {}x{}], ll rect [{},{} {}x{}]", sub_band_rect.x0, sub_band_rect.y0, sub_band_rect.width(), sub_band_rect.height(), resolution_tile.rect.x0, resolution_tile.rect.y0, resolution_tile.rect.width(), resolution_tile.rect.height(), ); let precincts = build_precincts(resolution_tile, sub_band_rect, tile_ctx, storage)?; let added_coefficients = (sub_band_rect.width() * sub_band_rect.height()) as usize; let coefficients = coefficient_counter..(coefficient_counter + added_coefficients); coefficient_counter += added_coefficients; let idx = storage.sub_bands.len(); storage.sub_bands.push(SubBand { sub_band_type, rect: sub_band_rect, precincts: precincts.clone(), coefficients, }); Ok(idx) }; // Resolution 0 always is the LL sub-band. 
let ll_resolution_tile = resolution_tiles.next().unwrap(); let first_ll_sub_band = build_sub_band(SubBandType::LowLow, &ll_resolution_tile, storage)?; for resolution_tile in resolution_tiles { let decomposition = Decomposition { sub_bands: [ build_sub_band(SubBandType::HighLow, &resolution_tile, storage)?, build_sub_band(SubBandType::LowHigh, &resolution_tile, storage)?, build_sub_band(SubBandType::HighHigh, &resolution_tile, storage)?, ], rect: resolution_tile.rect, }; storage.decompositions.push(decomposition); } let d_end = storage.decompositions.len(); storage.tile_decompositions.push(TileDecompositions { decompositions: d_start..d_end, first_ll_sub_band, }); } assert_eq!(coefficient_counter, storage.coefficients.len()); Ok(()) } fn build_precincts( resolution_tile: &ResolutionTile<'_>, sub_band_rect: IntRect, tile_ctx: &mut TileDecodeContext<'_>, storage: &mut DecompositionStorage<'_>, ) -> Result> { let start = storage.precincts.len(); for precinct_data in resolution_tile .precincts() .ok_or(DecodingError::InvalidPrecinct)? { let precinct_rect = precinct_data.rect; let cb_width = resolution_tile.code_block_width(); let cb_height = resolution_tile.code_block_height(); // See Figure B.9. Conceptually, the area of code-blocks is aligned // to the width/height of a code block. let cb_x0 = (u32::max(precinct_rect.x0, sub_band_rect.x0) / cb_width) * cb_width; let cb_y0 = (u32::max(precinct_rect.y0, sub_band_rect.y0) / cb_height) * cb_height; let cb_x1 = (u32::min(precinct_rect.x1, sub_band_rect.x1).div_ceil(cb_width)) * cb_width; let cb_y1 = (u32::min(precinct_rect.y1, sub_band_rect.y1).div_ceil(cb_height)) * cb_height; let code_block_area = IntRect::from_ltrb(cb_x0, cb_y0, cb_x1, cb_y1); // If the sub-band is empty, there are no code-blocks, but due to our // flooring/ceiling above, we would get 1 code-block in each direction. // Because of this, we need to special-case this. 
let code_blocks_x = if sub_band_rect.width() == 0 { 0 } else { code_block_area.width() / cb_width }; let code_blocks_y = if sub_band_rect.height() == 0 { 0 } else { code_block_area.height() / cb_height }; ltrace!( "Precinct rect: [{},{} {}x{}], num_code_blocks_wide: {}, num_code_blocks_high: {}", precinct_rect.x0, precinct_rect.y0, precinct_rect.width(), precinct_rect.height(), code_blocks_x, code_blocks_y ); let blocks = build_code_blocks( code_block_area, sub_band_rect, resolution_tile, code_blocks_x, code_blocks_y, tile_ctx, storage, ); let code_inclusion_tree = TagTree::new(code_blocks_x, code_blocks_y, &mut storage.tag_tree_nodes); let zero_bitplane_tree = TagTree::new(code_blocks_x, code_blocks_y, &mut storage.tag_tree_nodes); storage.precincts.push(Precinct { code_blocks: blocks, code_inclusion_tree, zero_bitplane_tree, }); } let end = storage.precincts.len(); Ok(start..end) } fn build_code_blocks( code_block_area: IntRect, sub_band_rect: IntRect, tile_instance: &ResolutionTile<'_>, code_blocks_x: u32, code_blocks_y: u32, tile_ctx: &mut TileDecodeContext<'_>, storage: &mut DecompositionStorage<'_>, ) -> Range { let mut y = code_block_area.y0; let code_block_width = tile_instance.code_block_width(); let code_block_height = tile_instance.code_block_height(); let start = storage.code_blocks.len(); for y_idx in 0..code_blocks_y { let mut x = code_block_area.x0; for x_idx in 0..code_blocks_x { // "Code-blocks in the partition may extend beyond the boundaries of // the sub-band coefficients. When this happens, only the // coefficients lying within the sub-band are coded using the method // described in Annex D." let area = IntRect::from_xywh(x, y, code_block_width, code_block_height) .intersect(sub_band_rect); ltrace!( "Codeblock rect: [{},{} {}x{}]", area.x0, area.y0, area.width(), area.height(), ); let start = storage.layers.len(); storage.layers.extend(iter::repeat_n( Layer { // This will be updated once we actually read the // layer segments. 
segments: None, }, tile_ctx.tile.num_layers as usize, )); let end = storage.layers.len(); storage.code_blocks.push(CodeBlock { x_idx, y_idx, rect: area, has_been_included: false, missing_bit_planes: 0, l_block: 3, number_of_coding_passes: 0, layers: start..end, non_empty_layer_count: 0, }); x += code_block_width; } y += code_block_height; } let end = storage.code_blocks.len(); start..end } pub(crate) struct Decomposition { /// In the order low-high, high-low and high-high. pub(crate) sub_bands: [usize; 3], /// The rectangle of the decomposition. pub(crate) rect: IntRect, } #[derive(Clone)] pub(crate) struct SubBand { pub(crate) sub_band_type: SubBandType, pub(crate) rect: IntRect, pub(crate) precincts: Range, pub(crate) coefficients: Range, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum SubBandType { LowLow = 0, HighLow = 1, LowHigh = 2, HighHigh = 3, } #[derive(Clone)] pub(crate) struct Precinct { pub(crate) code_blocks: Range, pub(crate) code_inclusion_tree: TagTree, pub(crate) zero_bitplane_tree: TagTree, } pub(crate) struct PrecinctData { /// The x coordinate mapped back to the reference grid. pub(crate) r_x: u32, /// The y coordinate mapped back to the reference grid. pub(crate) r_y: u32, /// The actual rectangle of the precinct (in the sub-band coordinate /// system). pub(crate) rect: IntRect, /// The index of the precinct in the sub-band. 
pub(crate) idx: u64, } #[derive(Clone)] pub(crate) struct CodeBlock { pub(crate) rect: IntRect, pub(crate) x_idx: u32, pub(crate) y_idx: u32, pub(crate) layers: Range, pub(crate) has_been_included: bool, pub(crate) missing_bit_planes: u8, pub(crate) number_of_coding_passes: u8, pub(crate) l_block: u32, pub(crate) non_empty_layer_count: u8, } pub(crate) struct Segment<'a> { pub(crate) idx: u8, pub(crate) coding_pases: u8, pub(crate) data_length: u32, pub(crate) data: &'a [u8], } #[derive(Clone)] pub(crate) struct Layer { pub(crate) segments: Option>, } hayro-jpeg2000-0.3.2/src/j2c/codestream.rs000064400000000000000000000763301046102023000161470ustar 00000000000000//! Read and decode a JPEG2000 codestream, as described in Annex A. use alloc::vec; use alloc::vec::Vec; use super::DecodeSettings; use super::bitplane::BITPLANE_BIT_SIZE; use super::build::SubBandType; use crate::error::{MarkerError, Result, ValidationError, bail, err}; use crate::reader::BitReader; const MAX_LAYER_COUNT: u8 = 32; const MAX_RESOLUTION_COUNT: u8 = 32; const MAX_PRECINCT_EXPONENT: u8 = 31; #[derive(Debug)] pub(crate) struct Header<'a> { pub(crate) size_data: SizeData, pub(crate) global_coding_style: CodingStyleDefault, pub(crate) component_infos: Vec, pub(crate) ppm_packets: Vec>, pub(crate) skipped_resolution_levels: u8, /// Whether strict mode is enabled for decoding. pub(crate) strict: bool, } #[derive(Debug, Clone)] pub(crate) struct PpmMarkerData<'a> { pub(crate) sequence_idx: u8, pub(crate) packets: Vec>, } #[derive(Debug, Clone)] pub(crate) struct PpmPacket<'a> { pub(crate) data: &'a [u8], } pub(crate) fn read_header<'a>( reader: &mut BitReader<'a>, settings: &DecodeSettings, ) -> Result> { if reader.read_marker()? 
!= markers::SIZ { bail!(MarkerError::Expected("SIZ")); } let mut size_data = size_marker(reader)?; let mut cod = None; let mut qcd = None; let num_components = size_data.component_sizes.len() as u16; let mut cod_components = vec![None; num_components as usize]; let mut qcd_components = vec![None; num_components as usize]; let mut ppm_markers = vec![]; loop { match reader.peek_marker().ok_or(MarkerError::Invalid)? { markers::SOT => break, markers::COD => { reader.read_marker()?; cod = Some(cod_marker(reader).ok_or(MarkerError::ParseFailure("COD"))?); } markers::COC => { reader.read_marker()?; let (component_index, coc) = coc_marker(reader, num_components).ok_or(MarkerError::ParseFailure("COC"))?; *cod_components .get_mut(component_index as usize) .ok_or(MarkerError::ParseFailure("COC"))? = Some(coc); } markers::QCD => { reader.read_marker()?; qcd = Some(qcd_marker(reader).ok_or(MarkerError::ParseFailure("QCD"))?); } markers::QCC => { reader.read_marker()?; let (component_index, qcc) = qcc_marker(reader, num_components).ok_or(MarkerError::ParseFailure("QCC"))?; *qcd_components .get_mut(component_index as usize) .ok_or(MarkerError::ParseFailure("QCC"))? = Some(qcc); } markers::RGN => { reader.read_marker()?; rgn_marker(reader).ok_or(MarkerError::ParseFailure("RGN"))?; } markers::TLM => { reader.read_marker()?; tlm_marker(reader).ok_or(MarkerError::ParseFailure("TLM"))?; } markers::COM => { reader.read_marker()?; com_marker(reader).ok_or(MarkerError::ParseFailure("COM"))?; } markers::PPM => { reader.read_marker()?; ppm_markers.push(ppm_marker(reader).ok_or(MarkerError::ParseFailure("PPM"))?); } markers::CRG => { reader.read_marker()?; skip_marker_segment(reader); } (0x30..=0x3F) => { // "All markers with the marker code between 0xFF30 and 0xFF3F // have no marker segment parameters. They shall be skipped by // the decoder." 
reader.read_marker()?; // skip_marker_segment(reader); } _ => { bail!(MarkerError::Unsupported); } } } let cod = cod.ok_or(MarkerError::Missing("COD"))?; let qcd = qcd.ok_or(MarkerError::Missing("QCD"))?; let component_infos: Vec = size_data .component_sizes .iter() .enumerate() .map(|(idx, csi)| ComponentInfo { size_info: *csi, coding_style: cod_components[idx] .clone() .map(|mut c| { c.flags.raw |= cod.component_parameters.flags.raw; c }) .unwrap_or(cod.component_parameters.clone()), quantization_info: qcd_components[idx].clone().unwrap_or(qcd.clone()), }) .collect(); // Components can have different number of resolution levels. In that case, we // can only skip as many resolution levels as the component with the smallest // number of resolution levels. let min_num_resolution_levels = component_infos .iter() .map(|c| c.num_resolution_levels()) .min() .unwrap(); let skipped_resolution_levels = if let Some((target_width, target_height)) = settings.target_resolution { let width_log = (size_data.image_width() / target_width) .checked_ilog2() .unwrap_or(0); let height_log = (size_data.image_height() / target_height) .checked_ilog2() .unwrap_or(0); width_log.min(height_log) as u8 } else { 0 } .min(min_num_resolution_levels - 1); // If the user defined a maximum resolution level that is lower than the // maximum available one, the final image needs to be shrinked further. 
size_data.x_resolution_shrink_factor *= 1 << skipped_resolution_levels; size_data.y_resolution_shrink_factor *= 1 << skipped_resolution_levels; ppm_markers.sort_by(|p0, p1| p0.sequence_idx.cmp(&p1.sequence_idx)); let header = Header { size_data, global_coding_style: cod.clone(), component_infos, ppm_packets: ppm_markers .into_iter() .flat_map(|i| i.packets) .filter_map(|p| if p.data.is_empty() { None } else { Some(p) }) .collect(), skipped_resolution_levels, strict: settings.strict, }; validate(&header)?; Ok(header) } fn validate(header: &Header<'_>) -> Result<()> { for info in &header.component_infos { let max_resolution_idx = info.coding_style.parameters.num_resolution_levels - 1; let quantization_style = info.quantization_info.quantization_style; let num_precinct_exponents = info.quantization_info.step_sizes.len(); if num_precinct_exponents == 0 { bail!(ValidationError::MissingPrecinctExponents); } else if matches!( quantization_style, QuantizationStyle::NoQuantization | QuantizationStyle::ScalarExpounded ) { // See the accesses in the `exponent_mantissa` method. The largest // access is 1 + (max_resolution_idx - 1) * 3 + 2. if max_resolution_idx == 0 { if num_precinct_exponents == 0 { bail!(ValidationError::InsufficientExponents); } } else if 1 + (max_resolution_idx as usize - 1) * 3 + 2 >= num_precinct_exponents { bail!(ValidationError::InsufficientExponents); } } } Ok(()) } #[derive(Debug, Clone)] pub(crate) struct ComponentInfo { pub(crate) size_info: ComponentSizeInfo, pub(crate) coding_style: CodingStyleComponent, pub(crate) quantization_info: QuantizationInfo, } impl ComponentInfo { pub(crate) fn exponent_mantissa( &self, sub_band_type: SubBandType, resolution: u8, ) -> Result<(u16, u16)> { let n_ll = self.coding_style.parameters.num_decomposition_levels; let sb_index = match sub_band_type { // TODO: Shouldn't be reached. 
SubBandType::LowLow => u16::MAX, SubBandType::HighLow => 0, SubBandType::LowHigh => 1, SubBandType::HighHigh => 2, }; let step_sizes = &self.quantization_info.step_sizes; match self.quantization_info.quantization_style { QuantizationStyle::NoQuantization | QuantizationStyle::ScalarExpounded => { let entry = if resolution == 0 { step_sizes.first() } else { step_sizes.get(1 + (resolution as usize - 1) * 3 + sb_index as usize) }; Ok(entry .map(|s| (s.exponent, s.mantissa)) .ok_or(ValidationError::MissingStepSize)?) } QuantizationStyle::ScalarDerived => { let (e_0, mantissa) = step_sizes .first() .map(|s| (s.exponent, s.mantissa)) .ok_or(ValidationError::MissingStepSize)?; let n_b = if resolution == 0 { n_ll as u16 } else { n_ll as u16 + 1 - resolution as u16 }; let exponent = e_0 .checked_sub(n_ll as u16) .and_then(|e| e.checked_add(n_b)) .ok_or(ValidationError::InvalidExponents)?; Ok((exponent, mantissa)) } } } pub(crate) fn wavelet_transform(&self) -> WaveletTransform { self.coding_style.parameters.transformation } pub(crate) fn num_resolution_levels(&self) -> u8 { self.coding_style.parameters.num_resolution_levels } pub(crate) fn num_decomposition_levels(&self) -> u8 { self.coding_style.parameters.num_decomposition_levels } pub(crate) fn code_block_style(&self) -> CodeBlockStyle { self.coding_style.parameters.code_block_style } } /// Progression order (Table A.16). 
#[derive(Debug, Clone, Copy)] pub(crate) enum ProgressionOrder { LayerResolutionComponentPosition, ResolutionLayerComponentPosition, ResolutionPositionComponentLayer, PositionComponentResolutionLayer, ComponentPositionResolutionLayer, } impl ProgressionOrder { fn from_u8(value: u8) -> Result { match value { 0 => Ok(Self::LayerResolutionComponentPosition), 1 => Ok(Self::ResolutionLayerComponentPosition), 2 => Ok(Self::ResolutionPositionComponentLayer), 3 => Ok(Self::PositionComponentResolutionLayer), 4 => Ok(Self::ComponentPositionResolutionLayer), _ => err!(ValidationError::InvalidProgressionOrder), } } } /// Wavelet transformation type (Table A.20). #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum WaveletTransform { Irreversible97, Reversible53, } impl WaveletTransform { fn from_u8(value: u8) -> Result { match value { 0 => Ok(Self::Irreversible97), 1 => Ok(Self::Reversible53), _ => err!(ValidationError::InvalidTransformation), } } } /// Coding style flags (Table A.13). #[derive(Debug, Clone, Copy, Default)] pub(crate) struct CodingStyleFlags { pub(crate) raw: u8, } impl CodingStyleFlags { fn from_u8(value: u8) -> Self { Self { raw: value } } pub(crate) fn has_precincts(&self) -> bool { (self.raw & 0x01) != 0 } pub(crate) fn may_use_sop_markers(&self) -> bool { (self.raw & 0x02) != 0 } pub(crate) fn uses_eph_marker(&self) -> bool { (self.raw & 0x04) != 0 } } /// Code-block style flags (Table A.19). 
#[derive(Debug, Clone, Copy, Default)] pub(crate) struct CodeBlockStyle { pub(crate) selective_arithmetic_coding_bypass: bool, pub(crate) reset_context_probabilities: bool, pub(crate) termination_on_each_pass: bool, pub(crate) vertically_causal_context: bool, pub(crate) segmentation_symbols: bool, } impl CodeBlockStyle { fn from_u8(value: u8) -> Self { Self { selective_arithmetic_coding_bypass: (value & 0x01) != 0, reset_context_probabilities: (value & 0x02) != 0, termination_on_each_pass: (value & 0x04) != 0, vertically_causal_context: (value & 0x08) != 0, // The predictable termination flag is only informative and // can therefore be ignored. segmentation_symbols: (value & 0x20) != 0, } } } /// Quantization style (Table A.28). #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum QuantizationStyle { NoQuantization, ScalarDerived, ScalarExpounded, } impl QuantizationStyle { fn from_u8(value: u8) -> Result { match value & 0x1F { 0 => Ok(Self::NoQuantization), 1 => Ok(Self::ScalarDerived), 2 => Ok(Self::ScalarExpounded), _ => err!(ValidationError::InvalidQuantizationStyle), } } } #[derive(Clone, Copy, Debug)] pub(crate) struct StepSize { pub(crate) mantissa: u16, pub(crate) exponent: u16, } /// Quantization properties, from the QCD and QCC markers (A.6.4 and A.6.5). #[derive(Clone, Debug)] pub(crate) struct QuantizationInfo { pub(crate) quantization_style: QuantizationStyle, pub(crate) guard_bits: u8, pub(crate) step_sizes: Vec, } /// Default values for coding style, from the COD marker (A.6.1). #[derive(Debug, Clone)] pub(crate) struct CodingStyleDefault { pub(crate) progression_order: ProgressionOrder, pub(crate) num_layers: u8, pub(crate) mct: bool, // This is the default used for all components, if not overridden by COC. pub(crate) component_parameters: CodingStyleComponent, } /// Values of coding style for each component, from the COC marker (A.6.2). 
#[derive(Clone, Debug)] pub(crate) struct CodingStyleComponent { pub(crate) flags: CodingStyleFlags, pub(crate) parameters: CodingStyleParameters, } /// Shared parameters between the COC and COD marker (A.6.1 and A.6.2). #[derive(Clone, Debug)] pub(crate) struct CodingStyleParameters { pub(crate) num_decomposition_levels: u8, pub(crate) num_resolution_levels: u8, pub(crate) code_block_width: u8, pub(crate) code_block_height: u8, pub(crate) code_block_style: CodeBlockStyle, pub(crate) transformation: WaveletTransform, pub(crate) precinct_exponents: Vec<(u8, u8)>, } #[derive(Debug)] pub(crate) struct SizeData { /// Width of the reference grid (Xsiz). pub(crate) reference_grid_width: u32, /// Height of the reference grid (Ysiz). pub(crate) reference_grid_height: u32, /// Horizontal offset from the origin of the reference grid to the /// left side of the image area (`XOsiz`). pub(crate) image_area_x_offset: u32, /// Vertical offset from the origin of the reference grid to the top side of the image area (`YOsiz`). pub(crate) image_area_y_offset: u32, /// Width of one reference tile with respect to the reference grid (`XTSiz`). pub(crate) tile_width: u32, /// Height of one reference tile with respect to the reference grid (`YTSiz`). pub(crate) tile_height: u32, /// Horizontal offset from the origin of the reference grid to the left side of the first tile (`XTOSiz`). pub(crate) tile_x_offset: u32, /// Vertical offset from the origin of the reference grid to the top side of the first tile (`YTOSiz`). pub(crate) tile_y_offset: u32, /// Component information (SSiz/XRSiz/YRSiz). pub(crate) component_sizes: Vec, /// Shrink factor in the x direction. See the comment in the parsing method. pub(crate) x_shrink_factor: u32, /// Shrink factor in the y direction. See the comment in the parsing method. pub(crate) y_shrink_factor: u32, /// Shrink factor in the x direction due to requesting a lower resolution level. 
pub(crate) x_resolution_shrink_factor: u32, /// Shrink factor in the y direction due to requesting a lower resolution level. pub(crate) y_resolution_shrink_factor: u32, } impl SizeData { pub(crate) fn tile_x_coord(&self, idx: u32) -> u32 { // See B-6. idx % self.num_x_tiles() } pub(crate) fn tile_y_coord(&self, idx: u32) -> u32 { // See B-6. idx / self.num_x_tiles() } } /// Component information (A.5.1 and Table A.11). #[derive(Debug, Clone, Copy)] pub(crate) struct ComponentSizeInfo { pub(crate) precision: u8, pub(crate) horizontal_resolution: u8, pub(crate) vertical_resolution: u8, } impl SizeData { /// The number of tiles in the x direction. pub(crate) fn num_x_tiles(&self) -> u32 { // See formula B-5. (self.reference_grid_width - self.tile_x_offset).div_ceil(self.tile_width) } /// The number of tiles in the y direction. pub(crate) fn num_y_tiles(&self) -> u32 { // See formula B-5. (self.reference_grid_height - self.tile_y_offset).div_ceil(self.tile_height) } /// The total number of tiles. pub(crate) fn num_tiles(&self) -> u32 { self.num_x_tiles() * self.num_y_tiles() } /// Return the overall width of the image. pub(crate) fn image_width(&self) -> u32 { (self.reference_grid_width - self.image_area_x_offset) .div_ceil(self.x_shrink_factor * self.x_resolution_shrink_factor) } /// Return the overall height of the image. pub(crate) fn image_height(&self) -> u32 { (self.reference_grid_height - self.image_area_y_offset) .div_ceil(self.y_shrink_factor * self.y_resolution_shrink_factor) } } /// SIZ marker (A.5.1). 
fn size_marker(reader: &mut BitReader<'_>) -> Result { let size_data = size_marker_inner(reader).ok_or(MarkerError::ParseFailure("SIZ"))?; if size_data.tile_width == 0 || size_data.tile_height == 0 || size_data.reference_grid_width == 0 || size_data.reference_grid_height == 0 { bail!(ValidationError::InvalidDimensions); } if size_data.tile_x_offset >= size_data.reference_grid_width || size_data.tile_y_offset >= size_data.reference_grid_height { bail!(ValidationError::InvalidDimensions); } // The tile grid offsets (XTOsiz, YTOsiz) are constrained to be no greater than the // image area offsets (B-3). if size_data.tile_x_offset > size_data.image_area_x_offset || size_data.tile_y_offset > size_data.image_area_y_offset { bail!(crate::TileError::InvalidOffsets); } // Also, the tile size plus the tile offset shall be greater than the image area offset. // This ensures that the first tile (tile 0) will contain at least one reference grid point // from the image area (B-4). if size_data .tile_x_offset .checked_add(size_data.tile_width) .ok_or(crate::TileError::InvalidOffsets)? <= size_data.image_area_x_offset || size_data .tile_y_offset .checked_add(size_data.tile_height) .ok_or(crate::TileError::InvalidOffsets)? <= size_data.image_area_y_offset { bail!(crate::TileError::InvalidOffsets); } for comp in &size_data.component_sizes { if comp.precision == 0 || comp.vertical_resolution == 0 || comp.horizontal_resolution == 0 { bail!(ValidationError::InvalidComponentMetadata); } } const MAX_DIMENSIONS: usize = 60000; if size_data.image_width() as usize > MAX_DIMENSIONS || size_data.image_height() as usize > MAX_DIMENSIONS { bail!(ValidationError::ImageTooLarge); } Ok(size_data) } fn size_marker_inner(reader: &mut BitReader<'_>) -> Option { // Length. let _ = reader.read_u16()?; // Decoder capabilities. 
let _ = reader.read_u16()?; let xsiz = reader.read_u32()?; let ysiz = reader.read_u32()?; let x_osiz = reader.read_u32()?; let y_osiz = reader.read_u32()?; let xt_siz = reader.read_u32()?; let yt_siz = reader.read_u32()?; let xto_siz = reader.read_u32()?; let yto_siz = reader.read_u32()?; let csiz = reader.read_u16()?; if x_osiz >= xsiz || y_osiz >= ysiz { return None; } if csiz == 0 { return None; } let mut components = Vec::with_capacity(csiz as usize); for _ in 0..csiz { let ssiz = reader.read_byte()?; let x_rsiz = reader.read_byte()?; let y_rsiz = reader.read_byte()?; let precision = (ssiz & 0x7F) + 1; // No idea how to process signed images, but as far as I can tell // openjpeg and others just accept it as is, so let's do the same. let _is_signed = (ssiz & 0x80) != 0; // In theory up to 38 is allowed, but we don't support more than that. if precision as u32 > BITPLANE_BIT_SIZE { return None; } components.push(ComponentSizeInfo { precision, horizontal_resolution: x_rsiz, vertical_resolution: y_rsiz, }); } // In case all components are sub-sampled at the same level, we // don't want to render them at the original resolution but instead // reduce their dimension so that we can assume a resolution of 1 for // all components. This makes the images much smaller. let mut x_shrink_factor = 1; let mut y_shrink_factor = 1; let hr = components[0].horizontal_resolution; let vr = components[0].vertical_resolution; let mut same_resolution = true; for component in &components[1..] 
{ same_resolution &= component.horizontal_resolution == hr; same_resolution &= component.vertical_resolution == vr; } if same_resolution { x_shrink_factor = hr as u32; y_shrink_factor = vr as u32; } Some(SizeData { reference_grid_width: xsiz, reference_grid_height: ysiz, image_area_x_offset: x_osiz, image_area_y_offset: y_osiz, tile_width: xt_siz, tile_height: yt_siz, tile_x_offset: xto_siz, tile_y_offset: yto_siz, component_sizes: components, x_shrink_factor, y_shrink_factor, x_resolution_shrink_factor: 1, y_resolution_shrink_factor: 1, }) } fn coding_style_parameters( reader: &mut BitReader<'_>, coding_style: &CodingStyleFlags, ) -> Option { let num_decomposition_levels = reader.read_byte()?; if num_decomposition_levels > MAX_RESOLUTION_COUNT { return None; } let num_resolution_levels = num_decomposition_levels.checked_add(1)?; let code_block_width = reader.read_byte()?.checked_add(2)?; let code_block_height = reader.read_byte()?.checked_add(2)?; let code_block_style = CodeBlockStyle::from_u8(reader.read_byte()?); let transformation = WaveletTransform::from_u8(reader.read_byte()?).ok()?; let mut precinct_exponents = Vec::new(); if coding_style.has_precincts() { // "Entropy coder with precincts defined below." for _ in 0..num_resolution_levels { // Table A.21. let precinct_size = reader.read_byte()?; let width_exp = precinct_size & 0xF; let height_exp = precinct_size >> 4; if width_exp > MAX_PRECINCT_EXPONENT || height_exp > MAX_PRECINCT_EXPONENT { return None; } precinct_exponents.push((width_exp, height_exp)); } } else { // "Entropy coder, precincts with PPx = 15 and PPy = 15" for _ in 0..num_resolution_levels { precinct_exponents.push((15, 15)); } } Some(CodingStyleParameters { num_decomposition_levels, num_resolution_levels, code_block_width, code_block_height, code_block_style, transformation, precinct_exponents, }) } /// COM Marker (A.9.2). fn com_marker(reader: &mut BitReader<'_>) -> Option<()> { skip_marker_segment(reader) } /// TLM marker (A.7.1). 
fn tlm_marker(reader: &mut BitReader<'_>) -> Option<()> { skip_marker_segment(reader) } /// PPM marker (A.7.4). fn ppm_marker<'a>(reader: &mut BitReader<'a>) -> Option> { let segment_len = reader.read_u16()?.checked_sub(2)? as usize; let ppm_data = reader.read_bytes(segment_len)?; let mut packets = vec![]; let mut reader = BitReader::new(ppm_data); let sequence_idx = reader.read_byte()?; // TODO: Handle case where next packet doesn't have nppm parameter. while !reader.at_end() { let packet_len = reader.read_u16()? as usize; let data = reader.read_bytes(packet_len)?; packets.push(PpmPacket { data }); } Some(PpmMarkerData { sequence_idx, packets, }) } /// RGN marker (A.6.3). fn rgn_marker(reader: &mut BitReader<'_>) -> Option<()> { skip_marker_segment(reader) } pub(crate) fn skip_marker_segment(reader: &mut BitReader<'_>) -> Option<()> { let length = reader.read_u16()?.checked_sub(2)?; reader.skip_bytes(length as usize)?; Some(()) } /// COD marker (A.6.1). pub(crate) fn cod_marker(reader: &mut BitReader<'_>) -> Option { // Length. let _ = reader.read_u16()?; let coding_style_flags = CodingStyleFlags::from_u8(reader.read_byte()?); let progression_order = ProgressionOrder::from_u8(reader.read_byte()?).ok()?; let num_layers = reader.read_u16()?; // We don't support more than 32-bit (and thus 32 layers). if num_layers == 0 || num_layers > MAX_LAYER_COUNT as u16 { return None; } let mct = reader.read_byte()? == 1; let coding_style_parameters = coding_style_parameters(reader, &coding_style_flags)?; Some(CodingStyleDefault { progression_order, num_layers: num_layers as u8, mct, component_parameters: CodingStyleComponent { flags: coding_style_flags, parameters: coding_style_parameters, }, }) } /// COC marker (A.6.2). pub(crate) fn coc_marker( reader: &mut BitReader<'_>, csiz: u16, ) -> Option<(u16, CodingStyleComponent)> { // Length. let _ = reader.read_u16()?; let component_index = if csiz < 257 { reader.read_byte()? as u16 } else { reader.read_u16()? 
}; let coding_style = CodingStyleFlags::from_u8(reader.read_byte()?); let parameters = coding_style_parameters(reader, &coding_style)?; let coc = CodingStyleComponent { flags: coding_style, parameters, }; Some((component_index, coc)) } /// QCD marker (A.6.4). pub(crate) fn qcd_marker(reader: &mut BitReader<'_>) -> Option { // Length. let length = reader.read_u16()?; let sqcd_val = reader.read_byte()?; let quantization_style = QuantizationStyle::from_u8(sqcd_val & 0x1F).ok()?; let guard_bits = (sqcd_val >> 5) & 0x07; let remaining_bytes = length.checked_sub(3)? as usize; let mut parameters = quantization_parameters(reader, quantization_style, remaining_bytes)?; parameters.guard_bits = guard_bits; Some(parameters) } /// QCC marker (A.6.5). pub(crate) fn qcc_marker(reader: &mut BitReader<'_>, csiz: u16) -> Option<(u16, QuantizationInfo)> { let length = reader.read_u16()?; let component_index = if csiz < 257 { reader.read_byte()? as u16 } else { reader.read_u16()? }; let sqcc_val = reader.read_byte()?; let quantization_style = QuantizationStyle::from_u8(sqcc_val & 0x1F).ok()?; let guard_bits = (sqcc_val >> 5) & 0x07; let component_index_size = if csiz < 257 { 1 } else { 2 }; let remaining_bytes = length .checked_sub(2)? .checked_sub(component_index_size)? .checked_sub(1)? as usize; let mut parameters = quantization_parameters(reader, quantization_style, remaining_bytes)?; parameters.guard_bits = guard_bits; Some((component_index, parameters)) } fn quantization_parameters( reader: &mut BitReader<'_>, quantization_style: QuantizationStyle, remaining_bytes: usize, ) -> Option { let mut step_sizes = Vec::new(); let irreversible = |val: u16| { let exponent = val >> 11; let mantissa = val & ((1 << 11) - 1); StepSize { exponent, mantissa } }; match quantization_style { QuantizationStyle::NoQuantization => { // 8 bits per band (5 bits exponent, 3 bits reserved) for _ in 0..remaining_bytes { let value = reader.read_byte()? as u16; step_sizes.push(StepSize { // Unused. 
mantissa: 0, exponent: (value >> 3), }); } } QuantizationStyle::ScalarDerived => { let value = reader.read_u16()?; step_sizes.push(irreversible(value)); } QuantizationStyle::ScalarExpounded => { let num_bands = remaining_bytes / 2; for _ in 0..num_bands { let value = reader.read_u16()?; step_sizes.push(irreversible(value)); } } } Some(QuantizationInfo { quantization_style, guard_bits: 0, step_sizes, }) } #[allow( unused, reason = "Not all marker codes are used in every decoding path yet" )] /// Marker codes (Table A.2). pub(crate) mod markers { /// Start of codestream - 'SOC'. pub(crate) const SOC: u8 = 0x4F; /// Start of tile-part - 'SOT'. pub(crate) const SOT: u8 = 0x90; /// Start of data - 'SOD'. pub(crate) const SOD: u8 = 0x93; /// End of codestream - 'EOC'. pub(crate) const EOC: u8 = 0xD9; /// Image and tile size - 'SIZ'. pub(crate) const SIZ: u8 = 0x51; /// Coding style default - 'COD'. pub(crate) const COD: u8 = 0x52; /// Coding component - 'COC'. pub(crate) const COC: u8 = 0x53; /// Region-of-interest - 'RGN'. pub(crate) const RGN: u8 = 0x5E; /// Quantization default - 'QCD'. pub(crate) const QCD: u8 = 0x5C; /// Quantization component - 'QCC'. pub(crate) const QCC: u8 = 0x5D; /// Progression order change - 'POC'. pub(crate) const POC: u8 = 0x5F; /// Tile-part lengths - 'TLM'. pub(crate) const TLM: u8 = 0x55; /// Packet length, main header - 'PLM'. pub(crate) const PLM: u8 = 0x57; /// Packet length, tile-part header - 'PLT'. pub(crate) const PLT: u8 = 0x58; /// Packed packet headers, main header - 'PPM'. pub(crate) const PPM: u8 = 0x60; /// Packed packet headers, tile-part header - 'PPT'. pub(crate) const PPT: u8 = 0x61; /// Start of packet - 'SOP'. pub(crate) const SOP: u8 = 0x91; /// End of packet header - 'EPH'. pub(crate) const EPH: u8 = 0x92; /// Component registration - 'CRG'. pub(crate) const CRG: u8 = 0x63; /// Comment - 'COM'. pub(crate) const COM: u8 = 0x64; pub(crate) fn to_string(marker: u8) -> &'static str { match marker { // Delimiting markers. 
SOC => "SOC",
SOT => "SOT",
SOD => "SOD",
EOC => "EOC",
// Fixed information.
SIZ => "SIZ",
// Functional markers.
COD => "COD",
COC => "COC",
RGN => "RGN",
QCD => "QCD",
QCC => "QCC",
POC => "POC",
// Pointer markers.
TLM => "TLM",
PLM => "PLM",
PLT => "PLT",
PPM => "PPM",
PPT => "PPT",
// In-bit-stream markers.
SOP => "SOP",
EPH => "EPH",
// Informational markers.
CRG => "CRG",
COM => "COM",
// Any marker code not covered above.
_ => "UNKNOWN",
} } }
hayro-jpeg2000-0.3.2/src/j2c/decode.rs000064400000000000000000000446771046102023000152500ustar 00000000000000//! Decoding JPEG2000 code streams.
//!
//! This is the "core" module of the crate that orchestrates all
//! stages in such a way that a given codestream is decoded into its
//! component channels.

// NOTE(review): generic type parameters appear to have been stripped from this
// text by the extraction process in several places below (e.g. `Result>`,
// `Box>`, `Vec>`); confirm the exact generics against the upstream repository
// before making code changes here.

use alloc::boxed::Box;
use alloc::vec;
use alloc::vec::Vec;

use super::bitplane::{BitPlaneDecodeBuffers, BitPlaneDecodeContext};
use super::build::{CodeBlock, Decomposition, Layer, Precinct, Segment, SubBand, SubBandType};
use super::codestream::{ComponentInfo, Header, ProgressionOrder, QuantizationStyle};
use super::idwt::IDWTOutput;
use super::progression::{
    IteratorInput, ProgressionData, component_position_resolution_layer_progression,
    layer_resolution_component_position_progression,
    position_component_resolution_layer_progression,
    resolution_layer_component_position_progression,
    resolution_position_component_layer_progression,
};
use super::tag_tree::TagNode;
use super::tile::{ComponentTile, ResolutionTile, Tile};
use super::{ComponentData, bitplane, build, idwt, mct, segment, tile};
use crate::error::{DecodingError, Result, TileError, bail};
use crate::j2c::segment::MAX_BITPLANE_COUNT;
use crate::math::SimdBuffer;
use crate::reader::BitReader;
use core::ops::{DerefMut, Range};

/// Decode a complete JPEG2000 codestream into per-component sample data.
///
/// Parses the tiles from `data`, decodes each tile using the progression
/// order declared by that tile, and finally applies the inverse
/// multi-component transform (plus the corresponding sign shift) if the
/// tiles request MCT.
pub(crate) fn decode(data: &[u8], header: &Header<'_>) -> Result> {
    let mut reader = BitReader::new(data);
    let tiles = tile::parse(&mut reader, header)?;

    // An image without any tiles cannot be decoded.
    if tiles.is_empty() {
        bail!(TileError::Invalid);
    }

    // Context and storage are allocated once and reused across all tiles so
    // that per-tile allocations can be amortized.
    let mut tile_ctx = TileDecodeContext::new(header, &tiles[0]);
    let mut storage = DecompositionStorage::default();

    for tile in tiles.iter() {
        ltrace!(
            "tile {} rect [{},{} {}x{}]",
            tile.idx,
            tile.rect.x0,
            tile.rect.y0,
            tile.rect.width(),
            tile.rect.height(),
        );

        let iter_input = IteratorInput::new(tile);

        // Select the packet iteration order declared by the tile. Some
        // orders can fail to construct for invalid inputs and map to
        // `InvalidProgressionIterator`.
        let progression_iterator: Box> = match tile.progression_order {
            ProgressionOrder::LayerResolutionComponentPosition => {
                Box::new(layer_resolution_component_position_progression(iter_input))
            }
            ProgressionOrder::ResolutionLayerComponentPosition => {
                Box::new(resolution_layer_component_position_progression(iter_input))
            }
            ProgressionOrder::ResolutionPositionComponentLayer => Box::new(
                resolution_position_component_layer_progression(iter_input)
                    .ok_or(DecodingError::InvalidProgressionIterator)?,
            ),
            ProgressionOrder::PositionComponentResolutionLayer => Box::new(
                position_component_resolution_layer_progression(iter_input)
                    .ok_or(DecodingError::InvalidProgressionIterator)?,
            ),
            ProgressionOrder::ComponentPositionResolutionLayer => Box::new(
                component_position_resolution_layer_progression(iter_input)
                    .ok_or(DecodingError::InvalidProgressionIterator)?,
            ),
        };

        decode_tile(
            tile,
            header,
            progression_iterator,
            &mut tile_ctx,
            &mut storage,
        )?;
    }

    // Note that this assumes that either all tiles have MCT or none of them.
    // In theory, only some could have it... But hopefully no such cursed
    // images exist!
    if tile_ctx.tile.mct {
        mct::apply_inverse(&mut tile_ctx, header)?;
        apply_sign_shift(&mut tile_ctx, &header.component_infos);
    }

    Ok(tile_ctx.channel_data)
}

/// Decode a single tile end-to-end: build the decomposition structures,
/// parse the packet segments, decode the code-block bitplanes, and then run
/// the inverse wavelet transform and sample storage per component.
fn decode_tile<'a>(
    tile: &'a Tile<'a>,
    header: &Header<'_>,
    progression_iterator: Box + '_>,
    tile_ctx: &mut TileDecodeContext<'a>,
    storage: &mut DecompositionStorage<'a>,
) -> Result<()> {
    tile_ctx.set_tile(tile);
    storage.reset();

    // This is the method that orchestrates all steps.

    // First, we build the decompositions, including their sub-bands, precincts
    // and code blocks.
    build::build(tile, tile_ctx, storage)?;

    // Next, we parse the layers/segments for each code block.
    segment::parse(tile, progression_iterator, tile_ctx, header, storage)?;

    // We then decode the bitplanes of each code block, yielding the
    // (possibly dequantized) coefficients of each code block.
    decode_component_tile_bit_planes(tile, tile_ctx, storage, header)?;

    // Unlike before, we interleave the apply_idwt and store stages
    // for each component tile so we can reuse allocations better.
    for (idx, component_info) in header.component_infos.iter().enumerate() {
        // Next, we apply the inverse discrete wavelet transform.
        idwt::apply(
            storage,
            tile_ctx,
            idx,
            header,
            component_info.wavelet_transform(),
        );

        // Finally, we store the raw samples for the tile area in the correct
        // location. Note that in case we have MCT, we are not applying it yet.
        // It will be applied in the very end once all tiles have been processed.
        // The reason we do this is that applying MCT requires access to the
        // data from _all_ components. If we didn't defer this until the end
        // we would have to collect the IDWT outputs of all components before
        // applying it. By not applying MCT here, we can get away with doing
        // IDWT and store on a per-component basis. Thus, we only need to
        // store one IDWT output at a time, allowing for better reuse of
        // allocations.
        store(tile, header, tile_ctx, component_info, idx);
    }

    Ok(())
}

/// All decompositions for a single tile.
#[derive(Clone)]
pub(crate) struct TileDecompositions {
    // Index of the LL sub-band (resolution 0) of this tile/component
    // inside `DecompositionStorage::sub_bands`.
    pub(crate) first_ll_sub_band: usize,
    // Index range of this tile/component's decompositions inside
    // `DecompositionStorage::decompositions`.
    pub(crate) decompositions: Range,
}

impl TileDecompositions {
    /// Return an iterator over the sub-band indices belonging to the given
    /// resolution level. Resolution 0 yields only the LL sub-band; higher
    /// resolutions yield the three detail sub-bands of the corresponding
    /// decomposition.
    pub(crate) fn sub_band_iter(
        &self,
        resolution: u8,
        decompositions: &[Decomposition],
    ) -> SubBandIter {
        let indices = if resolution == 0 {
            // Resolution 0 has only the LL band; the index is repeated so the
            // iterator state stays a fixed-size array. The iterator yields it
            // exactly once.
            [
                self.first_ll_sub_band,
                self.first_ll_sub_band,
                self.first_ll_sub_band,
            ]
        } else {
            // Resolution r (r > 0) maps to decomposition r - 1.
            decompositions[self.decompositions.clone()][resolution as usize - 1].sub_bands
        };

        SubBandIter {
            next_idx: 0,
            indices,
            resolution,
        }
    }
}

/// Iterator over the sub-band indices of a single resolution level:
/// one index for resolution 0 (LL), three otherwise (HL/LH/HH).
#[derive(Clone)]
pub(crate) struct SubBandIter {
    resolution: u8,
    next_idx: usize,
    indices: [usize; 3],
}

impl Iterator for SubBandIter {
    type Item = usize;

    fn next(&mut self) -> Option {
        let value = if self.resolution == 0 {
            // LL level: only the first stored index is yielded, once.
            if self.next_idx > 0 { None } else { Some(self.indices[0]) }
        } else if self.next_idx >= self.indices.len() {
            None
        } else {
            Some(self.indices[self.next_idx])
        };
        self.next_idx += 1;
        value
    }
}

/// A buffer so that we can reuse allocations for layers/code blocks/etc.
/// across different tiles.
#[derive(Default)]
pub(crate) struct DecompositionStorage<'a> {
    pub(crate) segments: Vec>,
    pub(crate) layers: Vec,
    pub(crate) code_blocks: Vec,
    pub(crate) precincts: Vec,
    pub(crate) tag_tree_nodes: Vec,
    pub(crate) coefficients: Vec,
    pub(crate) sub_bands: Vec,
    pub(crate) decompositions: Vec,
    pub(crate) tile_decompositions: Vec,
}

impl DecompositionStorage<'_> {
    /// Clear the per-tile state while keeping the underlying allocations,
    /// so they can be reused by the next tile.
    fn reset(&mut self) {
        self.segments.clear();
        self.layers.clear();
        self.code_blocks.clear();
        // No need to clear the coefficients, as they will be resized
        // and then overridden.
        // self.coefficients.clear();
        self.precincts.clear();
        self.sub_bands.clear();
        self.decompositions.clear();
        self.tile_decompositions.clear();
        self.tag_tree_nodes.clear();
    }
}

/// A reusable context used during the decoding of a single tile.
///
/// Some of the fields are temporary in nature and reset after moving on to the
/// next tile, some contain global state.
pub(crate) struct TileDecodeContext<'a> {
    /// The tile that we are currently decoding.
    pub(crate) tile: &'a Tile<'a>,
    /// A reusable buffer for the IDWT output.
    pub(crate) idwt_output: IDWTOutput,
    /// A scratch buffer used during IDWT.
    pub(crate) idwt_scratch_buffer: Vec,
    /// A reusable context for decoding code blocks.
    pub(crate) bit_plane_decode_context: BitPlaneDecodeContext,
    /// Reusable buffers for decoding bitplanes.
    pub(crate) bit_plane_decode_buffers: BitPlaneDecodeBuffers,
    /// The raw, decoded samples for each channel.
    pub(crate) channel_data: Vec,
}

impl<'a> TileDecodeContext<'a> {
    /// Create the context, allocating one zeroed full-image sample buffer
    /// per component up front.
    fn new(header: &Header<'_>, initial_tile: &'a Tile<'a>) -> Self {
        let mut channel_data = vec![];

        for info in &initial_tile.component_infos {
            channel_data.push(ComponentData {
                container: SimdBuffer::zeros(
                    header.size_data.image_width() as usize
                        * header.size_data.image_height() as usize,
                ),
                bit_depth: info.size_info.precision,
            });
        }

        Self {
            tile: initial_tile,
            idwt_scratch_buffer: vec![],
            idwt_output: IDWTOutput::dummy(),
            bit_plane_decode_context: BitPlaneDecodeContext::default(),
            bit_plane_decode_buffers: BitPlaneDecodeBuffers::default(),
            channel_data,
        }
    }

    /// Point the context at the next tile to decode.
    fn set_tile(&mut self, tile: &'a Tile<'a>) {
        // This is all that is needed when advancing to a new tile.
        // The other fields will be reset in due course as needed.
        self.tile = tile;
    }
}

/// Decode the bitplanes of every code block of every sub-band of the tile,
/// for all resolution levels that were not skipped.
fn decode_component_tile_bit_planes<'a>(
    tile: &'a Tile<'a>,
    tile_ctx: &mut TileDecodeContext<'a>,
    storage: &mut DecompositionStorage<'a>,
    header: &Header<'_>,
) -> Result<()> {
    for (tile_decompositions_idx, component_info) in tile.component_infos.iter().enumerate() {
        // Only decode the resolution levels we actually care about.
        for resolution in
            0..component_info.num_resolution_levels() - header.skipped_resolution_levels
        {
            let tile_composition = &storage.tile_decompositions[tile_decompositions_idx];
            let sub_band_iter =
                tile_composition.sub_band_iter(resolution, &storage.decompositions);

            for sub_band_idx in sub_band_iter {
                decode_sub_band_bitplanes(
                    sub_band_idx,
                    resolution,
                    component_info,
                    tile_ctx,
                    storage,
                    header,
                )?;
            }
        }
    }

    Ok(())
}

/// Decode all code blocks of one sub-band and write the dequantized
/// coefficients into the sub-band's slice of the shared coefficient buffer.
fn decode_sub_band_bitplanes(
    sub_band_idx: usize,
    resolution: u8,
    component_info: &ComponentInfo,
    tile_ctx: &mut TileDecodeContext<'_>,
    storage: &mut DecompositionStorage<'_>,
    header: &Header<'_>,
) -> Result<()> {
    let sub_band = &storage.sub_bands[sub_band_idx];

    // Dequantization step size (Annex E). With no quantization the step is 1,
    // i.e. coefficients are used as-is.
    let dequantization_step = {
        if component_info.quantization_info.quantization_style
            == QuantizationStyle::NoQuantization
        {
            1.0
        } else {
            let (exponent, mantissa) =
                component_info.exponent_mantissa(sub_band.sub_band_type, resolution)?;

            let r_b = {
                // log2 gain of the sub-band type (Table E.1).
                let log_gain = match sub_band.sub_band_type {
                    SubBandType::LowLow => 0,
                    SubBandType::LowHigh => 1,
                    SubBandType::HighLow => 1,
                    SubBandType::HighHigh => 2,
                };

                component_info.size_info.precision as u16 + log_gain
            };

            crate::math::pow2i(r_b as i32 - exponent as i32)
                * (1.0 + (mantissa as f32) / 2048.0)
        }
    };

    let num_bitplanes = {
        let (exponent, _) =
            component_info.exponent_mantissa(sub_band.sub_band_type, resolution)?;

        // Equation (E-2)
        let num_bitplanes = (component_info.quantization_info.guard_bits as u16)
            .checked_add(exponent)
            .and_then(|x| x.checked_sub(1))
            .ok_or(DecodingError::InvalidBitplaneCount)?;

        if num_bitplanes > MAX_BITPLANE_COUNT as u16 {
            bail!(DecodingError::TooManyBitplanes);
        }

        num_bitplanes as u8
    };

    for precinct in sub_band
        .precincts
        .clone()
        .map(|idx| &storage.precincts[idx])
    {
        for code_block in precinct
            .code_blocks
            .clone()
            .map(|idx| &storage.code_blocks[idx])
        {
            bitplane::decode(
                code_block,
                sub_band.sub_band_type,
                num_bitplanes,
                &component_info.coding_style.parameters.code_block_style,
                tile_ctx,
                storage,
                header.strict,
            )?;

            // Turn the signs and magnitudes into singular coefficients and
            // copy them into the sub-band.
            let x_offset = code_block.rect.x0 - sub_band.rect.x0;
            let y_offset = code_block.rect.y0 - sub_band.rect.y0;

            let base_store = &mut storage.coefficients[sub_band.coefficients.clone()];
            let mut base_idx = (y_offset * sub_band.rect.width()) as usize + x_offset as usize;

            for coefficients in tile_ctx.bit_plane_decode_context.coefficient_rows() {
                let out_row = &mut base_store[base_idx..];

                for (output, coefficient) in
                    out_row.iter_mut().zip(coefficients.iter().copied())
                {
                    *output = coefficient.get() as f32;
                    *output *= dequantization_step;
                }

                // Advance one sub-band row per code-block row.
                base_idx += sub_band.rect.width() as usize;
            }
        }
    }

    Ok(())
}

/// Add the DC offset (half the dynamic range) to every sample of every
/// channel, converting from signed to unsigned representation.
fn apply_sign_shift(tile_ctx: &mut TileDecodeContext<'_>, component_infos: &[ComponentInfo]) {
    for (channel_data, component_info) in
        tile_ctx.channel_data.iter_mut().zip(component_infos.iter())
    {
        for sample in channel_data.container.deref_mut() {
            *sample += (1_u32 << (component_info.size_info.precision - 1)) as f32;
        }
    }
}

/// Copy the IDWT output of a single component tile into the right location
/// of the component's full-image sample buffer, honoring component
/// sub-sampling and the reference-grid offsets.
fn store<'a>(
    tile: &'a Tile<'a>,
    header: &Header<'_>,
    tile_ctx: &mut TileDecodeContext<'a>,
    component_info: &ComponentInfo,
    component_idx: usize,
) {
    let channel_data = &mut tile_ctx.channel_data[component_idx];
    let idwt_output = &mut tile_ctx.idwt_output;

    let component_tile = ComponentTile::new(tile, component_info);
    let resolution_tile = ResolutionTile::new(
        component_tile,
        component_info.num_resolution_levels() - 1 - header.skipped_resolution_levels,
    );

    // If we have MCT, the sign shift needs to be applied after the
    // MCT transform. We take care of that in the main decode method.
    // Otherwise, we might as well just apply it now.
    if !tile.mct {
        for sample in idwt_output.coefficients.iter_mut() {
            *sample += (1_u32 << (component_info.size_info.precision - 1)) as f32;
        }
    }

    let (scale_x, scale_y) = (
        component_info.size_info.horizontal_resolution,
        component_info.size_info.vertical_resolution,
    );

    let (image_x_offset, image_y_offset) = (
        header.size_data.image_area_x_offset,
        header.size_data.image_area_y_offset,
    );

    if scale_x == 1 && scale_y == 1 {
        // If no sub-sampling, use a fast path where we copy rows of coefficients
        // at once.

        // The rect of the IDWT output corresponds to the rect of the highest
        // decomposition level of the tile, which is usually not 1:1 aligned
        // with the actual tile rectangle. We also need to account for the
        // offset of the reference grid.
        let skip_x = image_x_offset.saturating_sub(idwt_output.rect.x0);
        let skip_y = image_y_offset.saturating_sub(idwt_output.rect.y0);

        let input_row_iter = idwt_output
            .coefficients
            .chunks_exact(idwt_output.rect.width() as usize)
            .skip(skip_y as usize)
            .take(idwt_output.rect.height() as usize);

        let output_row_iter = channel_data
            .container
            .chunks_exact_mut(header.size_data.image_width() as usize)
            .skip(resolution_tile.rect.y0.saturating_sub(image_y_offset) as usize);

        for (input_row, output_row) in input_row_iter.zip(output_row_iter) {
            let input_row = &input_row[skip_x as usize..];
            let output_row = &mut output_row
                [resolution_tile.rect.x0.saturating_sub(image_x_offset) as usize..]
                [..input_row.len()];
            output_row.copy_from_slice(input_row);
        }
    } else {
        let image_width = header.size_data.image_width();
        let image_height = header.size_data.image_height();
        let x_shrink_factor = header.size_data.x_shrink_factor;
        let y_shrink_factor = header.size_data.y_shrink_factor;
        let x_offset = header
            .size_data
            .image_area_x_offset
            .div_ceil(x_shrink_factor);
        let y_offset = header
            .size_data
            .image_area_y_offset
            .div_ceil(y_shrink_factor);

        // Otherwise, copy sample by sample.
        for y in resolution_tile.rect.y0..resolution_tile.rect.y1 {
            let relative_y = (y - component_tile.rect.y0) as usize;
            let reference_grid_y = (scale_y as u32 * y) / y_shrink_factor;

            for x in resolution_tile.rect.x0..resolution_tile.rect.x1 {
                let relative_x = (x - component_tile.rect.x0) as usize;
                let reference_grid_x = (scale_x as u32 * x) / x_shrink_factor;

                let sample = idwt_output.coefficients
                    [relative_y * idwt_output.rect.width() as usize + relative_x];

                // A sub-sampled component sample covers a scale_x * scale_y
                // area of the reference grid; replicate it over that area,
                // clamped to the image bounds.
                for x_position in u32::max(reference_grid_x, x_offset)
                    ..u32::min(reference_grid_x + scale_x as u32, image_width + x_offset)
                {
                    for y_position in u32::max(reference_grid_y, y_offset)
                        ..u32::min(reference_grid_y + scale_y as u32, image_height + y_offset)
                    {
                        let pos = (y_position - y_offset) as usize * image_width as usize
                            + (x_position - x_offset) as usize;
                        channel_data.container[pos] = sample;
                    }
                }
            }
        }
    }
}
hayro-jpeg2000-0.3.2/src/j2c/idwt.rs000064400000000000000000000570531046102023000147670ustar 00000000000000//! Performing the inverse discrete wavelet transform, as specified in Annex F.

use alloc::vec;
use alloc::vec::Vec;

use super::build::{Decomposition, SubBand};
use super::codestream::WaveletTransform;
use super::decode::{DecompositionStorage, TileDecodeContext};
use super::rect::IntRect;
use crate::j2c::Header;
use crate::math::{self, Level, SIMD_WIDTH, Simd, dispatch, f32x8};

/// The output from performing the IDWT operation.
pub(crate) struct IDWTOutput {
    /// The buffer that will hold the final coefficients.
    pub(crate) coefficients: Vec,
    /// The rect that the coefficients belong to. This will be equivalent
    /// to the rectangle that forms the smallest decomposition level. It does
    /// not have to be equivalent to the original size of the tile, as the
    /// sub-bands that form a tile aren't necessarily aligned to it. Therefore,
    /// the samples need to be trimmed to the tile rectangle afterward.
    pub(crate) rect: IntRect,
}

impl IDWTOutput {
    /// Placeholder output used before the first tile is decoded.
    pub(crate) fn dummy() -> Self {
        Self {
            coefficients: vec![],
            rect: IntRect::from_ltrb(0, 0, u32::MAX, u32::MAX),
        }
    }
}

// Intermediate result of one 2D filtering pass; only the rect is carried,
// the coefficients live in the caller-provided buffers.
struct IDWTTempOutput {
    rect: IntRect,
}

/// Apply the inverse discrete wavelet transform (see Annex F). The output
/// will be transformed samples covering the rectangle of the smallest
/// decomposition level.
pub(crate) fn apply(
    storage: &DecompositionStorage<'_>,
    tile_ctx: &mut TileDecodeContext<'_>,
    component_idx: usize,
    header: &Header<'_>,
    transform: WaveletTransform,
) {
    let tile_decompositions = &storage.tile_decompositions[component_idx];
    let mut decompositions =
        &storage.decompositions[tile_decompositions.decompositions.clone()];
    // If we requested a lower resolution level, we can skip some decompositions.
    decompositions = &decompositions[..decompositions
        .len()
        .saturating_sub(header.skipped_resolution_levels as usize)];
    let ll_sub_band = &storage.sub_bands[tile_decompositions.first_ll_sub_band];

    // To explain a bit why we have this scratch buffer and another coefficient
    // buffer: During IDWT, we need to continuously interleave the 4 sub-bands
    // into a new buffer, which is then either returned or used as the input
    // for the next decomposition, etc. It would be very inefficient if we
    // kept allocating new buffers each time. Therefore, we try to reuse them,
    // not only for all decompositions of a single tile, but all decompositions
    // of _all_ tiles.
    // Due to the fact that the output from the previous iteration might be
    // used as the input of the next, we need two separate buffers, which
    // are continuously swapped.
    let (scratch_buf, output) = (&mut tile_ctx.idwt_scratch_buffer, &mut tile_ctx.idwt_output);

    let estimate_buffer_size = |decomposition: &Decomposition| {
        let total_width = decomposition.rect.width() as usize;
        let total_height = decomposition.rect.height() as usize;

        let min = total_width * total_height;
        // Different sub-bands can have shifts by one, so add padding
        // for the maximum case.
        let max = (total_width + 1) * (total_height + 1);

        (min, max)
    };

    if decompositions.is_empty() {
        // Single decomposition, just copy the coefficients from the sub-band.
        output.coefficients.clear();
        output
            .coefficients
            .extend_from_slice(&storage.coefficients[ll_sub_band.coefficients.clone()]);
        output.rect = ll_sub_band.rect;

        return;
    }

    // The coefficient array will always be the one that holds the coefficients
    // from the highest decomposition. Therefore, reserve as much.
    let (s_min, s_max) = estimate_buffer_size(decompositions.last().unwrap());

    if output.coefficients.len() < s_min {
        output
            .coefficients
            .reserve_exact(s_max - output.coefficients.len());
    }

    if decompositions.len() > 1 {
        // Due to the above, the intermediate buffer will never need more than
        // the second-highest decomposition.
        let (s_min, s_max) = estimate_buffer_size(&decompositions[decompositions.len() - 2]);

        if scratch_buf.len() < s_min {
            scratch_buf.reserve_exact(s_max - scratch_buf.len());
        }
    }

    // Determine which buffer we should use first, such that the `coefficients`
    // array will always hold the final values.
    let mut use_scratch = decompositions.len().is_multiple_of(2);

    let mut temp_output = filter_2d(
        IDWTInput::from_sub_band(ll_sub_band, storage),
        if use_scratch {
            scratch_buf
        } else {
            &mut output.coefficients
        },
        &decompositions[0],
        transform,
        storage,
    );

    // Ping-pong between the two buffers, one decomposition level at a time.
    for decomposition in decompositions.iter().skip(1) {
        use_scratch = !use_scratch;

        temp_output = if use_scratch {
            filter_2d(
                IDWTInput::from_output(&output.coefficients),
                scratch_buf,
                decomposition,
                transform,
                storage,
            )
        } else {
            filter_2d(
                IDWTInput::from_output(scratch_buf),
                &mut output.coefficients,
                decomposition,
                transform,
                storage,
            )
        };
    }

    output.rect = temp_output.rect;
}

// Borrowed view of the LL input of a 2D filtering pass: either the LL
// sub-band coefficients (first pass) or the previous pass's output.
struct IDWTInput<'a> {
    coefficients: &'a [f32],
}

impl<'a> IDWTInput<'a> {
    fn from_sub_band(sub_band: &'a SubBand, storage: &'a DecompositionStorage<'_>) -> Self {
        IDWTInput {
            coefficients: &storage.coefficients[sub_band.coefficients.clone()],
        }
    }

    fn from_output(coefficients: &'a [f32]) -> Self {
        IDWTInput { coefficients }
    }
}

/// The `2D_SR` procedure illustrated in Figure F.6.
fn filter_2d(
    // The LL sub band of the given decomposition level.
    input: IDWTInput<'_>,
    coefficients: &mut Vec,
    decomposition: &Decomposition,
    transform: WaveletTransform,
    storage: &DecompositionStorage<'_>,
) -> IDWTTempOutput {
    // First interleave all sub-bands into a single buffer.
    interleave_samples(input, decomposition, coefficients, storage);

    if decomposition.rect.width() > 0 && decomposition.rect.height() > 0 {
        filter_horizontal(coefficients, decomposition.rect, transform);
        filter_vertical(coefficients, decomposition.rect, transform);
    }

    IDWTTempOutput {
        rect: decomposition.rect,
    }
}

/// The `2D_INTERLEAVE` procedure described in F.3.3.
fn interleave_samples(
    input: IDWTInput<'_>,
    decomposition: &Decomposition,
    coefficients: &mut Vec,
    storage: &DecompositionStorage<'_>,
) {
    // Dispatch to the best available SIMD level at runtime.
    let level = Level::new();
    dispatch!(level, simd => {
        interleave_samples_inner::<_>(simd, input, decomposition, coefficients, storage);
    });
}

#[inline(always)]
fn interleave_samples_inner(
    simd: S,
    input: IDWTInput<'_>,
    decomposition: &Decomposition,
    coefficients: &mut Vec,
    storage: &DecompositionStorage<'_>,
) {
    let width = decomposition.rect.width() as usize;
    let height = decomposition.rect.height() as usize;

    // Just a sanity check. We should have allocated enough upfront before
    // starting the IDWT.
    assert!(coefficients.capacity() >= width * height);

    // The cleaner way would be to first clear and then resize, so that we
    // have a clean buffer with just zeroes. However, this is actually not
    // necessary, because when interleaving and generating the border values
    // we will replace all the data anyway, so we can save the cost of
    // the clear operation.
    coefficients.resize(width * height, 0.0);

    let IntRect {
        x0: u0,
        x1: u1,
        y0: v0,
        y1: v1,
    } = decomposition.rect;

    // The four sub-bands that get interleaved (Figure F.8): the LL input and
    // the HL/LH/HH detail bands of this decomposition.
    let ll = input.coefficients;
    let hl = &storage.coefficients[storage.sub_bands[decomposition.sub_bands[0]]
        .coefficients
        .clone()];
    let lh = &storage.coefficients[storage.sub_bands[decomposition.sub_bands[1]]
        .coefficients
        .clone()];
    let hh = &storage.coefficients[storage.sub_bands[decomposition.sub_bands[2]]
        .coefficients
        .clone()];

    // See Figure F.8.
    let num_u_low = (u1.div_ceil(2) - u0.div_ceil(2)) as usize;
    let num_u_high = (u1 / 2 - u0 / 2) as usize;
    let num_v_low = (v1.div_ceil(2) - v0.div_ceil(2)) as usize;
    let num_v_high = (v1 / 2 - v0 / 2) as usize;

    // Depending on whether the start row is even or odd, either LL/HL comes first
    // or HL/HH.
    let (first_w, second_w) = if u0 % 2 == 0 {
        (num_u_low, num_u_high)
    } else {
        (num_u_high, num_u_low)
    };

    let even_row_start = if v0 % 2 == 0 { 0 } else { 1 };
    let odd_row_start = if v0 % 2 == 0 { 1 } else { 0 };

    // Determine whether LL or HL is the band in the first column.
    let (first_even, second_even) = if u0 % 2 == 0 { (ll, hl) } else { (hl, ll) };

    interleave_rows(
        simd,
        first_even,
        second_even,
        first_w,
        second_w,
        coefficients,
        width,
        height,
        even_row_start,
        num_v_low,
    );

    // Determine whether LH or HH is the band in the first column.
    let (first_odd, second_odd) = if u0 % 2 == 0 { (lh, hh) } else { (hh, lh) };

    interleave_rows(
        simd,
        first_odd,
        second_odd,
        first_w,
        second_w,
        coefficients,
        width,
        height,
        odd_row_start,
        num_v_high,
    );
}

// Interleave two bands column-wise into every second output row, starting at
// `start_row`. Rows past `height` are skipped.
#[inline(always)]
fn interleave_rows(
    simd: S,
    first_band: &[f32],
    second_band: &[f32],
    first_w: usize,
    second_w: usize,
    output: &mut [f32],
    width: usize,
    height: usize,
    start_row: usize,
    num_rows: usize,
) {
    for v in 0..num_rows {
        let out_row = start_row + v * 2;

        if out_row >= height {
            break;
        }

        let first_row = &first_band[v * first_w..][..first_w];
        let second_row = &second_band[v * second_w..][..second_w];
        let out_slice = &mut output[out_row * width..][..width];

        interleave_row(simd, first_row, second_row, out_slice);
    }
}

// Interleave two rows element-wise (first[0], second[0], first[1], ...) into
// `output`, using SIMD zips for the bulk and scalar code for the remainder.
#[inline(always)]
fn interleave_row(simd: S, first: &[f32], second: &[f32], output: &mut [f32]) {
    let num_pairs = first.len().min(second.len());
    let simd_chunks = num_pairs / SIMD_WIDTH;

    // Process as much as possible using SIMD.
    for i in 0..simd_chunks {
        let base = i * SIMD_WIDTH;
        let f = f32x8::from_slice(simd, &first[base..base + SIMD_WIDTH]);
        let s = f32x8::from_slice(simd, &second[base..base + SIMD_WIDTH]);

        f.zip_low(s).store(&mut output[base * 2..]);
        f.zip_high(s).store(&mut output[base * 2 + SIMD_WIDTH..]);
    }

    // Scalar remainder.
    for i in (simd_chunks * SIMD_WIDTH)..num_pairs {
        output[i * 2] = first[i];
        output[i * 2 + 1] = second[i];
    }

    // Handle extra element if first is longer.
    if first.len() > num_pairs {
        output[num_pairs * 2] = first[num_pairs];
    }
}

/// The `HOR_SR` procedure from F.3.4.
fn filter_horizontal(coefficients: &mut [f32], rect: IntRect, transform: WaveletTransform) {
    let width = rect.width() as usize;

    for scanline in coefficients
        .chunks_exact_mut(width)
        .take(rect.height() as usize)
    {
        filter_row(scanline, width, rect.x0 as usize, transform);
    }
}

/// The `1D_SR` procedure from F.3.6.
fn filter_row(scanline: &mut [f32], width: usize, x0: usize, transform: WaveletTransform) {
    if width == 1 {
        // Degenerate single-sample case (F.3.6): odd-positioned samples are
        // halved, even-positioned samples pass through unchanged.
        if !x0.is_multiple_of(2) {
            scanline[0] *= 0.5;
        }

        return;
    }

    match transform {
        WaveletTransform::Reversible53 => reversible_filter_53r(scanline, width, x0),
        WaveletTransform::Irreversible97 => irreversible_filter_97i(scanline, width, x0),
    }
}

/// The 1D FILTER 5-3R procedure from F.3.8.1.
fn reversible_filter_53r(scanline: &mut [f32], width: usize, x0: usize) {
    // Parity of the first sample decides which positions are the even
    // (low-pass) and odd (high-pass) lifting targets.
    let first_even = x0 % 2;
    let first_odd = 1 - first_even;

    // Equation (F-5).
    // Originally: for i in (start / 2)..(end / 2 + 1).
    filter_step_horizontal(
        scanline,
        width,
        first_even,
        #[inline(always)]
        |s, left, right| s - math::floor_f32(math::mul_add(left + right, 0.25, 0.5)),
    );

    // Equation (F-6).
    // Originally: for i in (start / 2)..(end / 2).
    filter_step_horizontal(
        scanline,
        width,
        first_odd,
        #[inline(always)]
        |s, left, right| s + math::floor_f32((left + right) * 0.5),
    );
}

/// The 1D Filter 9-7I procedure from F.3.8.2.
fn irreversible_filter_97i(scanline: &mut [f32], width: usize, x0: usize) {
    // Table F.4.
    const NEG_ALPHA: f32 = 1.586_134_3;
    const NEG_BETA: f32 = 0.052_980_117;
    const NEG_GAMMA: f32 = -0.882_911_1;
    const NEG_DELTA: f32 = -0.443_506_87;
    const KAPPA: f32 = 1.230_174_1;
    const INV_KAPPA: f32 = 1.0 / KAPPA;

    let first_even = x0 % 2;
    let first_odd = 1 - first_even;

    // Scaling constants alternate with sample parity.
    let (k0, k1) = if first_even == 0 {
        (KAPPA, INV_KAPPA)
    } else {
        (INV_KAPPA, KAPPA)
    };

    // Step 1 and 2.
    // Originally: for i in (start / 2 - 1)..(end / 2 + 2).
    // Originally: for i in (start / 2 - 2)..(end / 2 + 2).
    for i in (0..width.saturating_sub(1)).step_by(2) {
        scanline[i] *= k0;
        scanline[i + 1] *= k1;
    }

    if width % 2 == 1 {
        scanline[width - 1] *= k0;
    }

    // Step 3.
    // Originally: for i in (start / 2 - 1)..(end / 2 + 2).
    filter_step_horizontal(
        scanline,
        width,
        first_even,
        #[inline(always)]
        |s, left, right| math::mul_add(left + right, NEG_DELTA, s),
    );

    // Step 4.
    // Originally: for i in (start / 2 - 1)..((x0 + width) / 2 + 1).
    filter_step_horizontal(
        scanline,
        width,
        first_odd,
        #[inline(always)]
        |s, left, right| math::mul_add(left + right, NEG_GAMMA, s),
    );

    // Step 5.
    // Originally: for i in (start / 2)..(end / 2 + 1).
    filter_step_horizontal(
        scanline,
        width,
        first_even,
        #[inline(always)]
        |s, left, right| math::mul_add(left + right, NEG_BETA, s),
    );

    // Step 6.
    // Originally: for i in (start / 2)..(end / 2).
    filter_step_horizontal(
        scanline,
        width,
        first_odd,
        #[inline(always)]
        |s, left, right| math::mul_add(left + right, NEG_ALPHA, s),
    );
}

// One horizontal lifting step: applies `f(sample, left, right)` to every
// second sample starting at parity `first`, with periodic symmetric
// extension at both borders.
#[inline(always)]
fn filter_step_horizontal(
    scanline: &mut [f32],
    width: usize,
    first: usize,
    f: impl Fn(f32, f32, f32) -> f32,
) {
    // Left border sample needs mirrored neighbors.
    if first == 0 {
        let left = periodic_symmetric_extension_left(0, 1);
        let right = periodic_symmetric_extension_right(0, 1, width);
        scanline[0] = f(scanline[0], scanline[left], scanline[right]);
    }

    let middle_start = if first == 0 { 2 } else { 1 };

    // Interior samples have both neighbors in range.
    for i in (middle_start..width - 1).step_by(2) {
        scanline[i] = f(scanline[i], scanline[i - 1], scanline[i + 1]);
    }

    // Right border sample, again with mirrored neighbors.
    if width > 1 && (width - 1) % 2 == first {
        let i = width - 1;
        let left = periodic_symmetric_extension_left(i, 1);
        let right = periodic_symmetric_extension_right(i, 1, width);
        scanline[i] = f(scanline[i], scanline[left], scanline[right]);
    }
}

// One vertical lifting step across all columns: applies the SIMD closure to
// full-width chunks and the scalar closure to the remainder, with periodic
// symmetric extension at the top/bottom borders.
#[inline(always)]
fn filter_step_vertical(
    simd: S,
    scanline: &mut [f32],
    height: usize,
    width: usize,
    simd_width: usize,
    first: usize,
    f_simd: impl Fn(f32x8, f32x8, f32x8) -> f32x8,
    f_scalar: impl Fn(f32, f32, f32) -> f32,
) {
    for row in (first..height).step_by(2) {
        let row_above = periodic_symmetric_extension_left(row, 1);
        let row_below = periodic_symmetric_extension_right(row, 1, height);

        // Process SIMD chunks.
        for base_column in (0..simd_width).step_by(SIMD_WIDTH) {
            let s1 =
                f32x8::from_slice(simd, &scanline[row * width + base_column..][..SIMD_WIDTH]);
            let s2 = f32x8::from_slice(
                simd,
                &scanline[row_above * width + base_column..][..SIMD_WIDTH],
            );
            let s3 = f32x8::from_slice(
                simd,
                &scanline[row_below * width + base_column..][..SIMD_WIDTH],
            );

            let result = f_simd(s1, s2, s3);
            result.store(&mut scanline[row * width + base_column..][..SIMD_WIDTH]);
        }

        // Process scalar remainder.
        for col in simd_width..width {
            let s1 = scanline[row * width + col];
            let s2 = scanline[row_above * width + col];
            let s3 = scanline[row_below * width + col];
            scanline[row * width + col] = f_scalar(s1, s2, s3);
        }
    }
}

/// Part of the `1D_EXTR` procedure, defined in F.3.7.
///
/// Applies the period symmetric extension on the left side.
#[inline(always)]
fn periodic_symmetric_extension_left(idx: usize, offset: usize) -> usize {
    // Mirrors around index 0: |idx - offset|.
    offset.abs_diff(idx)
}

/// Part of the `1D_EXTR` procedure, defined in F.3.7.
///
/// Applies the period symmetric extension on the right side.
#[inline(always)]
fn periodic_symmetric_extension_right(idx: usize, offset: usize, length: usize) -> usize {
    let new_idx = idx + offset;

    if new_idx >= length {
        // Mirrors around the last index: length - 2 - (new_idx - length).
        let overshoot = new_idx - length;
        length - 2 - overshoot
    } else {
        new_idx
    }
}

/// The `VER_SR` procedure from F.3.5.
fn filter_vertical(coefficients: &mut [f32], rect: IntRect, transform: WaveletTransform) { dispatch!(Level::new(), simd => filter_vertical_impl(simd, coefficients, rect, transform)); } #[inline(always)] fn filter_vertical_impl( simd: S, scanline: &mut [f32], rect: IntRect, transform: WaveletTransform, ) { let width = rect.width() as usize; let height = rect.height() as usize; let y0 = rect.y0 as usize; if height == 1 { if !y0.is_multiple_of(2) { let simd_width = width / SIMD_WIDTH * SIMD_WIDTH; for base_column in (0..simd_width).step_by(SIMD_WIDTH) { let mut loaded = f32x8::from_slice(simd, &scanline[base_column..][..SIMD_WIDTH]); loaded *= 0.5; loaded.store(&mut scanline[base_column..][..SIMD_WIDTH]); } // Scalar remainder. #[allow(clippy::needless_range_loop)] for col in simd_width..width { scanline[col] *= 0.5; } } return; } match transform { WaveletTransform::Reversible53 => { reversible_filter_53r_simd(simd, scanline, height, width, y0); } WaveletTransform::Irreversible97 => { irreversible_filter_97i_simd(simd, scanline, height, width, y0); } } } /// The 1D FILTER 5-3R procedure from F.3.8.1. #[inline(always)] fn reversible_filter_53r_simd( simd: S, scanline: &mut [f32], height: usize, width: usize, y0: usize, ) { let first_even = y0 % 2; let first_odd = 1 - first_even; let simd_width = width / SIMD_WIDTH * SIMD_WIDTH; // Equation (F-5). // Originally: for i in (start / 2)..(end / 2 + 1). filter_step_vertical( simd, scanline, height, width, simd_width, first_even, #[inline(always)] |s1, s2, s3| s1 - ((s2 + s3 + 2.0) * 0.25).floor(), #[inline(always)] |s1, s2, s3| s1 - math::floor_f32(math::mul_add(s2 + s3, 0.25, 0.5)), ); // Equation (F-6). // Originally: for i in (start / 2)..(end / 2). filter_step_vertical( simd, scanline, height, width, simd_width, first_odd, #[inline(always)] |s1, s2, s3| s1 + ((s2 + s3) * 0.5).floor(), #[inline(always)] |s1, s2, s3| s1 + math::floor_f32((s2 + s3) * 0.5), ); } /// The 1D Filter 9-7I procedure from F.3.8.2. 
#[inline(always)] fn irreversible_filter_97i_simd( simd: S, scanline: &mut [f32], height: usize, width: usize, y0: usize, ) { const NEG_ALPHA: f32 = 1.586_134_3; const NEG_BETA: f32 = 0.052_980_117; const NEG_GAMMA: f32 = -0.882_911_1; const NEG_DELTA: f32 = -0.443_506_87; const KAPPA: f32 = 1.230_174_1; const INV_KAPPA: f32 = 1.0 / KAPPA; let neg_alpha = f32x8::splat(simd, NEG_ALPHA); let neg_beta = f32x8::splat(simd, NEG_BETA); let neg_gamma = f32x8::splat(simd, NEG_GAMMA); let neg_delta = f32x8::splat(simd, NEG_DELTA); let kappa = f32x8::splat(simd, KAPPA); let inv_kappa = f32x8::splat(simd, INV_KAPPA); // Determine which local row indices correspond to even/odd global positions. let first_even = y0 % 2; let first_odd = 1 - first_even; let simd_width = width / SIMD_WIDTH * SIMD_WIDTH; let (k0, k1, k0_simd, k1_simd) = if first_even == 0 { (KAPPA, INV_KAPPA, kappa, inv_kappa) } else { (INV_KAPPA, KAPPA, inv_kappa, kappa) }; // Step 1 and 2. // Originally: for i in (start / 2 - 1)..(end / 2 + 2). // Originally: for i in (start / 2 - 2)..(end / 2 + 2). 
for row in (0..height.saturating_sub(1)).step_by(2) { for base_column in (0..simd_width).step_by(SIMD_WIDTH) { let mut vals0 = f32x8::from_slice(simd, &scanline[row * width + base_column..][..SIMD_WIDTH]); let mut vals1 = f32x8::from_slice( simd, &scanline[(row + 1) * width + base_column..][..SIMD_WIDTH], ); vals0 = vals0 * k0_simd; vals1 = vals1 * k1_simd; vals0.store(&mut scanline[row * width + base_column..][..SIMD_WIDTH]); vals1.store(&mut scanline[(row + 1) * width + base_column..][..SIMD_WIDTH]); } for col in simd_width..width { scanline[row * width + col] *= k0; scanline[(row + 1) * width + col] *= k1; } } if height % 2 == 1 { let row = height - 1; for base_column in (0..simd_width).step_by(SIMD_WIDTH) { let mut vals = f32x8::from_slice(simd, &scanline[row * width + base_column..][..SIMD_WIDTH]); vals = vals * k0_simd; vals.store(&mut scanline[row * width + base_column..][..SIMD_WIDTH]); } for col in simd_width..width { scanline[row * width + col] *= k0; } } // Step 3. // Originally: for i in (start / 2 - 1)..(end / 2 + 2). filter_step_vertical( simd, scanline, height, width, simd_width, first_even, #[inline(always)] |s1, s2, s3| (s2 + s3).mul_add(neg_delta, s1), #[inline(always)] |s1, s2, s3| math::mul_add(s2 + s3, NEG_DELTA, s1), ); // Step 4. // Originally: for i in (start / 2 - 1)..(end / 2 + 1). filter_step_vertical( simd, scanline, height, width, simd_width, first_odd, #[inline(always)] |s1, s2, s3| (s2 + s3).mul_add(neg_gamma, s1), #[inline(always)] |s1, s2, s3| math::mul_add(s2 + s3, NEG_GAMMA, s1), ); // Step 5. // Originally: for i in (start / 2)..(end / 2 + 1). filter_step_vertical( simd, scanline, height, width, simd_width, first_even, #[inline(always)] |s1, s2, s3| (s2 + s3).mul_add(neg_beta, s1), #[inline(always)] |s1, s2, s3| math::mul_add(s2 + s3, NEG_BETA, s1), ); // Step 6. // Originally: for i in (start / 2)..(end / 2). 
filter_step_vertical( simd, scanline, height, width, simd_width, first_odd, #[inline(always)] |s1, s2, s3| (s2 + s3).mul_add(neg_alpha, s1), #[inline(always)] |s1, s2, s3| math::mul_add(s2 + s3, NEG_ALPHA, s1), ); } hayro-jpeg2000-0.3.2/src/j2c/mct.rs000064400000000000000000000062711046102023000146010ustar 00000000000000//! The irreversible multi-component transformation, as specified in //! Annex G.2 and G.3. use super::codestream::{Header, WaveletTransform}; use super::decode::TileDecodeContext; use crate::error::{ColorError, Result, bail, err}; use crate::math::{Level, Simd, dispatch, f32x8}; /// Apply the inverse multi-component transform, as specified in G.2 and G.3. pub(crate) fn apply_inverse( tile_ctx: &mut TileDecodeContext<'_>, header: &Header<'_>, ) -> Result<()> { if tile_ctx.channel_data.len() < 3 { return if header.strict { err!(ColorError::Mct) } else { Ok(()) }; } let (s, _) = tile_ctx.channel_data.split_at_mut(3); let [s0, s1, s2] = s else { unreachable!() }; let transform = tile_ctx.tile.component_infos[0].wavelet_transform(); if transform != tile_ctx.tile.component_infos[1].wavelet_transform() || tile_ctx.tile.component_infos[1].wavelet_transform() != tile_ctx.tile.component_infos[2].wavelet_transform() { bail!(ColorError::Mct); } if s0.container.len() != s1.container.len() || s1.container.len() != s2.container.len() { bail!(ColorError::Mct); } apply_inner( transform, &mut s0.container, &mut s1.container, &mut s2.container, ); Ok(()) } fn apply_inner(transform: WaveletTransform, s0: &mut [f32], s1: &mut [f32], s2: &mut [f32]) { dispatch!(Level::new(), simd => apply_inner_impl(simd, transform, s0, s1, s2)); } #[inline(always)] fn apply_inner_impl( simd: S, transform: WaveletTransform, s0: &mut [f32], s1: &mut [f32], s2: &mut [f32], ) { match transform { // Irreversible MCT, specified in G.3. 
WaveletTransform::Irreversible97 => { for ((y0, y1), y2) in s0 .chunks_exact_mut(8) .zip(s1.chunks_exact_mut(8)) .zip(s2.chunks_exact_mut(8)) { let y_0 = f32x8::from_slice(simd, y0); let y_1 = f32x8::from_slice(simd, y1); let y_2 = f32x8::from_slice(simd, y2); let i0 = y_2.mul_add(f32x8::splat(simd, 1.402), y_0); let i1 = y_2.mul_add( f32x8::splat(simd, -0.71414), y_1.mul_add(f32x8::splat(simd, -0.34413), y_0), ); let i2 = y_1.mul_add(f32x8::splat(simd, 1.772), y_0); i0.store(y0); i1.store(y1); i2.store(y2); } } // Reversible MCT, specified in G.2. WaveletTransform::Reversible53 => { for ((y0, y1), y2) in s0 .chunks_exact_mut(8) .zip(s1.chunks_exact_mut(8)) .zip(s2.chunks_exact_mut(8)) { let y_0 = f32x8::from_slice(simd, y0); let y_1 = f32x8::from_slice(simd, y1); let y_2 = f32x8::from_slice(simd, y2); let i1 = y_0 - ((y_2 + y_1) * 0.25).floor(); let i0 = y_2 + i1; let i2 = y_1 + i1; i0.store(y0); i1.store(y1); i2.store(y2); } } } } hayro-jpeg2000-0.3.2/src/j2c/mod.rs000064400000000000000000000045461046102023000146000ustar 00000000000000mod arithmetic_decoder; mod bitplane; mod build; mod codestream; mod decode; mod idwt; mod mct; mod progression; mod rect; mod segment; mod tag_tree; mod tile; use alloc::vec::Vec; use super::jp2::ImageBoxes; use super::jp2::colr::{ColorSpace, ColorSpecificationBox, EnumeratedColorspace}; use crate::error::{FormatError, MarkerError, Result, bail}; use crate::j2c::codestream::markers; use crate::reader::BitReader; use crate::{DecodeSettings, Image, resolve_alpha_and_color_space}; use crate::math::{SIMD_WIDTH, SimdBuffer}; pub(crate) use codestream::Header; pub(crate) use decode::decode; pub(crate) struct ParsedCodestream<'a> { pub(crate) header: Header<'a>, pub(crate) data: &'a [u8], } pub(crate) struct DecodedCodestream { /// The decoded components. 
pub(crate) components: Vec, } #[derive(Debug, Clone)] pub(crate) struct ComponentData { pub(crate) container: SimdBuffer<{ SIMD_WIDTH }>, pub(crate) bit_depth: u8, } pub(crate) fn parse<'a>(stream: &'a [u8], settings: &DecodeSettings) -> Result> { let parsed_codestream = parse_raw(stream, settings)?; let header = &parsed_codestream.header; let mut boxes = ImageBoxes::default(); // If we are just decoding a raw codestream, we assume greyscale or // RGB. let cs = if header.component_infos.len() < 3 { ColorSpace::Enumerated(EnumeratedColorspace::Greyscale) } else { ColorSpace::Enumerated(EnumeratedColorspace::Srgb) }; boxes.color_specification = Some(ColorSpecificationBox { color_space: cs }); let (color_space, has_alpha) = resolve_alpha_and_color_space(&boxes, &parsed_codestream.header, settings)?; Ok(Image { codestream: parsed_codestream.data, header: parsed_codestream.header, boxes, settings: *settings, color_space, has_alpha, }) } pub(crate) fn parse_raw<'a>( stream: &'a [u8], settings: &DecodeSettings, ) -> Result> { let mut reader = BitReader::new(stream); let marker = reader.read_marker()?; if marker != markers::SOC { bail!(MarkerError::Expected("SOC")); } let header = codestream::read_header(&mut reader, settings)?; let code_stream_data = reader.tail().ok_or(FormatError::MissingCodestream)?; Ok(ParsedCodestream { header, data: code_stream_data, }) } hayro-jpeg2000-0.3.2/src/j2c/progression.rs000064400000000000000000000246241046102023000163720ustar 00000000000000//! Progression iterators, defined in Section B.12. //! //! A progression iterator essentially yields tuples of //! (`layer_num`, resolution, component, precinct) in a specific order that //! determines in which order the data appears in the codestream. 
use alloc::vec; use alloc::vec::Vec; use super::tile::{ComponentTile, ResolutionTile, Tile}; use core::cmp::Ordering; use core::iter; #[derive(Default, Copy, Clone, Debug, PartialEq, Hash, Eq)] pub(crate) struct ProgressionData { pub(crate) layer_num: u8, pub(crate) resolution: u8, pub(crate) component: u8, pub(crate) precinct: u64, } pub(crate) struct IteratorInput<'a> { layers: (u8, u8), tile: &'a Tile<'a>, resolutions: (u8, u8), components: (u8, u8), } impl<'a> IteratorInput<'a> { pub(crate) fn new(tile: &'a Tile<'a>) -> Self { Self::new_with_custom_bounds( tile, // Will be clamped automatically. (0, u8::MAX), (0, u8::MAX), (0, u8::MAX), ) } pub(crate) fn new_with_custom_bounds( tile: &'a Tile<'a>, mut resolutions: (u8, u8), mut layers: (u8, u8), mut components: (u8, u8), ) -> Self { let max_resolution = tile .component_infos .iter() .map(|c| c.coding_style.parameters.num_resolution_levels) .max() .unwrap_or(0); let max_layer = tile.num_layers; let max_component = tile.component_infos.len() as u8; // Make sure we don't exceed what's actually possible resolutions.1 = resolutions.1.min(max_resolution); layers.1 = layers.1.min(max_layer); components.1 = components.1.min(max_component); assert!(resolutions.1 > resolutions.0); assert!(layers.1 > layers.0); assert!(components.1 > components.0); Self { layers, tile, resolutions, components, } } fn min_layer(&self) -> u8 { self.layers.0 } fn max_layer(&self) -> u8 { self.layers.1 } fn min_resolution(&self) -> u8 { self.resolutions.0 } fn total_max_resolution(&self) -> u8 { self.resolutions.1 } fn max_resolution(&self, component_idx: u8) -> u8 { self.total_max_resolution() // It's possible that the different component tiles have different resolution levels // (self.resolutions.1 stores the maximum across all component tiles), so // take the minimum of both. 
.min(self.tile.component_infos[component_idx as usize].num_resolution_levels()) } fn min_comp(&self) -> u8 { self.components.0 } fn max_comp(&self) -> u8 { self.components.1 } fn component_tiles(&self) -> Vec> { self.tile .component_infos .iter() .map(|c| ComponentTile::new(self.tile, c)) .collect::>() } } /// B.12.1.1 Layer-resolution level-component-position progression. pub(crate) fn layer_resolution_component_position_progression<'a>( input: IteratorInput<'a>, ) -> impl Iterator + 'a { let component_tiles = input.component_tiles(); let mut layer = input.min_layer(); let mut resolution = input.min_resolution(); let mut component_idx = input.min_comp(); let mut resolution_tile = ResolutionTile::new(component_tiles[0], resolution); let mut precinct = 0; iter::from_fn(move || { if layer == input.max_layer() || resolution == input.total_max_resolution() { return None; } if precinct == resolution_tile.num_precincts() { loop { precinct = 0; component_idx += 1; if component_idx == input.max_comp() { component_idx = input.min_comp(); resolution += 1; if resolution == input.max_resolution(component_idx) { resolution = input.min_resolution(); layer += 1; if layer == input.max_layer() { return None; } } } resolution_tile = ResolutionTile::new(component_tiles[component_idx as usize], resolution); // Only yield if the resolution tile has precincts, otherwise // we need to keep advancing. if resolution_tile.num_precincts() != 0 { break; } } } let data = ProgressionData { layer_num: layer, resolution, component: component_idx, precinct, }; precinct += 1; Some(data) }) } /// B.12.1.2 Resolution level-layer-component-position progression. 
pub(crate) fn resolution_layer_component_position_progression<'a>( input: IteratorInput<'a>, ) -> impl Iterator + 'a { let component_tiles = input.component_tiles(); let mut layer = 0; let mut resolution = 0; let mut component_idx = 0; let mut resolution_tile = ResolutionTile::new(component_tiles[component_idx as usize], resolution); let mut precinct = 0; iter::from_fn(move || { if layer == input.max_layer() || resolution == input.total_max_resolution() { return None; } if precinct == resolution_tile.num_precincts() { loop { precinct = 0; component_idx += 1; if component_idx == input.max_comp() { component_idx = 0; layer += 1; if layer == input.max_layer() { layer = 0; resolution += 1; if resolution == input.total_max_resolution() { return None; } } } // If the given resolution level doesn't exist for the current // component, continue. if resolution >= input.max_resolution(component_idx) { continue; } resolution_tile = ResolutionTile::new(component_tiles[component_idx as usize], resolution); // Only yield if the resolution tile has precincts, otherwise // we need to keep advancing. if resolution_tile.num_precincts() != 0 { break; } } } let data = ProgressionData { layer_num: layer, resolution, component: component_idx, precinct, }; precinct += 1; Some(data) }) } // The formula for the remaining three progressions looks very intimidating. // But really, all they boil down to is that we need to determine all precinct // indices for each component/resolution combination and sort them by ascending // y/x coordinate on the reference grid. Other than that, they can be treated // exactly the same, except that the sort order precedence of the fields change. // Note that the order of fields here is important! 
struct PrecinctStore { resolution: u8, precinct_y: u32, precinct_x: u32, component_idx: u8, precinct_idx: u64, } fn position_progression_common<'a>( input: IteratorInput<'a>, sort: impl FnMut(&PrecinctStore, &PrecinctStore) -> Ordering, ) -> Option + 'a> { let mut elements = vec![]; for (component_idx, component) in input .tile .component_tiles() .enumerate() .skip(input.min_comp() as usize) .take(input.max_comp() as usize - input.min_comp() as usize) { for (resolution, resolution_tile) in component .resolution_tiles() .enumerate() .skip(input.min_resolution() as usize) .take(input.total_max_resolution() as usize - input.min_resolution() as usize) { elements.extend(resolution_tile.precincts()?.map(|d| PrecinctStore { precinct_y: d.r_y, precinct_x: d.r_x, component_idx: component_idx as u8, resolution: resolution as u8, precinct_idx: d.idx, })); } } elements.sort_by(sort); Some(elements.into_iter().flat_map(move |e| { (input.min_layer()..input.max_layer()).map(move |layer| ProgressionData { layer_num: layer, resolution: e.resolution, component: e.component_idx, precinct: e.precinct_idx, }) })) } /// B.12.1.3 Resolution level-position-component-layer progression. pub(crate) fn resolution_position_component_layer_progression<'a>( input: IteratorInput<'a>, ) -> Option + 'a> { position_progression_common(input, |p, s| { p.resolution .cmp(&s.resolution) .then_with(|| p.precinct_y.cmp(&s.precinct_y)) .then_with(|| p.precinct_x.cmp(&s.precinct_x)) .then_with(|| p.component_idx.cmp(&s.component_idx)) .then_with(|| p.precinct_idx.cmp(&s.precinct_idx)) }) } /// B.12.1.4 Position-component-resolution level-layer progression. 
pub(crate) fn position_component_resolution_layer_progression<'a>( input: IteratorInput<'a>, ) -> Option + 'a> { position_progression_common(input, |p, s| { p.precinct_y .cmp(&s.precinct_y) .then_with(|| p.precinct_x.cmp(&s.precinct_x)) .then_with(|| p.component_idx.cmp(&s.component_idx)) .then_with(|| p.resolution.cmp(&s.resolution)) .then_with(|| p.precinct_idx.cmp(&s.precinct_idx)) }) } /// B.12.1.5 Component-position-resolution level-layer progression. pub(crate) fn component_position_resolution_layer_progression<'a>( input: IteratorInput<'a>, ) -> Option + 'a> { position_progression_common(input, |p, s| { p.component_idx .cmp(&s.component_idx) .then_with(|| p.precinct_y.cmp(&s.precinct_y)) .then_with(|| p.precinct_x.cmp(&s.precinct_x)) .then_with(|| p.resolution.cmp(&s.resolution)) .then_with(|| p.precinct_idx.cmp(&s.precinct_idx)) }) } hayro-jpeg2000-0.3.2/src/j2c/rect.rs000064400000000000000000000021451046102023000147470ustar 00000000000000#[derive(Clone, Copy, Debug, PartialEq)] pub(crate) struct IntRect { pub(crate) x0: u32, pub(crate) y0: u32, pub(crate) x1: u32, pub(crate) y1: u32, } impl IntRect { pub(crate) fn from_ltrb(x0: u32, y0: u32, x1: u32, y1: u32) -> Self { Self { x0, y0, x1, y1 } } pub(crate) fn from_xywh(x: u32, y: u32, w: u32, h: u32) -> Self { Self { x0: x, y0: y, x1: x + w, y1: y + h, } } pub(crate) fn width(&self) -> u32 { // See B-11. self.x1 - self.x0 } pub(crate) fn height(&self) -> u32 { // See B-11. self.y1 - self.y0 } pub(crate) fn intersect(&self, other: Self) -> Self { if self.x1 < other.x0 || other.x1 < self.x0 || self.y1 < other.y0 || other.y1 < self.y0 { Self::from_xywh(0, 0, 0, 0) } else { Self::from_ltrb( u32::max(self.x0, other.x0), u32::max(self.y0, other.y0), u32::min(self.x1, other.x1), u32::min(self.y1, other.y1), ) } } } hayro-jpeg2000-0.3.2/src/j2c/segment.rs000064400000000000000000000330601046102023000154540ustar 00000000000000//! Parsing of layers and their segments, as specified in Annex B. 
use alloc::boxed::Box; use super::build::Segment; use super::codestream::markers::{EPH, SOP}; use super::codestream::{ComponentInfo, Header}; use super::decode::{DecompositionStorage, TileDecodeContext}; use super::progression::ProgressionData; use super::tile::{Tile, TilePart}; use crate::error::{Result, TileError, bail}; use crate::reader::BitReader; pub(crate) const MAX_BITPLANE_COUNT: u8 = 32; pub(crate) fn parse<'a>( tile: &'a Tile<'a>, mut progression_iterator: Box + '_>, tile_ctx: &mut TileDecodeContext<'a>, header: &Header<'_>, storage: &mut DecompositionStorage<'a>, ) -> Result<()> { for tile_part in &tile.tile_parts { if parse_inner( tile_part.clone(), &mut progression_iterator, tile_ctx, storage, ) .is_none() && header.strict { bail!(TileError::Invalid); } } Ok(()) } fn parse_inner<'a>( mut tile_part: TilePart<'a>, progression_iterator: &mut dyn Iterator, tile_ctx: &mut TileDecodeContext<'a>, storage: &mut DecompositionStorage<'a>, ) -> Option<()> { while !tile_part.header().at_end() { let progression_data = progression_iterator.next()?; let resolution = progression_data.resolution; let component_info = &tile_ctx.tile.component_infos[progression_data.component as usize]; let tile_decompositions = &mut storage.tile_decompositions[progression_data.component as usize]; let sub_band_iter = tile_decompositions.sub_band_iter(resolution, &storage.decompositions); let body_reader = tile_part.body(); if component_info.coding_style.flags.may_use_sop_markers() && body_reader.peek_marker() == Some(SOP) { body_reader.read_marker().ok()?; body_reader.skip_bytes(4)?; } let header_reader = tile_part.header(); let zero_length = header_reader.read_bits_with_stuffing(1)? == 0; // B.10.3 Zero length packet // "The first bit in the packet header denotes whether the packet has a length of zero // (empty packet). The value 0 indicates a zero length; no code-blocks are included in this // case. The value 1 indicates a non-zero length." 
if !zero_length { for sub_band in sub_band_iter.clone() { resolve_segments( sub_band, &progression_data, header_reader, storage, component_info, )?; } } header_reader.align(); if component_info.coding_style.flags.uses_eph_marker() && header_reader.read_marker().ok()? != EPH { return None; } // Now read the packet body. let body_reader = tile_part.body(); if !zero_length { for sub_band in sub_band_iter { let sub_band = &mut storage.sub_bands[sub_band]; let precinct = &mut storage.precincts[sub_band.precincts.clone()] [progression_data.precinct as usize]; let code_blocks = &mut storage.code_blocks[precinct.code_blocks.clone()]; for code_block in code_blocks { let layer = &mut storage.layers[code_block.layers.clone()] [progression_data.layer_num as usize]; if let Some(segments) = layer.segments.clone() { let segments = &mut storage.segments[segments.clone()]; for segment in segments { segment.data = body_reader.read_bytes(segment.data_length as usize)?; } } } } } } Some(()) } fn resolve_segments( sub_band_dx: usize, progression_data: &ProgressionData, reader: &mut BitReader<'_>, storage: &mut DecompositionStorage<'_>, component_info: &ComponentInfo, ) -> Option<()> { // We don't support more than 32-bit precision. const MAX_CODING_PASSES: u8 = 1 + 3 * (MAX_BITPLANE_COUNT - 1); let sub_band = &storage.sub_bands[sub_band_dx]; let precincts = &mut storage.precincts[sub_band.precincts.clone()]; let Some(precinct) = precincts.get_mut(progression_data.precinct as usize) else { // An invalid file could trigger this code path. 
lwarn!("progression data yielded invalid precinct index"); return None; }; let code_blocks = &mut storage.code_blocks[precinct.code_blocks.clone()]; for code_block in code_blocks { // B.10.4 Code-block inclusion let is_included = if code_block.has_been_included { // "For code-blocks that have been included in a previous packet, // a single bit is used to represent the information, where a 1 // means that the code-block is included in this layer and a 0 means // that it is not." reader.read_bits_with_stuffing(1)? == 1 } else { // "For code-blocks that have not been previously included in any packet, // this information is signalled with a separate tag tree code for each precinct // as confined to a sub-band. The values in this tag tree are the number of the // layer in which the current code-block is first included. Although the exact // sequence of bits that represent the inclusion tag tree appears in the bit // stream, only the bits needed for determining whether the code-block is // included are placed in the packet header. If some of the tag tree is already // known from previous code-blocks or previous layers, it is not repeated. // Likewise, only as much of the tag tree as is needed to determine inclusion in // the current layer is included. If a code-block is not included until a later // layer, then only a partial tag tree is included at that point in the bit // stream." precinct.code_inclusion_tree.read( code_block.x_idx, code_block.y_idx, reader, progression_data.layer_num as u32 + 1, &mut storage.tag_tree_nodes, )? 
<= progression_data.layer_num as u32 }; ltrace!("code-block inclusion: {}", is_included); if !is_included { continue; } let layer = &mut storage.layers[code_block.layers.clone()][progression_data.layer_num as usize]; let included_first_time = is_included && !code_block.has_been_included; // B.10.5 Zero bit-plane information // "If a code-block is included for the first time, the packet header contains // information identifying the actual number of bit-planes used to represent // coefficients from the code-block. The maximum number of bit-planes available // for the representation of coefficients in any sub-band, b, is given by Mb as // defined in Equation (E-2). In general, however, the // number of actual bit-planes for which coding passes are generated is Mb – P, // where the number of missing most significant bit-planes, P, may vary from // code-block to code-block; these missing bit-planes are all taken to be zero. The // value of P is coded in the packet header with a separate tag tree for every // precinct, in the same manner as the code block inclusion information." if included_first_time { code_block.missing_bit_planes = precinct.zero_bitplane_tree.read( code_block.x_idx, code_block.y_idx, reader, u32::MAX, &mut storage.tag_tree_nodes, )? as u8; ltrace!( "zero bit-plane information: {}", code_block.missing_bit_planes ); } code_block.has_been_included |= is_included; // B.10.6 Number of coding passes // "The number of coding passes included in this packet from each code-block is // identified in the packet header using the codewords shown in Table B.4. This // table provides for the possibility of signalling up to 164 coding passes." let added_coding_passes = if reader.peak_bits_with_stuffing(9) == Some(0x1ff) { reader.read_bits_with_stuffing(9)?; reader.read_bits_with_stuffing(7)? + 37 } else if reader.peak_bits_with_stuffing(4) == Some(0x0f) { reader.read_bits_with_stuffing(4)?; reader.read_bits_with_stuffing(5)? 
+ 6 } else if reader.peak_bits_with_stuffing(4) == Some(0b1110) { reader.read_bits_with_stuffing(4)?; 5 } else if reader.peak_bits_with_stuffing(4) == Some(0b1101) { reader.read_bits_with_stuffing(4)?; 4 } else if reader.peak_bits_with_stuffing(4) == Some(0b1100) { reader.read_bits_with_stuffing(4)?; 3 } else if reader.peak_bits_with_stuffing(2) == Some(0b10) { reader.read_bits_with_stuffing(2)?; 2 } else if reader.peak_bits_with_stuffing(1) == Some(0) { reader.read_bits_with_stuffing(1)?; 1 } else { return None; } as u8; ltrace!("number of coding passes: {}", added_coding_passes); let mut k = 0; while reader.read_bits_with_stuffing(1)? == 1 { k += 1; } code_block.l_block += k; let previous_layers_passes = code_block.number_of_coding_passes; let cumulative_passes = previous_layers_passes.checked_add(added_coding_passes)?; if cumulative_passes > MAX_CODING_PASSES { return None; } let get_segment_idx = |pass_idx: u8| { if component_info.code_block_style().termination_on_each_pass { // If we terminate on each pass, the segment is just the index // of the pass. pass_idx } else if component_info .code_block_style() .selective_arithmetic_coding_bypass { // Use the formula derived from the table in the spec. segment_idx_for_bypass(pass_idx) } else { // If none of the above flags is activated, the number of // segments just corresponds to the number of layers. code_block.non_empty_layer_count } }; let start = storage.segments.len(); let mut push_segment = |segment: u8, coding_passes_for_segment: u8| { let length = { assert!(coding_passes_for_segment > 0); // "A codeword segment is the number of bytes contributed to a packet by a // code-block. The length of a codeword segment is represented by a binary number of length: // bits = Lblock + floor(log_2(coding passes added)) // where Lblock is a code-block state variable. A separate Lblock is used for each // code-block in the precinct. The value of Lblock is initially set to three. 
The // number of bytes contributed by each code-block is preceded by signalling bits // that increase the value of Lblock, as needed. A signalling bit of zero indicates // the current value of Lblock is sufficient. If there are k ones followed by a // zero, the value of Lblock is incremented by k. While Lblock can only increase, // the number of bits used to signal the length of the code-block contribution can // increase or decrease depending on the number of coding passes included." let length_bits = code_block.l_block + coding_passes_for_segment.ilog2(); reader.read_bits_with_stuffing(length_bits as u8) }?; storage.segments.push(Segment { idx: segment, data_length: length, coding_pases: coding_passes_for_segment, // Will be set later. data: &[], }); ltrace!("length({segment}) {}", length); Some(()) }; let mut last_segment = get_segment_idx(previous_layers_passes); let mut coding_passes_for_segment = 0; for coding_pass in previous_layers_passes..cumulative_passes { let segment = get_segment_idx(coding_pass); if segment != last_segment { push_segment(last_segment, coding_passes_for_segment)?; last_segment = segment; coding_passes_for_segment = 1; } else { coding_passes_for_segment += 1; } } // Flush the final segment if applicable. if coding_passes_for_segment > 0 { push_segment(last_segment, coding_passes_for_segment)?; } let end = storage.segments.len(); layer.segments = Some(start..end); code_block.number_of_coding_passes += added_coding_passes; code_block.non_empty_layer_count += 1; } Some(()) } /// Calculate the segment index for the given pass in arithmetic decoder /// bypass (see section D.6, Table D.9). fn segment_idx_for_bypass(pass_idx: u8) -> u8 { if pass_idx < 10 { 0 } else { 1 + (2 * ((pass_idx - 10) / 3)) + (if ((pass_idx - 10) % 3) == 2 { 1 } else { 0 }) } } hayro-jpeg2000-0.3.2/src/j2c/tag_tree.rs000064400000000000000000000163111046102023000156040ustar 00000000000000//! The tag tree, described in Section B.10.2. //! //! 
Tag trees are quad trees where each leaf stores an integer value. //! Each intermediate node stores the smallest value of all of its children. //! For example, if a node stores the value 3, it means that all children //! have a value of 3 or higher. The root node therefore stores the smallest //! values across all children. use alloc::vec::Vec; use crate::reader::BitReader; #[derive(Debug, PartialEq, Eq, Clone, Default)] pub(crate) struct TagNode { /// The width of the area covered by the node. /// /// For leaf nodes, this value is always 1. In some cases, the width might /// be 0, in which case the leaf node doesn't actually "exist" and is just /// a dummy node. width: u32, /// The height of the area covered by the node. /// /// For leaf nodes, this value is always 1. In some cases, the height might /// be 0, in which case the leaf node doesn't actually "exist" and is just /// a dummy node. height: u32, /// The actual value stored in the node. Only valid once `initialized` /// is set to `true`. value: u32, /// Whether the node has been fully initialized. The tag tree is not /// stored in its complete form in the JP2 file, but is instead built /// up incrementally, each packet contributing the information of the /// tag tree. The node is therefore only initialized with its actual /// value once we cross it the first time. initialized: bool, /// The level inside the tree. Zero indicates that the given node is /// a leaf node, otherwise the level is > 0. The root node has the highest /// level. level: u16, /// The indices of the children of the node, some of which might be dummy /// nodes (indicated by the fact that the index is `usize::MAX`). children: [usize; 4], } impl TagNode { fn new(width: u32, height: u32, level: u16) -> Self { Self { width, height, level, value: 0, initialized: false, children: [usize::MAX, usize::MAX, usize::MAX, usize::MAX], } } /// The width of the top-left child. 
fn top_left_width(&self) -> u32 { u32::min(1 << (self.level - 1), self.width) } /// The height of the top-left child. fn top_left_height(&self) -> u32 { u32::min(1 << (self.level - 1), self.height) } } impl TagNode { fn build(width: u32, height: u32, level: u16, nodes: &mut Vec) -> Self { let mut tag = Self::new(width, height, level); if level == 0 { // We reached the leaf node. assert!(width <= 1 && height <= 1); return tag; } // Determine the width and height of the top-left child node. Based // on this, we can infer the dimensions of all other child nodes. let top_left_width = tag.top_left_width(); let top_left_height = tag.top_left_height(); let mut push = |node: Self, child_idx: usize, nodes: &mut Vec| { // If this is not the case, the child doesn't actually exist. if node.width > 0 && node.height > 0 { let node_idx = nodes.len(); nodes.push(node); tag.children[child_idx] = node_idx; } }; // We always push four children, but some nodes might in reality have // fewer than that. In this case, the resulting node will simply have // a width or height of 0 and we can recognize that it technically // doesn't exist. let n1 = Self::build(top_left_width, top_left_height, level - 1, nodes); push(n1, 0, nodes); let n2 = Self::build(width - top_left_width, top_left_height, level - 1, nodes); push(n2, 1, nodes); let n3 = Self::build(top_left_width, height - top_left_height, level - 1, nodes); push(n3, 2, nodes); let n4 = Self::build( width - top_left_width, height - top_left_height, level - 1, nodes, ); push(n4, 3, nodes); tag } } fn read_tag_node( node_idx: usize, x: u32, y: u32, reader: &mut BitReader<'_>, parent_val: u32, max_val: u32, nodes: &mut [TagNode], ) -> Option { let node = &mut nodes[node_idx]; if !node.initialized { let mut val = u32::max(parent_val, node.value); loop { if val >= max_val { break; } // "Each node has an associated current value, which is // initialized to zero (the minimum). 
A 0 bit in the tag tree // means that the minimum (or the value in the case of the // highest level) is larger than the current value and a 1 bit // means that the minimum (or the value in the case of the // highest level) is equal to the current value." match reader.read_bits_with_stuffing(1)? { 0 => val += 1, 1 => { node.initialized = true; break; } _ => unreachable!(), } } node.value = val; } // Abort early if we already reached the leaf node or the minimum // value of all children is too large. if node.value >= max_val || node.level == 0 { return Some(node.value); } let top_left_width = node.top_left_width(); let top_left_height = node.top_left_height(); let left = x < top_left_width; let top = y < top_left_height; match (left, top) { (true, true) => read_tag_node(node.children[0], x, y, reader, node.value, max_val, nodes), (false, true) => read_tag_node( node.children[1], x - top_left_width, y, reader, node.value, max_val, nodes, ), (true, false) => read_tag_node( node.children[2], x, y - top_left_height, reader, node.value, max_val, nodes, ), (false, false) => read_tag_node( node.children[3], x - top_left_width, y - top_left_height, reader, node.value, max_val, nodes, ), } } #[derive(Copy, Clone)] pub(crate) struct TagTree { root: usize, width: u32, height: u32, } impl TagTree { pub(crate) fn new(width: u32, height: u32, nodes: &mut Vec) -> Self { // Calculate how many levels the tree has in total. 
let level = u32::max( width.next_power_of_two().ilog2(), height.next_power_of_two().ilog2(), ); let node = TagNode::build(width, height, level as u16, nodes); let idx = nodes.len(); nodes.push(node); Self { root: idx, width, height, } } pub(crate) fn read( &mut self, x: u32, y: u32, reader: &mut BitReader<'_>, max_val: u32, nodes: &mut [TagNode], ) -> Option { debug_assert!(x < self.width && y < self.height); read_tag_node(self.root, x, y, reader, 0, max_val, nodes) } } hayro-jpeg2000-0.3.2/src/j2c/tile.rs000064400000000000000000000754361046102023000147640ustar 00000000000000//! Creating tiles and parsing their constituent tile parts. use alloc::vec; use alloc::vec::Vec; use super::build::{PrecinctData, SubBandType}; use super::codestream::{ComponentInfo, Header, ProgressionOrder, markers, skip_marker_segment}; use super::rect::IntRect; use crate::error::{MarkerError, Result, TileError, ValidationError, bail, err}; use crate::j2c::codestream; use crate::reader::BitReader; /// A single tile in the image. #[derive(Clone, Debug)] pub(crate) struct Tile<'a> { /// The index of the tile, in row-major order. pub(crate) idx: u32, /// The concatenated tile parts that contain all the information for all /// constituent codeblocks. pub(crate) tile_parts: Vec>, /// Parameters for each component. In most cases, those are directly /// inherited from the main header. But in some cases, individual tiles /// might override them. pub(crate) component_infos: Vec, /// The rectangle making up the area of the tile. `x1` and `y1` are /// exclusive. pub(crate) rect: IntRect, pub(crate) progression_order: ProgressionOrder, pub(crate) num_layers: u8, pub(crate) mct: bool, } /// A tile part where packet headers and packet data are interleaved. #[derive(Clone, Debug)] pub(crate) struct MergedTilePart<'a> { pub(crate) data: BitReader<'a>, } /// A tile part where packet headers and packet data are separated. 
#[derive(Clone, Debug)] pub(crate) struct SeparatedTilePart<'a> { pub(crate) headers: Vec>, pub(crate) active_header_reader: usize, pub(crate) body: BitReader<'a>, } #[derive(Clone, Debug)] pub(crate) enum TilePart<'a> { Merged(MergedTilePart<'a>), Separated(SeparatedTilePart<'a>), } impl<'a> TilePart<'a> { pub(crate) fn header(&mut self) -> &mut BitReader<'a> { match self { TilePart::Merged(m) => &mut m.data, TilePart::Separated(s) => { if s.headers[s.active_header_reader].at_end() && s.headers.len() - 1 > s.active_header_reader { s.active_header_reader += 1; } &mut s.headers[s.active_header_reader] } } } pub(crate) fn body(&mut self) -> &mut BitReader<'a> { match self { TilePart::Merged(m) => &mut m.data, TilePart::Separated(s) => &mut s.body, } } } impl<'a> Tile<'a> { fn new(idx: u32, header: &Header<'_>) -> Self { let rect = { let size_data = &header.size_data; let x_coord = size_data.tile_x_coord(idx); let y_coord = size_data.tile_y_coord(idx); // See B-7, B-8, B-9 and B-10. let x0 = u32::max( size_data.tile_x_offset + x_coord * size_data.tile_width, size_data.image_area_x_offset, ); let y0 = u32::max( size_data.tile_y_offset + y_coord * size_data.tile_height, size_data.image_area_y_offset, ); // Note that `x1` and `y1` are exclusive. let x1 = u32::min( size_data.tile_x_offset + (x_coord + 1) * size_data.tile_width, size_data.reference_grid_width, ); let y1 = u32::min( size_data.tile_y_offset + (y_coord + 1) * size_data.tile_height, size_data.reference_grid_height, ); IntRect::from_ltrb(x0, y0, x1, y1) }; Tile { idx, // Will be filled once we start parsing. tile_parts: vec![], rect, // By default, each tile inherits the settings from the main // header. When parsing the tile parts, some of these settings // might be overridden. 
component_infos: header.component_infos.clone(), progression_order: header.global_coding_style.progression_order, mct: header.global_coding_style.mct, num_layers: header.global_coding_style.num_layers, } } pub(crate) fn component_tiles(&self) -> impl Iterator> { self.component_infos .iter() .map(|i| ComponentTile::new(self, i)) } } /// Create the tiles and parse their constituent tile parts. pub(crate) fn parse<'a>( reader: &mut BitReader<'a>, main_header: &'a Header<'a>, ) -> Result>> { let mut tiles = (0..main_header.size_data.num_tiles() as usize) .map(|idx| Tile::new(idx as u32, main_header)) .collect::>(); let mut tile_part_idx = 0; parse_tile_part(reader, main_header, &mut tiles, tile_part_idx)?; tile_part_idx += 1; while reader.peek_marker() == Some(markers::SOT) { parse_tile_part(reader, main_header, &mut tiles, tile_part_idx)?; tile_part_idx += 1; } if main_header.strict && reader.read_marker()? != markers::EOC { bail!(MarkerError::Expected("EOC")); } Ok(tiles) } fn parse_tile_part<'a>( reader: &mut BitReader<'a>, main_header: &'a Header<'a>, tiles: &mut [Tile<'a>], tile_part_idx: usize, ) -> Result<()> { if reader.read_marker()? != markers::SOT { bail!(MarkerError::Expected("SOT")); } let tile_part_header = sot_marker(reader).ok_or(MarkerError::ParseFailure("SOT"))?; if tile_part_header.tile_index as u32 >= main_header.size_data.num_tiles() { bail!(TileError::InvalidIndex); } let data_len = if tile_part_header.tile_part_length == 0 { reader.tail().map(|d| d.len()).unwrap_or(0) } else { // Subtract 12 to account for the marker length. (tile_part_header.tile_part_length as usize) .checked_sub(12) .ok_or(TileError::Invalid)? 
}; let start = reader.offset(); let tile = &mut tiles[tile_part_header.tile_index as usize]; let num_components = tile.component_infos.len(); let mut ppt_headers = vec![]; loop { let Some(marker) = reader.peek_marker() else { return if main_header.strict { err!(MarkerError::Invalid) } else { Ok(()) }; }; match marker { markers::SOD => { reader.read_marker()?; break; } // COD, COC, QCD and QCC should only be used in the _first_ // tile-part header, if they appear at all. markers::COD => { reader.read_marker()?; let cod = codestream::cod_marker(reader).ok_or(MarkerError::ParseFailure("COD"))?; tile.mct = cod.mct; tile.num_layers = cod.num_layers; tile.progression_order = cod.progression_order; for component in &mut tile.component_infos { component.coding_style.flags.raw |= cod.component_parameters.flags.raw; component.coding_style.parameters = cod.component_parameters.clone().parameters; } } markers::COC => { reader.read_marker()?; let (component_index, coc) = codestream::coc_marker(reader, num_components as u16) .ok_or(MarkerError::ParseFailure("COC"))?; let old = tile .component_infos .get_mut(component_index as usize) .ok_or(ValidationError::InvalidComponentMetadata)?; old.coding_style.parameters = coc.parameters; old.coding_style.flags.raw |= coc.flags.raw; } markers::QCD => { reader.read_marker()?; let qcd = codestream::qcd_marker(reader).ok_or(MarkerError::ParseFailure("QCD"))?; for component_info in &mut tile.component_infos { component_info.quantization_info = qcd.clone(); } } markers::QCC => { reader.read_marker()?; let (component_index, qcc) = codestream::qcc_marker(reader, num_components as u16) .ok_or(MarkerError::ParseFailure("QCC"))?; tile.component_infos .get_mut(component_index as usize) .ok_or(ValidationError::InvalidComponentMetadata)? 
.quantization_info = qcc.clone(); } markers::EOC => break, markers::PPT => { if !main_header.ppm_packets.is_empty() { bail!(TileError::PpmPptConflict); } reader.read_marker()?; ppt_headers.push(ppt_marker(reader).ok_or(MarkerError::ParseFailure("PPT"))?); } markers::PLT => { // Can be inferred ourselves. reader.read_marker()?; skip_marker_segment(reader).ok_or(MarkerError::ParseFailure("PLT"))?; } markers::COM => { reader.read_marker()?; skip_marker_segment(reader).ok_or(MarkerError::ParseFailure("COM"))?; } (0x30..=0x3F) => { // "All markers with the marker code between 0xFF30 and 0xFF3F // have no marker segment parameters. They shall be skipped by // the decoder." reader.read_marker()?; // skip_marker_segment(reader); } _ => { bail!(MarkerError::Unsupported); } } } let remaining_bytes = if let Some(len) = data_len.checked_sub(reader.offset() - start) { len } else { return if main_header.strict { err!(TileError::Invalid) } else { Ok(()) }; }; ppt_headers.sort_by(|p1, p2| p1.sequence_idx.cmp(&p2.sequence_idx)); let mut headers: Vec<_> = ppt_headers.iter().map(|i| BitReader::new(i.data)).collect(); if let Some(ppm_marker) = main_header.ppm_packets.get(tile_part_idx) { headers.push(BitReader::new(ppm_marker.data)); } let data = reader .read_bytes(remaining_bytes) .ok_or(TileError::Invalid)?; let tile_part = if !headers.is_empty() { TilePart::Separated(SeparatedTilePart { headers, active_header_reader: 0, body: BitReader::new(data), }) } else { TilePart::Merged(MergedTilePart { data: BitReader::new(data), }) }; tile.tile_parts.push(tile_part); Ok(()) } /// A tile, instantiated to a specific component. #[derive(Debug, Copy, Clone)] pub(crate) struct ComponentTile<'a> { pub(crate) tile: &'a Tile<'a>, /// The information of the component of the tile. pub(crate) component_info: &'a ComponentInfo, /// The rectangle of the component tile. 
pub(crate) rect: IntRect, } impl<'a> ComponentTile<'a> { pub(crate) fn new(tile: &'a Tile<'a>, component_info: &'a ComponentInfo) -> Self { let tile_rect = tile.rect; let rect = if component_info.size_info.horizontal_resolution == 1 && component_info.size_info.vertical_resolution == 1 { tile_rect } else { // As described in B-12. let t_x0 = tile_rect .x0 .div_ceil(component_info.size_info.horizontal_resolution as u32); let t_y0 = tile_rect .y0 .div_ceil(component_info.size_info.vertical_resolution as u32); let t_x1 = tile_rect .x1 .div_ceil(component_info.size_info.horizontal_resolution as u32); let t_y1 = tile_rect .y1 .div_ceil(component_info.size_info.vertical_resolution as u32); IntRect::from_ltrb(t_x0, t_y0, t_x1, t_y1) }; ComponentTile { tile, component_info, rect, } } pub(crate) fn resolution_tiles(&self) -> impl Iterator> { (0..self .component_info .coding_style .parameters .num_resolution_levels) .map(|r| ResolutionTile::new(*self, r)) } } /// A tile instantiated to a specific resolution of a component tile. pub(crate) struct ResolutionTile<'a> { /// The resolution of the tile. pub(crate) resolution: u8, /// The decomposition level of the tile. pub(crate) decomposition_level: u8, /// The underlying component tile. pub(crate) component_tile: ComponentTile<'a>, /// The rectangle of the resolution tile. pub(crate) rect: IntRect, } impl<'a> ResolutionTile<'a> { pub(crate) fn new(component_tile: ComponentTile<'a>, resolution: u8) -> Self { assert!( component_tile .component_info .coding_style .parameters .num_resolution_levels > resolution ); let rect = { // See formula B-14. 
let n_l = component_tile .component_info .coding_style .parameters .num_decomposition_levels; let tx0 = (component_tile.rect.x0 as u64) .div_ceil(2_u64.pow(n_l as u32 - resolution as u32)) as u32; let ty0 = (component_tile.rect.y0 as u64) .div_ceil(2_u64.pow(n_l as u32 - resolution as u32)) as u32; let tx1 = (component_tile.rect.x1 as u64) .div_ceil(2_u64.pow(n_l as u32 - resolution as u32)) as u32; let ty1 = (component_tile.rect.y1 as u64) .div_ceil(2_u64.pow(n_l as u32 - resolution as u32)) as u32; IntRect::from_ltrb(tx0, ty0, tx1, ty1) }; // Decomposition level and resolution level are inversely related // to each other. In addition to that, there is always one more // resolution than decomposition levels (resolution level 0 only // include the LL subband of the N_L decomposition, resolution level // 1 includes the HL, LH and HH subbands of the N_L decomposition. let decomposition_level = { if resolution == 0 { component_tile .component_info .coding_style .parameters .num_decomposition_levels } else { component_tile .component_info .coding_style .parameters .num_decomposition_levels - (resolution - 1) } }; ResolutionTile { resolution, decomposition_level, component_tile, rect, } } pub(crate) fn sub_band_rect(&self, sub_band_type: SubBandType) -> IntRect { // This is the only permissible sub-band type for the given resolution. if self.resolution == 0 { assert_eq!(sub_band_type, SubBandType::LowLow); } // Formula B-15. let xo_b = if matches!(sub_band_type, SubBandType::HighLow | SubBandType::HighHigh) { 1 } else { 0 }; let yo_b = if matches!(sub_band_type, SubBandType::LowHigh | SubBandType::HighHigh) { 1 } else { 0 }; let mut numerator_x = 0; let mut numerator_y = 0; // If decomposition level is 0, xo_b and yo_b are 0 as well. 
if self.decomposition_level > 0 { numerator_x = 2_u64.pow(self.decomposition_level as u32 - 1) * xo_b as u64; numerator_y = 2_u64.pow(self.decomposition_level as u32 - 1) * yo_b as u64; } let denominator = 2_u64.pow(self.decomposition_level as u32); let tbx_0 = (self.component_tile.rect.x0 as u64) .saturating_sub(numerator_x) .div_ceil(denominator) as u32; let tbx_1 = (self.component_tile.rect.x1 as u64) .saturating_sub(numerator_x) .div_ceil(denominator) as u32; let tby_0 = (self.component_tile.rect.y0 as u64) .saturating_sub(numerator_y) .div_ceil(denominator) as u32; let tby_1 = (self.component_tile.rect.y1 as u64) .saturating_sub(numerator_y) .div_ceil(denominator) as u32; IntRect::from_ltrb(tbx_0, tby_0, tbx_1, tby_1) } /// The exponent for determining the horizontal size of a precinct. /// /// `PPx` in the specification. fn precinct_exponent_x(&self) -> u8 { self.component_tile .component_info .coding_style .parameters .precinct_exponents[self.resolution as usize] .0 } /// The exponent for determining the vertical size of a precinct. /// /// `PPx` in the specification. fn precinct_exponent_y(&self) -> u8 { self.component_tile .component_info .coding_style .parameters .precinct_exponents[self.resolution as usize] .1 } fn num_precincts_x(&self) -> u32 { // See B-16. let IntRect { x0, x1, .. } = self.rect; if x0 == x1 { 0 } else { x1.div_ceil(2_u32.pow(self.precinct_exponent_x() as u32)) - x0 / 2_u32.pow(self.precinct_exponent_x() as u32) } } fn num_precincts_y(&self) -> u32 { // See B-16. let IntRect { y0, y1, .. } = self.rect; if y0 == y1 { 0 } else { y1.div_ceil(2_u32.pow(self.precinct_exponent_y() as u32)) - y0 / 2_u32.pow(self.precinct_exponent_y() as u32) } } pub(crate) fn num_precincts(&self) -> u64 { self.num_precincts_x() as u64 * self.num_precincts_y() as u64 } /// Return an iterator over the data of the precincts in this resolution /// tile. 
pub(crate) fn precincts(&self) -> Option> { let num_precincts_y = self.num_precincts_y(); let num_precincts_x = self.num_precincts_x(); let mut ppx = self.precinct_exponent_x(); let mut ppy = self.precinct_exponent_y(); let mut y_start = (self.rect.y0 / (1 << ppy)) * (1 << ppy); let mut x_start = (self.rect.x0 / (1 << ppx)) * (1 << ppx); // It is unclear why this is necessary, but it is. The spec only // mentions that ppx/ppy must be decreased when calculating codeblock // dimensions, but not that it's necessary for precincts as well. if self.resolution > 0 { ppx = ppx.checked_sub(1)?; ppy = ppy.checked_sub(1)?; x_start /= 2; y_start /= 2; } let ppx_pow2 = 1_u32 << ppx; let ppy_pow2 = 1_u32 << ppy; let nl_minus_r = self .component_tile .component_info .num_decomposition_levels() - self.resolution; let x_stride = 1_u32.checked_shl(self.precinct_exponent_x().checked_add(nl_minus_r)? as u32)?; let y_stride = 1_u32.checked_shl(self.precinct_exponent_y().checked_add(nl_minus_r)? as u32)?; let precinct_x_step = (self .component_tile .component_info .size_info .horizontal_resolution as u32) .checked_mul(x_stride)?; let precinct_y_step = (self .component_tile .component_info .size_info .vertical_resolution as u32) .checked_mul(y_stride)?; // These variables are used to map the start coordinates of each // precinct _on the reference grid_. Remember that the first // precinct in each row/column is at the start position of the tile // which might not be a multiple of precinct exponent, but all subsequent // precincts are at a multiple of the exponent. let mut r_x = self.component_tile.tile.rect.x0; let mut r_y = self.component_tile.tile.rect.y0; // The second part of the condition in the formula in B.12.1.3. If it // is divisible, then we can't take the x/y position of the tile // as the start of the precinct, but instead have to advance to the // next multiple. 
if !r_x.is_multiple_of(precinct_x_step) && (self.rect.x0 * (1 << nl_minus_r)).is_multiple_of(precinct_x_step) { r_x = r_x.checked_next_multiple_of(precinct_x_step)?; } // Same as above. if !r_y.is_multiple_of(precinct_y_step) && (self.rect.y0 * (1 << nl_minus_r)).is_multiple_of(precinct_y_step) { r_y = r_y.checked_next_multiple_of(precinct_y_step)?; } let iter = (0..num_precincts_y).flat_map(move |y| { let y0 = y * ppy_pow2 + y_start; let mut r_x = r_x; let res = (0..num_precincts_x).map(move |x| { let x0 = x * ppx_pow2 + x_start; let data = PrecinctData { r_x, r_y, rect: IntRect::from_xywh(x0, y0, ppx_pow2, ppy_pow2), idx: num_precincts_x as u64 * y as u64 + x as u64, }; // If r_x is already aligned, we simply step by `precinct_x_step`. // Otherwise (can only be the case for precincts in the first // row or column), align to the next multiple. r_x = (r_x + 1).next_multiple_of(precinct_x_step); data }); // Same as for r_x. r_y = (r_y + 1).next_multiple_of(precinct_y_step); res }); Some(iter) } pub(crate) fn code_block_width(&self) -> u32 { // See B-17. let xcb = self .component_tile .component_info .coding_style .parameters .code_block_width; let xcb = if self.resolution > 0 { u8::min(xcb, self.precinct_exponent_x() - 1) } else { u8::min(xcb, self.precinct_exponent_x()) }; 2_u32.pow(xcb as u32) } pub(crate) fn code_block_height(&self) -> u32 { // See B-18. let ycb = self .component_tile .component_info .coding_style .parameters .code_block_height; let ycb = if self.resolution > 0 { u8::min(ycb, self.precinct_exponent_y() - 1) } else { u8::min(ycb, self.precinct_exponent_y()) }; 2_u32.pow(ycb as u32) } } struct TilePartHeader { tile_index: u16, tile_part_length: u32, } struct PptMarkerData<'a> { data: &'a [u8], sequence_idx: u8, } /// PPT marker (A.7.5). 
fn ppt_marker<'a>(reader: &mut BitReader<'a>) -> Option> { let length = reader.read_u16()?.checked_sub(2)?; let header_len = length.checked_sub(1)?; let sequence_idx = reader.read_byte()?; Some(PptMarkerData { data: reader.read_bytes(header_len as usize)?, sequence_idx, }) } /// SOT marker (A.4.2). fn sot_marker(reader: &mut BitReader<'_>) -> Option { // Length. let _ = reader.read_u16()?; let tile_index = reader.read_u16()?; let tile_part_length = reader.read_u32()?; // We infer those ourselves. let _tile_part_index = reader.read_byte()? as u16; let _num_tile_parts = reader.read_byte()?; Some(TilePartHeader { tile_index, tile_part_length, }) } #[cfg(test)] mod tests { use super::*; use crate::j2c::codestream::{ CodeBlockStyle, CodingStyleComponent, CodingStyleDefault, CodingStyleFlags, CodingStyleParameters, ComponentSizeInfo, QuantizationInfo, QuantizationStyle, SizeData, WaveletTransform, }; /// Test case for the example in B.4. #[test] fn test_jpeg2000_standard_example_b4() { let component_size_info_0 = ComponentSizeInfo { precision: 8, horizontal_resolution: 1, vertical_resolution: 1, }; let dummy_component_coding_style = CodingStyleComponent { flags: CodingStyleFlags::default(), parameters: CodingStyleParameters { num_decomposition_levels: 0, num_resolution_levels: 0, code_block_width: 0, code_block_height: 0, code_block_style: CodeBlockStyle::default(), transformation: WaveletTransform::Irreversible97, precinct_exponents: vec![], }, }; let dummy_quantization_info = QuantizationInfo { quantization_style: QuantizationStyle::NoQuantization, guard_bits: 0, step_sizes: vec![], }; let component_info_0 = ComponentInfo { size_info: component_size_info_0, coding_style: dummy_component_coding_style.clone(), quantization_info: dummy_quantization_info.clone(), }; let component_size_info_1 = ComponentSizeInfo { precision: 8, horizontal_resolution: 2, vertical_resolution: 2, }; let component_info_1 = ComponentInfo { size_info: component_size_info_1, coding_style: 
dummy_component_coding_style.clone(), quantization_info: dummy_quantization_info.clone(), }; let size_data = SizeData { reference_grid_width: 1432, reference_grid_height: 954, image_area_x_offset: 152, image_area_y_offset: 234, tile_width: 396, tile_height: 297, tile_x_offset: 0, tile_y_offset: 0, component_sizes: vec![component_size_info_0, component_size_info_1], x_shrink_factor: 1, y_shrink_factor: 1, x_resolution_shrink_factor: 1, y_resolution_shrink_factor: 1, }; assert_eq!(size_data.image_width(), 1280); assert_eq!(size_data.image_height(), 720); assert_eq!(size_data.num_x_tiles(), 4); assert_eq!(size_data.num_y_tiles(), 4); assert_eq!(size_data.num_tiles(), 16); let header = Header { size_data, // Just dummy values. global_coding_style: CodingStyleDefault { progression_order: ProgressionOrder::LayerResolutionComponentPosition, num_layers: 0, mct: false, component_parameters: CodingStyleComponent { flags: CodingStyleFlags::default(), parameters: CodingStyleParameters { num_decomposition_levels: 0, num_resolution_levels: 0, code_block_width: 0, code_block_height: 0, code_block_style: CodeBlockStyle::default(), transformation: WaveletTransform::Irreversible97, precinct_exponents: vec![], }, }, }, component_infos: vec![], ppm_packets: vec![], skipped_resolution_levels: 0, strict: false, }; let tile_0_0 = Tile::new(0, &header); let coords_0_0 = ComponentTile::new(&tile_0_0, &component_info_0).rect; assert_eq!(coords_0_0.x0, 152); assert_eq!(coords_0_0.y0, 234); assert_eq!(coords_0_0.x1, 396); assert_eq!(coords_0_0.y1, 297); assert_eq!(coords_0_0.width(), 244); assert_eq!(coords_0_0.height(), 63); let tile_1_0 = Tile::new(1, &header); let coords_1_0 = ComponentTile::new(&tile_1_0, &component_info_0).rect; assert_eq!(coords_1_0.x0, 396); assert_eq!(coords_1_0.y0, 234); assert_eq!(coords_1_0.x1, 792); assert_eq!(coords_1_0.y1, 297); assert_eq!(coords_1_0.width(), 396); assert_eq!(coords_1_0.height(), 63); let tile_0_1 = Tile::new(4, &header); let coords_0_1 = 
ComponentTile::new(&tile_0_1, &component_info_0).rect; assert_eq!(coords_0_1.x0, 152); assert_eq!(coords_0_1.y0, 297); assert_eq!(coords_0_1.x1, 396); assert_eq!(coords_0_1.y1, 594); assert_eq!(coords_0_1.width(), 244); assert_eq!(coords_0_1.height(), 297); let tile_1_1 = Tile::new(5, &header); let coords_1_1 = ComponentTile::new(&tile_1_1, &component_info_0).rect; assert_eq!(coords_1_1.x0, 396); assert_eq!(coords_1_1.y0, 297); assert_eq!(coords_1_1.x1, 792); assert_eq!(coords_1_1.y1, 594); assert_eq!(coords_1_1.width(), 396); assert_eq!(coords_1_1.height(), 297); let tile_3_3 = Tile::new(15, &header); let coords_3_3 = ComponentTile::new(&tile_3_3, &component_info_0).rect; assert_eq!(coords_3_3.x0, 1188); assert_eq!(coords_3_3.y0, 891); assert_eq!(coords_3_3.x1, 1432); assert_eq!(coords_3_3.y1, 954); assert_eq!(coords_3_3.width(), 244); assert_eq!(coords_3_3.height(), 63); let tile_0_0_comp1 = ComponentTile::new(&tile_0_0, &component_info_1).rect; assert_eq!(tile_0_0_comp1.x0, 76); assert_eq!(tile_0_0_comp1.y0, 117); assert_eq!(tile_0_0_comp1.x1, 198); assert_eq!(tile_0_0_comp1.y1, 149); assert_eq!(tile_0_0_comp1.width(), 122); assert_eq!(tile_0_0_comp1.height(), 32); let tile_1_0_comp1 = ComponentTile::new(&tile_1_0, &component_info_1).rect; assert_eq!(tile_1_0_comp1.x0, 198); assert_eq!(tile_1_0_comp1.y0, 117); assert_eq!(tile_1_0_comp1.x1, 396); assert_eq!(tile_1_0_comp1.y1, 149); assert_eq!(tile_1_0_comp1.width(), 198); assert_eq!(tile_1_0_comp1.height(), 32); let tile_0_1_comp1 = ComponentTile::new(&tile_0_1, &component_info_1).rect; assert_eq!(tile_0_1_comp1.x0, 76); assert_eq!(tile_0_1_comp1.y0, 149); assert_eq!(tile_0_1_comp1.x1, 198); assert_eq!(tile_0_1_comp1.y1, 297); assert_eq!(tile_0_1_comp1.width(), 122); assert_eq!(tile_0_1_comp1.height(), 148); let tile_1_1_comp1 = ComponentTile::new(&tile_1_1, &component_info_1).rect; assert_eq!(tile_1_1_comp1.x0, 198); assert_eq!(tile_1_1_comp1.y0, 149); assert_eq!(tile_1_1_comp1.x1, 396); 
assert_eq!(tile_1_1_comp1.y1, 297); assert_eq!(tile_1_1_comp1.width(), 198); assert_eq!(tile_1_1_comp1.height(), 148); let tile_2_1 = Tile::new(6, &header); let tile_2_1_comp1 = ComponentTile::new(&tile_2_1, &component_info_1).rect; assert_eq!(tile_2_1_comp1.x0, 396); assert_eq!(tile_2_1_comp1.y0, 149); assert_eq!(tile_2_1_comp1.x1, 594); assert_eq!(tile_2_1_comp1.y1, 297); assert_eq!(tile_2_1_comp1.width(), 198); assert_eq!(tile_2_1_comp1.height(), 148); assert_eq!(tile_1_1_comp1.width(), tile_2_1_comp1.width()); assert_eq!(tile_1_1_comp1.height(), tile_2_1_comp1.height()); } } hayro-jpeg2000-0.3.2/src/jp2/box.rs000064400000000000000000000067701046102023000146270ustar 00000000000000//! Parsing a JP2 box, as specified in I.4. #![allow( dead_code, reason = "JP2 box constants exist for completeness but not all are referenced yet" )] use alloc::string::{String, ToString}; use crate::reader::BitReader; /// JP2 signature box - 'jP\040\040'. pub(crate) const JP2_SIGNATURE: u32 = 0x6A502020; /// File Type box - 'ftyp'. pub(crate) const FILE_TYPE: u32 = 0x66747970; /// JP2 Header box - 'jp2h'. pub(crate) const JP2_HEADER: u32 = 0x6A703268; /// Image Header box - 'ihdr'. pub(crate) const IMAGE_HEADER: u32 = 0x69686472; /// Bits Per Component box - 'bpcc'. pub(crate) const BITS_PER_COMPONENT: u32 = 0x62706363; /// Colour Specification box - 'colr'. pub(crate) const COLOUR_SPECIFICATION: u32 = 0x636F6C72; /// Palette box - 'pclr'. pub(crate) const PALETTE: u32 = 0x70636C72; /// Component Mapping box - 'cmap'. pub(crate) const COMPONENT_MAPPING: u32 = 0x636D6170; /// Channel Definition box - 'cdef'. pub(crate) const CHANNEL_DEFINITION: u32 = 0x63646566; /// Resolution box - 'res\x20'. pub(crate) const RESOLUTION: u32 = 0x72657320; /// Capture Resolution box - 'resc'. pub(crate) const CAPTURE_RESOLUTION: u32 = 0x72657363; /// Default Display Resolution box - 'resd'. pub(crate) const DISPLAY_RESOLUTION: u32 = 0x72657364; /// Contiguous Codestream box - 'jp2c'. 
pub(crate) const CONTIGUOUS_CODESTREAM: u32 = 0x6A703263; /// Intellectual Property box - 'jp2i'. pub(crate) const INTELLECTUAL_PROPERTY: u32 = 0x6A703269; /// XML box - 'xml\x20'. pub(crate) const XML: u32 = 0x786D6C20; /// UUID box - 'uuid'. pub(crate) const UUID: u32 = 0x75756964; /// UUID Info box - 'uinf'. pub(crate) const UUID_INFO: u32 = 0x75696E66; /// UUID List box - 'ulst'. pub(crate) const UUID_LIST: u32 = 0x756C7374; /// URL box - 'url\x20'. pub(crate) const URL: u32 = 0x75726C20; pub(crate) struct Jp2Box<'a> { pub(crate) data: &'a [u8], pub(crate) box_type: u32, } /// Converts a box tag to its string representation. /// /// Box tags are stored as 4-byte ASCII codes in big-endian format. pub(crate) fn tag_to_string(tag: u32) -> String { let bytes = [ ((tag >> 24) & 0xFF) as u8, ((tag >> 16) & 0xFF) as u8, ((tag >> 8) & 0xFF) as u8, (tag & 0xFF) as u8, ]; String::from_utf8_lossy(&bytes).to_string() } pub(crate) fn read<'a>(reader: &mut BitReader<'a>) -> Option> { let l_box = reader.read_u32()?; let t_box = reader.read_u32()?; let data = match l_box { // If the value of this field is 0, then the length of the box // was not known when the LBox field was written. In this case, this box contains // all bytes up to the end of the file. 0 => { let data = reader.tail()?; reader.jump_to_end(); data } // If the value of this field is 1, then the XLBox field shall exist and the value of // that field shall be the actual length of the box. // The value includes all of the fields of the box, including the LBox, TBox and XLBox // fields. 1 => { let xl_box = reader.read_u64()?.checked_sub(16)?; reader.read_bytes(xl_box as usize)? } // This field specifies the length of the box, stored as a 4-byte big-endian unsigned integer. // This value includes all of the fields of the box, including the length and type. _ => { let length = l_box.checked_sub(8)?; reader.read_bytes(length as usize)? 
} }; Some(Jp2Box { data, box_type: t_box, }) } hayro-jpeg2000-0.3.2/src/jp2/cdef.rs000064400000000000000000000047461046102023000147410ustar 00000000000000//! The channel definition box (cdef), defined in I.5.3.6. use alloc::vec::Vec; use crate::error::{FormatError, Result, bail}; use crate::jp2::ImageBoxes; use crate::reader::BitReader; pub(crate) fn parse(boxes: &mut ImageBoxes, data: &[u8]) -> Result<()> { let mut reader = BitReader::new(data); let count = reader.read_u16().ok_or(FormatError::InvalidBox)? as usize; let mut definitions = Vec::with_capacity(count); if count == 0 { bail!(FormatError::InvalidBox); } for _ in 0..count { let channel_index = reader.read_u16().ok_or(FormatError::InvalidBox)?; let channel_type = reader.read_u16().ok_or(FormatError::InvalidBox)?; let association = reader.read_u16().ok_or(FormatError::InvalidBox)?; definitions.push(ChannelDefinition { channel_index, channel_type: ChannelType::from_raw(channel_type).ok_or(FormatError::InvalidBox)?, _association: ChannelAssociation::from_raw(association) .ok_or(FormatError::InvalidBox)?, }); } definitions.sort_by(|a, b| a.channel_index.cmp(&b.channel_index)); // Ensure channel indices increases in steps of 1, starting from 0. for (idx, def) in definitions.iter().enumerate() { if def.channel_index as usize != idx { bail!(FormatError::InvalidBox); } } boxes.channel_definition = Some(ChannelDefinitionBox { channel_definitions: definitions, }); Ok(()) } #[derive(Debug, Clone)] pub(crate) struct ChannelDefinitionBox { pub(crate) channel_definitions: Vec, } #[derive(Debug, Clone)] pub(crate) struct ChannelDefinition { pub(crate) channel_index: u16, pub(crate) channel_type: ChannelType, pub(crate) _association: ChannelAssociation, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ChannelType { Colour, Opacity, } impl ChannelType { fn from_raw(value: u16) -> Option { match value { 0 => Some(Self::Colour), 1 => Some(Self::Opacity), // We don't support the others. 
_ => None, } } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ChannelAssociation { WholeImage, Colour(u16), } impl ChannelAssociation { fn from_raw(value: u16) -> Option { match value { 0 => Some(Self::WholeImage), // Unspecified. u16::MAX => None, v => Some(Self::Colour(v)), } } } hayro-jpeg2000-0.3.2/src/jp2/cmap.rs000064400000000000000000000027321046102023000147510ustar 00000000000000//! The component mapping box (cmap), defined in I.5.3.5. use alloc::vec::Vec; use crate::error::{FormatError, Result, bail}; use crate::jp2::ImageBoxes; use crate::reader::BitReader; pub(crate) fn parse(boxes: &mut ImageBoxes, data: &[u8]) -> Result<()> { let mut reader = BitReader::new(data); let mut entries = Vec::with_capacity(data.len() / 4); while !reader.at_end() { let component_index = reader.read_u16().ok_or(FormatError::InvalidBox)?; let mapping_type = reader.read_byte().ok_or(FormatError::InvalidBox)?; let palette_column = reader.read_byte().ok_or(FormatError::InvalidBox)?; let mapping_type = match mapping_type { 0 => ComponentMappingType::Direct, 1 => ComponentMappingType::Palette { column: palette_column, }, _ => bail!(FormatError::InvalidBox), }; entries.push(ComponentMappingEntry { component_index, mapping_type, }); } boxes.component_mapping = Some(ComponentMappingBox { entries }); Ok(()) } #[derive(Debug, Clone)] pub(crate) struct ComponentMappingBox { pub(crate) entries: Vec, } #[derive(Debug, Clone)] pub(crate) struct ComponentMappingEntry { pub(crate) component_index: u16, pub(crate) mapping_type: ComponentMappingType, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ComponentMappingType { Direct, Palette { column: u8 }, } hayro-jpeg2000-0.3.2/src/jp2/colr.rs000064400000000000000000000100241046102023000147610ustar 00000000000000//! The color specification box (colr), defined in I.5.3.3. 
use alloc::vec::Vec; use crate::error::{FormatError, Result}; use crate::jp2::ImageBoxes; use crate::reader::BitReader; pub(crate) fn parse(boxes: &mut ImageBoxes, data: &[u8]) -> Result<()> { if boxes.color_specification.is_some() { // "A JP2 file may contain multiple Colour Specification boxes, but // must contain at least one, specifying different methods // for achieving "equivalent" results. A conforming JP2 reader shall // ignore all Colour Specification boxes after the first. // However, readers conforming to other standards may use those boxes as // defined in those other standards." return Ok(()); } let mut reader = BitReader::new(data); let meth = reader.read_byte().ok_or(FormatError::InvalidBox)?; // We don't care about those. let _prec = reader.read_byte().ok_or(FormatError::InvalidBox)?; let _approx = reader.read_byte().ok_or(FormatError::InvalidBox)?; let method = match meth { 1 => { let enumerated = reader.read_u32().ok_or(FormatError::InvalidBox)?; ColorSpace::Enumerated( EnumeratedColorspace::from_raw(enumerated, &mut reader) .ok_or(FormatError::InvalidBox)?, ) } 2 => { let profile_data = reader.tail().ok_or(FormatError::InvalidBox)?.to_vec(); ColorSpace::Icc(profile_data) } _ => ColorSpace::Unknown, }; boxes.color_specification = Some(ColorSpecificationBox { color_space: method, }); Ok(()) } #[derive(Debug, Clone)] pub(crate) struct ColorSpecificationBox { pub(crate) color_space: ColorSpace, } #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum ColorSpace { Enumerated(EnumeratedColorspace), Icc(Vec), Unknown, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum EnumeratedColorspace { BiLevel1, YCbCr1, YCbCr2, YCbCr3, PhotoYcc, Cmy, Cmyk, Ycck, CieLab(CieLab), BiLevel2, Srgb, Greyscale, Sycc, CieJab, EsRgb, RommRgb, YPbPr112560, YPbPr125050, EsYcc, ScRgb, ScRgbGray, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) struct CieLab { pub(crate) rl: Option, pub(crate) ol: Option, pub(crate) ra: Option, pub(crate) oa: Option, 
pub(crate) rb: Option, pub(crate) ob: Option, } impl EnumeratedColorspace { fn from_raw(value: u32, reader: &mut BitReader<'_>) -> Option { match value { 0 => Some(Self::BiLevel1), 1 => Some(Self::YCbCr1), 3 => Some(Self::YCbCr2), 4 => Some(Self::YCbCr3), 9 => Some(Self::PhotoYcc), 11 => Some(Self::Cmy), 12 => Some(Self::Cmyk), 13 => Some(Self::Ycck), 14 => { // M.11.7.4.1 EP field format for the CIELab colourspace let rl = reader.read_u32(); let ol = reader.read_u32(); let ra = reader.read_u32(); let oa = reader.read_u32(); let rb = reader.read_u32(); let ob = reader.read_u32(); // Not supported for now. let _il = reader.read_u32(); Some(Self::CieLab(CieLab { rl, ol, ra, oa, rb, ob, })) } 15 => Some(Self::BiLevel2), 16 => Some(Self::Srgb), 17 => Some(Self::Greyscale), 18 => Some(Self::Sycc), 19 => Some(Self::CieJab), 20 => Some(Self::EsRgb), 21 => Some(Self::RommRgb), 22 => Some(Self::YPbPr112560), 23 => Some(Self::YPbPr125050), 24 => Some(Self::EsYcc), 25 => Some(Self::ScRgb), 26 => Some(Self::ScRgbGray), _ => None, } } } hayro-jpeg2000-0.3.2/src/jp2/icc.rs000064400000000000000000000040401046102023000145610ustar 00000000000000#[derive(Clone, Hash, Debug, Eq, PartialEq)] pub(crate) enum ICCColorSpace { Xyz, Lab, Luv, Ycbr, Yxy, Lms, Rgb, Gray, Hsv, Hls, Cmyk, Cmy, OneClr, ThreeClr, FourClr, // There are more, but those should be the most important // ones. 
} impl ICCColorSpace { pub(crate) fn num_components(&self) -> u8 { match self { Self::Xyz => 3, Self::Lab => 3, Self::Luv => 3, Self::Ycbr => 3, Self::Yxy => 3, Self::Lms => 3, Self::Rgb => 3, Self::Gray => 1, Self::Hsv => 3, Self::Hls => 3, Self::Cmyk => 4, Self::Cmy => 3, Self::OneClr => 1, Self::ThreeClr => 3, Self::FourClr => 4, } } } impl TryFrom for ICCColorSpace { type Error = (); fn try_from(value: u32) -> Result { match value { 0x58595A20 => Ok(Self::Xyz), 0x4C616220 => Ok(Self::Lab), 0x4C757620 => Ok(Self::Luv), 0x59436272 => Ok(Self::Ycbr), 0x59787920 => Ok(Self::Yxy), 0x4C4D5320 => Ok(Self::Lms), 0x52474220 => Ok(Self::Rgb), 0x47524159 => Ok(Self::Gray), 0x48535620 => Ok(Self::Hsv), 0x484C5320 => Ok(Self::Hls), 0x434D594B => Ok(Self::Cmyk), 0x434D5920 => Ok(Self::Cmy), 0x31434C52 => Ok(Self::OneClr), 0x33434C52 => Ok(Self::ThreeClr), 0x34434C52 => Ok(Self::FourClr), _ => Err(()), } } } #[derive(Clone, Hash, Debug, Eq, PartialEq)] pub(crate) struct ICCMetadata { pub(crate) color_space: ICCColorSpace, } impl ICCMetadata { pub(crate) fn from_data(data: &[u8]) -> Option { let color_space = { let marker = u32::from_be_bytes(data.get(16..20)?.try_into().ok()?); ICCColorSpace::try_from(marker).ok()? }; Some(Self { color_space }) } } hayro-jpeg2000-0.3.2/src/jp2/mod.rs000064400000000000000000000125371046102023000146140ustar 00000000000000//! Reading a JP2 file, defined in Annex I. 
use alloc::vec::Vec; use crate::error::{FormatError, Result, bail}; use crate::j2c::DecodedCodestream; use crate::jp2::r#box::{FILE_TYPE, JP2_SIGNATURE}; use crate::jp2::cdef::ChannelDefinitionBox; use crate::jp2::cmap::{ComponentMappingBox, ComponentMappingEntry, ComponentMappingType}; use crate::jp2::colr::ColorSpecificationBox; use crate::jp2::pclr::PaletteBox; use crate::reader::BitReader; use crate::{DecodeSettings, Image, resolve_alpha_and_color_space}; pub(crate) mod r#box; pub(crate) mod cdef; pub(crate) mod cmap; pub(crate) mod colr; pub(crate) mod icc; pub(crate) mod pclr; #[derive(Debug, Clone, Default)] pub(crate) struct ImageBoxes { pub(crate) color_specification: Option, pub(crate) channel_definition: Option, pub(crate) palette: Option, pub(crate) component_mapping: Option, } pub(crate) struct DecodedImage { /// The raw decoded JPEG2000 codestream. pub(crate) decoded: DecodedCodestream, /// The JP2 boxes of the image. In the case of a raw codestream, we /// will synthesize the necessary boxes. 
pub(crate) boxes: ImageBoxes, } pub(crate) fn parse<'a>(data: &'a [u8], mut settings: DecodeSettings) -> Result> { let mut reader = BitReader::new(data); let signature_box = r#box::read(&mut reader).ok_or(FormatError::InvalidBox)?; if signature_box.box_type != JP2_SIGNATURE { bail!(FormatError::InvalidSignature); } let file_type_box = r#box::read(&mut reader).ok_or(FormatError::InvalidBox)?; if file_type_box.box_type != FILE_TYPE { bail!(FormatError::InvalidFileType); } let mut image_boxes: Option = None; let mut parsed_codestream = None; // Read boxes until we find the JP2 Header box while !reader.at_end() { let Some(current_box) = r#box::read(&mut reader) else { if settings.strict { bail!(FormatError::InvalidBox); } break; }; match current_box.box_type { r#box::JP2_HEADER => { let mut boxes = ImageBoxes::default(); let mut jp2h_reader = BitReader::new(current_box.data); // Read child boxes within JP2 Header box while !jp2h_reader.at_end() { let child_box = r#box::read(&mut jp2h_reader).ok_or(FormatError::InvalidBox)?; match child_box.box_type { r#box::CHANNEL_DEFINITION => { if cdef::parse(&mut boxes, child_box.data).is_err() && settings.strict { bail!(FormatError::InvalidBox); } // If not strict decoding, just assume default // configuration. } r#box::COLOUR_SPECIFICATION => { colr::parse(&mut boxes, child_box.data)?; } r#box::PALETTE => { if pclr::parse(&mut boxes, child_box.data).is_err() && settings.strict { bail!(FormatError::InvalidBox); } // If we have a palettized image, decoding at a // lower resolution will corrupt it, so we can't do // it in this case. 
settings.target_resolution = None; } r#box::COMPONENT_MAPPING => { cmap::parse(&mut boxes, child_box.data)?; } _ => { ldebug!( "ignoring header box {}", r#box::tag_to_string(child_box.box_type) ); } } } image_boxes = Some(boxes); } r#box::CONTIGUOUS_CODESTREAM => { parsed_codestream = Some(crate::j2c::parse_raw(current_box.data, &settings)?); } _ => {} } } let mut image_boxes = image_boxes.ok_or(FormatError::InvalidBox)?; let parsed_codestream = parsed_codestream.ok_or(FormatError::MissingCodestream)?; if let Some(palette) = image_boxes.palette.as_ref() && image_boxes.component_mapping.is_none() { // In theory, a cmap is required if we have pclr, but since there are // some files that don't seem to do so, we assume // that all channels are mapped via the palette in case not. let mappings = (0..palette.columns.len()) .map(|i| ComponentMappingEntry { component_index: 0, mapping_type: ComponentMappingType::Palette { column: i as u8 }, }) .collect::>(); image_boxes.component_mapping = Some(ComponentMappingBox { entries: mappings }); } let (color_space, has_alpha) = resolve_alpha_and_color_space(&image_boxes, &parsed_codestream.header, &settings)?; Ok(Image { codestream: parsed_codestream.data, header: parsed_codestream.header, boxes: image_boxes, settings, color_space, has_alpha, }) } hayro-jpeg2000-0.3.2/src/jp2/pclr.rs000064400000000000000000000042111046102023000147630ustar 00000000000000//! The palette box (pclr), defined in I.5.3.4. use alloc::vec::Vec; use crate::error::{FormatError, Result, bail}; use crate::jp2::ImageBoxes; use crate::reader::BitReader; pub(crate) fn parse(boxes: &mut ImageBoxes, data: &[u8]) -> Result<()> { let mut reader = BitReader::new(data); let num_entries = reader.read_u16().ok_or(FormatError::InvalidBox)? as usize; let num_components = reader.read_byte().ok_or(FormatError::InvalidBox)? 
as usize; if num_entries == 0 || num_components == 0 { bail!(FormatError::InvalidBox); } let mut columns = Vec::with_capacity(num_components); for _ in 0..num_components { let descriptor = reader.read_byte().ok_or(FormatError::InvalidBox)?; let bit_depth = (descriptor & 0x7F) .checked_add(1) .ok_or(FormatError::InvalidBox)?; let is_signed = (descriptor & 0x80) != 0; if is_signed { bail!(FormatError::InvalidBox); } columns.push(PaletteColumn { bit_depth }); } let mut entries = Vec::with_capacity(num_entries); for _ in 0..num_entries { let mut row = Vec::with_capacity(num_components); for column in &columns { let num_bytes = (column.bit_depth as usize).div_ceil(8).max(1); let raw_bytes = reader .read_bytes(num_bytes) .ok_or(FormatError::InvalidBox)?; let mut raw_value = 0_u64; for &byte in raw_bytes { raw_value = (raw_value << 8) | byte as u64; } row.push(raw_value); } entries.push(row); } boxes.palette = Some(PaletteBox { entries, columns }); Ok(()) } #[derive(Debug, Clone)] pub(crate) struct PaletteBox { pub(crate) entries: Vec>, pub(crate) columns: Vec, } impl PaletteBox { #[inline(always)] pub(crate) fn map(&self, entry: usize, column: usize) -> Option { self.entries .get(entry) .and_then(|row| row.get(column)) .copied() } } #[derive(Debug, Clone, Copy)] pub(crate) struct PaletteColumn { pub(crate) bit_depth: u8, } hayro-jpeg2000-0.3.2/src/lib.rs000064400000000000000000000627141046102023000141120ustar 00000000000000/*! A memory-safe, pure-Rust JPEG 2000 decoder. `hayro-jpeg2000` can decode both raw JPEG 2000 codestreams (`.j2c`) and images wrapped inside the JP2 container format. The decoder supports the vast majority of features defined in the JPEG2000 core coding system (ISO/IEC 15444-1) as well as some color spaces from the extensions (ISO/IEC 15444-2). 
There are still some missing pieces for some "obscure" features(like for example support for progression order changes in tile-parts), but all features that actually commonly appear in real-life images should be supported (if not, please open an issue!). The decoder abstracts away most of the internal complexity of JPEG2000 and yields a simple 8-bit image with either greyscale, RGB, CMYK or an ICC-based color space, which can then be processed further according to your needs. # Example ```rust,no_run use hayro_jpeg2000::{Image, DecodeSettings}; let data = std::fs::read("image.jp2").unwrap(); let image = Image::new(&data, &DecodeSettings::default()).unwrap(); println!( "{}x{} image in {:?} with alpha={}", image.width(), image.height(), image.color_space(), image.has_alpha(), ); let bitmap = image.decode().unwrap(); ``` If you want to see a more comprehensive example, please take a look at the example in [GitHub](https://github.com/LaurenzV/hayro/blob/main/hayro-jpeg2000/examples/png.rs), which shows you the main steps needed to convert a JPEG2000 image into PNG for example. # Testing The decoder has been tested against 20.000+ images scraped from random PDFs on the internet and also passes a large part of the `OpenJPEG` test suite. So you can expect the crate to perform decently in terms of decoding correctness. # Performance A decent amount of effort has already been put into optimizing this crate (both in terms of raw performance but also memory allocations). However, there are some more important optimizations that have not been implemented yet, so there is definitely still room for improvement (and I am planning on implementing them eventually). Overall, you should expect this crate to have worse performance than `OpenJPEG`, but the difference gap should not be too large. # Safety By default, the crate has the `simd` feature enabled, which uses the [`fearless_simd`](https://github.com/linebender/fearless_simd) crate to accelerate important parts of the pipeline. 
If you want to eliminate any usage of unsafe in this crate as well as its dependencies, you can simply disable this feature, at the cost of worse decoding performance. Unsafe code is forbidden via a crate-level attribute. The crate is `no_std` compatible but requires an allocator to be available. */ #![cfg_attr(not(feature = "std"), no_std)] #![forbid(unsafe_code)] #![forbid(missing_docs)] extern crate alloc; use alloc::vec; use alloc::vec::Vec; use crate::error::{bail, err}; use crate::j2c::{ComponentData, DecodedCodestream, Header}; use crate::jp2::cdef::{ChannelAssociation, ChannelType}; use crate::jp2::cmap::ComponentMappingType; use crate::jp2::colr::{CieLab, EnumeratedColorspace}; use crate::jp2::icc::ICCMetadata; use crate::jp2::{DecodedImage, ImageBoxes}; pub mod error; #[macro_use] pub(crate) mod log; pub(crate) mod math; use crate::math::{Level, SIMD_WIDTH, Simd, dispatch, f32x8}; pub use error::{ ColorError, DecodeError, DecodingError, FormatError, MarkerError, Result, TileError, ValidationError, }; #[cfg(feature = "image")] pub mod integration; mod j2c; mod jp2; pub(crate) mod reader; /// JP2 signature box: 00 00 00 0C 6A 50 20 20 pub(crate) const JP2_MAGIC: &[u8] = b"\x00\x00\x00\x0C\x6A\x50\x20\x20"; /// Codestream signature: FF 4F FF 51 (SOC + SIZ markers) pub(crate) const CODESTREAM_MAGIC: &[u8] = b"\xFF\x4F\xFF\x51"; /// Settings to apply during decoding. #[derive(Debug, Copy, Clone)] pub struct DecodeSettings { /// Whether palette indices should be resolved. /// /// JPEG2000 images can be stored in two different ways. First, by storing /// RGB values (depending on the color space) for each pixel. Secondly, by /// only storing a single index for each channel, and then resolving the /// actual color using the index. 
/// /// If you disable this option, in case you have an image with palette /// indices, they will not be resolved, but instead a grayscale image /// will be returned, with each pixel value corresponding to the palette /// index of the location. pub resolve_palette_indices: bool, /// Whether strict mode should be enabled when decoding. /// /// It is recommended to leave this flag disabled, unless you have a /// specific reason not to. pub strict: bool, /// A hint for the target resolution that the image should be decoded at. pub target_resolution: Option<(u32, u32)>, } impl Default for DecodeSettings { fn default() -> Self { Self { resolve_palette_indices: true, strict: false, target_resolution: None, } } } /// A JPEG2000 image or codestream. pub struct Image<'a> { /// The codestream containing the data to decode. pub(crate) codestream: &'a [u8], /// The header of the J2C codestream. pub(crate) header: Header<'a>, /// The JP2 boxes of the image. In the case of a raw codestream, we /// will synthesize the necessary boxes. pub(crate) boxes: ImageBoxes, /// Settings that should be applied during decoding. pub(crate) settings: DecodeSettings, /// Whether the image has an alpha channel. pub(crate) has_alpha: bool, /// The color space of the image. pub(crate) color_space: ColorSpace, } impl<'a> Image<'a> { /// Try to create a new JPEG2000 image from the given data. pub fn new(data: &'a [u8], settings: &DecodeSettings) -> Result { if data.starts_with(JP2_MAGIC) { jp2::parse(data, *settings) } else if data.starts_with(CODESTREAM_MAGIC) { j2c::parse(data, settings) } else { err!(FormatError::InvalidSignature) } } /// Whether the image has an alpha channel. pub fn has_alpha(&self) -> bool { self.has_alpha } /// The color space of the image. pub fn color_space(&self) -> &ColorSpace { &self.color_space } /// The width of the image. pub fn width(&self) -> u32 { self.header.size_data.image_width() } /// The height of the image. 
pub fn height(&self) -> u32 { self.header.size_data.image_height() } /// The original bit depth of the image. You usually don't need to do anything /// with this parameter, it just exists for informational purposes. pub fn original_bit_depth(&self) -> u8 { // Note that this only works if all components have the same precision. self.header.component_infos[0].size_info.precision } /// Decode the image. pub fn decode(&self) -> Result> { let buffer_size = self.width() as usize * self.height() as usize * (self.color_space.num_channels() as usize + if self.has_alpha { 1 } else { 0 }); let mut buf = vec![0; buffer_size]; self.decode_into(&mut buf)?; Ok(buf) } /// Decode the image into the given buffer. The buffer must have the correct /// size. pub(crate) fn decode_into(&self, buf: &mut [u8]) -> Result<()> { let settings = &self.settings; let mut decoded_image = j2c::decode(self.codestream, &self.header).map(move |data| DecodedImage { decoded: DecodedCodestream { components: data }, boxes: self.boxes.clone(), })?; // Resolve palette indices. if settings.resolve_palette_indices { decoded_image.decoded.components = resolve_palette_indices(decoded_image.decoded.components, &decoded_image.boxes)?; } if let Some(cdef) = &decoded_image.boxes.channel_definition { // Sort by the channel association. Note that this will only work if // each component is referenced only once. let mut components = decoded_image .decoded .components .into_iter() .zip( cdef.channel_definitions .iter() .map(|c| match c._association { ChannelAssociation::WholeImage => u16::MAX, ChannelAssociation::Colour(c) => c, }), ) .collect::>(); components.sort_by(|c1, c2| c1.1.cmp(&c2.1)); decoded_image.decoded.components = components.into_iter().map(|c| c.0).collect(); } // Note that this is only valid if all images have the same bit depth. 
let bit_depth = decoded_image.decoded.components[0].bit_depth; convert_color_space(&mut decoded_image, bit_depth)?; interleave_and_convert(decoded_image, buf); Ok(()) } } pub(crate) fn resolve_alpha_and_color_space( boxes: &ImageBoxes, header: &Header<'_>, settings: &DecodeSettings, ) -> Result<(ColorSpace, bool)> { let mut num_components = header.component_infos.len(); // Override number of components with what is actually in the palette box // in case we resolve them. if settings.resolve_palette_indices && let Some(palette_box) = &boxes.palette { num_components = palette_box.columns.len(); } let mut has_alpha = false; if let Some(cdef) = &boxes.channel_definition { let last = cdef.channel_definitions.last().unwrap(); has_alpha = last.channel_type == ChannelType::Opacity; } let mut color_space = get_color_space(boxes, num_components)?; // If we didn't resolve palette indices, we need to assume grayscale image. if !settings.resolve_palette_indices && boxes.palette.is_some() { has_alpha = false; color_space = ColorSpace::Gray; } let actual_num_components = header.component_infos.len(); // Validate the number of channels. if boxes.palette.is_none() && actual_num_components != (color_space.num_channels() + if has_alpha { 1 } else { 0 }) as usize { if !settings.strict && actual_num_components == color_space.num_channels() as usize + 1 && !has_alpha { // See OPENJPEG test case orb-blue10-lin-j2k. Assume that we have an // alpha channel in this case. has_alpha = true; } else { // Color space is invalid, attempt to repair. if actual_num_components == 1 || (actual_num_components == 2 && has_alpha) { color_space = ColorSpace::Gray; } else if actual_num_components == 3 { color_space = ColorSpace::RGB; } else if actual_num_components == 4 { if has_alpha { color_space = ColorSpace::RGB; } else { color_space = ColorSpace::CMYK; } } else { bail!(ValidationError::TooManyChannels); } } } Ok((color_space, has_alpha)) } /// The color space of the image. 
#[derive(Debug, Clone)] pub enum ColorSpace { /// A grayscale image. Gray, /// An RGB image. RGB, /// A CMYK image. CMYK, /// An unknown color space. Unknown { /// The number of channels of the color space. num_channels: u8, }, /// An image based on an ICC profile. Icc { /// The raw data of the ICC profile. profile: Vec, /// The number of channels used by the ICC profile. num_channels: u8, }, } impl ColorSpace { /// Return the number of expected channels for the color space. pub fn num_channels(&self) -> u8 { match self { Self::Gray => 1, Self::RGB => 3, Self::CMYK => 4, Self::Unknown { num_channels } => *num_channels, Self::Icc { num_channels: num_components, .. } => *num_components, } } } /// A bitmap storing the decoded result of the image. pub struct Bitmap { /// The color space of the image. pub color_space: ColorSpace, /// The raw pixel data of the image. The result will always be in /// 8-bit (in case the original image had a different bit-depth, /// hayro-jpeg2000 always scales to 8-bit). /// /// The size is guaranteed to equal /// `width * height * (num_channels + (if has_alpha { 1 } else { 0 })`. /// Pixels are interleaved on a per-channel basis, the alpha channel always /// appearing as the last channel, if available. pub data: Vec, /// Whether the image has an alpha channel. pub has_alpha: bool, /// The width of the image. pub width: u32, /// The height of the image. pub height: u32, /// The original bit depth of the image. You usually don't need to do anything /// with this parameter, it just exists for informational purposes. 
pub original_bit_depth: u8, } fn interleave_and_convert(image: DecodedImage, buf: &mut [u8]) { let mut components = image.decoded.components; let num_components = components.len(); let mut all_same_bit_depth = Some(components[0].bit_depth); for component in components.iter().skip(1) { if Some(component.bit_depth) != all_same_bit_depth { all_same_bit_depth = None; } } let max_len = components[0].container.truncated().len(); let mut output_iter = buf.iter_mut(); if all_same_bit_depth == Some(8) && num_components <= 4 { // Fast path for the common case. match num_components { // Gray-scale. 1 => { for (output, input) in output_iter.zip( components[0] .container .iter() .map(|v| math::round_f32(*v) as u8), ) { *output = input; } } // Gray-scale with alpha. 2 => { let c1 = components.pop().unwrap(); let c0 = components.pop().unwrap(); let c0 = &c0.container[..max_len]; let c1 = &c1.container[..max_len]; for i in 0..max_len { *output_iter.next().unwrap() = math::round_f32(c0[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c1[i]) as u8; } } // RGB 3 => { let c2 = components.pop().unwrap(); let c1 = components.pop().unwrap(); let c0 = components.pop().unwrap(); let c0 = &c0.container[..max_len]; let c1 = &c1.container[..max_len]; let c2 = &c2.container[..max_len]; for i in 0..max_len { *output_iter.next().unwrap() = math::round_f32(c0[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c1[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c2[i]) as u8; } } // RGBA or CMYK. 
4 => { let c3 = components.pop().unwrap(); let c2 = components.pop().unwrap(); let c1 = components.pop().unwrap(); let c0 = components.pop().unwrap(); let c0 = &c0.container[..max_len]; let c1 = &c1.container[..max_len]; let c2 = &c2.container[..max_len]; let c3 = &c3.container[..max_len]; for i in 0..max_len { *output_iter.next().unwrap() = math::round_f32(c0[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c1[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c2[i]) as u8; *output_iter.next().unwrap() = math::round_f32(c3[i]) as u8; } } _ => unreachable!(), } } else { // Slow path that also requires us to scale to 8 bit. let mul_factor = ((1 << 8) - 1) as f32; for sample in 0..max_len { for channel in components.iter() { *output_iter.next().unwrap() = math::round_f32( (channel.container[sample] / ((1_u32 << channel.bit_depth) - 1) as f32) * mul_factor, ) as u8; } } } } fn convert_color_space(image: &mut DecodedImage, bit_depth: u8) -> Result<()> { if let Some(jp2::colr::ColorSpace::Enumerated(e)) = &image .boxes .color_specification .as_ref() .map(|i| &i.color_space) { match e { EnumeratedColorspace::Sycc => { dispatch!(Level::new(), simd => { sycc_to_rgb(simd, &mut image.decoded.components, bit_depth) })?; } EnumeratedColorspace::CieLab(cielab) => { dispatch!(Level::new(), simd => { cielab_to_rgb(simd, &mut image.decoded.components, bit_depth, cielab) })?; } _ => {} } } Ok(()) } fn get_color_space(boxes: &ImageBoxes, num_components: usize) -> Result { let cs = match boxes .color_specification .as_ref() .map(|c| &c.color_space) .unwrap_or(&jp2::colr::ColorSpace::Unknown) { jp2::colr::ColorSpace::Enumerated(e) => { match e { EnumeratedColorspace::Cmyk => ColorSpace::CMYK, EnumeratedColorspace::Srgb => ColorSpace::RGB, EnumeratedColorspace::RommRgb => { // Use an ICC profile to process the RommRGB color space. 
ColorSpace::Icc { profile: include_bytes!("../assets/ProPhoto-v2-micro.icc").to_vec(), num_channels: 3, } } EnumeratedColorspace::EsRgb => ColorSpace::RGB, EnumeratedColorspace::Greyscale => ColorSpace::Gray, EnumeratedColorspace::Sycc => ColorSpace::RGB, EnumeratedColorspace::CieLab(_) => ColorSpace::Icc { profile: include_bytes!("../assets/LAB.icc").to_vec(), num_channels: 3, }, _ => bail!(FormatError::Unsupported), } } jp2::colr::ColorSpace::Icc(icc) => { if let Some(metadata) = ICCMetadata::from_data(icc) { ColorSpace::Icc { profile: icc.clone(), num_channels: metadata.color_space.num_components(), } } else { // See OPENJPEG test orb-blue10-lin-jp2.jp2. They seem to // assume RGB in this case (even though the image has 4 // components with no opacity channel, they assume RGBA instead // of CMYK). ColorSpace::RGB } } jp2::colr::ColorSpace::Unknown => match num_components { 1 => ColorSpace::Gray, 3 => ColorSpace::RGB, 4 => ColorSpace::CMYK, _ => ColorSpace::Unknown { num_channels: num_components as u8, }, }, }; Ok(cs) } fn resolve_palette_indices( components: Vec, boxes: &ImageBoxes, ) -> Result> { let Some(palette) = boxes.palette.as_ref() else { // Nothing to resolve. 
return Ok(components); }; let mapping = boxes.component_mapping.as_ref().unwrap(); let mut resolved = Vec::with_capacity(mapping.entries.len()); for entry in &mapping.entries { let component_idx = entry.component_index as usize; let component = components .get(component_idx) .ok_or(ColorError::PaletteResolutionFailed)?; match entry.mapping_type { ComponentMappingType::Direct => resolved.push(component.clone()), ComponentMappingType::Palette { column } => { let column_idx = column as usize; let column_info = palette .columns .get(column_idx) .ok_or(ColorError::PaletteResolutionFailed)?; let mut mapped = Vec::with_capacity(component.container.truncated().len() + SIMD_WIDTH); for &sample in component.container.truncated() { let index = math::round_f32(sample) as i64; let value = palette .map(index as usize, column_idx) .ok_or(ColorError::PaletteResolutionFailed)?; mapped.push(value as f32); } resolved.push(ComponentData { container: math::SimdBuffer::new(mapped), bit_depth: column_info.bit_depth, }); } } } Ok(resolved) } #[inline(always)] fn cielab_to_rgb( simd: S, components: &mut [ComponentData], bit_depth: u8, lab: &CieLab, ) -> Result<()> { let (head, _) = components .split_at_mut_checked(3) .ok_or(ColorError::LabConversionFailed)?; let [l, a, b] = head else { unreachable!(); }; let prec0 = l.bit_depth; let prec1 = a.bit_depth; let prec2 = b.bit_depth; // Prevent underflows/divisions by zero further below. if prec0 < 4 || prec1 < 4 || prec2 < 4 { bail!(ColorError::LabConversionFailed); } let rl = lab.rl.unwrap_or(100); let ra = lab.ra.unwrap_or(170); let rb = lab.ra.unwrap_or(200); let ol = lab.ol.unwrap_or(0); let oa = lab.oa.unwrap_or(1 << (bit_depth - 1)); let ob = lab .ob .unwrap_or((1 << (bit_depth - 2)) + (1 << (bit_depth - 3))); // Copied from OpenJPEG. 
let min_l = -(rl as f32 * ol as f32) / ((1 << prec0) - 1) as f32; let max_l = min_l + rl as f32; let min_a = -(ra as f32 * oa as f32) / ((1 << prec1) - 1) as f32; let max_a = min_a + ra as f32; let min_b = -(rb as f32 * ob as f32) / ((1 << prec2) - 1) as f32; let max_b = min_b + rb as f32; let bit_max = (1_u32 << bit_depth) - 1; // Note that we are not doing the actual conversion with the ICC profile yet, // just decoding the raw LAB values. // We leave applying the ICC profile to the user. let divisor_l = ((1 << prec0) - 1) as f32; let divisor_a = ((1 << prec1) - 1) as f32; let divisor_b = ((1 << prec2) - 1) as f32; let scale_l_final = bit_max as f32 / 100.0; let scale_ab_final = bit_max as f32 / 255.0; let l_offset = min_l * scale_l_final; let l_scale = (max_l - min_l) / divisor_l * scale_l_final; let a_offset = (min_a + 128.0) * scale_ab_final; let a_scale = (max_a - min_a) / divisor_a * scale_ab_final; let b_offset = (min_b + 128.0) * scale_ab_final; let b_scale = (max_b - min_b) / divisor_b * scale_ab_final; let l_offset_v = f32x8::splat(simd, l_offset); let l_scale_v = f32x8::splat(simd, l_scale); let a_offset_v = f32x8::splat(simd, a_offset); let a_scale_v = f32x8::splat(simd, a_scale); let b_offset_v = f32x8::splat(simd, b_offset); let b_scale_v = f32x8::splat(simd, b_scale); // Note that we are not doing the actual conversion with the ICC profile yet, // just decoding the raw LAB values. // We leave applying the ICC profile to the user. 
for ((l_chunk, a_chunk), b_chunk) in l .container .chunks_exact_mut(SIMD_WIDTH) .zip(a.container.chunks_exact_mut(SIMD_WIDTH)) .zip(b.container.chunks_exact_mut(SIMD_WIDTH)) { let l_v = f32x8::from_slice(simd, l_chunk); let a_v = f32x8::from_slice(simd, a_chunk); let b_v = f32x8::from_slice(simd, b_chunk); l_v.mul_add(l_scale_v, l_offset_v).store(l_chunk); a_v.mul_add(a_scale_v, a_offset_v).store(a_chunk); b_v.mul_add(b_scale_v, b_offset_v).store(b_chunk); } Ok(()) } #[inline(always)] fn sycc_to_rgb(simd: S, components: &mut [ComponentData], bit_depth: u8) -> Result<()> { let offset = (1_u32 << (bit_depth as u32 - 1)) as f32; let max_value = ((1_u32 << bit_depth as u32) - 1) as f32; let (head, _) = components .split_at_mut_checked(3) .ok_or(ColorError::SyccConversionFailed)?; let [y, cb, cr] = head else { unreachable!(); }; let offset_v = f32x8::splat(simd, offset); let max_v = f32x8::splat(simd, max_value); let zero_v = f32x8::splat(simd, 0.0); let cr_to_r = f32x8::splat(simd, 1.402); let cb_to_g = f32x8::splat(simd, -0.344136); let cr_to_g = f32x8::splat(simd, -0.714136); let cb_to_b = f32x8::splat(simd, 1.772); for ((y_chunk, cb_chunk), cr_chunk) in y .container .chunks_exact_mut(SIMD_WIDTH) .zip(cb.container.chunks_exact_mut(SIMD_WIDTH)) .zip(cr.container.chunks_exact_mut(SIMD_WIDTH)) { let y_v = f32x8::from_slice(simd, y_chunk); let cb_v = f32x8::from_slice(simd, cb_chunk) - offset_v; let cr_v = f32x8::from_slice(simd, cr_chunk) - offset_v; // r = y + 1.402 * cr let r = cr_v.mul_add(cr_to_r, y_v); // g = y - 0.344136 * cb - 0.714136 * cr let g = cr_v.mul_add(cr_to_g, cb_v.mul_add(cb_to_g, y_v)); // b = y + 1.772 * cb let b = cb_v.mul_add(cb_to_b, y_v); r.min(max_v).max(zero_v).store(y_chunk); g.min(max_v).max(zero_v).store(cb_chunk); b.min(max_v).max(zero_v).store(cr_chunk); } Ok(()) } hayro-jpeg2000-0.3.2/src/log.rs000064400000000000000000000014151046102023000141140ustar 00000000000000//! Logging macros that optionally forward to the `log` crate. macro_rules! 
ldebug { ($fmt:literal $(, $($arg:expr),* $(,)?)?) => { #[cfg(feature = "logging")] ::log::debug!($fmt $(, $($arg),*)?); #[cfg(not(feature = "logging"))] { $($(let _ = &$arg;)*)? } }; } macro_rules! ltrace { ($fmt:literal $(, $($arg:expr),* $(,)?)?) => { #[cfg(feature = "logging")] ::log::trace!($fmt $(, $($arg),*)?); #[cfg(not(feature = "logging"))] { $($(let _ = &$arg;)*)? } }; } macro_rules! lwarn { ($fmt:literal $(, $($arg:expr),* $(,)?)?) => { #[cfg(feature = "logging")] ::log::warn!($fmt $(, $($arg),*)?); #[cfg(not(feature = "logging"))] { $($(let _ = &$arg;)*)? } }; } hayro-jpeg2000-0.3.2/src/math.rs000064400000000000000000000347351046102023000142770ustar 00000000000000use alloc::vec; use alloc::vec::Vec; pub(crate) const SIMD_WIDTH: usize = 8; #[cfg(feature = "simd")] mod inner { use super::SIMD_WIDTH; use core::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Sub, SubAssign}; use fearless_simd::{SimdBase, SimdFloat}; pub(crate) use fearless_simd::{Level, Simd, dispatch}; #[derive(Copy, Clone)] #[allow(non_camel_case_types)] #[repr(C, align(32))] pub(crate) struct f32x8 { inner: fearless_simd::f32x8, } impl f32x8 { #[inline(always)] pub(crate) fn from_slice(simd: S, slice: &[f32]) -> Self { Self { inner: fearless_simd::f32x8::from_slice(simd, slice), } } #[inline(always)] pub(crate) fn splat(simd: S, value: f32) -> Self { Self { inner: fearless_simd::f32x8::splat(simd, value), } } #[inline(always)] pub(crate) fn mul_add(self, mul: Self, addend: Self) -> Self { Self { inner: self.inner.madd(mul.inner, addend.inner), } } #[inline(always)] pub(crate) fn floor(self) -> Self { Self { inner: self.inner.floor(), } } #[inline(always)] pub(crate) fn store(self, slice: &mut [f32]) { slice[..SIMD_WIDTH].copy_from_slice(&self.inner.val); } #[inline(always)] pub(crate) fn zip_low(self, other: Self) -> Self { Self { inner: self.inner.zip_low(other.inner), } } #[inline(always)] pub(crate) fn zip_high(self, other: Self) -> Self { Self { inner: 
self.inner.zip_high(other.inner),
        }
    }

    /// Lane-wise minimum of two vectors.
    #[inline(always)]
    pub(crate) fn min(self, other: Self) -> Self {
        Self {
            inner: self.inner.min(other.inner),
        }
    }

    /// Lane-wise maximum of two vectors.
    #[inline(always)]
    pub(crate) fn max(self, other: Self) -> Self {
        Self {
            inner: self.inner.max(other.inner),
        }
    }
}

// Operator impls forwarding to the wrapped SIMD vector.
impl Add for f32x8 {
    type Output = Self;

    #[inline(always)]
    fn add(self, rhs: Self) -> Self {
        Self {
            inner: self.inner + rhs.inner,
        }
    }
}

impl Sub for f32x8 {
    type Output = Self;

    #[inline(always)]
    fn sub(self, rhs: Self) -> Self {
        Self {
            inner: self.inner - rhs.inner,
        }
    }
}

impl Mul for f32x8 {
    type Output = Self;

    #[inline(always)]
    fn mul(self, rhs: Self) -> Self {
        Self {
            inner: self.inner * rhs.inner,
        }
    }
}

// NOTE(review): the generic arguments look stripped from this dump; the two
// scalar-rhs impls below were presumably `impl Add<f32> for f32x8` and
// `impl Mul<f32> for f32x8` — confirm against the repository.
impl Add for f32x8 {
    type Output = Self;

    #[inline(always)]
    fn add(self, rhs: f32) -> Self {
        Self {
            inner: self.inner + rhs,
        }
    }
}

impl Mul for f32x8 {
    type Output = Self;

    #[inline(always)]
    fn mul(self, rhs: f32) -> Self {
        Self {
            inner: self.inner * rhs,
        }
    }
}

impl AddAssign for f32x8 {
    #[inline(always)]
    fn add_assign(&mut self, rhs: Self) {
        self.inner = self.inner + rhs.inner;
    }
}

impl SubAssign for f32x8 {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: Self) {
        self.inner = self.inner - rhs.inner;
    }
}

impl MulAssign for f32x8 {
    #[inline(always)]
    fn mul_assign(&mut self, rhs: f32) {
        self.inner = self.inner * rhs;
    }
}

impl DivAssign for f32x8 {
    #[inline(always)]
    fn div_assign(&mut self, rhs: f32) {
        self.inner = self.inner / rhs;
    }
}
}

// Scalar fallback: mirrors the API of the SIMD-backed `inner` module above,
// implemented with plain per-lane loops so the crate builds without the
// `simd` feature.
#[cfg(not(feature = "simd"))]
mod inner {
    use super::SIMD_WIDTH;
    use core::marker::PhantomData;
    use core::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Sub, SubAssign};

    /// Capability token mirroring the SIMD backend's `Simd` trait.
    pub(crate) trait Simd: Copy + Clone {}

    #[derive(Copy, Clone)]
    pub(crate) struct ScalarSimd;

    impl Simd for ScalarSimd {}

    /// Stand-in for the SIMD backend's CPU-feature detection level.
    pub(crate) struct Level;

    impl Level {
        #[inline(always)]
        pub(crate) fn new() -> Self {
            Level
        }
    }

    /// Eight `f32` lanes in a plain array, 32-byte aligned to match the
    /// layout of the real SIMD register type.
    // NOTE(review): `PhantomData`'s type parameter appears stripped from this
    // dump — confirm against the repository.
    #[derive(Copy, Clone)]
    #[allow(non_camel_case_types)]
    #[repr(C, align(32))]
    pub(crate) struct f32x8 {
        val: [f32; SIMD_WIDTH],
        _marker: PhantomData,
    }

    impl f32x8 {
        /// Loads the first `SIMD_WIDTH` elements of `slice`.
        /// Panics if `slice` is shorter than `SIMD_WIDTH`.
        #[inline(always)]
        pub(crate) fn from_slice(_simd: S, slice: &[f32]) -> Self {
            let mut val = [0.0f32; SIMD_WIDTH];
            val.copy_from_slice(&slice[..SIMD_WIDTH]);
            Self {
                val,
                _marker: PhantomData,
            }
        }

        /// Broadcasts `value` into every lane.
        #[inline(always)]
        pub(crate) fn splat(_simd: S, value: f32) -> Self {
            Self {
                val: [value; SIMD_WIDTH],
                _marker: PhantomData,
            }
        }

        /// Lane-wise multiply-add: `self * mul + addend` (via the polyfill below).
        #[inline(always)]
        pub(crate) fn mul_add(self, mul: Self, addend: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = super::mul_add(self.val[i], mul.val[i], addend.val[i]);
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }

        /// Lane-wise floor.
        #[inline(always)]
        pub(crate) fn floor(self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = super::floor_f32(self.val[i]);
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }

        /// Writes all lanes into the first `SIMD_WIDTH` elements of `slice`.
        #[inline(always)]
        pub(crate) fn store(self, slice: &mut [f32]) {
            slice[..SIMD_WIDTH].copy_from_slice(&self.val);
        }

        /// Interleaves the low halves of `self` and `other`.
        #[inline(always)]
        pub(crate) fn zip_low(self, other: Self) -> Self {
            Self {
                val: [
                    self.val[0], other.val[0], self.val[1], other.val[1],
                    self.val[2], other.val[2], self.val[3], other.val[3],
                ],
                _marker: PhantomData,
            }
        }

        /// Interleaves the high halves of `self` and `other`.
        #[inline(always)]
        pub(crate) fn zip_high(self, other: Self) -> Self {
            Self {
                val: [
                    self.val[4], other.val[4], self.val[5], other.val[5],
                    self.val[6], other.val[6], self.val[7], other.val[7],
                ],
                _marker: PhantomData,
            }
        }

        /// Lane-wise minimum (via the polyfill below).
        #[inline(always)]
        pub(crate) fn min(self, other: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = super::min_f32(self.val[i], other.val[i]);
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }

        /// Lane-wise maximum (via the polyfill below).
        #[inline(always)]
        pub(crate) fn max(self, other: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = super::max_f32(self.val[i], other.val[i]);
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    impl Add for f32x8 {
        type Output = Self;

        #[inline(always)]
        fn add(self, rhs: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = self.val[i] + rhs.val[i];
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    impl Sub for f32x8 {
        type Output = Self;

        #[inline(always)]
        fn sub(self, rhs: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = self.val[i] - rhs.val[i];
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    impl Mul for f32x8 {
        type Output = Self;

        #[inline(always)]
        fn mul(self, rhs: Self) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = self.val[i] * rhs.val[i];
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    // NOTE(review): scalar-rhs impls; generics presumably stripped (see above).
    impl Add for f32x8 {
        type Output = Self;

        #[inline(always)]
        fn add(self, rhs: f32) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = self.val[i] + rhs;
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    impl Mul for f32x8 {
        type Output = Self;

        #[inline(always)]
        fn mul(self, rhs: f32) -> Self {
            let mut result = [0.0f32; SIMD_WIDTH];
            for i in 0..SIMD_WIDTH {
                result[i] = self.val[i] * rhs;
            }
            Self {
                val: result,
                _marker: PhantomData,
            }
        }
    }

    impl AddAssign for f32x8 {
        #[inline(always)]
        fn add_assign(&mut self, rhs: Self) {
            for i in 0..SIMD_WIDTH {
                self.val[i] += rhs.val[i];
            }
        }
    }

    impl SubAssign for f32x8 {
        #[inline(always)]
        fn sub_assign(&mut self, rhs: Self) {
            for i in 0..SIMD_WIDTH {
                self.val[i] -= rhs.val[i];
            }
        }
    }

    impl MulAssign for f32x8 {
        #[inline(always)]
        fn mul_assign(&mut self, rhs: f32) {
            for i in 0..SIMD_WIDTH {
                self.val[i] *= rhs;
            }
        }
    }

    impl DivAssign for f32x8 {
        #[inline(always)]
        fn div_assign(&mut self, rhs: f32) {
            for i in 0..SIMD_WIDTH {
                self.val[i] /= rhs;
            }
        }
    }

    /// Scalar fallback for SIMD dispatch.
    #[macro_export]
    macro_rules! simd_dispatch {
        ($level:expr, $simd:ident => $body:expr) => {{
            let _ = $level;
            let $simd = $crate::math::ScalarSimd;
            $body
        }};
    }

    pub(crate) use simd_dispatch as dispatch;
}

// Note that these polyfills can be very imprecise, but hopefully good enough
// for the vast majority of cases.
#[inline(always)] pub(crate) fn mul_add(a: f32, b: f32, c: f32) -> f32 { #[cfg(all( feature = "std", any( all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "fma" ), all(target_arch = "aarch64", target_feature = "neon") ) ))] { f32::mul_add(a, b, c) } #[cfg(not(all( feature = "std", any( all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "fma" ), all(target_arch = "aarch64", target_feature = "neon") ) )))] { a * b + c } } #[inline(always)] pub(crate) fn floor_f32(x: f32) -> f32 { #[cfg(feature = "std")] { x.floor() } #[cfg(not(feature = "std"))] { let xi = x as i32; let xf = xi as f32; if x < xf { xf - 1.0 } else { xf } } } #[inline(always)] pub(crate) fn round_f32(x: f32) -> f32 { #[cfg(feature = "std")] { x.round() } #[cfg(not(feature = "std"))] { if x >= 0.0 { floor_f32(x + 0.5) } else { -floor_f32(-x + 0.5) } } } #[inline(always)] pub(crate) fn pow2i(exp: i32) -> f32 { if exp >= 0 { (1_u32 << exp) as f32 } else { 1.0 / (1_u32 << -exp) as f32 } } #[inline(always)] #[cfg_attr(feature = "simd", allow(dead_code))] pub(crate) fn min_f32(a: f32, b: f32) -> f32 { #[cfg(feature = "std")] { a.min(b) } #[cfg(not(feature = "std"))] { if a < b { a } else { b } } } #[inline(always)] #[cfg_attr(feature = "simd", allow(dead_code))] pub(crate) fn max_f32(a: f32, b: f32) -> f32 { #[cfg(feature = "std")] { a.max(b) } #[cfg(not(feature = "std"))] { if a > b { a } else { b } } } pub(crate) use inner::*; /// A wrapper around `Vec` that pads the vector to a multiple of `N` elements. /// This allows SIMD operations to safely process the data without bounds checking /// at the end of the buffer. #[derive(Debug, Clone)] pub(crate) struct SimdBuffer { data: Vec, original_len: usize, } impl SimdBuffer { /// Create a new `SimdBuffer` from a `Vec`, padding it to a multiple of `N`. 
pub(crate) fn new(mut data: Vec) -> Self {
        let original_len = data.len();
        let padded_len = Self::padded_len(original_len);
        if padded_len > original_len {
            // Zero-fill up to the padded length so SIMD chunks can touch the
            // tail without bounds checks.
            data.resize(padded_len, 0.0);
        }
        Self { data, original_len }
    }

    /// Create a new `SimdBuffer` filled with zeros.
    pub(crate) fn zeros(original_len: usize) -> Self {
        let padded_len = Self::padded_len(original_len);
        let data = vec![0.0; padded_len];
        Self { data, original_len }
    }

    /// Returns only the original (non-padded) data as an immutable slice.
    pub(crate) fn truncated(&self) -> &[f32] {
        &self.data[..self.original_len]
    }

    /// Returns the length padded to a multiple of `N`.
    ///
    /// NOTE(review): when `original_len` is already a multiple of `N`, a full
    /// extra chunk of `N` elements is still added — presumably intentional so
    /// padding is always present; confirm before "simplifying".
    fn padded_len(original_len: usize) -> usize {
        let remainder = original_len % N;
        let padding = N - remainder;
        original_len + padding
    }
}

impl core::ops::Deref for SimdBuffer {
    type Target = [f32];

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Dereferences to the *padded* slice; use `truncated()` for the
        // logical contents.
        &self.data
    }
}

impl core::ops::DerefMut for SimdBuffer {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.data
    }
}
hayro-jpeg2000-0.3.2/src/reader.rs000064400000000000000000000105531046102023000146000ustar 00000000000000
//! Combined byte and bit reader utilities.

use crate::error::{MarkerError, Result, bail};
use core::fmt::Debug;

/// A big-endian byte/bit cursor over a borrowed buffer.
///
/// `cur_pos` counts *bits* from the start of `data`; the byte-oriented
/// methods debug-assert that the cursor is byte-aligned.
#[derive(Debug, Clone)]
pub(crate) struct BitReader<'a> {
    data: &'a [u8],
    cur_pos: usize,
}

impl<'a> BitReader<'a> {
    #[inline]
    pub(crate) fn new(data: &'a [u8]) -> Self {
        Self { data, cur_pos: 0 }
    }

    /// Advances the cursor to the next byte boundary (no-op if aligned).
    #[inline]
    pub(crate) fn align(&mut self) {
        let bit_pos = self.bit_pos();
        if !bit_pos.is_multiple_of(8) {
            self.cur_pos += 8 - bit_pos;
        }
    }

    #[inline]
    pub(crate) fn at_end(&self) -> bool {
        self.byte_pos() >= self.data.len()
    }

    #[inline]
    pub(crate) fn jump_to_end(&mut self) {
        self.cur_pos = self.data.len() * 8;
    }

    /// Remaining bytes from the current (byte) position to the end.
    #[inline]
    pub(crate) fn tail(&self) -> Option<&'a [u8]> {
        self.data.get(self.byte_pos()..)
    }

    /// Current byte offset from the start of the buffer.
    #[inline]
    pub(crate) fn offset(&self) -> usize {
        self.byte_pos()
    }

    /// Reads `len` bytes; requires a byte-aligned cursor (debug-asserted).
    // NOTE(review): several `Option`/`Result` payload types in this impl look
    // stripped from this dump (e.g. `Option<u8>`) — restore from the repository.
    #[inline]
    pub(crate) fn read_bytes(&mut self, len: usize) -> Option<&'a [u8]> {
        debug_assert_eq!(self.bit_pos(), 0);
        let bytes = self.peek_bytes(len)?;
        self.cur_pos += len * 8;
        Some(bytes)
    }

    #[inline]
    pub(crate) fn read_byte(&mut self) -> Option {
        debug_assert_eq!(self.bit_pos(), 0);
        let byte = self.peek_byte()?;
        self.cur_pos += 8;
        Some(byte)
    }

    #[inline]
    pub(crate) fn skip_bytes(&mut self, len: usize) -> Option<()> {
        self.read_bytes(len).map(|_| ())
    }

    /// Looks ahead `len` bytes without advancing; `None` past the end.
    #[inline]
    pub(crate) fn peek_bytes(&self, len: usize) -> Option<&'a [u8]> {
        let start = self.byte_pos();
        let end = start.checked_add(len)?;
        self.data.get(start..end)
    }

    #[inline]
    pub(crate) fn peek_byte(&self) -> Option {
        self.data.get(self.byte_pos()).copied()
    }

    /// Reads a big-endian `u16`.
    #[inline]
    pub(crate) fn read_u16(&mut self) -> Option {
        let bytes = self.read_bytes(2)?;
        Some(u16::from_be_bytes([bytes[0], bytes[1]]))
    }

    /// Reads a big-endian `u32`.
    #[inline]
    pub(crate) fn read_u32(&mut self) -> Option {
        let bytes = self.read_bytes(4)?;
        Some(u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]))
    }

    /// Reads a big-endian `u64`.
    #[inline]
    pub(crate) fn read_u64(&mut self) -> Option {
        let bytes = self.read_bytes(8)?;
        Some(u64::from_be_bytes([
            bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
        ]))
    }

    /// Reads a single bit, MSB first.
    #[inline(always)]
    pub(crate) fn read_bit(&mut self) -> Option {
        let byte_pos = self.byte_pos();
        let byte = *self.data.get(byte_pos)? as u32;
        let shift = 7 - self.bit_pos();
        self.cur_pos += 1;
        Some((byte >> shift) & 1)
    }

    #[inline]
    pub(crate) fn byte_pos(&self) -> usize {
        self.cur_pos / 8
    }

    /// Bit offset (0..8) within the current byte.
    #[inline]
    pub(crate) fn bit_pos(&self) -> usize {
        self.cur_pos % 8
    }

    /// Like the normal `read_bits` method, but accounts for stuffing bits
    /// in addition.
#[inline] pub(crate) fn read_bits_with_stuffing(&mut self, bit_size: u8) -> Option { let mut bit = 0; for _ in 0..bit_size { let needs_stuff_bit = self.needs_to_read_stuff_bit(); bit = (bit << 1) | self.read_bit()?; if needs_stuff_bit { self.read_stuff_bit()?; } } Some(bit) } pub(crate) fn needs_to_read_stuff_bit(&mut self) -> bool { // B.10.1: "If the value of the byte is 0xFF, the next byte includes an extra zero bit // stuffed into the MSB." self.bit_pos() == 7 && self.data[self.byte_pos()] == 0xff } #[inline] pub(crate) fn read_stuff_bit(&mut self) -> Option<()> { let stuff_bit = self.read_bit()?; if stuff_bit != 0 { return None; } Some(()) } #[inline] pub(crate) fn peak_bits_with_stuffing(&mut self, bit_size: u8) -> Option { self.clone().read_bits_with_stuffing(bit_size) } #[inline] pub(crate) fn read_marker(&mut self) -> Result { if self.peek_byte().ok_or(MarkerError::Invalid)? != 0xFF { bail!(MarkerError::Invalid); } self.read_byte().unwrap(); Ok(self.read_byte().ok_or(MarkerError::Invalid)?) 
} #[inline] pub(crate) fn peek_marker(&mut self) -> Option { self.clone().read_marker().ok() } } hayro-jpeg2000-0.3.2/sync.py000075500000000000000000000056631046102023000135400ustar 00000000000000#!/usr/bin/env python3 import argparse import json from pathlib import Path from urllib.error import HTTPError, URLError from urllib.request import Request, urlopen SCRIPT_DIR = Path(__file__).resolve().parent TEST_INPUTS_DIR = SCRIPT_DIR / "test-inputs" REMOTE_BASE = "https://hayro-assets.dev/jpeg2000" MANIFESTS = [ ("serenity", SCRIPT_DIR / "manifest_serenity.json"), ("openjpeg", SCRIPT_DIR / "manifest_openjpeg.json"), ("custom", SCRIPT_DIR / "manifest_custom.json"), ] def load_manifest(path: Path) -> list[dict]: if not path.exists(): return [] raw_entries = json.loads(path.read_text()) entries: list[dict] = [] for item in raw_entries: if isinstance(item, str): entries.append({"id": item, "path": item, "render": True}) continue entry = dict(item) entry.setdefault("render", True) entry.setdefault( "path", entry.get("path") or entry.get("file") or entry.get("id") ) entries.append(entry) return entries def download_file(namespace: str, asset_path: str, *, force: bool) -> tuple[bool, str]: target_dir = TEST_INPUTS_DIR / namespace target_dir.mkdir(parents=True, exist_ok=True) destination = target_dir / asset_path was_cached = destination.exists() if was_cached and not force: return True, "cached" url = f"{REMOTE_BASE}/{namespace}/{asset_path}" request = Request(url, headers={"User-Agent": "hayro-jpeg2000-sync/1.0"}) try: with urlopen(request, timeout=60) as response: data = response.read() except (HTTPError, URLError) as exc: return False, str(exc) temp_path = destination.with_suffix(destination.suffix + ".tmp") temp_path.write_bytes(data) temp_path.replace(destination) if was_cached: return True, "updated" return True, "downloaded" def main() -> None: parser = argparse.ArgumentParser(description="Download jpeg2000 test inputs") parser.add_argument( "--force", 
action="store_true", help="redownload files even if cached" ) args = parser.parse_args() TEST_INPUTS_DIR.mkdir(exist_ok=True) failures: list[tuple[str, str]] = [] total = 0 for namespace, manifest_path in MANIFESTS: entries = load_manifest(manifest_path) for entry in entries: total += 1 asset_path = entry["path"] label = f"{namespace}/{asset_path}" success, status = download_file(namespace, asset_path, force=args.force) print(f"[{status}] {label}") if not success: failures.append((label, status)) if failures: print("\nFailed downloads:") for label, message in failures: print(f"- {label}: {message}") else: if total: print("\nAll test inputs are ready.") else: print("No manifest entries were found.") if __name__ == "__main__": main() hayro-jpeg2000-0.3.2/tests/mod.rs000064400000000000000000000324011046102023000144640ustar 00000000000000#![allow(missing_docs)] use hayro_jpeg2000::{DecodeSettings, Image}; use image::{ColorType, DynamicImage, ImageBuffer, ImageDecoder, ImageFormat, Rgba, RgbaImage}; use indicatif::{ProgressBar, ProgressStyle}; use rayon::prelude::*; use serde::Deserialize; use std::any::Any; use std::cmp::max; use std::fs; use std::panic::{AssertUnwindSafe, PanicHookInfo, catch_unwind}; use std::path::{Path, PathBuf}; use std::sync::LazyLock; use std::time::{Duration, Instant}; const REPLACE: Option<&str> = option_env!("REPLACE"); static WORKSPACE_PATH: LazyLock = LazyLock::new(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("")); static SNAPSHOTS_PATH: LazyLock = LazyLock::new(|| WORKSPACE_PATH.join("snapshots")); static TEST_INPUTS_PATH: LazyLock = LazyLock::new(|| WORKSPACE_PATH.join("test-inputs")); const INPUT_MANIFESTS: &[(&str, &str)] = &[ ("serenity", "manifest_serenity.json"), ("openjpeg", "manifest_openjpeg.json"), ("custom", "manifest_custom.json"), ]; static DIFFS_PATH: LazyLock = LazyLock::new(|| { let path = WORKSPACE_PATH.join("diffs"); let _ = fs::remove_dir_all(&path); let _ = fs::create_dir_all(&path); path }); struct TestReport { 
name: String,
    duration: Duration,
    outcome: Result<(), String>,
}

fn main() {
    // Install a silent panic hook for the whole run; decoder panics are
    // caught per-asset and reported by the harness itself.
    let _panic_hook_guard = PanicHookGuard::install();

    if !run_harness() {
        std::process::exit(1);
    }
}

/// Runs every manifest asset in parallel; returns `true` when all passed.
fn run_harness() -> bool {
    let asset_files = match collect_asset_files() {
        Ok(files) => files,
        Err(err) => {
            eprintln!("Failed to read asset directory: {err}");
            return false;
        }
    };

    if asset_files.is_empty() {
        eprintln!("No test inputs were found. Run `python sync.py` to download them.");
        return false;
    }

    let progress_bar = ProgressBar::new(asset_files.len() as u64);
    progress_bar.set_style(
        ProgressStyle::with_template(
            "{spinner} {pos}/{len} [{elapsed_precise}] [{wide_bar}] {msg}",
        )
        .unwrap()
        .progress_chars("=>-"),
    );

    // NOTE(review): `Vec`'s element type appears stripped from this dump
    // (presumably `Vec<TestReport>`) — confirm against the repository.
    let reports: Vec = asset_files
        .par_iter()
        .map(|asset| {
            let name = asset.display_name.clone();
            progress_bar.set_message(name.clone());
            let start = Instant::now();
            // Convert panics into ordinary failures so one bad asset cannot
            // abort the whole run.
            let outcome = catch_unwind(AssertUnwindSafe(|| run_asset_test(asset))).unwrap_or_else(
                |payload| {
                    let panic_msg = describe_panic(payload.as_ref());
                    Err(format!("panic: {panic_msg}"))
                },
            );
            progress_bar.inc(1);

            TestReport {
                name,
                duration: start.elapsed(),
                outcome,
            }
        })
        .collect();

    progress_bar.finish_with_message("asset tests complete");

    println!("\nDetailed results:");
    for report in &reports {
        match &report.outcome {
            Ok(_) => println!("[PASS] {:<60} ({:.2?})", report.name, report.duration),
            Err(err) => {
                println!("[FAIL] {:<60} ({:.2?})", report.name, report.duration);
                println!(" {err}");
            }
        }
    }

    let failures: Vec<_> = reports
        .iter()
        .filter_map(|report| report.outcome.as_ref().err().map(|err| (&report.name, err)))
        .collect();

    if failures.is_empty() {
        true
    } else {
        println!(
            "\n{} of {} asset tests failed:",
            failures.len(),
            reports.len()
        );
        for (name, err) in failures {
            println!(" - {name}: {err}");
        }
        false
    }
}

/// Best-effort extraction of a human-readable message from a panic payload.
fn describe_panic(payload: &(dyn Any + Send)) -> String {
    // NOTE(review): the first turbofish type appears stripped from this dump
    // (presumably `downcast_ref::<String>()`) — confirm against the repository.
    if let Some(msg) = payload.downcast_ref::() {
        msg.clone()
    } else if let Some(msg) = payload.downcast_ref::<&'static str>() {
        (*msg).to_owned()
    } else {
        "unknown panic payload".to_owned()
    }
}

/// RAII guard that silences the default panic hook and restores the previous
/// hook on drop.
// NOTE(review): the field's generic arguments appear stripped from this dump
// (presumably `Option<Box<dyn Fn(&PanicHookInfo<'_>) + Sync + Send + 'static>>`).
#[allow(clippy::type_complexity)]
struct PanicHookGuard(Option) + Sync + Send + 'static>>);

impl PanicHookGuard {
    fn install() -> Self {
        let previous = std::panic::take_hook();
        std::panic::set_hook(Box::new(|_| {
            // Swallow default panic output; harness reports failures explicitly.
        }));
        Self(Some(previous))
    }
}

impl Drop for PanicHookGuard {
    fn drop(&mut self) {
        if let Some(previous) = self.0.take() {
            std::panic::set_hook(previous);
        }
    }
}

/// One manifest entry: either a bare string id or a detailed object.
#[derive(Deserialize)]
#[serde(untagged)]
enum ManifestItem {
    Simple(String),
    Detailed(ManifestEntry),
}

#[derive(Deserialize)]
struct ManifestEntry {
    /// Human-readable test name (used for display and snapshots).
    id: String,
    /// Path to the actual asset file under the namespace.
    #[serde(default, alias = "file")]
    path: String,
    /// When false the asset is decode-only (no snapshot comparison).
    #[serde(default = "default_render")]
    render: bool,
    // Optional per-asset decode-setting overrides; `None` falls back to
    // `DecodeSettings::default()` in `into_asset`.
    #[serde(default)]
    strict: Option,
    #[serde(default)]
    resolve_palette_indices: Option,
    #[serde(default)]
    target_resolution: Option<(u32, u32)>,
}

/// A fully-resolved test case.
struct AssetEntry {
    input_relative_path: PathBuf,
    snapshot_stem: PathBuf,
    display_name: String,
    render: bool,
    decode_settings: DecodeSettings,
}

impl AssetEntry {
    fn new(
        namespace: &str,
        id: String,
        path: String,
        render: bool,
        decode_settings: DecodeSettings,
    ) -> Self {
        let display_name = format!("{namespace}/{id}");
        let input_relative_path = Path::new(namespace).join(path);
        let snapshot_stem = Path::new(namespace).join(id);

        Self {
            input_relative_path,
            snapshot_stem,
            display_name,
            render,
            decode_settings,
        }
    }
}

impl ManifestItem {
    /// Resolves a manifest item into an `AssetEntry`, filling unspecified
    /// decode settings from `DecodeSettings::default()`.
    fn into_asset(self, namespace: &str) -> AssetEntry {
        let default_settings = DecodeSettings::default();

        match self {
            Self::Simple(id) => AssetEntry::new(namespace, id.clone(), id, true, default_settings),
            Self::Detailed(entry) => {
                let decode_settings = DecodeSettings {
                    resolve_palette_indices: entry
                        .resolve_palette_indices
                        .unwrap_or(default_settings.resolve_palette_indices),
                    strict: entry.strict.unwrap_or(default_settings.strict),
                    target_resolution: entry
                        .target_resolution
                        .or(default_settings.target_resolution),
                };

                AssetEntry::new(
                    namespace,
                    entry.id,
                    entry.path,
                    entry.render,
                    decode_settings,
                )
            }
        }
    }
}

fn default_render() -> bool {
    true
}

/// Loads every manifest and verifies the referenced inputs exist on disk.
// NOTE(review): generic arguments appear stripped here and below (presumably
// `Result<Vec<AssetEntry>, String>` / `Vec<ManifestItem>`) — confirm against
// the repository.
fn collect_asset_files() -> Result, String> {
    let mut files = vec![];

    for (namespace, manifest_rel_path) in INPUT_MANIFESTS {
        let manifest_path = WORKSPACE_PATH.join(manifest_rel_path);
        let content = fs::read_to_string(&manifest_path)
            .map_err(|err| format!("failed to read manifest {}: {err}", manifest_path.display()))?;
        let entries: Vec = serde_json::from_str(&content).map_err(|err| {
            format!(
                "failed to parse manifest {}: {err}",
                manifest_path.display()
            )
        })?;

        for entry in entries {
            let asset_entry = entry.into_asset(namespace);
            let absolute_path = TEST_INPUTS_PATH.join(&asset_entry.input_relative_path);
            if !absolute_path.exists() {
                return Err(format!(
                    "missing test input {} (expected at {})",
                    asset_entry.display_name,
                    absolute_path.display()
                ));
            }
            files.push(asset_entry);
        }
    }

    // Deterministic ordering regardless of manifest order.
    files.sort_by(|a, b| a.display_name.cmp(&b.display_name));

    Ok(files)
}

/// Decodes one asset and compares it against its PNG snapshot.
fn run_asset_test(asset: &AssetEntry) -> Result<(), String> {
    let asset_path = TEST_INPUTS_PATH.join(&asset.input_relative_path);
    let asset_name = &asset.display_name;
    let data =
        fs::read(&asset_path).map_err(|err| format!("failed to read {}: {err}", asset_name))?;
    let image = Image::new(&data, &asset.decode_settings);

    if !asset.render {
        // Crash-only test: just execute the decoder to ensure it handles the file.
        let _ = image.and_then(|i| i.decode());
        return Ok(());
    }

    let image = image.unwrap();
    let color_type = image.color_type();
    let width = image.width();
    let height = image.height();
    let mut buf = vec![0_u8; image.total_bytes() as usize];
    image.read_image(&mut buf).unwrap();

    // Normalize every supported color type to RGBA for snapshot comparison.
    let rgba = match color_type {
        ColorType::L8 => {
            DynamicImage::ImageLuma8(ImageBuffer::from_raw(width, height, buf).unwrap())
        }
        ColorType::La8 => {
            DynamicImage::ImageLumaA8(ImageBuffer::from_raw(width, height, buf).unwrap())
        }
        ColorType::Rgb8 => {
            DynamicImage::ImageRgb8(ImageBuffer::from_raw(width, height, buf).unwrap())
        }
        ColorType::Rgba8 => {
            DynamicImage::ImageRgba8(ImageBuffer::from_raw(width, height, buf).unwrap())
        }
        _ => unimplemented!(),
    }
    .into_rgba8();

    let reference_path = asset.snapshot_stem.with_extension("png");
    let snapshot_path = SNAPSHOTS_PATH.join(&reference_path);

    if let Some(parent) = snapshot_path.parent() {
        fs::create_dir_all(parent)
            .map_err(|err| format!("failed to create snapshot directory: {err}"))?;
    }

    if !snapshot_path.exists() {
        // First run for this asset: write the reference but still fail so the
        // new snapshot gets reviewed before being trusted.
        rgba.save_with_format(&snapshot_path, ImageFormat::Png)
            .map_err(|err| format!("failed to save snapshot for {}: {err}", asset_name))?;

        return Err(format!(
            "new reference image was created for {}",
            asset_name
        ));
    }

    let expected = image::open(&snapshot_path)
        .map_err(|err| format!("failed to load snapshot for {}: {err}", asset_name))?
        .into_rgba8();

    let (diff_image, pixel_diff) = get_diff(&expected, &rgba);

    if pixel_diff > 0 {
        let diff_path = DIFFS_PATH.join(&reference_path);
        if let Some(parent) = diff_path.parent() {
            fs::create_dir_all(parent)
                .map_err(|err| format!("failed to create diff directory: {err}"))?;
        }
        diff_image
            .save_with_format(&diff_path, ImageFormat::Png)
            .map_err(|err| format!("failed to save diff for {}: {err}", asset_name))?;

        if REPLACE.is_some() {
            rgba.save_with_format(&snapshot_path, ImageFormat::Png)
                .map_err(|err| format!("failed to replace snapshot for {}: {err}", asset_name))?;
            return Err(format!("snapshot was replaced for {}", asset_name));
        }

        return Err(format!(
            "pixel diff {} detected for {}",
            pixel_diff, asset_name
        ));
    }

    Ok(())
}

/// Builds a side-by-side image `[expected | per-pixel diff | actual]` sized to
/// the larger of the two inputs, and counts the differing pixels.
fn get_diff(expected_image: &RgbaImage, actual_image: &RgbaImage) -> (RgbaImage, u32) {
    let width = max(expected_image.width(), actual_image.width());
    let height = max(expected_image.height(), actual_image.height());

    let mut diff_image = RgbaImage::new(width * 3, height);
    let mut pixel_diff = 0;

    for x in 0..width {
        for y in 0..height {
            let actual_pixel = get_pixel_checked(actual_image, x, y);
            let expected_pixel = get_pixel_checked(expected_image, x, y);

            match (actual_pixel, expected_pixel) {
                (Some(actual), Some(expected)) => {
                    diff_image.put_pixel(x, y, expected);
                    diff_image.put_pixel(x + width, y, diff_pixel(expected, actual));
                    diff_image.put_pixel(x + 2 * width, y, actual);

                    if is_pixel_different(expected, actual) {
                        pixel_diff += 1;
                    }
                }
                // Size mismatch: pixels present on only one side always count
                // as different and are marked red in the middle pane.
                (Some(actual), None) => {
                    pixel_diff += 1;
                    diff_image.put_pixel(x + width, y, Rgba([255, 0, 0, 255]));
                    diff_image.put_pixel(x + 2 * width, y, actual);
                }
                (None, Some(expected)) => {
                    pixel_diff += 1;
                    diff_image.put_pixel(x, y, expected);
                    diff_image.put_pixel(x + width, y, Rgba([255, 0, 0, 255]));
                }
                (None, None) => {}
            }
        }
    }

    (diff_image, pixel_diff)
}

/// `None` when `(x, y)` lies outside `image`.
// NOTE(review): `Rgba`'s pixel type appears stripped here and below
// (presumably `Rgba<u8>`) — confirm against the repository.
fn get_pixel_checked(image: &RgbaImage, x: u32, y: u32) -> Option> {
    if x < image.width() && y < image.height() {
        Some(*image.get_pixel(x, y))
    } else {
        None
    }
}

/// Red marker for differing pixels, black otherwise.
fn diff_pixel(expected: Rgba, actual: Rgba) -> Rgba {
    if is_pixel_different(expected, actual) {
        Rgba([255, 0, 0, 255])
    } else {
        Rgba([0, 0, 0, 255])
    }
}

fn is_pixel_different(lhs: Rgba, rhs: Rgba) -> bool {
    // One test fails in CI because of a small difference, so we don't check
    // for exact pixel match
    const THRESHOLD: u8 = 1;

    // Fully transparent pixels compare equal regardless of their RGB values.
    if lhs[3] == 0 && rhs[3] == 0 {
        return false;
    }

    lhs[0].abs_diff(rhs[0]) > THRESHOLD
        || lhs[1].abs_diff(rhs[1]) > THRESHOLD
        || lhs[2].abs_diff(rhs[2]) > THRESHOLD
        || lhs[3].abs_diff(rhs[3]) > THRESHOLD
}